Dataset schema (one row per column; ⌀ marks columns that contain nulls):

| Column | Dtype | Values / range | ⌀ |
|---|---|---|---|
| hexsha | stringlengths | 40–40 | |
| size | int64 | 3–1.03M | |
| ext | stringclasses | 10 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 3–972 | |
| max_stars_repo_name | stringlengths | 6–130 | |
| max_stars_repo_head_hexsha | stringlengths | 40–78 | |
| max_stars_repo_licenses | sequencelengths | 1–10 | |
| max_stars_count | int64 | 1–191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 | ⌀ |
| max_issues_repo_path | stringlengths | 3–972 | |
| max_issues_repo_name | stringlengths | 6–130 | |
| max_issues_repo_head_hexsha | stringlengths | 40–78 | |
| max_issues_repo_licenses | sequencelengths | 1–10 | |
| max_issues_count | int64 | 1–116k | ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 | ⌀ |
| max_forks_repo_path | stringlengths | 3–972 | |
| max_forks_repo_name | stringlengths | 6–130 | |
| max_forks_repo_head_hexsha | stringlengths | 40–78 | |
| max_forks_repo_licenses | sequencelengths | 1–10 | |
| max_forks_count | int64 | 1–105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 | ⌀ |
| content | stringlengths | 3–1.03M | |
| avg_line_length | float64 | 1.13–941k | |
| max_line_length | int64 | 2–941k | |
| alphanum_fraction | float64 | 0–1 | |
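Each record that follows is one row of this table: the `content` column holds the full source file, and the remaining columns carry repository metadata. As a minimal sketch of how a dataset with this schema can be inspected, and assuming a Hugging Face–style dataset (the identifier `user/dataset-name` below is a placeholder, not the real one), the rows could be streamed with the `datasets` library:

```python
# Minimal sketch, not tied to a specific dataset: "user/dataset-name" is a
# placeholder identifier and must be replaced with the actual dataset path.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)

for row in islice(ds, 3):
    # Repository metadata columns accompany every source file.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    # The "content" column holds the raw file text; show the first 200 characters.
    print(row["content"][:200])
```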
bd06c839ad9fb686e29fc427b87e7ada487f3903 | 821 | py | Python | src/main.py | frgfm/poker_buddy | fae034957bc02acaf7f635f8ac078554f31104a4 | ["MIT"] | 1 | 2020-10-01T08:11:21.000Z | 2020-10-01T08:11:21.000Z | src/main.py | frgfm/poker-buddy | fae034957bc02acaf7f635f8ac078554f31104a4 | ["MIT"] | null | null | null | src/main.py | frgfm/poker-buddy | fae034957bc02acaf7f635f8ac078554f31104a4 | ["MIT"] | null | null | null |
#!/usr/bin/env python
'''
These objects and functions are part of a larger poker assistant project.
Content of this script enable the user to simulate game of Texas Holdem Poker.
'''
__author__ = 'François-Guillaume Fernandez'
__license__ = 'MIT License'
__version__ = '0.1'
__maintainer__ = 'François-Guillaume Fernandez'
__status__ = 'Development'
from game import Player, Game, get_hand_value, get_winners
def main():
players = [Player() for k in range(4)]
g = Game(players)
g.deal()
g.hit()
g.hit()
g.hit()
hands = []
for k in range(4):
print('Player %s: %s' % (k, g.get_player_cards(k)))
hands.append(get_hand_value(g.community_cards + g.players[k].cards))
print(hands[-1])
print('Winner:', get_winners(hands))
if __name__ == "__main__":
main()
| 24.147059 | 78 | 0.663825 |
74a70fc66e98524c9960f00e33a44a6beba5af90 | 6,285 | py | Python | experiments/vitchyr/goal_distribution/representation_learning/exps_20_07_24/exp6_relative_distance_set.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | 1 | 2020-10-23T14:40:09.000Z | 2020-10-23T14:40:09.000Z | experiments/vitchyr/goal_distribution/representation_learning/exps_20_07_24/exp6_relative_distance_set.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | null | null | null | experiments/vitchyr/goal_distribution/representation_learning/exps_20_07_24/exp6_relative_distance_set.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | ["MIT"] | 1 | 2021-05-27T20:38:45.000Z | 2021-05-27T20:38:45.000Z |
import rlkit.misc.hyperparameter as hyp
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.launchers.launcher_util import run_experiment
from rlkit.torch.sets.rl_launcher import disco_experiment
if __name__ == "__main__":
variant = dict(
env_class=PickAndPlaceEnv,
env_kwargs=dict(
# Environment dynamics
action_scale=1.0,
boundary_dist=4,
ball_radius=0.75,
object_radius=0.50,
cursor_visual_radius=1.5,
object_visual_radius=1.,
min_grab_distance=0.5,
walls=None,
# Rewards
action_l2norm_penalty=0,
reward_type="dense",
success_threshold=0.60,
# Reset settings
fixed_goal=None,
# Visualization settings
images_are_rgb=True,
render_dt_msec=0,
render_onscreen=False,
render_size=84,
show_goal=False,
goal_samplers=None,
goal_sampling_mode='random',
num_presampled_goals=10000,
object_reward_only=False,
init_position_strategy='random',
num_objects=2,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=1e-3,
target_update_period=1,
use_automatic_entropy_tuning=True,
reward_scale='auto_normalize_by_max_magnitude',
),
max_path_length=100,
# max_path_length=20,
algo_kwargs=dict(
batch_size=128,
num_epochs=501,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
# batch_size=5,
# num_epochs=1,
# num_eval_steps_per_epoch=20*8,
# num_expl_steps_per_train_loop=20*8,
# num_trains_per_train_loop=20,
# min_num_steps_before_training=20,
),
replay_buffer_kwargs=dict(
fraction_future_context=0.0,
fraction_distribution_context=0.8,
max_size=int(1e6),
),
observation_key='latent_observation',
# desired_goal_key='latent_desired_goal',
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
pad_color=50,
subpad_length=1,
pad_length=1,
num_columns_per_rollout=4,
),
renderer_kwargs=dict(
# create_image_format='HWC',
# output_image_format='CWH',
output_image_format='CHW',
# flatten_image=True,
# normalize_image=False,
),
create_vae_kwargs=dict(
latent_dim=128,
encoder_cnn_kwargs=dict(
kernel_sizes=[5, 3, 3],
n_channels=[16, 32, 64],
strides=[3, 2, 2],
paddings=[0, 0, 0],
pool_type='none',
hidden_activation='relu',
normalization_type='layer',
),
encoder_mlp_kwargs=dict(
hidden_sizes=[],
),
decoder_dcnn_kwargs=dict(
kernel_sizes=[3, 3, 6],
n_channels=[32, 16, 3],
strides=[2, 2, 3],
paddings=[0, 0, 0],
),
decoder_mlp_kwargs=dict(
hidden_sizes=[256, 256],
),
use_fancy_architecture=True,
decoder_distribution='gaussian_learned_global_scalar_variance',
),
vae_trainer_kwargs=dict(
vae_lr=1e-3,
vae_visualization_config=dict(
num_recons=5,
num_samples=20,
# debug_period=50,
debug_period=20,
unnormalize_images=True,
image_format='CHW',
),
beta=1,
# set_loss_weight=1,
# beta=0.001,
set_loss_weight=0,
),
data_loader_kwargs=dict(
batch_size=128,
),
vae_algo_kwargs=dict(
num_iters=501,
num_epochs_per_iter=1,
progress_csv_file_name='vae_progress.csv',
),
generate_set_for_vae_pretraining_kwargs=dict(
num_sets=6,
num_samples_per_set=128,
),
generate_set_for_rl_kwargs=dict(
num_sets=4,
num_samples_per_set=128,
set_configs=[
dict(
version='move_a_to_b',
a_axis_to_b_axis={
0: 2,
1: 3,
}
),
],
save_to_filename='cursor_to_obj1_sets128samples.pickle',
# saved_filename='cursor_to_obj1_sets128samples.pickle',
),
num_ungrouped_images=12800,
)
n_seeds = 1
mode = 'local'
exp_prefix = 'dev-{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 2
mode = 'sss'
exp_prefix = '20-07-24-exp6-relative-distance'
search_space = {
# 'vae_algo_kwargs.num_iters': [0],
# 'algo_kwargs.num_epochs': [1],
'vae_trainer_kwargs.set_loss_weight': [
0, 1, 10, 100
],
'vae_algo_kwargs.num_iters': [
501, 101,
]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = list(sweeper.iterate_hyperparameters())
for _ in range(n_seeds):
for exp_id, variant in enumerate(variants):
variant['exp_id'] = exp_id
run_experiment(
disco_experiment,
exp_name=exp_prefix,
prepend_date_to_exp_name=False,
num_exps_per_instance=2,
mode=mode,
variant=variant,
# slurm_config_name='cpu',
use_gpu=True,
# gpu_id=1,
)
| 31.582915 | 75 | 0.523946 |
90b5814a00ae9ae8ff7a501926e0af195ebb1d82 | 4,007 | py | Python | tables/wikipedia-scripts/extract.py | yash-srivastava19/sempre | b27c06906da33e345c645ff9470132bf6d1c26dc | ["Apache-2.0"] | 812 | 2015-01-08T01:58:39.000Z | 2022-03-24T02:43:05.000Z | tables/wikipedia-scripts/extract.py | yash-srivastava19/sempre | b27c06906da33e345c645ff9470132bf6d1c26dc | ["Apache-2.0"] | 181 | 2015-01-26T21:54:04.000Z | 2022-03-09T17:52:04.000Z | tables/wikipedia-scripts/extract.py | yash-srivastava19/sempre | b27c06906da33e345c645ff9470132bf6d1c26dc | ["Apache-2.0"] | 314 | 2015-01-14T11:23:08.000Z | 2022-03-07T02:36:47.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extract categories and their members from Wikipedia dump."""
import sys, os, re, argparse, json, gzip
from codecs import open
from collections import defaultdict
HIDDEN_CAT_CAT = 15961454
################################################################
# List all pages in category 0 or 4
# Output: bimap between ID and (namespace, name)
def get_name_bimap():
id_to_name, name_to_id = {}, {}
with gzip.open('pages-filtered.tsv.gz') as fin:
for i, line in enumerate(fin):
if i % 1000000 == 0:
print >> sys.stderr, 'Processing %d ...' % i
line = line[:-1].split('\t')
_id = int(line[0])
_name = (int(line[1]), line[2])
id_to_name[_id] = _name
name_to_id[_name] = _id
print >> sys.stderr, 'Found %d pages' % len(id_to_name)
return id_to_name, name_to_id
def read_hidden_cats():
with open('hidden-cats') as fin:
hidden_cats = set(int(x) for x in fin)
hidden_cats.add(HIDDEN_CAT_CAT)
print >> sys.stderr, 'Found %d hidden cats' % len(hidden_cats)
return hidden_cats
################################################################
# List all (page, category) pairs
# Output: mapping from cat id to page ids
def get_cat_to_pages(id_to_name, name_to_id):
cat_to_pages = defaultdict(list)
with gzip.open('links-filtered.tsv.gz') as fin:
for i, line in enumerate(fin):
if i % 1000000 == 0:
print >> sys.stderr, 'Reading %d ...' % i
line = line[:-1].split('\t')
page = int(line[0])
cat = int(line[1])
if page in id_to_name and cat in id_to_name:
cat_to_pages[cat].append(page)
return cat_to_pages
# Pre-process links.tsv.gz
def dump_page_cat_pairs(id_to_name, name_to_id, hidden_cats):
with gzip.open('links.tsv.gz') as fin:
with gzip.open('links-filtered.tsv.gz', 'w') as fout:
num_found = 0
for i, line in enumerate(fin):
if i % 1000000 == 0:
print >> sys.stderr, 'Reading %d (%d found so far) ...' % \
(i, num_found)
line = line[:-1].split('\t')
try:
page = int(line[0])
cat = name_to_id[(14, line[1])]
if page in id_to_name and cat not in hidden_cats:
fout.write('%d\t%d\n' % (page, cat))
num_found += 1
except LookupError:
pass
################################################################
# Only keep useful categories
def filter_cats(cat_to_pages, name_to_id):
print >> sys.stderr, 'Before: %d cats' % len(cat_to_pages)
for cat in cat_to_pages.keys():
if len(cat_to_pages[cat]) < 3:
del cat_to_pages[cat]
print >> sys.stderr, ' After: %d cats' % len(cat_to_pages)
################################################################
# Dump to file
def dump(cat_to_pages, id_to_name):
with gzip.open('grouped.txt.gz', 'w') as fout:
for i, (cat, pages) in enumerate(cat_to_pages.iteritems()):
if i % 100000 == 0:
print >> sys.stderr, 'Dumping %d ...' % i
fout.write(str(cat) + '\t' + id_to_name[cat][1] + '\n')
fout.write(str(len(pages)) + '\n')
for page in pages:
fout.write(str(page) + '\t' + id_to_name[page][1] + '\n')
fout.write('\n')
def main():
parser = argparse.ArgumentParser()
args = parser.parse_args()
id_to_name, name_to_id = get_name_bimap()
################
#hidden_cats = read_hidden_cats()
#dump_page_cat_pairs(id_to_name, name_to_id, hidden_cats)
#exit(0)
################
cat_to_pages = get_cat_to_pages(id_to_name, name_to_id)
filter_cats(cat_to_pages, name_to_id)
dump(cat_to_pages, id_to_name)
if __name__ == '__main__':
main()
| 37.448598 | 79 | 0.529823 |
a1dbab7a0520e4c0a24b61c8fcea291a531b1c62 | 24,089 | py | Python | api/handlers/ws_domains/lobster.py | visigoth/V3SPA | b608589076bf82d91538493bc2c549c25a0bef33 | ["BSD-3-Clause"] | 33 | 2016-09-12T14:24:07.000Z | 2017-08-03T19:20:03.000Z | api/handlers/ws_domains/lobster.py | visigoth/V3SPA | b608589076bf82d91538493bc2c549c25a0bef33 | ["BSD-3-Clause"] | 4 | 2016-08-22T19:19:16.000Z | 2016-11-28T00:40:54.000Z | api/handlers/ws_domains/lobster.py | visigoth/V3SPA | b608589076bf82d91538493bc2c549c25a0bef33 | ["BSD-3-Clause"] | 8 | 2016-09-12T14:24:08.000Z | 2017-01-19T13:39:12.000Z |
import api
import logging
logger = logging.getLogger(__name__)
from tornado import httpclient
import hashlib
import urllib
import api.support.decompose
import api.handlers.ws_domains as ws_domains
import api.jsonh
import json
__MIN_LSR_VERSION__ = 6
class LobsterDomain(object):
"""Docstring for LobsterDomain """
def __init__(self):
""" Test the connection to the lobster server. """
backend_uri = "http://{0}/version".format(
api.config.get('lobster_backend', 'uri'))
try:
http_client = httpclient.HTTPClient()
result = http_client.fetch(
backend_uri,
method='GET',
request_timeout=10.0
)
resp = api.db.json.loads(result.body)
self._lobster_version = resp['version']
if self._lobster_version < __MIN_LSR_VERSION__:
logger.critical("Required lobster version {0} but server speaks {1}"
.format(__MIN_LSR_VERSION__, self._lobster_version))
logger.info("Connected to lobster backend server v{0}"
.format(self._lobster_version))
except httpclient.HTTPError as e:
if e.code == 599:
raise Exception("backend:lobster - Unavailable")
else:
# Our request wasn't valid anyway, we just wanted a response
pass
@staticmethod
def _make_request(method, path, payload=None, timeout=90.0):
http_client = httpclient.HTTPClient()
backend_uri = "http://{0}{1}".format(
api.config.get('lobster_backend', 'uri'),
path)
logger.info("Fetching {0} from v3spa-server".format(backend_uri))
try:
if payload:
output = http_client.fetch(
backend_uri,
method=method,
body=payload,
request_timeout=timeout
)
else:
output = http_client.fetch(
backend_uri,
method=method,
request_timeout=timeout
)
except httpclient.HTTPError as e:
if e.code == 599:
logger.warning("Request timed out")
raise Exception("backend:lobster - Unavailable")
elif e.code == 500:
logger.warning("Error during request - {0}".format(e.response.body))
raise Exception("backend:lobster - {0}".format(e.response.body))
else:
logger.warning("Error during request - [{0}] {1}"
.format(e.code, e.message))
if api.config.get('main', 'debug'):
raise Exception(
"Backend error: [{0}] {1}".format(e.code, e.message))
else:
raise Exception("backend:lobster - Unspecified Error")
else:
logger.info("Request successful")
return output
@staticmethod
def get_annotation(hop, *names, **kw):
param = kw.get('annotation_param', 'annotations')
notes = filter(lambda x: x['name'] in names, hop[param])
if len(notes):
return notes
else:
return None
def path_walk(self, path, data, origin, lobster_data):
previous_dom = None
new_path = []
import urlparse
params = urlparse.parse_qs(data['params'])
jsondata = data['parameterized']
expanded_ids = params['id']
last_perm = None
last_object_class = None
for i, hop_info in enumerate(path):
hop = jsondata['connections'][hop_info['conn']]
if hop_info['left'] == origin:
fwd = 'right'
bwd = 'left'
else:
fwd = 'left'
bwd = 'right'
tried_expanding = False
while True:
try:
next_domain = jsondata['domains'][hop_info[fwd]]
from_dom = jsondata['domains'][hop_info[bwd]]
dest_port = jsondata['ports'][hop[fwd]]
except KeyError:
if tried_expanding is True:
raise Exception("Couldn't expand the graph to link from {0}"
.format(hop_info[fwd]))
else:
tried_expanding = True
expanded_ids.append(hop_info[fwd])
output = self._make_request(
'POST', '/parse?{0}'.format(
"&".join(["id={0}".format(hid)
for hid in expanded_ids])),
lobster_data)
result = api.db.json.loads(output.body)
jsondata['domains'].update(result['result']['domains'])
jsondata['connections'].update(result['result']['connections'])
jsondata['ports'].update(result['result']['ports'])
else:
break
is_type = self.get_annotation(
next_domain, 'Type', annotation_param='domainAnnotations')
if i == 0:
new_path.append({
'type': 'origin',
'name': from_dom['path'],
'hop': hop_info['conn']
})
print("Origin: {0}".format(from_dom['path']))
if (next_domain['class'] == 'Domtrans_pattern'
and next_domain != previous_dom):
attr = self.get_annotation(
next_domain, "Macro", annotation_param='domainAnnotations')
new_path.append({
'hop': hop_info['conn'],
'type': 'transition',
'name': attr[0]['args'][1]
})
print("Can transition via {0}".format(attr[0]['args'][1]))
# This is attribute membership connection:
elif self.get_annotation(hop, 'Attribute'):
if fwd == 'left':
fwd_arg = filter(lambda x: x['name'] == 'Lhs',
hop['annotations'])
else:
fwd_arg = filter(lambda x: x['name'] == 'Rhs',
hop['annotations'])
if (fwd_arg and fwd_arg[0]['args'][1] == 'attribute_subj') or \
dest_port['name'] == 'attribute_subj':
print("which is a member of")
new_path.append({
'hop': hop_info['conn'],
'type': 'member_of',
})
elif fwd_arg and fwd_arg[0]['args'][1] == 'member_obj' or \
dest_port['name'] == 'member_obj':
print("an attribute that contains")
new_path.append({
'hop': hop_info['conn'],
'type': 'attribute_contains',
})
elif dest_port['name'] == 'attribute_subj':
logger.critical("Encountered unexpected case when walking path. "
"This probably indicates a serious bug.")
pass
elif (next_domain['class'] != 'Domtrans_pattern'
and self.get_annotation(hop, 'Perm')):
perms = [x['args']
for x in self.get_annotation(hop, 'Perm')]
print("which has {0} permissions ".format(perms))
new_path.append({
'hop': hop_info['conn'],
'type': 'permission',
'name': perms
})
last_perm = new_path[-1]
else:
pass
if is_type:
new_path.append({
'hop': hop_info['conn'],
'type': 'type',
'name': next_domain['path']
})
if last_object_class is not None:
new_path[-1]['class'] = last_object_class
last_object_class = None
print("type '{0}'"
.format(next_domain['path'], last_object_class))
elif self.get_annotation(
next_domain, 'Attribute',
annotation_param='domainAnnotations'):
new_path.append({
'hop': hop_info['conn'],
'type': 'attribute',
'name': next_domain['path']
})
if dest_port['name'] == 'attribute_subj':
print("attribute {0}".format(next_domain['path']))
else:
if last_object_class is not None:
new_path[-1]['class'] = last_object_class
last_object_class = None
print("attribute type '{0}'"
.format(next_domain['path']))
else:
pass
if self.get_annotation(hop, 'CondExpr'):
new_path[-1]['condition'] = self.get_annotation(
hop, 'CondExpr')[0]['args']
origin = hop_info[fwd]
previous_dom = next_domain
import urllib
data['params'] = urllib.urlencode(params, doseq=True)
return new_path, last_perm
def query_reachability(self, msg):
""" Run a reachability test from a given domain """
logger.info("WS:query_reachability?%s" % msg['payload']['params'])
refpol_id = msg['payload']['policy']
del msg['payload']['policy']
refpol_id = api.db.idtype(refpol_id)
refpol = ws_domains.call('refpolicy', 'Read', refpol_id)
output = self._make_request(
'POST', '/paths?{0}'.format(msg['payload']['params']),
msg['payload']['text'])
result = api.db.json.loads(output.body)['result']
if result is not None:
for dest, paths in result.iteritems():
if dest == 'truncated':
continue
new_paths = {'dest_id': dest, 'perms': {}}
for path in paths:
logger.info('Gathering additional data for {0}'.format(path))
import urlparse
params = urlparse.parse_qs(msg['payload']['params'])
path_data, final_perm = self.path_walk(
path,
refpol.parsed,
params['id'][0],
msg['payload']['text'])
logger.info("Re-tabulated path data")
new_paths['dest'] = path_data[-1]['name']
for klass, perm in final_perm['name']:
new_paths['perms'][perm] = {
'hops': path,
'human': path_data,
'perm': perm,
'class': klass,
'endpoint': path_data[-1]
}
result[dest] = new_paths
return {
'label': msg['response_id'],
'payload': {'paths': result, 'data': refpol.parsed}
}
def export_selinux(self, msg):
""" Request that the server export the POSTed
lobster file as an SELinux policy.
"""
output = self._make_request(
'POST', '/export/selinux', msg)
jsondata = api.db.json.loads(output.body)
return jsondata['result']
def validate(self, msg):
""" Validate a Lobster file received from the IDE
"""
dsl = msg['payload']['text']
del msg['payload']['text']
dsl_hash = hashlib.md5(dsl).hexdigest()
refpol_id = msg['payload']['policy']
del msg['payload']['policy']
refpol_id = api.db.idtype(refpol_id)
refpol = ws_domains.call('refpolicy', 'Read', refpol_id)
logger.info("WS:validate?%s" % "&".join(
["{0}={1}".format(x, y) for x, y in msg['payload'].iteritems() if x != 'text']))
# If the DSL is identical, and the parameters are identical, just return the one we already
# translated.
if (refpol.parsed
and refpol.documents['dsl']['digest'] == dsl_hash
and refpol.parsed['params'] == msg['payload']['params']):
logger.info("Returning cached JSON")
return {
'label': msg['response_id'],
'payload': api.db.json.dumps(refpol.parsed)
}
else:
output = self._make_request(
'POST', '/parse?{0}'.format(msg['payload']['params']),
dsl)
jsondata = api.db.json.loads(output.body)
if msg['payload']['hide_unused_ports'] is True:
jsondata = self._filter_unused_ports(jsondata)
refpol['parsed'] = {
'version': jsondata['version'],
'errors': jsondata['errors'],
'parameterized': jsondata['result'],
'params': msg['payload']['params']
}
# If this DSL is different, then we need to recalculate the
# summarized version, which is parsed with paths=*
if (len(jsondata['errors']) == 0
and 'summary' not in refpol.parsed
or refpol.documents['dsl']['digest'] != dsl_hash):
output = self._make_request( 'POST', '/parse?path=*', dsl)
jsondata = api.db.json.loads(output.body)
refpol.parsed['full'] = jsondata['result']
if jsondata['result'] is not None:
refpol.parsed['summary'] = api.support.decompose.flatten_perms(jsondata['result'])
refpol.parsed['permset'] = [{'text': x, "id": x}
for x
in api.support.decompose.perm_set(
jsondata['result'])]
else:
refpol.parsed['summary'] = []
refpol.parsed['permset'] = []
refpol.Insert()
del refpol.parsed['full']
return {
'label': msg['response_id'],
'payload': api.db.json.dumps(refpol.parsed)
}
def _filter_unused_ports(self, data):
""" Filter out all of the ports which do not have a connection.
This includes their references inside domains, as well as their
presence in the port list. """
if 'errors' in data and len(data['errors']) > 0:
return data
connected_ports = set()
for ident, conn in data['result']['connections'].iteritems():
connected_ports.add(conn['right'])
connected_ports.add(conn['left'])
for port in data['result']['ports'].keys():
if port not in connected_ports:
del data['result']['ports'][port]
for domkey, domain in data['result']['domains'].iteritems():
domain['ports'][:] = [p for p in domain['ports']
if p in connected_ports]
return data
def translate_selinux(self, params):
""" Given a set of parameters of the form, return the
lobster DSL for the module.
{
"refpolicy": "minimal",
"modules": [
{ "name": "test",
"if": " ... source of .if file ...",
"te": " ... source of .te file ...",
"fc": " ... source of .fc file ..."
}
]
}
"""
logger.info("Params: {0}".format(params))
try:
endpoint_uri = '/projects/{0}/import/selinux'.format(params['refpolicy'])
payload = params if isinstance(params, basestring) else api.db.json.dumps(params)
output = self._make_request('POST', endpoint_uri, payload)
if len(api.db.json.loads(output.body)['errors']) > 0:
raise Exception(output['errors'])
endpoint_uri = '/projects/{0}/json'.format(params['refpolicy'])
output = self._make_request('GET', endpoint_uri)
except Exception as e:
raise api.DisplayError("Unable to import policy: {0}".format(e.message))
else:
return output.body
def fetch_graph(self, msg):
""" Return the JSON graph for the given policy. Params of the form
{
"payload":
{
"policy": "policyid"
}
}
"""
logger.info("Params: {0}".format(msg))
# Assume we have already run translate_selinux() during policy upload
# msg.payload.policy is the id
refpol_id = msg['payload']['policy']
refpol_id = api.db.idtype(refpol_id)
refpol = ws_domains.call('refpolicy', 'Read', refpol_id)
if ('parsed' not in refpol or 'parameterized' not in refpol['parsed']
or 'condensed_lobster' not in refpol['parsed']['parameterized']):
lobster_json = api.db.json.loads(refpol['documents']['dsl']['text'])
lobster_json = lobster_json['result']
connections = lobster_json['connections']
ports = lobster_json['ports']
domains = lobster_json['domains']
node_map = {}
link_map = {}
module_map = {}
node_list = []
link_list = []
module_list = []
for key in connections.keys():
conn = connections[key]
te_file = ""
perm_list = []
left_dom = domains[conn['left_dom']]
right_dom = domains[conn['right_dom']]
left_port = ports[conn['left']]
right_port = ports[conn['right']]
active_dom = left_dom
inactive_dom = right_dom
if right_port['name'] == 'active':
active_dom = right_dom
inactive_dom = left_dom
for annot in conn['annotations']:
if annot['name'] == 'Perm':
perm_list.append(annot['args']) # [class, perm]
elif annot['name'] == 'SourcePos':
te_file = annot['args'][0]
# Skip connections that do not have any permissions
if len(perm_list) == 0:
continue
# Skip this connection if one of the domains is not an Attribute or a Type
bothAreTypesOrAttrs = True
for dom in [left_dom, right_dom]:
validAnnots = []
for domAnnot in dom['domainAnnotations']:
if domAnnot['name'] == 'Attribute' or domAnnot['name'] == 'Type':
validAnnots.append(domAnnot)
if len(validAnnots) == 0:
bothAreTypesOrAttrs = bothAreTypesOrAttrs and False
if not bothAreTypesOrAttrs:
continue
mod_idx = module_map.get(active_dom['module'], -1)
if (mod_idx == -1):
mod_idx = len(module_list)
module_map[active_dom['module']] = mod_idx
module_list.append(active_dom['module'])
# Create/get the source node
source_key = active_dom['name']
source_idx = node_map.get(source_key, -1)
if source_idx == -1:
source_node = { 'n': source_key, 'm': mod_idx }
source_idx = len(node_list)
node_map[source_key] = source_idx
node_list.append(source_node)
# Loop over each class to create the object.class pairs
for perm in perm_list:
mod_idx = module_map.get(inactive_dom['module'], -1)
if (mod_idx == -1):
mod_idx = len(module_list)
module_map[inactive_dom['module']] = mod_idx
module_list.append(inactive_dom['module'])
target_key = inactive_dom['name'] + '.' + perm[0]
target_idx = node_map.get(target_key, -1)
if target_idx == -1:
target_node = { 'n': target_key, 'm': mod_idx }
target_idx = len(node_list)
node_map[target_key] = target_idx
node_list.append(target_node)
link_key = source_key + '-' + target_key
link = link_map.get(link_key, -1)
if link == -1:
link = {
's': source_idx,
't': target_idx,
'p': [perm[1]]
}
link_map[link_key] = link
link_list.append(link)
elif perm[1] not in link['p']:
link['p'].append(perm[1])
# Sparsify/compress the dicts/JSON objects
node_list = api.jsonh.dumps(node_list)
link_list = api.jsonh.dumps(link_list)
module_list = module_list
if 'parsed' not in refpol:
refpol['parsed'] = {
'version': '1.0',
'errors': [],
'parameterized': {}
}
refpol['parsed']['parameterized']['condensed_lobster'] = {
'modules': module_list,
'nodes': node_list,
'links': link_list
}
refpol.Insert()
# Don't send the rules or raw to the client
refpol['parsed']['parameterized'].pop('rules', None)
refpol['parsed']['parameterized'].pop('raw', None)
refpol['parsed']['parameterized'].pop('condensed', None)
return {
'label': msg['response_id'],
'payload': api.db.json.dumps(refpol.parsed)
}
# Return the cached version if available
return api.db.json.loads({})
def parse(self, msg):
""" Return the JSON graph for the given policy. Params of the form
{
"payload":
{
"policy": "policyid"
}
}
"""
logger.info("Params: {0}".format(msg))
# msg.payload.policy is the id
refpol_id = msg['payload']['policy']
del msg['payload']['policy']
refpol_id = api.db.idtype(refpol_id)
refpol = ws_domains.call('refpolicy', 'Read', refpol_id)
# If already parsed, just return the one we already translated.
if ('parsed' in refpol
and 'parameterized' in refpol['parsed']
and 'lobster_rules' in refpol['parsed']['parameterized']):
logger.info("Returning cached JSON")
else:
dsl_json = refpol['documents']['dsl']['text']
# Return the cached version if available
return {
'label': msg['response_id'],
'payload': api.db.json.dumps(refpol.parsed)
}
def handle(self, msg):
if msg['request'] == 'validate':
return self.validate(msg)
elif msg['request'] == 'query_reachability':
return self.query_reachability(msg)
elif msg['request'] == 'fetch_graph':
return self.fetch_graph(msg)
else:
raise Exception("Invalid message type for 'lobster' domain")
def __instantiate__():
return LobsterDomain()
| 36.665145 | 100 | 0.482295 |
7817bcfdb88b63b9877526ba12fa192abe5b75a2 | 965 | py | Python | tests/test_app_generation.py | singleton11/sdjat | be8c1a621fc380a0dc5af446d531c259531de345 | ["MIT"] | null | null | null | tests/test_app_generation.py | singleton11/sdjat | be8c1a621fc380a0dc5af446d531c259531de345 | ["MIT"] | 1 | 2016-12-04T16:08:50.000Z | 2016-12-04T16:08:50.000Z | tests/test_app_generation.py | singleton11/sdjat | be8c1a621fc380a0dc5af446d531c259531de345 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import unittest
from unittest import TestCase
import sh
from cookiecutter.main import cookiecutter
class AppGenerationTestCase(TestCase):
"""Test case for app generation"""
def tearDown(self):
sh.rm('-rf', 'polls')
def test_generation(self):
"""Test standard app generation"""
cookiecutter('../', no_input=True, overwrite_if_exists=True)
file_list = (
(
sh.ls('polls'),
(
'apps.py',
'__init__.py',
'models.py',
)
),
(
sh.ls('polls/tests'),
(
'__init__.py',
'test_models.py'
)
)
)
for ls in file_list:
for file in ls[1]:
self.assertIn(file, ls[0])
if __name__ == '__main__':
unittest.main()
| 21.931818 | 68 | 0.450777 |
8fe301f96f754f89b84274c541dd3905f8b2e5f5 | 618 | py | Python | account/migrations/0011_auto_20190818_0653.py | lilianwaweru/Bank | 05ef2b86beec98d1cf31f3da168bbf32efaa1e3f | ["MIT"] | null | null | null | account/migrations/0011_auto_20190818_0653.py | lilianwaweru/Bank | 05ef2b86beec98d1cf31f3da168bbf32efaa1e3f | ["MIT"] | 7 | 2020-02-12T02:30:17.000Z | 2021-10-06T02:49:08.000Z | account/migrations/0011_auto_20190818_0653.py | lilianwaweru/Bank | 05ef2b86beec98d1cf31f3da168bbf32efaa1e3f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-18 06:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0010_auto_20190818_0625'),
]
operations = [
migrations.RemoveField(
model_name='historymerch',
name='access',
),
migrations.RemoveField(
model_name='historymerch',
name='bank',
),
migrations.RemoveField(
model_name='historymerch',
name='transaction',
),
]
| 22.071429 | 47 | 0.574434 |
28e01fcc1dd01f952921aa5ac9384f06dcb775f9 | 3,754 | py | Python | PaymentCalculator.py | tezosland/tezos-reward-distributer | f9825affe172f7cef88c72e63ead8feb064188f4 | ["MIT"] | 5 | 2018-11-04T16:57:43.000Z | 2019-07-24T20:22:45.000Z | PaymentCalculator.py | tezosland/tezos-reward-distributer | f9825affe172f7cef88c72e63ead8feb064188f4 | ["MIT"] | 1 | 2018-11-04T13:06:44.000Z | 2018-11-04T13:06:44.000Z | PaymentCalculator.py | tezosland/tezos-reward-distributer | f9825affe172f7cef88c72e63ead8feb064188f4 | ["MIT"] | null | null | null |
from utils import floorf
class PaymentCalculator:
def __init__(self, founders_map, owners_map, reward_list, total_rewards, service_fee_calculator, cycle):
self.owners_map = owners_map
self.total_rewards = total_rewards
self.cycle = cycle
self.fee_calc = service_fee_calculator
self.reward_list = reward_list
self.founders_map = founders_map
self.total_service_fee = 0
#
# calculation details
#
# total reward = delegators reward + owners reward = delegators payment + delegators fee + owners payment
# delegators reward = delegators payment + delegators fee
# owners reward = owners payment = total reward - delegators reward
    # founders reward = delegators fee = delegators reward - delegators payment
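    # Illustrative example with hypothetical numbers: for a total reward of 100,
    # if delegators account for 80 of the reward and each delegator pays a 10%
    # service fee, delegator payments total 72 and delegator fees total 8; owners
    # share the remaining 20 according to owners_map ratios, and founders then
    # split the 8 of service fee according to founders_map ratios.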
####
def calculate(self):
pymnts = []
# 1- calculate delegators payments
delegators_total_pymnt = 0
delegators_total_ratio = 0
delegators_total_fee = 0
for reward_item in self.reward_list:
reward = reward_item['reward']
ktAddress = reward_item['address']
ratio = reward_item['ratio']
pymnt_amnt = floorf(reward * (1 - self.fee_calc.calculate(ktAddress)), 3)
# this indicates, service fee is very low (e.g. 0) and pymnt_amnt is rounded up
if pymnt_amnt - reward > 0:
pymnt_amnt = reward
fee = (reward - pymnt_amnt)
pymnts.append({'payment': pymnt_amnt, 'fee': fee, 'reward': reward, 'address': ktAddress, 'ratio': ratio,
'cycle': self.cycle, 'type': 'D'})
delegators_total_pymnt = delegators_total_pymnt + pymnt_amnt
delegators_total_ratio = delegators_total_ratio + ratio
delegators_total_fee = delegators_total_fee + fee
# 2- calculate deposit owners payments. They share the remaining rewards according to their ratio (check config)
owners_total_payment = 0
owners_total_reward = self.total_rewards - (delegators_total_pymnt + delegators_total_fee)
for address, ratio in self.owners_map.items():
owner_pymnt_amnt = floorf(ratio * owners_total_reward, 3)
owners_total_payment = owners_total_payment + owner_pymnt_amnt
pymnts.append({'payment': owner_pymnt_amnt, 'fee': 0, 'address': address, 'cycle': self.cycle, 'type': 'O',
'ratio': ratio, 'reward': owner_pymnt_amnt})
# move remaining rewards to service fee bucket
self.total_service_fee = self.total_rewards - delegators_total_pymnt - owners_total_payment
# 3- service fee is shared among founders according to founders_map ratios
for address, ratio in self.founders_map.items():
pymnt_amnt = floorf(ratio * self.total_service_fee, 6)
pymnts.append(
{'payment': pymnt_amnt, 'fee': 0, 'address': address, 'cycle': self.cycle, 'type': 'F', 'ratio': ratio,
'reward': 0})
###
# sanity check
#####
total_sum = 0
for payment in pymnts:
total_sum = total_sum + payment['payment']
# if there is a minor difference due to floor function; it is added to last payment
if self.total_rewards - total_sum > 1e-6:
last_payment = pymnts[-1] # last payment, probably one of the founders
last_payment['payment'] = last_payment['payment'] + (self.total_rewards - total_sum)
# this must never return true
if abs(total_sum - self.total_rewards) > 5e-6:
raise Exception("Calculated reward {} is not equal total reward {}".format(total_sum, self.total_rewards))
return pymnts
| 43.149425 | 120 | 0.638785 |
d529ca36b3d98248388bc2bf0e546fd41eccc708 | 14,021 | py | Python | pennylane/qnn/keras.py | jsmz97/pennylane | de7b7c0b452c8d59867d11f84b9c332a36e08ab1 | ["Apache-2.0"] | 1 | 2019-03-15T03:32:58.000Z | 2019-03-15T03:32:58.000Z | pennylane/qnn/keras.py | jsmz97/pennylane | de7b7c0b452c8d59867d11f84b9c332a36e08ab1 | ["Apache-2.0"] | 1 | 2021-12-09T22:04:41.000Z | 2022-01-19T14:21:03.000Z | pennylane/qnn/keras.py | jsmz97/pennylane | de7b7c0b452c8d59867d11f84b9c332a36e08ab1 | ["Apache-2.0"] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the classes and functions for integrating QNodes with the Keras Layer
API."""
import inspect
from collections.abc import Iterable
from typing import Optional
try:
import tensorflow as tf
from tensorflow.keras.layers import Layer
CORRECT_TF_VERSION = int(tf.__version__.split(".")[0]) > 1
except ImportError:
# The following allows this module to be imported even if TensorFlow is not installed. Users
# will instead see an ImportError when instantiating the KerasLayer.
from abc import ABC
Layer = ABC
CORRECT_TF_VERSION = False
class KerasLayer(Layer):
"""KerasLayer(qnode, weight_shapes: dict, output_dim, weight_specs: Optional[dict] = None, **kwargs)
Converts a :func:`~.QNode` to a Keras
`Layer <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer>`__.
The result can be used within the Keras
`Sequential <https://www.tensorflow.org/api_docs/python/tf/keras/Sequential>`__ or
`Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ classes for
creating quantum and hybrid models.
Args:
qnode (qml.QNode): the PennyLane QNode to be converted into a Keras Layer_
weight_shapes (dict[str, tuple]): a dictionary mapping from all weights used in the QNode to
their corresponding shapes
output_dim (int): the output dimension of the QNode
weight_specs (dict[str, dict]): An optional dictionary for users to provide additional
specifications for weights used in the QNode, such as the method of parameter
initialization. This specification is provided as a dictionary with keys given by the
arguments of the `add_weight()
<https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_weight>`__
method and values being the corresponding specification.
**kwargs: additional keyword arguments passed to the Layer_ base class
**Example**
First let's define the QNode that we want to convert into a Keras Layer_:
.. code-block:: python
n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, weights_0, weight_1):
qml.RX(inputs[0], wires=0)
qml.RX(inputs[1], wires=1)
qml.Rot(*weights_0, wires=0)
qml.RY(weight_1, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
The signature of the QNode **must** contain an ``inputs`` named argument for input data,
with all other arguments to be treated as internal weights. We can then convert to a Keras
Layer_ with:
>>> weight_shapes = {"weights_0": 3, "weight_1": 1}
>>> qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=2)
The internal weights of the QNode are automatically initialized within the
:class:`~.KerasLayer` and must have their shapes specified in a ``weight_shapes`` dictionary.
It is then easy to combine with other neural network layers from the
`tensorflow.keras.layers <https://www.tensorflow.org/api_docs/python/tf/keras/layers>`__ module
and create a hybrid:
>>> clayer = tf.keras.layers.Dense(2)
>>> model = tf.keras.models.Sequential([qlayer, clayer])
.. UsageDetails::
**QNode signature**
The QNode must have a signature that satisfies the following conditions:
- Contain an ``inputs`` named argument for input data.
- All other arguments must accept an array or tensor and are treated as internal
weights of the QNode.
- All other arguments must have no default value.
- The ``inputs`` argument is permitted to have a default value provided the gradient with
respect to ``inputs`` is not required.
- There cannot be a variable number of positional or keyword arguments, e.g., no ``*args``
or ``**kwargs`` present in the signature.
**Initializing weights**
The optional ``weight_specs`` argument of :class:`~.KerasLayer` allows for a more
fine-grained specification of the QNode weights, such as the method of initialization and
any regularization or constraints. For example, the initialization method of the ``weights``
argument in the example above could be specified by:
.. code-block::
weight_specs = {"weights": {"initializer": "random_uniform"}}
The values of ``weight_specs`` are dictionaries with keys given by arguments of
the Keras
`add_weight() <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_weight>`__
method. For the ``"initializer"`` argument, one can specify a string such as
``"random_uniform"`` or an instance of an `Initializer
<https://www.tensorflow.org/api_docs/python/tf/keras/initializers>`__ class, such as
`tf.keras.initializers.RandomUniform <https://www.tensorflow.org/api_docs/python/tf/random_uniform_initializer>`__.
If ``weight_specs`` is not specified, weights will be added using the Keras default
initialization and without any regularization or constraints.
**Additional example**
The code block below shows how a circuit composed of templates from the
:doc:`/introduction/templates` module can be combined with classical
`Dense <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense>`__ layers to learn
the two-dimensional `moons <https://scikit-learn.org/stable/modules/generated/sklearn
.datasets.make_moons.html>`__ dataset.
.. code-block:: python
import pennylane as qml
import tensorflow as tf
import sklearn.datasets
n_qubits = 2
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def qnode(inputs, weights):
qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits))
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
weight_shapes = {"weights": (3, n_qubits, 3)}
qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=2)
clayer1 = tf.keras.layers.Dense(2)
clayer2 = tf.keras.layers.Dense(2, activation="softmax")
model = tf.keras.models.Sequential([clayer1, qlayer, clayer2])
data = sklearn.datasets.make_moons()
X = tf.constant(data[0])
Y = tf.one_hot(data[1], depth=2)
opt = tf.keras.optimizers.SGD(learning_rate=0.5)
model.compile(opt, loss='mae')
The model can be trained using:
>>> model.fit(X, Y, epochs=8, batch_size=5)
Train on 100 samples
Epoch 1/8
100/100 [==============================] - 9s 90ms/sample - loss: 0.3524
Epoch 2/8
100/100 [==============================] - 9s 87ms/sample - loss: 0.2441
Epoch 3/8
100/100 [==============================] - 9s 87ms/sample - loss: 0.1908
Epoch 4/8
100/100 [==============================] - 9s 87ms/sample - loss: 0.1832
Epoch 5/8
100/100 [==============================] - 9s 88ms/sample - loss: 0.1596
Epoch 6/8
100/100 [==============================] - 9s 87ms/sample - loss: 0.1637
Epoch 7/8
100/100 [==============================] - 9s 86ms/sample - loss: 0.1613
Epoch 8/8
100/100 [==============================] - 9s 87ms/sample - loss: 0.1474
**Returning a state**
If your QNode returns the state of the quantum circuit using :func:`~.state` or
:func:`~.density_matrix`, you must immediately follow your quantum Keras Layer with a layer
that casts to reals. For example, you could use
`tf.keras.layers.Lambda <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda>`__
with the function ``lambda x: tf.abs(x)``. This casting is required because TensorFlow's
Keras layers require a real input and are differentiated with respect to real parameters.
.. _Layer: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer
"""
def __init__(
self, qnode, weight_shapes: dict, output_dim, weight_specs: Optional[dict] = None, **kwargs
):
if not CORRECT_TF_VERSION:
raise ImportError(
"KerasLayer requires TensorFlow version 2 or above. The latest "
"version of TensorFlow can be installed using:\n"
"pip install tensorflow --upgrade\nAlternatively, visit "
"https://www.tensorflow.org/install for detailed instructions."
)
self.weight_shapes = {
weight: (tuple(size) if isinstance(size, Iterable) else (size,) if size > 1 else ())
for weight, size in weight_shapes.items()
}
self._signature_validation(qnode, weight_shapes)
self.qnode = qnode
dtype = tf.float32 if tf.keras.backend.floatx() == tf.float32 else tf.float64
try:
# TODO: remove once the beta QNode is default
if self.qnode.diff_method != "backprop" or self.qnode.diff_method_change:
self.qnode.to_tf(dtype=dtype)
except AttributeError:
self.qnode.interface = "tf"
# Allows output_dim to be specified as an int or as a tuple, e.g, 5, (5,), (5, 2), [5, 2]
# Note: Single digit values will be considered an int and multiple as a tuple, e.g [5,] or (5,)
# are passed as integer 5 and [5, 2] will be passes as tuple (5, 2)
if isinstance(output_dim, Iterable) and len(output_dim) > 1:
self.output_dim = tuple(output_dim)
else:
self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim
self.weight_specs = weight_specs if weight_specs is not None else {}
self.qnode_weights = {}
super().__init__(dynamic=True, **kwargs)
def _signature_validation(self, qnode, weight_shapes):
sig = inspect.signature(qnode.func).parameters
if self.input_arg not in sig:
raise TypeError(
f"QNode must include an argument with name {self.input_arg} for inputting data"
)
if self.input_arg in set(weight_shapes.keys()):
raise ValueError(
f"{self.input_arg} argument should not have its dimension specified in "
f"weight_shapes"
)
param_kinds = [p.kind for p in sig.values()]
if inspect.Parameter.VAR_POSITIONAL in param_kinds:
raise TypeError("Cannot have a variable number of positional arguments")
if inspect.Parameter.VAR_KEYWORD not in param_kinds:
if set(weight_shapes.keys()) | {self.input_arg} != set(sig.keys()):
raise ValueError("Must specify a shape for every non-input parameter in the QNode")
def build(self, input_shape):
"""Initializes the QNode weights.
Args:
input_shape (tuple or tf.TensorShape): shape of input data
"""
for weight, size in self.weight_shapes.items():
spec = self.weight_specs.get(weight, {})
self.qnode_weights[weight] = self.add_weight(name=weight, shape=size, **spec)
super().build(input_shape)
def call(self, inputs):
"""Evaluates the QNode on input data using the initialized weights.
Args:
inputs (tensor): data to be processed
Returns:
tensor: output data
"""
if len(tf.shape(inputs)) > 1:
# If the input size is not 1-dimensional, unstack the input along its first dimension,
# recursively call the forward pass on each of the yielded tensors, and then stack the
# outputs back into the correct shape
reconstructor = []
for x in tf.unstack(inputs):
reconstructor.append(self.call(x))
return tf.stack(reconstructor)
return self._evaluate_qnode(inputs)
def _evaluate_qnode(self, x):
"""Evaluates a QNode for a single input datapoint.
Args:
x (tensor): the datapoint
Returns:
tensor: output datapoint
"""
kwargs = {**{self.input_arg: x}, **{k: 1.0 * w for k, w in self.qnode_weights.items()}}
return self.qnode(**kwargs)
def compute_output_shape(self, input_shape):
"""Computes the output shape after passing data of shape ``input_shape`` through the
QNode.
Args:
input_shape (tuple or tf.TensorShape): shape of input data
Returns:
tf.TensorShape: shape of output data
"""
return tf.TensorShape([input_shape[0]]).concatenate(self.output_dim)
def __str__(self):
detail = "<Quantum Keras Layer: func={}>"
return detail.format(self.qnode.func.__name__)
__repr__ = __str__
_input_arg = "inputs"
@property
def input_arg(self):
"""Name of the argument to be used as the input to the Keras
`Layer <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer>`__. Set to
``"inputs"``."""
return self._input_arg
| 42.105105 | 123 | 0.635333 |
d4accd9c8c7bbcc9b0ff5eac6f7e281e30c4f7c5 | 1,274 | py | Python | pyleecan/GUI/Dialog/DMachineSetup/SBar/PCondType21/Gen_PCondType21.py | Kelos-Zhu/pyleecan | 368f8379688e31a6c26d2c1cd426f21dfbceff2a | ["Apache-2.0"] | 2 | 2019-06-08T15:04:39.000Z | 2020-09-07T13:32:22.000Z | pyleecan/GUI/Dialog/DMachineSetup/SBar/PCondType21/Gen_PCondType21.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | ["Apache-2.0"] | null | null | null | pyleecan/GUI/Dialog/DMachineSetup/SBar/PCondType21/Gen_PCondType21.py | lyhehehe/pyleecan | 421e9a843bf30d796415c77dc934546adffd1cd7 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""File generated according to PCondType21/gen_list.json
WARNING! All changes made in this file will be lost!
"""
from pyleecan.GUI.Dialog.DMachineSetup.SBar.PCondType21.Ui_PCondType21 import (
Ui_PCondType21,
)
class Gen_PCondType21(Ui_PCondType21):
def setupUi(self, PCondType21):
"""Abstract class to update the widget according to the csv doc
"""
Ui_PCondType21.setupUi(self, PCondType21)
# Setup of w_mat
txt = self.tr(u"""Material of the conductor""")
self.w_mat.setWhatsThis(txt)
self.w_mat.setToolTip(txt)
# Setup of lf_Hbar
self.lf_Hbar.validator().setBottom(0)
txt = self.tr(u"""Bar height""")
self.lf_Hbar.setWhatsThis(txt)
self.lf_Hbar.setToolTip(txt)
# Setup of in_Hbar
txt = self.tr(u"""Bar height""")
self.in_Hbar.setWhatsThis(txt)
self.in_Hbar.setToolTip(txt)
# Setup of in_Wbar
txt = self.tr(u"""Bar width""")
self.in_Wbar.setWhatsThis(txt)
self.in_Wbar.setToolTip(txt)
# Setup of lf_Wbar
self.lf_Wbar.validator().setBottom(0)
txt = self.tr(u"""Bar width""")
self.lf_Wbar.setWhatsThis(txt)
self.lf_Wbar.setToolTip(txt)
| 31.073171 | 79 | 0.632653 |
3acd92abc15a3379fcf1fa7212828efcc89a220f | 171 | py | Python | Helium LoraWAN/config.py | altaga/Healthium | ac852c27c2956b93b08932fb5df8512a9dc7bd02 | ["MIT"] | null | null | null | Helium LoraWAN/config.py | altaga/Healthium | ac852c27c2956b93b08932fb5df8512a9dc7bd02 | ["MIT"] | 2 | 2022-02-14T14:24:13.000Z | 2022-02-27T17:53:14.000Z | Helium LoraWAN/config.py | altaga/Healthium | ac852c27c2956b93b08932fb5df8512a9dc7bd02 | ["MIT"] | null | null | null |
import ubinascii
app_eui = ubinascii.unhexlify('xxxxxxxxxxxxxx')
app_key = ubinascii.unhexlify('xxxxxxxxxxxxxx')
dev_eui = ubinascii.unhexlify('xxxxxxxxxxxxxxxxxxxxxxx')
| 28.5 | 56 | 0.824561 |
efcf42eb820a24ae9db80a690699f6b25349298a | 4,478 | py | Python | build/x86/python/m5/internal/param_I2CBus.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | ["BSD-3-Clause"] | null | null | null | build/x86/python/m5/internal/param_I2CBus.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | ["BSD-3-Clause"] | null | null | null | build/x86/python/m5/internal/param_I2CBus.py | billionshang/gem5 | 18cc4294f32315595f865d07d1f33434e92b06b2 | ["BSD-3-Clause"] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_param_I2CBus', [dirname(__file__)])
except ImportError:
import _param_I2CBus
return _param_I2CBus
if fp is not None:
try:
_mod = imp.load_module('_param_I2CBus', fp, pathname, description)
finally:
fp.close()
return _mod
_param_I2CBus = swig_import_helper()
del swig_import_helper
else:
import _param_I2CBus
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
import m5.internal.I2CDevice_vector
import m5.internal.param_I2CDevice
import m5.internal.param_SimObject
import m5.internal.drain
import m5.internal.serialize
import m5.internal.param_BasicPioDevice
import m5.internal.param_PioDevice
import m5.internal.param_System
import m5.internal.enum_MemoryMode
import m5.internal.AddrRange_vector
import m5.internal.AbstractMemory_vector
import m5.internal.param_AbstractMemory
import m5.internal.param_MemObject
import m5.internal.param_ClockedObject
import m5.internal.param_ClockDomain
class I2CBus(m5.internal.param_BasicPioDevice.BasicPioDevice):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
I2CBus_swigregister = _param_I2CBus.I2CBus_swigregister
I2CBus_swigregister(I2CBus)
class I2CBusParams(m5.internal.param_BasicPioDevice.BasicPioDeviceParams):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def create(self):
return _param_I2CBus.I2CBusParams_create(self)
devices = _swig_property(_param_I2CBus.I2CBusParams_devices_get, _param_I2CBus.I2CBusParams_devices_set)
def __init__(self):
this = _param_I2CBus.new_I2CBusParams()
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _param_I2CBus.delete_I2CBusParams
__del__ = lambda self: None
I2CBusParams_swigregister = _param_I2CBus.I2CBusParams_swigregister
I2CBusParams_swigregister(I2CBusParams)
| 30.462585 | 108 | 0.69272 |
c5272e4b83743914a602d9185129fdc74447cd87 | 7,900 | py | Python | tests/dependencies_test.py | pombredanne/imagemounter | 5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f | ["MIT"] | 89 | 2015-05-08T08:29:18.000Z | 2022-03-05T00:03:44.000Z | tests/dependencies_test.py | personx000/imagemounter | 5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f | ["MIT"] | 32 | 2015-04-08T19:59:51.000Z | 2022-03-24T15:06:02.000Z | tests/dependencies_test.py | personx000/imagemounter | 5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f | ["MIT"] | 47 | 2015-03-04T19:04:11.000Z | 2022-02-27T18:22:24.000Z |
import os
import subprocess
import sys
import unittest
import unittest.mock as mock
from imagemounter.dependencies import (CommandDependency, Dependency,
DependencySection, MagicDependency, PythonModuleDependency, require)
from imagemounter.exceptions import CommandNotFoundError
class DependencyTest(unittest.TestCase):
@unittest.skip("depends on previous output of imount --check")
def test_imount_check_output(self):
# This test can be used to verify that the output of ``imount --check``
# hasn't changed. To use this, first run:
# imount --check > ~/imount-check-results.txt
# Then make any code changes and run this test (remove @unittest.skip).
self.maxDiff = None
expected = open(os.path.expanduser("~/imount-check-results.txt")).read()
actual = subprocess.check_output(['imount', '--check']).decode("utf-8")
self.assertEqual(expected, actual)
class CommandDependencyTest(unittest.TestCase):
def test_existing_dependency(self):
dep = CommandDependency('ls')
self.assertTrue(dep.is_available)
dep.require()
def test_existing_dependency_decorator(self):
dep = CommandDependency('ls')
@require(dep)
def test(x, y):
return x + y
self.assertEqual(test(1, 2), 3)
def test_missing_dependency(self):
dep = CommandDependency('lsxxxx')
self.assertFalse(dep.is_available)
self.assertRaises(CommandNotFoundError, dep.require)
def test_missing_dependency_decorator(self):
dep = CommandDependency('lsxxxx')
@require(dep)
def test(x, y):
return x + y
self.assertRaises(CommandNotFoundError, test)
@require(dep, none_on_failure=True)
def test2(x, y):
return x + y
self.assertEqual(None, test2(1, 2))
@mock.patch('imagemounter.dependencies._util')
def test_mocked_dependency(self, util):
util.command_exists.return_value = True
dep = CommandDependency('lsxxxx')
self.assertTrue(dep.is_available)
self.assertEqual(dep.printable_status, "INSTALLED lsxxxx")
@mock.patch('imagemounter.dependencies._util')
def test_dependency_status_message(self, util):
util.command_exists.return_value = False
dep = CommandDependency('ls')
self.assertFalse(dep.is_available)
self.assertEqual(dep.printable_status.strip(), "MISSING ls")
@mock.patch('imagemounter.dependencies._util')
def test_dependency_status_message_package(self, util):
util.command_exists.return_value = False
dep = CommandDependency('ls', package="core-utils")
self.assertFalse(dep.is_available)
expected = "MISSING ls part of the core-utils package"
self.assertEqual(dep.printable_status.strip(), expected)
@mock.patch('imagemounter.dependencies._util')
def test_dependency_status_message_why(self, util):
util.command_exists.return_value = False
dep = CommandDependency('ls', why="listing files")
self.assertFalse(dep.is_available)
expected = "MISSING ls needed for listing files"
self.assertEqual(dep.printable_status.strip(), expected)
@mock.patch('imagemounter.dependencies._util')
def test_dependency_status_message_package_why(self, util):
util.command_exists.return_value = False
dep = CommandDependency('ls', package="core-utils", why="listing files")
self.assertFalse(dep.is_available)
expected = "MISSING ls needed for listing files, part of the core-utils package"
self.assertEqual(dep.printable_status.strip(), expected)
class PythonModuleDependencyTest(unittest.TestCase):
def test_existing_dependency(self):
dep = PythonModuleDependency('sys')
self.assertTrue(dep.is_available)
def test_missing_dependency(self):
dep = PythonModuleDependency('foobarnonexistent')
self.assertFalse(dep.is_available)
@mock.patch('imagemounter.dependencies._util')
def test_mocked_dependency(self, util):
util.module_exists.return_value = True
dep = PythonModuleDependency('requests2')
self.assertTrue(dep.is_available)
self.assertEqual(dep.printable_status, "INSTALLED requests2")
@mock.patch('imagemounter.dependencies._util')
def test_mocked_status_message(self, util):
util.module_exists.return_value = False
dep = PythonModuleDependency('sys')
self.assertFalse(dep.is_available)
expected = "MISSING sys install using pip"
self.assertEqual(dep.printable_status, expected)
@mock.patch('imagemounter.dependencies._util')
def test_mocked_status_message_why(self, util):
util.module_exists.return_value = False
dep = PythonModuleDependency('sys', why="system functions")
self.assertFalse(dep.is_available)
expected = "MISSING sys needed for system functions, install using pip"
self.assertEqual(dep.printable_status, expected)
class MagicDependencyTest(unittest.TestCase):
def setUp(self):
self.magic = MagicDependency("python-magic")
def tearDown(self):
# After each test, remove the fake "magic" module we've created.
if 'magic' in sys.modules:
del sys.modules['magic']
@mock.patch('imagemounter.dependencies._util')
def test_not_exists(self, util):
util.module_exists.return_value = False
self.assertFalse(self.magic.is_available)
self.assertFalse(self.magic._importable)
expected = "MISSING python-magic install using pip"
self.assertEqual(self.magic.printable_status, expected)
def test_exists_pypi(self):
sys.modules['magic'] = mock.Mock(['from_file'])
self.assertTrue(self.magic.is_available)
self.assertTrue(self.magic.is_python_package)
self.assertFalse(self.magic.is_system_package)
expected = "INSTALLED python-magic (Python package)"
self.assertEqual(self.magic.printable_status, expected)
def test_exists_system(self):
sys.modules['magic'] = mock.Mock(['open'])
self.assertTrue(self.magic.is_available)
self.assertFalse(self.magic.is_python_package)
self.assertTrue(self.magic.is_system_package)
expected = "INSTALLED python-magic (system package)"
self.assertEqual(self.magic.printable_status, expected)
def test_exists_unknown(self):
sys.modules['magic'] = mock.Mock([])
self.assertTrue(self.magic._importable)
self.assertFalse(self.magic.is_available)
self.assertFalse(self.magic.is_python_package)
self.assertFalse(self.magic.is_system_package)
expected = "ERROR python-magic expecting python-magic, found other module named magic"
self.assertEqual(self.magic.printable_status, expected)
class DependencySectionTest(unittest.TestCase):
def test_section_no_deps(self):
section = DependencySection(name="empty section",
description='not needed',
deps=[])
expected = "-- empty section (not needed) --"
self.assertEqual(expected, section.printable_status)
def test_section_printable_status(self):
mock_dependency = mock.Mock()
mock_dependency.printable_status = "I'm just a mock"
section = DependencySection(name="fake section",
description='needed for stuff',
deps=[mock_dependency])
expected = "-- fake section (needed for stuff) --\n I'm just a mock"
self.assertEqual(expected, section.printable_status) | 40.512821 | 107 | 0.67038 |
5a88fc3226691612d0afb6b7527a6d60e9086efc | 2,788 | py | Python | machine-learning-gists/e22bab0df2af9e8dd66e75a4860e0150a1506bdf/snippet.py | qwbjtu2015/dockerizeme | 9039beacf281ea7058d721784ed4eff054453b09 | [
"Apache-2.0"
] | null | null | null | machine-learning-gists/e22bab0df2af9e8dd66e75a4860e0150a1506bdf/snippet.py | qwbjtu2015/dockerizeme | 9039beacf281ea7058d721784ed4eff054453b09 | [
"Apache-2.0"
] | null | null | null | machine-learning-gists/e22bab0df2af9e8dd66e75a4860e0150a1506bdf/snippet.py | qwbjtu2015/dockerizeme | 9039beacf281ea7058d721784ed4eff054453b09 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.stats import multivariate_normal
from sklearn.metrics import f1_score
## 1. Load the data
def load_data(filename):
data = loadmat(filename)
return np.array(data['X']), np.array(data['Xval']), np.ravel(np.array(data['yval']))
X, Xval, yval = load_data("ex8data1.mat")
# Plot the data
def plot_data():
plt.plot(X[:,0], X[:, 1], "bx")
plt.xlabel("Latencey (ms)")
plt.ylabel("Throughput (mb/s)")
plot_data()
plt.show()
## 2. Estimate the statistics of the data
def estimate_gaussian(X):
mu = np.mean(X, axis=0)
sigma2 = np.var(X, axis=0)
return mu, sigma2
mu, sigma2 = estimate_gaussian(X)
# Compute the probability density function of the multivariate normal distribution
# Reshape the variance vector into a covariance matrix (diagonal elements = the variances)
cov_matrix = np.diag(sigma2)
# (You could define the pdf yourself, but scipy.stats.multivariate_normal makes it easy)
p = multivariate_normal.pdf(X, mean=mu, cov=cov_matrix)
p = multivariate_normal.pdf(X, mean=mu, cov=cov_matrix)
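# (For reference, with Sigma = diag(sigma2) this computes
#  p_i = (2*pi)**(-k/2) * det(Sigma)**(-1/2) * exp(-0.5 * (x_i - mu).T @ inv(Sigma) @ (x_i - mu)).)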
# Visualization
def visualize_fit(X, mu, sigma2):
plot_data()
X1, X2 = np.meshgrid(np.arange(0, 35, 0.5), np.arange(0, 35, 0.5))
Z = multivariate_normal.pdf(np.c_[np.ravel(X1), np.ravel(X2)], mean=mu, cov=np.diag(sigma2))
Z = Z.reshape(X1.shape)
if not np.isinf(np.sum(p)):
plt.contour(X1, X2, Z, levels=10**np.arange(-20, 0, 3, dtype="float"))
visualize_fit(X, mu, sigma2)
plt.show()
## 3. Find outliers
# Compute pval for the cross-validation data
pval = multivariate_normal.pdf(Xval, mean=mu, cov=cov_matrix)
# Select the threshold
def select_threshold(yval, pval):
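    # Sweep 1000 candidate epsilons between min(pval) and max(pval) and keep the one
    # with the best F1 score on the labeled cross-validation set.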
best_epsilon, best_f1 = 0, 0
steps = np.linspace(np.min(pval), np.max(pval), 1000)
for epsilon in steps:
pred_positive = pval < epsilon
f1 = f1_score(yval, pred_positive)
if f1 > best_f1:
best_f1, best_epsilon = f1, epsilon
return best_epsilon, best_f1
# epsilon is a hyperparameter, so fit it on the cross-validation data
epsilon, f1 = select_threshold(yval, pval)
print("Best epsilon found using cross-validation:", epsilon)
print("Best F1 on Cross Validation Set:", f1)
print(" (you should see a value epsilon of about 8.99e-05)")
print(" (you should see a Best F1 value of 0.875000)\n")
# Find the outliers
outliers = p < epsilon
# Plot the outliers
visualize_fit(X, mu, sigma2)
plt.plot(X[outliers, 0], X[outliers, 1], "ro", markerfacecolor="none", linewidth=2, markersize=10)
plt.show()
## 4. Multidimensional outliers
# Load the data
X, Xval, yval = load_data("ex8data2.mat")
# Estimate the statistics
mu, sigma2 = estimate_gaussian(X)
# Training data
p = multivariate_normal.pdf(X, mean=mu, cov=np.diag(sigma2))
# Cross-validation data
pval = multivariate_normal.pdf(Xval, mean=mu, cov=np.diag(sigma2))
# Select the threshold
epsilon, f1 = select_threshold(yval, pval)
print("Best epsilon found using cross-validation: ", epsilon)
print("Best F1 on Cross Validation Set: ", f1)
print(" (you should see a value epsilon of about 1.38e-18)")
print(" (you should see a Best F1 value of 0.615385)")
print("# Outliers found:", np.sum(p < epsilon)) | 33.590361 | 98 | 0.695481 |
0b5a46d195c0f2903356f4f29fa11460d0ce4c62 | 883 | py | Python | kerax/losses/__init__.py | umangjpatel/dnet | 21e1798643a6382a1d7960db4c5f3a22fa19a28a | [
"Unlicense"
] | 30 | 2019-11-29T07:34:57.000Z | 2020-12-29T12:25:17.000Z | kerax/losses/__init__.py | umangjpatel/dnet | 21e1798643a6382a1d7960db4c5f3a22fa19a28a | [
"Unlicense"
] | 18 | 2019-11-01T18:05:58.000Z | 2020-12-23T07:26:26.000Z | kerax/losses/__init__.py | umangjpatel/DNet | 21e1798643a6382a1d7960db4c5f3a22fa19a28a | [
"Unlicense"
] | 4 | 2020-01-25T01:16:30.000Z | 2020-06-19T16:36:36.000Z | from ..utils import Tensor, jnp
def BCELoss(predictions: Tensor, targets: Tensor) -> Tensor:
"""
BCE or Binary Cross Entropy loss function.
Useful for binary classification tasks.
:param predictions: Outputs of the network.
:param targets: Expected outputs of the network.
:return: binary cross-entropy loss value
"""
return -jnp.mean(a=(targets * jnp.log(predictions) + (1 - targets) * jnp.log(1 - predictions)))
def CCELoss(predictions: Tensor, targets: Tensor) -> Tensor:
"""
CCE or Categorical Cross Entropy loss function.
    Useful for multi-class classification tasks.
    :param predictions: Outputs of the network.
    :param targets: Expected outputs of the network.
    :return: categorical cross-entropy loss value.
"""
return -jnp.mean(jnp.sum(predictions * targets, axis=1))
__all__ = [
"BCELoss",
"CCELoss"
]
| 29.433333 | 99 | 0.687429 |
9a03cdcc8c4274b1fc5f2a1ecbd2711cb272de8c | 14,240 | py | Python | webStorm-APICloud/python_tools/Lib/test/test_smtplib.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/test/test_smtplib.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | webStorm-APICloud/python_tools/Lib/test/test_smtplib.py | zzr925028429/androidyianyan | 8967fdba92473e8e65ee222515dfc54cdae5bb0b | [
"MIT"
] | null | null | null | import asyncore
import email.utils
import socket
import threading
import smtpd
import smtplib
import StringIO
import sys
import time
import select
from unittest import TestCase
from test import test_support
HOST = test_support.HOST
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "220 Hola mundo\n", self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
def testBasic1(self):
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testBasic2(self):
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(smtp.sock.gettimeout() is None)
smtp.close()
def testTimeoutValue(self):
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(TestCase):
def setUp(self):
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.port = test_support.find_unused_port()
self.serv = smtpd.DebuggingServer((HOST, self.port), ('nowhere', -1))
serv_args = (self.serv, self.serv_evt, self.client_evt)
threading.Thread(target=debugging_server, args=serv_args).start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
# restore sys.stdout
sys.stdout = self.old_stdout
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testNotImplemented(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "EHLO" not implemented')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testVRFY(self):
# VRFY isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "VRFY" not implemented')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, 'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), 'Error: command "HELP" not implemented')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
class NonConnectingTests(TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
class BadHELOServerTests(TestCase):
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "199 no hello for you!\n", self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@somewhere.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@somewhere.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
def smtp_EHLO(self, arg):
resp = '250-testhost\r\n' \
'250-EXPN\r\n' \
'250-SIZE 20000000\r\n' \
'250-STARTTLS\r\n' \
'250-DELIVERBY\r\n' \
'250 HELP'
self.push(resp)
def smtp_VRFY(self, arg):
# print '\nsmtp_VRFY(%r)\n' % arg
raw_addr = email.utils.parseaddr(arg)[1]
quoted_addr = smtplib.quoteaddr(arg)
if raw_addr in sim_users:
self.push('250 %s %s' % (sim_users[raw_addr], quoted_addr))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
# print '\nsmtp_EXPN(%r)\n' % arg
list_name = email.utils.parseaddr(arg)[1].lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
class SimSMTPServer(smtpd.SMTPServer):
def handle_accept(self):
conn, addr = self.accept()
channel = SimSMTPChannel(self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(TestCase):
def setUp(self):
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
self.port = test_support.find_unused_port()
self.serv = SimSMTPServer((HOST, self.port), ('nowhere', -1))
serv_args = (self.serv, self.serv_evt, self.client_evt)
threading.Thread(target=debugging_server, args=serv_args).start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, '%s %s' % (name, smtplib.quoteaddr(email)))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, 'No such user: %s' % smtplib.quoteaddr(u))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, '\n'.join(users))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, 'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def test_main(verbose=None):
test_support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests)
if __name__ == '__main__':
test_main()
| 34.479419 | 85 | 0.58757 |
6bf9cb8696a89af627ea4b7a263242f48cf490c2 | 86 | py | Python | djmapache/commonapp/forms.py | plungerman/django-djmapache | 3dfef0a207464e65b281974e30f20fa97354ed08 | [
"MIT"
] | null | null | null | djmapache/commonapp/forms.py | plungerman/django-djmapache | 3dfef0a207464e65b281974e30f20fa97354ed08 | [
"MIT"
] | 11 | 2019-12-06T16:03:05.000Z | 2022-03-04T17:10:25.000Z | djmapache/commonapp/forms.py | carthage-college/django-djmapache | 901f7f5eff37553212129509f2b062fff4fec0ed | [
"MIT"
] | null | null | null | from django import forms
class UploadForm(forms.Form):
phile = forms.FileField()
| 17.2 | 29 | 0.744186 |
3c7ddb8aa22a4ce8473afa601b4d960ee814a77e | 21 | py | Python | lapis/version.py | movermeyer/lapis | 623a3fc623a7a8fa8c292912b4172787f772f807 | [
"CC0-1.0"
] | 3 | 2015-02-21T05:32:35.000Z | 2017-04-22T21:18:32.000Z | lapis/version.py | movermeyer/lapis | 623a3fc623a7a8fa8c292912b4172787f772f807 | [
"CC0-1.0"
] | 32 | 2015-02-04T00:52:32.000Z | 2017-03-05T17:17:57.000Z | lapis/version.py | dandesousa/Lapis | 623a3fc623a7a8fa8c292912b4172787f772f807 | [
"CC0-1.0"
] | 1 | 2018-03-05T17:16:16.000Z | 2018-03-05T17:16:16.000Z | version="0.2.2b-dev"
| 10.5 | 20 | 0.666667 |
82f6f90e5c8c71e5f371145d519b05acb42907b0 | 4,596 | py | Python | smarc_keyboard_teleop/scripts/lolo_auv_teleop_joystick.py | Jollerprutt/smarc_utils | cf938dbddffbf745cb8d2cbc92c502e286f63b75 | [
"BSD-3-Clause"
] | 1 | 2022-03-19T10:55:37.000Z | 2022-03-19T10:55:37.000Z | smarc_keyboard_teleop/scripts/lolo_auv_teleop_joystick.py | Jollerprutt/smarc_utils | cf938dbddffbf745cb8d2cbc92c502e286f63b75 | [
"BSD-3-Clause"
] | 8 | 2018-01-26T10:58:47.000Z | 2021-06-06T11:10:51.000Z | smarc_keyboard_teleop/scripts/lolo_auv_teleop_joystick.py | Jollerprutt/smarc_utils | cf938dbddffbf745cb8d2cbc92c502e286f63b75 | [
"BSD-3-Clause"
] | 5 | 2017-10-17T08:21:36.000Z | 2021-03-24T16:35:01.000Z | #!/usr/bin/python
# Copyright 2018 Nils Bore (nbore@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pygame
from pygame.constants import K_LEFT, K_RIGHT, K_UP, K_DOWN, K_w, K_s
import rospy
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Joy
class TeleopServer(object):
def callback(self, image_msg):
try:
cv_image = self.bridge.imgmsg_to_cv2(image_msg, "rgb8")
except CvBridgeError as e:
print(e)
self.surface = pygame.image.frombuffer(cv_image.tostring(), cv_image.shape[:2], "RGB")
def joy_callback(self, data):
fin_angle = 60
thrust_level = 200.
fin0angle = 0. # top
fin1angle = 0. # left
fin2angle = 0. # down
fin3angle = 0. # right
fin4angle = 0. # down
fin5angle = 0. # right
backfinangle = 0. # right
thrust = 0.
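        # axes[0] drives fins 0-3 (0/1 positive, 2/3 negated), axes[1] drives fins 4/5
        # and the back fin, and axes[4] scales both thruster inputs published below.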
fin0angle = fin_angle*data.axes[0]
fin1angle = fin_angle*data.axes[0]
fin2angle = -fin_angle*data.axes[0]
fin3angle = -fin_angle*data.axes[0]
fin4angle = fin_angle*data.axes[1]
fin5angle = -fin_angle*data.axes[1]
backfinangle = -fin_angle*data.axes[1]
header = Header()
self.thruster0.publish(header, thrust_level*data.axes[4])
self.thruster1.publish(header, thrust_level*data.axes[4])
self.fin0.publish(header, fin0angle)
self.fin1.publish(header, fin1angle)
self.fin2.publish(header, fin2angle)
self.fin3.publish(header, fin3angle)
self.fin4.publish(header, fin4angle)
self.fin5.publish(header, fin5angle)
self.backfin.publish(header, backfinangle)
def __init__(self):
rospy.init_node('keyboard_teleop', anonymous=True)
pygame.init()
self.surface = None
self.bridge = CvBridge()
self.auv_name = rospy.get_param(rospy.get_name() + '/auv_instance', 'lolo_auv')
self.thruster0 = rospy.Publisher(self.auv_name + '/thrusters/0/input', FloatStamped, queue_size=10)
self.thruster1 = rospy.Publisher(self.auv_name + '/thrusters/1/input', FloatStamped, queue_size=10)
self.fin0 = rospy.Publisher(self.auv_name + '/fins/1/input', FloatStamped, queue_size=10)
self.fin1 = rospy.Publisher(self.auv_name + '/fins/0/input', FloatStamped, queue_size=10)
self.fin2 = rospy.Publisher(self.auv_name + '/fins/2/input', FloatStamped, queue_size=10)
self.fin3 = rospy.Publisher(self.auv_name + '/fins/3/input', FloatStamped, queue_size=10)
self.fin4 = rospy.Publisher(self.auv_name + '/fins/4/input', FloatStamped, queue_size=10)
self.fin5 = rospy.Publisher(self.auv_name + '/fins/5/input', FloatStamped, queue_size=10)
self.backfin = rospy.Publisher(self.auv_name + '/back_fins/0/input', FloatStamped, queue_size=10)
rospy.Subscriber(self.auv_name + "/" + self.auv_name + "/camera_thruster/camera_image", Image, self.callback)
rospy.Subscriber("/joy", Joy, self.joy_callback)
screen = pygame.display.set_mode((200, 200))
pygame.display.flip()
clock = pygame.time.Clock()
while not rospy.is_shutdown():
if self.surface is not None:
screen.blit(self.surface, (0, 0))
pygame.display.update()
clock.tick(10)
pygame.event.pump()
if __name__ == "__main__":
teleop = TeleopServer()
| 43.358491 | 757 | 0.753699 |
283104436f8f837563fd0904daf005089cb1d89b | 1,248 | py | Python | app/seeds/comments.py | TolulopeVerissimo/InstantaneousGramme | adb1d6f61146bfbcc8c501564b94ce009271276a | [
"MIT",
"Unlicense"
] | 5 | 2021-02-19T19:07:35.000Z | 2021-04-09T11:35:45.000Z | app/seeds/comments.py | TolulopeVerissimo/InstantaneousGramme | adb1d6f61146bfbcc8c501564b94ce009271276a | [
"MIT",
"Unlicense"
] | 55 | 2021-02-23T08:16:06.000Z | 2021-08-10T18:04:06.000Z | app/seeds/comments.py | TolulopeVerissimo/InstantaneousGramme | adb1d6f61146bfbcc8c501564b94ce009271276a | [
"MIT",
"Unlicense"
] | null | null | null | from app.models import db, Comment
def seed_comments():
first = Comment(
user_id=1,
post_id=1,
content='Wow that is a nice photo'
)
second = Comment(
user_id=2,
post_id=1,
content='Very cool'
)
third = Comment(
user_id=3,
post_id=1,
content='Awesome'
)
fourth = Comment(
user_id=3,
post_id=2,
content='You go!'
)
fifth = Comment(
user_id=3,
post_id=2,
content='Cool!'
)
sixth = Comment(
user_id=3,
post_id=1,
content='awesome'
)
seventh = Comment(
user_id=2,
post_id=1,
content='radical'
)
db.session.add(first)
db.session.add(second)
db.session.add(third)
db.session.add(fourth)
db.session.add(fifth)
db.session.add(sixth)
db.session.add(seventh)
db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and resets
# the auto incrementing primary key
def undo_comments():
db.session.execute('TRUNCATE comments RESTART IDENTITY CASCADE;')
db.session.commit()
| 21.517241 | 69 | 0.585737 |
4eac3fe883c4f784f3e9c700a5ec670c8f30d771 | 5,433 | py | Python | ceilometerclient/tests/v1/test_resources.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | ceilometerclient/tests/v1/test_resources.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | ceilometerclient/tests/v1/test_resources.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.openstack.common.apiclient import client
from ceilometerclient.openstack.common.apiclient import fake_client
from ceilometerclient.tests import utils
import ceilometerclient.v1.meters
fixtures = {
'/v1/resources': {
'GET': (
{},
{'resources': [
{
'resource_id': 'a',
'project_id': 'project_bla',
'user_id': 'freddy',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'bla'},
},
{
'resource_id': 'b',
'project_id': 'dig_the_ditch',
'user_id': 'joey',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'foo'},
},
]},
),
},
'/v1/users/joey/resources': {
'GET': (
{},
{'resources': [
{
'resource_id': 'b',
'project_id': 'dig_the_ditch',
'user_id': 'joey',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'foo'},
},
]},
),
},
'/v1/resources?metadata.zxc_id=foo': {
'GET': (
{},
{'resources': [
{
'resource_id': 'b',
'project_id': 'dig_the_ditch',
'user_id': 'joey',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'foo'},
},
]},
),
},
'/v1/projects/project_bla/resources': {
'GET': (
{},
{'resources': [
{
'resource_id': 'a',
'project_id': 'project_bla',
'user_id': 'freddy',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'bla'},
},
]},
),
},
'/v1/resources?start_timestamp=now&end_timestamp=now': {
'GET': (
{},
{'resources': [
{
'resource_id': 'b',
'project_id': 'dig_the_ditch',
'user_id': 'joey',
'timestamp': 'now',
'meter': ['this', 'that'],
'metadata': {'zxc_id': 'foo'},
},
]},
),
},
}
class ResourceManagerTest(utils.BaseTestCase):
def setUp(self):
super(ResourceManagerTest, self).setUp()
self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
self.api = client.BaseClient(self.http_client)
self.mgr = ceilometerclient.v1.meters.ResourceManager(self.api)
def test_list_all(self):
resources = list(self.mgr.list())
expect = [
'GET', '/v1/resources'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0].resource_id, 'a')
self.assertEqual(resources[1].resource_id, 'b')
def test_list_by_user(self):
resources = list(self.mgr.list(user_id='joey'))
expect = [
'GET', '/v1/users/joey/resources'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].resource_id, 'b')
def test_list_by_metaquery(self):
resources = list(self.mgr.list(metaquery='metadata.zxc_id=foo'))
expect = [
'GET', '/v1/resources?metadata.zxc_id=foo'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].resource_id, 'b')
def test_list_by_project(self):
resources = list(self.mgr.list(project_id='project_bla'))
expect = [
'GET', '/v1/projects/project_bla/resources'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].resource_id, 'a')
def test_list_by_timestamp(self):
resources = list(self.mgr.list(start_timestamp='now',
end_timestamp='now'))
expect = [
'GET', '/v1/resources?start_timestamp=now&end_timestamp=now'
]
self.http_client.assert_called(*expect)
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].resource_id, 'b')
| 33.537037 | 78 | 0.490153 |
bff39380f72017c6be57a6fea8980ae6a709f45b | 13,873 | py | Python | python/coco_json_utils.py | ahmohamed1/cocosynth | b5ea836b3106c031180a13a55fb891397f090df0 | [
"MIT"
] | 1 | 2020-02-26T01:30:39.000Z | 2020-02-26T01:30:39.000Z | python/coco_json_utils.py | basedrhys/cocosynth | 3d9365af58552f45b093faf4ceac855a03436cf7 | [
"MIT"
] | null | null | null | python/coco_json_utils.py | basedrhys/cocosynth | 3d9365af58552f45b093faf4ceac855a03436cf7 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
import json
from pathlib import Path
from tqdm import tqdm
from skimage import measure, io
from shapely.geometry import Polygon, MultiPolygon
from PIL import Image
class InfoJsonUtils():
""" Creates an info object to describe a COCO dataset
"""
def create_coco_info(self, description, url, version, year, contributor, date_created):
""" Creates the "info" portion of COCO json
"""
info = dict()
info['description'] = description
info['url'] = url
info['version'] = version
info['year'] = year
info['contributor'] = contributor
info['date_created'] = date_created
return info
class LicenseJsonUtils():
""" Creates a license object to describe a COCO dataset
"""
def create_coco_license(self, url, license_id, name):
""" Creates the "licenses" portion of COCO json
"""
lic = dict()
lic['url'] = url
lic['id'] = license_id
lic['name'] = name
return lic
class CategoryJsonUtils():
""" Creates a category object to describe a COCO dataset
"""
def create_coco_category(self, supercategory, category_id, name):
category = dict()
category['supercategory'] = supercategory
category['id'] = category_id
category['name'] = name
return category
class ImageJsonUtils():
""" Creates an image object to describe a COCO dataset
"""
def create_coco_image(self, image_path, image_id, image_license):
""" Creates the "image" portion of COCO json
"""
# Open the image and get the size
image_file = Image.open(image_path)
width, height = image_file.size
image = dict()
image['license'] = image_license
image['file_name'] = image_path.name
image['width'] = width
image['height'] = height
image['id'] = image_id
return image
class AnnotationJsonUtils():
""" Creates an annotation object to describe a COCO dataset
"""
def __init__(self):
self.annotation_id_index = 0
def create_coco_annotations(self, image_mask_path, image_id, category_ids):
""" Takes a pixel-based RGB image mask and creates COCO annotations.
Args:
image_mask_path: a pathlib.Path to the image mask
image_id: the integer image id
category_ids: a dictionary of integer category ids keyed by RGB color (a tuple converted to a string)
                e.g. {'(255, 0, 0)': 2, '(0, 255, 0)': 7}
Returns:
annotations: a list of COCO annotation dictionaries that can
be converted to json. e.g.:
{
"segmentation": [[101.79,307.32,69.75,281.11,...,100.05,309.66]],
"area": 51241.3617,
"iscrowd": 0,
"image_id": 284725,
"bbox": [68.01,134.89,433.41,174.77],
"category_id": 6,
"id": 165690
}
"""
# Set class variables
self.image_id = image_id
self.category_ids = category_ids
# Make sure keys in category_ids are strings
for key in self.category_ids.keys():
if type(key) is not str:
raise TypeError('category_ids keys must be strings (e.g. "(0, 0, 255)")')
break
# Open and process image
self.mask_image = Image.open(image_mask_path)
self.mask_image = self.mask_image.convert('RGB')
self.width, self.height = self.mask_image.size
# Split up the multi-colored masks into multiple 0/1 bit masks
self._isolate_masks()
# Create annotations from the masks
self._create_annotations()
return self.annotations
def _isolate_masks(self):
# Breaks mask up into isolated masks based on color
self.isolated_masks = dict()
for x in range(self.width):
for y in range(self.height):
pixel_rgb = self.mask_image.getpixel((x,y))
pixel_rgb_str = str(pixel_rgb)
# If the pixel is any color other than black, add it to a respective isolated image mask
if not pixel_rgb == (0, 0, 0):
if self.isolated_masks.get(pixel_rgb_str) is None:
# Isolated mask doesn't have its own image yet, create one
# with 1-bit pixels, default black. Make room for 1 pixel of
# padding on each edge to allow the contours algorithm to work
# when shapes bleed up to the edge
self.isolated_masks[pixel_rgb_str] = Image.new('1', (self.width + 2, self.height + 2))
# Add the pixel to the mask image, shifting by 1 pixel to account for padding
self.isolated_masks[pixel_rgb_str].putpixel((x + 1, y + 1), 1)
def _create_annotations(self):
# Creates annotations for each isolated mask
# Each image may have multiple annotations, so create an array
self.annotations = []
for key, mask in self.isolated_masks.items():
annotation = dict()
annotation['segmentation'] = []
annotation['iscrowd'] = 0
annotation['image_id'] = self.image_id
if not self.category_ids.get(key):
print(f'category color not found: {key}; check for missing category or antialiasing')
continue
annotation['category_id'] = self.category_ids[key]
annotation['id'] = self._next_annotation_id()
# Find contours in the isolated mask
contours = measure.find_contours(mask, 0.5, positive_orientation='low')
polygons = []
for contour in contours:
# Flip from (row, col) representation to (x, y)
# and subtract the padding pixel
for i in range(len(contour)):
row, col = contour[i]
contour[i] = (col - 1, row - 1)
# Make a polygon and simplify it
poly = Polygon(contour)
poly = poly.simplify(1.0, preserve_topology=False)
if (poly.area > 16): # Ignore tiny polygons
if (poly.geom_type == 'MultiPolygon'):
# if MultiPolygon, take the smallest convex Polygon containing all the points in the object
poly = poly.convex_hull
if (poly.geom_type == 'Polygon'): # Ignore if still not a Polygon (could be a line or point)
polygons.append(poly)
segmentation = np.array(poly.exterior.coords).ravel().tolist()
annotation['segmentation'].append(segmentation)
if len(polygons) == 0:
# This item doesn't have any visible polygons, ignore it
# (This can happen if a randomly placed foreground is covered up
# by other foregrounds)
continue
# Combine the polygons to calculate the bounding box and area
multi_poly = MultiPolygon(polygons)
x, y, max_x, max_y = multi_poly.bounds
self.width = max_x - x
self.height = max_y - y
annotation['bbox'] = (x, y, self.width, self.height)
annotation['area'] = multi_poly.area
# Finally, add this annotation to the list
self.annotations.append(annotation)
def _next_annotation_id(self):
# Gets the next annotation id
# Note: This is not a unique id. It simply starts at 0 and increments each time it is called
a_id = self.annotation_id_index
self.annotation_id_index += 1
return a_id
class CocoJsonCreator():
def validate_and_process_args(self, args):
""" Validates the arguments coming in from the command line and performs
initial processing
Args:
args: ArgumentParser arguments
"""
# Validate the mask definition file exists
mask_definition_file = Path(args.mask_definition)
if not (mask_definition_file.exists and mask_definition_file.is_file()):
raise FileNotFoundError(f'mask definition file was not found: {mask_definition_file}')
# Load the mask definition json
with open(mask_definition_file) as json_file:
self.mask_definitions = json.load(json_file)
self.dataset_dir = mask_definition_file.parent
# Validate the dataset info file exists
dataset_info_file = Path(args.dataset_info)
if not (dataset_info_file.exists() and dataset_info_file.is_file()):
raise FileNotFoundError(f'dataset info file was not found: {dataset_info_file}')
# Load the dataset info json
with open(dataset_info_file) as json_file:
self.dataset_info = json.load(json_file)
assert 'info' in self.dataset_info, 'dataset_info JSON was missing "info"'
assert 'license' in self.dataset_info, 'dataset_info JSON was missing "license"'
def create_info(self):
""" Creates the "info" piece of the COCO json
"""
info_json = self.dataset_info['info']
iju = InfoJsonUtils()
return iju.create_coco_info(
description = info_json['description'],
version = info_json['version'],
url = info_json['url'],
year = info_json['year'],
contributor = info_json['contributor'],
date_created = info_json['date_created']
)
def create_licenses(self):
""" Creates the "license" portion of the COCO json
"""
license_json = self.dataset_info['license']
lju = LicenseJsonUtils()
lic = lju.create_coco_license(
url = license_json['url'],
license_id = license_json['id'],
name = license_json['name']
)
return [lic]
def create_categories(self):
""" Creates the "categories" portion of the COCO json
Returns:
categories: category objects that become part of the final json
category_ids_by_name: a lookup dictionary for category ids based
on the name of the category
"""
cju = CategoryJsonUtils()
categories = []
category_ids_by_name = dict()
category_id = 1 # 0 is reserved for the background
super_categories = self.mask_definitions['super_categories']
for super_category, _categories in super_categories.items():
for category_name in _categories:
categories.append(cju.create_coco_category(super_category, category_id, category_name))
category_ids_by_name[category_name] = category_id
category_id += 1
return categories, category_ids_by_name
def create_images_and_annotations(self, category_ids_by_name):
""" Creates the list of images (in json) and the annotations for each
image for the "image" and "annotations" portions of the COCO json
"""
iju = ImageJsonUtils()
aju = AnnotationJsonUtils()
image_objs = []
annotation_objs = []
image_license = self.dataset_info['license']['id']
image_id = 0
mask_count = len(self.mask_definitions['masks'])
print(f'Processing {mask_count} mask definitions...')
# For each mask definition, create image and annotations
for file_name, mask_def in tqdm(self.mask_definitions['masks'].items()):
# Create a coco image json item
image_path = Path(self.dataset_dir) / file_name
image_obj = iju.create_coco_image(
image_path,
image_id,
image_license)
image_objs.append(image_obj)
mask_path = Path(self.dataset_dir) / mask_def['mask']
# Create a dict of category ids keyed by rgb_color
category_ids_by_rgb = dict()
for rgb_color, category in mask_def['color_categories'].items():
category_ids_by_rgb[rgb_color] = category_ids_by_name[category['category']]
annotation_obj = aju.create_coco_annotations(mask_path, image_id, category_ids_by_rgb)
annotation_objs += annotation_obj # Add the new annotations to the existing list
image_id += 1
return image_objs, annotation_objs
def main(self, args):
self.validate_and_process_args(args)
info = self.create_info()
licenses = self.create_licenses()
categories, category_ids_by_name = self.create_categories()
images, annotations = self.create_images_and_annotations(category_ids_by_name)
master_obj = {
'info': info,
'licenses': licenses,
'images': images,
'annotations': annotations,
'categories': categories
}
# Write the json to a file
output_path = Path(self.dataset_dir) / 'coco_instances.json'
with open(output_path, 'w+') as output_file:
json.dump(master_obj, output_file)
print(f'Annotations successfully written to file:\n{output_path}')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Generate COCO JSON")
parser.add_argument("-md", "--mask_definition", dest="mask_definition",
help="path to a mask definition JSON file, generated by MaskJsonUtils module")
parser.add_argument("-di", "--dataset_info", dest="dataset_info",
help="path to a dataset info JSON file")
args = parser.parse_args()
cjc = CocoJsonCreator()
cjc.main(args)
| 38.859944 | 115 | 0.6047 |
be7f57f9e6397b3f7972d4202582afe3c9d8cdef | 3,406 | py | Python | engines/dotflow2/session_mongodb.py | NunoEdgarGFlowHub/rhizome | 6fcb77c4cc38e662cd805fc5df7845b4c97c5ea0 | [
"MIT"
] | 8 | 2018-10-30T10:11:33.000Z | 2020-12-01T05:36:19.000Z | engines/dotflow2/session_mongodb.py | NunoEdgarGFlowHub/rhizome | 6fcb77c4cc38e662cd805fc5df7845b4c97c5ea0 | [
"MIT"
] | 16 | 2018-10-26T00:04:11.000Z | 2021-04-30T20:59:14.000Z | engines/dotflow2/session_mongodb.py | SeedVault/bbot-py | b94ef5e75411ac4a214f5ac54d04ce00d9108ec0 | [
"MIT"
] | 3 | 2019-03-11T13:42:47.000Z | 2019-12-03T13:19:33.000Z | """MongoDB repository."""
import logging
import json
import datetime
from typing import Any
from pymongo import MongoClient, DeleteMany
from .session import Session
class SessionMongoDB(Session):
"""MongoDB session."""
def __init__(self, config: dict, dotbot: dict=None) -> None:
"""Set up MongoDB."""
super().__init__(config)
if 'uri' not in config:
raise RuntimeError("FATAL ERR: Missing config var uri")
uri = config["uri"]
self.client = MongoClient(uri)
parts = uri.split("/")
last_part = parts.pop()
parts = last_part.split("?")
self.database_name = parts[0]
self.user_data = self.client[self.database_name]["user_data"]
def reset_all(self, user_id: str) -> None:
"""
Delete all data from a user.
:param user_id: User ID
"""
super().reset_all(user_id)
self.user_data.delete_many({'userId': user_id})
def get(self, user_id: str, key: str) -> Any:
"""
Retrieve a value from a user's session.
:param user_id: User ID
:param key: Key to retrieve
"""
data = self.user_data.find_one({'userId': user_id})
if data is None:
return ""
var_value = self.get_dot_notation(data, key)
if var_value is None:
return ""
return var_value
def set(self, user_id: str, key: str, value: str) -> None:
"""
Set a value in a user's session.
:param user_id: User ID
:param key: Key to set
:param value: Value to set
"""
self.user_data.update_one({'userId': user_id}, {"$set": {key: value}}, upsert=True)
def push(self, user_id: str, key: str, value: str):
"""
        Pushes an element onto an array
:param user_id:
:param key:
:param value:
:return:
"""
self.user_data.update_one({'userId': user_id}, {"$push": {key: value}}, upsert=True)
def set_var(self, user_id: str, key: str, value: any) -> None:
"""
Set any user data for later use.
:param user_id: User ID
:param key: Key to set
:param value: Value to set
"""
key = 'user_vars.' + key
# value = json.dumps(value) << @TODO not needed? (it adds double quotes to the value)
return self.set(user_id, key, value)
def get_var(self, user_id: str, key=None) -> Any:
"""
Retrieve any user data for later use.
:param user_id: User ID
:param key: Key to set
"""
final_key = "user_vars"
if key:
final_key += "." + key
ret = self.get(user_id, final_key)
if not key and type(ret) is not dict: # if asked for all user vars return empty dict, not empty string
ret = {}
return ret
def get_dot_notation(self, d: dict, dotted_key: str) -> Any:
"""
        Allows retrieving values from a dict using dot notation
        :param d: Dictionary
        :param dotted_key: Regular key or key with dot notation
"""
if "." in dotted_key:
key, rest = dotted_key.split(".", 1)
if d.get(key, None) is None:
return None
return self.get_dot_notation(d[key], rest)
else:
return d.get(dotted_key, None)
| 28.14876 | 111 | 0.556371 |
4455820af7778ebef5e94592a6298c3ac17ab21b | 5,552 | py | Python | falcon/routing/util.py | bibekjoshi54/falcon | 6357a5959e71b59f261a581d168bfd8ab6952ca8 | [
"Apache-2.0"
] | 1 | 2020-04-18T12:58:51.000Z | 2020-04-18T12:58:51.000Z | falcon/routing/util.py | bibekjoshi54/falcon | 6357a5959e71b59f261a581d168bfd8ab6952ca8 | [
"Apache-2.0"
] | 1 | 2021-07-24T15:39:29.000Z | 2021-07-24T15:39:29.000Z | falcon/routing/util.py | bibekjoshi54/falcon | 6357a5959e71b59f261a581d168bfd8ab6952ca8 | [
"Apache-2.0"
] | 1 | 2020-03-06T13:38:33.000Z | 2020-03-06T13:38:33.000Z | # Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routing utilities."""
import re
from falcon import constants, responders
class SuffixedMethodNotFoundError(Exception):
def __init__(self, message):
super(SuffixedMethodNotFoundError, self).__init__(message)
self.message = message
# NOTE(kgriffs): Published method; take care to avoid breaking changes.
def compile_uri_template(template):
"""Compile the given URI template string into a pattern matcher.
This function can be used to construct custom routing engines that
iterate through a list of possible routes, attempting to match
an incoming request against each route's compiled regular expression.
Each field is converted to a named group, so that when a match
is found, the fields can be easily extracted using
:py:meth:`re.MatchObject.groupdict`.
This function does not support the more flexible templating
syntax used in the default router. Only simple paths with bracketed
field expressions are recognized. For example::
/
/books
/books/{isbn}
/books/{isbn}/characters
/books/{isbn}/characters/{name}
Also, note that if the template contains a trailing slash character,
it will be stripped in order to normalize the routing logic.
Args:
template(str): The template to compile. Note that field names are
restricted to ASCII a-z, A-Z, and the underscore character.
Returns:
tuple: (template_field_names, template_regex)
"""
if not isinstance(template, str):
raise TypeError('uri_template is not a string')
if not template.startswith('/'):
raise ValueError("uri_template must start with '/'")
if '//' in template:
raise ValueError("uri_template may not contain '//'")
if template != '/' and template.endswith('/'):
template = template[:-1]
# template names should be able to start with A-Za-z
# but also contain 0-9_ in the remaining portion
expression_pattern = r'{([a-zA-Z]\w*)}'
# Get a list of field names
fields = set(re.findall(expression_pattern, template))
# Convert Level 1 var patterns to equivalent named regex groups
escaped = re.sub(r'[\.\(\)\[\]\?\*\+\^\|]', r'\\\g<0>', template)
pattern = re.sub(expression_pattern, r'(?P<\1>[^/]+)', escaped)
pattern = r'\A' + pattern + r'\Z'
return fields, re.compile(pattern, re.IGNORECASE)
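# Illustrative usage (example values, not taken from the Falcon test suite):
#   fields, pattern = compile_uri_template('/books/{isbn}/characters/{name}')
#   fields == {'isbn', 'name'}
#   pattern.match('/books/0765350386/characters/severian').groupdict()
#       -> {'isbn': '0765350386', 'name': 'severian'}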
def map_http_methods(resource, suffix=None):
"""Maps HTTP methods (e.g., GET, POST) to methods of a resource object.
Args:
resource: An object with *responder* methods, following the naming
convention *on_\\**, that correspond to each method the resource
supports. For example, if a resource supports GET and POST, it
should define ``on_get(self, req, resp)`` and
``on_post(self, req, resp)``.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc.
Returns:
dict: A mapping of HTTP methods to explicitly defined resource responders.
"""
method_map = {}
for method in constants.COMBINED_METHODS:
try:
responder_name = 'on_' + method.lower()
if suffix:
responder_name += '_' + suffix
responder = getattr(resource, responder_name)
except AttributeError:
# resource does not implement this method
pass
else:
# Usually expect a method, but any callable will do
if callable(responder):
method_map[method] = responder
# If suffix is specified and doesn't map to any methods, raise an error
if suffix and not method_map:
raise SuffixedMethodNotFoundError('No responders found for the specified suffix')
return method_map
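# Illustrative example (hypothetical resource): if a resource defines only on_get() and
# on_post_collection(), then map_http_methods(resource) -> {'GET': resource.on_get}, and
# map_http_methods(resource, suffix='collection') -> {'POST': resource.on_post_collection}.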
def set_default_responders(method_map, asgi=False):
"""Maps HTTP methods not explicitly defined on a resource to default responders.
Args:
method_map: A dict with HTTP methods mapped to responders explicitly
defined in a resource.
asgi (bool): ``True`` if using an ASGI app, ``False`` otherwise
(default ``False``).
"""
# Attach a resource for unsupported HTTP methods
allowed_methods = sorted(list(method_map.keys()))
if 'OPTIONS' not in method_map:
# OPTIONS itself is intentionally excluded from the Allow header
opt_responder = responders.create_default_options(allowed_methods, asgi=asgi)
method_map['OPTIONS'] = opt_responder
allowed_methods.append('OPTIONS')
na_responder = responders.create_method_not_allowed(allowed_methods, asgi=asgi)
for method in constants.COMBINED_METHODS:
if method not in allowed_methods:
method_map[method] = na_responder
| 35.363057 | 89 | 0.673271 |
ded8f612a0615b416c1a7cee8d286c10c70861bb | 1,122 | py | Python | recipes/transfer/monddyyyy_to_iso.py | svetasmirnova/mysqlcookbook | 8cb370b9b91ef35f4654b774bac019e2b636ac67 | [
"CC0-1.0"
] | 1 | 2022-03-01T16:45:38.000Z | 2022-03-01T16:45:38.000Z | recipes/transfer/monddyyyy_to_iso.py | svetasmirnova/mysqlcookbook | 8cb370b9b91ef35f4654b774bac019e2b636ac67 | [
"CC0-1.0"
] | null | null | null | recipes/transfer/monddyyyy_to_iso.py | svetasmirnova/mysqlcookbook | 8cb370b9b91ef35f4654b774bac019e2b636ac67 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/python3
# monddyyyy_to_iso.py: Convert dates from mon[.] dd, yyyy to ISO format.
# Assumes tab-delimited, linefeed-terminated input
import re
import sys
import fileinput
import warnings
map = {"jan": 1, "feb": 2, "mar": 3, "apr": 4, "may": 5, "jun": 6,
"jul": 7, "aug": 8, "sep": 9, "oct": 10, "nov": 11, "dec": 12
} # map 3-char month abbreviations to numeric month
for line in fileinput.input(sys.argv[1:]):
values = line.rstrip().split("\t", 10000) # split, preserving all fields
for i in range(0, len(values)):
# reformat the value if it matches the pattern, otherwise assume
# that it's not a date in the required format and leave it alone
m = re.match('^([^.]+)\.? (\d+), (\d+)$', values[i])
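        # matches values such as "Jan. 15, 2014" or "jan 15, 2014"
        # (group 1 = month name, group 2 = day, group 3 = year)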
if m:
# use lowercase month name
(month, day, year) = (m.group(1).lower(), int(m.group(2)), int(m.group(3)))
#@ _CHECK_VALIDITY_
if month in map:
#@ _CHECK_VALIDITY_
values[i] = "%04d-%02d-%02d" % (year, map[month], day)
else:
# warn, but don't reformat
warnings.warn("%s bad date?" % (values[i]))
print("\t".join(values))
| 35.0625 | 81 | 0.605169 |
0df8d82a26428ea65b0e53d134d86121c943e27b | 1,110 | py | Python | Bioinformatics IV/Week V/SpectralDictionarySize.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | 3 | 2021-04-03T23:46:42.000Z | 2021-08-08T01:19:32.000Z | Bioinformatics IV/Week V/SpectralDictionarySize.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | null | null | null | Bioinformatics IV/Week V/SpectralDictionarySize.py | egeulgen/Bioinformatics_Specialization | 38581b471a54c41d780d9eeb26a7033eb57f3a01 | [
"MIT"
] | null | null | null | import sys
mass_file = open('integer_mass_table.txt')
masses = []
for line in mass_file:
aa, mass = line.rstrip().split(' ')
masses.append(int(mass))
# masses = [4, 5]
def SpectralDictionarySize(spectral_vector, threshold, max_score):
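    # Dynamic programming over (prefix mass i, score t):
    # Size[i][t] = number of peptides of total mass i whose score against the
    # spectral vector equals t; the result sums Size[m][t] for t >= threshold.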
m = len(spectral_vector)
Size = {}
Size[0] = {}
Size[0][0] = 1
for t in range(1, max_score + 1):
Size[0][t] = 0
for i in range(1, m + 1):
Size[i] = {}
for t in range(max_score + 1):
Size[i][t] = 0
for a in masses:
if (i - a) >= 0 and (t - spectral_vector[i - 1]) >= 0 and (t - spectral_vector[i - 1]) <= max_score:
Size[i][t] += Size[i - a][t - spectral_vector[i - 1]]
final_size = 0
for t in range(threshold, max_score + 1):
final_size += Size[m][t]
return final_size
if __name__ == "__main__":
tmp = sys.stdin.read().splitlines()
spectral_vector = [int(x) for x in tmp[0].rstrip().split(' ')]
threshold = int(tmp[1])
max_score = int(tmp[2])
print(SpectralDictionarySize(spectral_vector, threshold, max_score)) | 27.073171 | 116 | 0.568468 |
b452364e4b3ee85f2c0ca5ac584c78ec15061743 | 299 | py | Python | app/capacity.py | hs14/optimize_schedule_ga | a2a6ed0d6f208c86a39f03f0236a7cc51328b857 | [
"MIT"
] | null | null | null | app/capacity.py | hs14/optimize_schedule_ga | a2a6ed0d6f208c86a39f03f0236a7cc51328b857 | [
"MIT"
] | null | null | null | app/capacity.py | hs14/optimize_schedule_ga | a2a6ed0d6f208c86a39f03f0236a7cc51328b857 | [
"MIT"
] | null | null | null | class Capacity(object):
'''
    Capacity information (allowed group size)
'''
def __init__(self, maximum, minimum=1):
'''
Parameters
----------
        maximum : int
            Maximum number of people (the capacity)
        minimum : int
            Minimum number of people
'''
self.maximum = maximum
self.minimum = minimum | 19.933333 | 43 | 0.434783 |
15709053631f2e1e54ea6499a03116e943f63ff1 | 14,055 | py | Python | nnunet/evaluation/evaluator.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | nnunet/evaluation/evaluator.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | nnunet/evaluation/evaluator.py | anxingle/nnUNet_simple | 9c69bc5a005d5305b27d6d214dc16ac25c4ead76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import inspect
import json
import hashlib
from datetime import datetime
from multiprocessing.pool import Pool
import numpy as np
import pandas as pd
import SimpleITK as sitk
from nnunet.evaluation.metrics import ConfusionMatrix, ALL_METRICS
from batchgenerators.utilities.file_and_folder_operations import save_json, subfiles, join
from collections import OrderedDict
class Evaluator:
"""Object that holds test and reference segmentations with label information
and computes a number of metrics on the two. 'labels' must either be an
iterable of numeric values (or tuples thereof) or a dictionary with string
names and numeric values.
"""
default_metrics = [
"False Positive Rate",
"Dice",
"Jaccard",
"Precision",
"Recall",
"Accuracy",
"False Omission Rate",
"Negative Predictive Value",
"False Negative Rate",
"True Negative Rate",
"False Discovery Rate",
"Total Positives Test",
"Total Positives Reference"
]
default_advanced_metrics = [
#"Hausdorff Distance",
"Hausdorff Distance 95",
#"Avg. Surface Distance",
#"Avg. Symmetric Surface Distance"
]
def __init__(self,
test=None,
reference=None,
labels=None,
metrics=None,
advanced_metrics=None,
nan_for_nonexisting=True):
self.test = None
self.reference = None
self.confusion_matrix = ConfusionMatrix()
self.labels = None
self.nan_for_nonexisting = nan_for_nonexisting
self.result = None
self.metrics = []
if metrics is None:
for m in self.default_metrics:
self.metrics.append(m)
else:
for m in metrics:
self.metrics.append(m)
self.advanced_metrics = []
if advanced_metrics is None:
for m in self.default_advanced_metrics:
self.advanced_metrics.append(m)
else:
for m in advanced_metrics:
self.advanced_metrics.append(m)
self.set_reference(reference)
self.set_test(test)
if labels is not None:
self.set_labels(labels)
else:
if test is not None and reference is not None:
self.construct_labels()
def set_test(self, test):
"""Set the test segmentation."""
self.test = test
def set_reference(self, reference):
"""Set the reference segmentation."""
self.reference = reference
def set_labels(self, labels):
"""Set the labels.
        :param labels: may be a dictionary (int->str), a set (of ints), a tuple (of ints) or a list (of ints). Labels
        will only have names if you pass a dictionary"""
if isinstance(labels, dict):
self.labels = collections.OrderedDict(labels)
elif isinstance(labels, set):
self.labels = list(labels)
elif isinstance(labels, np.ndarray):
self.labels = [i for i in labels]
elif isinstance(labels, (list, tuple)):
self.labels = labels
else:
raise TypeError("Can only handle dict, list, tuple, set & numpy array, but input is of type {}".format(type(labels)))
def construct_labels(self):
"""Construct label set from unique entries in segmentations."""
if self.test is None and self.reference is None:
raise ValueError("No test or reference segmentations.")
elif self.test is None:
labels = np.unique(self.reference)
else:
labels = np.union1d(np.unique(self.test),
np.unique(self.reference))
self.labels = list(map(lambda x: int(x), labels))
def set_metrics(self, metrics):
"""Set evaluation metrics"""
if isinstance(metrics, set):
self.metrics = list(metrics)
elif isinstance(metrics, (list, tuple, np.ndarray)):
self.metrics = metrics
else:
raise TypeError("Can only handle list, tuple, set & numpy array, but input is of type {}".format(type(metrics)))
def add_metric(self, metric):
if metric not in self.metrics:
self.metrics.append(metric)
def evaluate(self, test=None, reference=None, advanced=False, **metric_kwargs):
"""Compute metrics for segmentations."""
if test is not None:
self.set_test(test)
if reference is not None:
self.set_reference(reference)
if self.test is None or self.reference is None:
raise ValueError("Need both test and reference segmentations.")
if self.labels is None:
self.construct_labels()
self.metrics.sort()
# get functions for evaluation
        # somewhat convoluted, but allows users to define additional metrics
# on the fly, e.g. inside an IPython console
_funcs = {m: ALL_METRICS[m] for m in self.metrics + self.advanced_metrics}
frames = inspect.getouterframes(inspect.currentframe())
for metric in self.metrics:
for f in frames:
if metric in f[0].f_locals:
_funcs[metric] = f[0].f_locals[metric]
break
else:
if metric in _funcs:
continue
else:
raise NotImplementedError(
"Metric {} not implemented.".format(metric))
# get results
self.result = OrderedDict()
eval_metrics = self.metrics
if advanced:
eval_metrics += self.advanced_metrics
if isinstance(self.labels, dict):
for label, name in self.labels.items():
k = str(name)
self.result[k] = OrderedDict()
if not hasattr(label, "__iter__"):
self.confusion_matrix.set_test(self.test == label)
self.confusion_matrix.set_reference(self.reference == label)
else:
current_test = 0
current_reference = 0
for l in label:
current_test += (self.test == l)
current_reference += (self.reference == l)
self.confusion_matrix.set_test(current_test)
self.confusion_matrix.set_reference(current_reference)
for metric in eval_metrics:
self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,
nan_for_nonexisting=self.nan_for_nonexisting,
**metric_kwargs)
else:
for i, l in enumerate(self.labels):
k = str(l)
self.result[k] = OrderedDict()
self.confusion_matrix.set_test(self.test == l)
self.confusion_matrix.set_reference(self.reference == l)
for metric in eval_metrics:
self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,
nan_for_nonexisting=self.nan_for_nonexisting,
**metric_kwargs)
return self.result
def to_dict(self):
if self.result is None:
self.evaluate()
return self.result
def to_array(self):
"""Return result as numpy array (labels x metrics)."""
if self.result is None:
            self.evaluate()
result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())
a = np.zeros((len(self.labels), len(result_metrics)), dtype=np.float32)
if isinstance(self.labels, dict):
for i, label in enumerate(self.labels.keys()):
for j, metric in enumerate(result_metrics):
a[i][j] = self.result[self.labels[label]][metric]
else:
for i, label in enumerate(self.labels):
for j, metric in enumerate(result_metrics):
a[i][j] = self.result[label][metric]
return a
def to_pandas(self):
"""Return result as pandas DataFrame."""
a = self.to_array()
if isinstance(self.labels, dict):
labels = list(self.labels.values())
else:
labels = self.labels
result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())
return pd.DataFrame(a, index=labels, columns=result_metrics)
class NiftiEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
self.test_nifti = None
self.reference_nifti = None
super(NiftiEvaluator, self).__init__(*args, **kwargs)
def set_test(self, test):
"""Set the test segmentation."""
if test is not None:
self.test_nifti = sitk.ReadImage(test)
super(NiftiEvaluator, self).set_test(sitk.GetArrayFromImage(self.test_nifti))
else:
self.test_nifti = None
super(NiftiEvaluator, self).set_test(test)
def set_reference(self, reference):
"""Set the reference segmentation."""
if reference is not None:
self.reference_nifti = sitk.ReadImage(reference)
super(NiftiEvaluator, self).set_reference(sitk.GetArrayFromImage(self.reference_nifti))
else:
self.reference_nifti = None
super(NiftiEvaluator, self).set_reference(reference)
def evaluate(self, test=None, reference=None, voxel_spacing=None, **metric_kwargs):
if voxel_spacing is None:
voxel_spacing = np.array(self.test_nifti.GetSpacing())[::-1]
metric_kwargs["voxel_spacing"] = voxel_spacing
return super(NiftiEvaluator, self).evaluate(test, reference, **metric_kwargs)
def run_evaluation(args):
test, ref, evaluator, metric_kwargs = args
# evaluate
evaluator.set_test(test)
evaluator.set_reference(ref)
if evaluator.labels is None:
evaluator.construct_labels()
current_scores = evaluator.evaluate(**metric_kwargs)
if type(test) == str:
current_scores["test"] = test
if type(ref) == str:
current_scores["reference"] = ref
return current_scores
def aggregate_scores(test_ref_pairs,
evaluator=NiftiEvaluator,
labels=None,
nanmean=True,
json_output_file=None,
json_name="",
json_description="",
json_author="Fabian",
json_task="",
num_threads=2,
**metric_kwargs):
"""
test = predicted image
:param test_ref_pairs:
:param evaluator:
:param labels: must be a dict of int-> str or a list of int
:param nanmean:
:param json_output_file:
:param json_name:
:param json_description:
:param json_author:
:param json_task:
:param metric_kwargs:
:return:
"""
if type(evaluator) == type:
evaluator = evaluator()
if labels is not None:
evaluator.set_labels(labels)
all_scores = OrderedDict()
all_scores["all"] = []
all_scores["mean"] = OrderedDict()
test = [i[0] for i in test_ref_pairs]
ref = [i[1] for i in test_ref_pairs]
p = Pool(num_threads)
all_res = p.map(run_evaluation, zip(test, ref, [evaluator]*len(ref), [metric_kwargs]*len(ref)))
p.close()
p.join()
for i in range(len(all_res)):
all_scores["all"].append(all_res[i])
# append score list for mean
for label, score_dict in all_res[i].items():
if label in ("test", "reference"):
continue
if label not in all_scores["mean"]:
all_scores["mean"][label] = OrderedDict()
for score, value in score_dict.items():
if score not in all_scores["mean"][label]:
all_scores["mean"][label][score] = []
all_scores["mean"][label][score].append(value)
for label in all_scores["mean"]:
for score in all_scores["mean"][label]:
if nanmean:
all_scores["mean"][label][score] = float(np.nanmean(all_scores["mean"][label][score]))
else:
all_scores["mean"][label][score] = float(np.mean(all_scores["mean"][label][score]))
# save to file if desired
# we create a hopefully unique id by hashing the entire output dictionary
if json_output_file is not None:
json_dict = OrderedDict()
json_dict["name"] = json_name
json_dict["description"] = json_description
timestamp = datetime.today()
json_dict["timestamp"] = str(timestamp)
json_dict["task"] = json_task
json_dict["author"] = json_author
json_dict["results"] = all_scores
json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12]
save_json(json_dict, json_output_file)
return all_scores
def aggregate_scores_for_experiment(score_file,
labels=None,
metrics=Evaluator.default_metrics,
nanmean=True,
json_output_file=None,
json_name="",
json_description="",
json_author="Fabian",
json_task=""):
scores = np.load(score_file)
scores_mean = scores.mean(0)
if labels is None:
labels = list(map(str, range(scores.shape[1])))
results = []
results_mean = OrderedDict()
for i in range(scores.shape[0]):
results.append(OrderedDict())
for l, label in enumerate(labels):
results[-1][label] = OrderedDict()
results_mean[label] = OrderedDict()
for m, metric in enumerate(metrics):
results[-1][label][metric] = float(scores[i][l][m])
results_mean[label][metric] = float(scores_mean[l][m])
json_dict = OrderedDict()
json_dict["name"] = json_name
json_dict["description"] = json_description
timestamp = datetime.today()
json_dict["timestamp"] = str(timestamp)
json_dict["task"] = json_task
json_dict["author"] = json_author
json_dict["results"] = {"all": results, "mean": results_mean}
json_dict["id"] = hashlib.md5(json.dumps(json_dict).encode("utf-8")).hexdigest()[:12]
if json_output_file is not None:
json_output_file = open(json_output_file, "w")
json.dump(json_dict, json_output_file, indent=4, separators=(",", ": "))
json_output_file.close()
return json_dict
def evaluate_folder(folder_with_gts: str, folder_with_predictions: str, labels: tuple, **metric_kwargs):
"""
writes a summary.json to folder_with_predictions
:param folder_with_gts: folder where the ground truth segmentations are saved. Must be nifti files.
:param folder_with_predictions: folder where the predicted segmentations are saved. Must be nifti files.
:param labels: tuple of int with the labels in the dataset. For example (0, 1, 2, 3) for Task001_BrainTumour.
:return:
"""
files_gt = subfiles(folder_with_gts, suffix=".nii.gz", join=False)
files_pred = subfiles(folder_with_predictions, suffix=".nii.gz", join=False)
assert all([i in files_pred for i in files_gt]), "files missing in folder_with_predictions"
assert all([i in files_gt for i in files_pred]), "files missing in folder_with_gts"
test_ref_pairs = [(join(folder_with_predictions, i), join(folder_with_gts, i)) for i in files_pred]
res = aggregate_scores(test_ref_pairs, json_output_file=join(folder_with_predictions, "summary.json"),
num_threads=8, labels=labels, **metric_kwargs)
return res
| 30.422078 | 120 | 0.712416 |
121b005b045c295fc75938ee936a28a70f984238 | 1,240 | py | Python | tests/external/test_solo.py | jacobkimmel/scVI | 7f06d616ddb6af8b3b0f5ec630f338e4a873e122 | [
"BSD-3-Clause"
] | 1 | 2021-09-08T11:50:25.000Z | 2021-09-08T11:50:25.000Z | tests/external/test_solo.py | RichardVDH/scvi-tools | 2d05e6ea2d9c7387f95748dbea561a972ad33194 | [
"BSD-3-Clause"
] | null | null | null | tests/external/test_solo.py | RichardVDH/scvi-tools | 2d05e6ea2d9c7387f95748dbea561a972ad33194 | [
"BSD-3-Clause"
] | null | null | null | from scvi.data import setup_anndata, synthetic_iid
from scvi.external import SOLO
from scvi.model import SCVI
def test_solo(save_path):
n_latent = 5
adata = synthetic_iid(run_setup_anndata=False)
setup_anndata(adata)
model = SCVI(adata, n_latent=n_latent)
model.train(1, check_val_every_n_epoch=1, train_size=0.5)
solo = SOLO.from_scvi_model(model)
solo.train(1, check_val_every_n_epoch=1, train_size=0.9)
assert "validation_loss" in solo.history.keys()
solo.predict()
bdata = synthetic_iid(run_setup_anndata=False)
solo = SOLO.from_scvi_model(model, bdata)
solo.train(1, check_val_every_n_epoch=1, train_size=0.9)
assert "validation_loss" in solo.history.keys()
solo.predict()
def test_solo_multiple_batch(save_path):
n_latent = 5
adata = synthetic_iid()
adata.layers["my_layer"] = adata.X.copy()
setup_anndata(adata, layer="my_layer", batch_key="batch")
model = SCVI(adata, n_latent=n_latent)
model.train(1, check_val_every_n_epoch=1, train_size=0.5)
solo = SOLO.from_scvi_model(model, restrict_to_batch="batch_0")
solo.train(1, check_val_every_n_epoch=1, train_size=0.9)
assert "validation_loss" in solo.history.keys()
solo.predict()
| 33.513514 | 67 | 0.733871 |
c4eeed83686433c7061f2586193c524dd68ee29b | 7,562 | py | Python | py/desispec/fiberflat_vs_humidity.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/fiberflat_vs_humidity.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/fiberflat_vs_humidity.py | echaussidon/desispec | 8a8bd59653861509dd630ffc8e1cd6c67f6cdd51 | [
"BSD-3-Clause"
] | null | null | null | """
desispec.fiberflat_vs_humidity
==================
Utility functions to compute a fiber flat corrected for variations with humidity in the shack
"""
from __future__ import absolute_import, division
import numpy as np
import copy
from desiutil.log import get_logger
from desispec.fiberflat import apply_fiberflat
from desispec.fiberbitmasking import get_skysub_fiberbitmask_val
def _interpolated_fiberflat_vs_humidity(fiberflat_vs_humidity , humidity_array, humidity_point) :
"""
Interpolates between fiberflat templates indexed by humidity.
Args:
fiberflat_vs_humidity: 3D numpy array (n_humidity,n_fibers,n_wavelength)
humidity_array: 1D numpy array (n_humidity)
humidity_point: float, humidity value (same unit as humidity_array)
Returns 2D numpy array (n_fibers,n_wavelength)
"""
if humidity_point<=humidity_array[0] :
i1=0
else :
i1=np.where(humidity_array<humidity_point)[0][-1]
i2=i1+1
if i2>=humidity_array.size : # return largest value
return fiberflat_vs_humidity[-1]
w1=(humidity_array[i2]-humidity_point)/(humidity_array[i2]-humidity_array[i1])
w2=(humidity_point-humidity_array[i1])/(humidity_array[i2]-humidity_array[i1])
return w1*fiberflat_vs_humidity[i1]+w2*fiberflat_vs_humidity[i2]
def _fit_flat(wavelength,flux,ivar,fibers,mean_fiberflat_vs_humidity,humidity_array) :
"""
Finds best fit interpolation of fiberflat templates that matches an input flux frame
Works only if wavelength array intersects the range [4000,4600]A, i.e. the blue cameras
Args:
wavelength: 1D numpy array (n_wavelength) in Angstrom
flux: 2D numpy array (n_fibers,n_wavelength) unit does not matter
ivar: 2D numpy array (n_fibers,n_wavelength) inverse variance of flux
fibers: list or 1D number arrays of fibers to use among range(n_fibers)
mean_fiberflat_vs_humidity: 3D numpy array (n_humidity,n_fibers,n_wavelength)
humidity_array: 1D numpy array (n_humidity)
Returns best_fit_flat best_fit_humidity (2D numpy array (n_fibers,n_wavelength) and float)
"""
log = get_logger()
selection = (wavelength > 4000.) & (wavelength < 4600)
if np.sum(selection)==0 :
message="incorrect wavelength range"
log.error(message)
raise RuntimeError(message)
waveindex = np.where(selection)[0]
tmp_flux = flux[fibers][:,waveindex].copy()
tmp_ivar = ivar[fibers][:,waveindex].copy()
for loop in range(2) :
# remove mean variation from fiber to fiber
med = np.median(tmp_flux,axis=-1)
tmp_flux /= med[:,None]
tmp_ivar *= med[:,None]**2
# remove average over fibers
med = np.median(tmp_flux,axis=0)
tmp_flux /= med[None,:]
tmp_ivar *= med[None,:]**2
tmp_flat = mean_fiberflat_vs_humidity[:,fibers][:,:,waveindex].copy()
for loop in range(2) :
# remove mean variation from fiber to fiber
med = np.median(tmp_flat,axis=-1)
tmp_flat /= med[:,:,None]
# remove average over fibers
for index in range(tmp_flat.shape[0]) :
med = np.median(tmp_flat[index],axis=0)
tmp_flat[index] /= med[None,:]
# chi2 between all fiberflat templates (one per humidity bin)
# with current flux value
# summed over all fibers and all wavelength
# after having both the fiberflat and the flux normalized to 1
# per fiber when averaged over all wavelength
# and per wavelength when averaged over all fibers
chi2 = np.sum(np.sum(tmp_ivar*(tmp_flux-tmp_flat)**2,axis=-1),axis=-1)
# chi2 is a 1D array with size = number of humidity bins
# index of minimum, but then we refine
minindex=np.argmin(chi2)
bb=minindex-1
ee=minindex+2
if bb<0 :
bb+=1
ee+=1
if ee>=chi2.size :
bb-=1
ee-=1
# get the chi2 minimum
c=np.polyfit(humidity_array[bb:ee],chi2[bb:ee],2)
best_humidity = -c[1]/2./c[0]
best_humidity = max(humidity_array[0],best_humidity)
best_humidity = min(humidity_array[-1],best_humidity)
log.info("best fit humidity = {:.2f}".format(best_humidity))
# simple linear interpolation indexed by the humidity
flat = _interpolated_fiberflat_vs_humidity(mean_fiberflat_vs_humidity , humidity_array, best_humidity)
return flat , best_humidity
def compute_humidity_corrected_fiberflat(calib_fiberflat, mean_fiberflat_vs_humidity , humidity_array, current_humidity, frame) :
"""
Apply a humidity-dependent correction to an input fiber flat
Returns frame_fiberflat = calib_fiberflat / flat_vs_humidity_model(calib) * flat_vs_humidity_model(frame)
Args:
calib_fiberflat: desispec.FiberFlat object
mean_fiberflat_vs_humidity: 3D numpy array (n_humidity,n_fibers,n_wavelength)
humidity_array: 1D numpy array (n_humidity)
current_humidity: float (same unit as humidity_array)
frame: desispec.Frame object
Returns modified desispec.FiberFlat object
"""
log = get_logger()
best_humidity = current_humidity
log.info("using nightly flat to fit for the best fit nightly flat humidity")
selection = np.sum(calib_fiberflat.ivar!=0,axis=1)>10
good_flat_fibers = np.where(selection)[0]
flat2 , hum2 = _fit_flat(calib_fiberflat.wave,calib_fiberflat.fiberflat,calib_fiberflat.ivar,good_flat_fibers,mean_fiberflat_vs_humidity,humidity_array)
flat1 = None
hum1 = current_humidity
if frame is not None :
log.info("using frame to fit for the best fit current humidity")
ivar = frame.ivar*(frame.mask==0)
badfibermask = get_skysub_fiberbitmask_val()
selection = (frame.fibermap["OBJTYPE"]=="SKY") & (frame.fibermap["FIBERSTATUS"] & badfibermask == 0) & (np.sum(ivar!=0,axis=1)>10)
if np.sum(selection)>0 :
good_sky_fibers = np.where(selection)[0]
heliocor=frame.meta['HELIOCOR']
frame_wave_in_fiberflat_system = frame.wave/heliocor
tmp_flux = frame.flux.copy()
tmp_ivar = ivar.copy()
for fiber in good_sky_fibers:
ok=(ivar[fiber]>0)
tmp_flux[fiber] = np.interp(frame.wave,frame_wave_in_fiberflat_system[ok],frame.flux[fiber][ok])
tmp_ivar[fiber] = np.interp(frame.wave,frame_wave_in_fiberflat_system[ok],ivar[fiber][ok])
flat1 , hum1 = _fit_flat(frame.wave,tmp_flux*flat2/calib_fiberflat.fiberflat,tmp_ivar,good_sky_fibers,mean_fiberflat_vs_humidity,humidity_array)
if flat1 is None :
log.info("use input humidity = {:.2f}".format(current_humidity))
flat1 = _interpolated_fiberflat_vs_humidity(mean_fiberflat_vs_humidity , humidity_array, current_humidity)
# apply humidity correction to current calib fiberflat
fiberflat = copy.deepcopy(calib_fiberflat)
fiberflat.fiberflat = calib_fiberflat.fiberflat/flat2*flat1
fiberflat.header["EXPTHUM"] = (current_humidity,"exposure humidity from telemetry")
fiberflat.header["EXPFHUM"] = (hum1,"exposure humidity from flat fit")
fiberflat.header["CALFHUM"] = (hum2,"dome flat humidity from flat fit")
if np.abs(hum1-current_humidity)>10 :
message="large difference between best fit humidity during science exposure ({:.1f}) and value from telemetry ({:.1f})".format(hum1,current_humidity)
if np.abs(hum1-current_humidity)>20 :
log.error(message)
raise RuntimeError(message)
log.warning(message)
return fiberflat
| 41.779006 | 157 | 0.697964 |
854359e090fbe54c317469d6b0c135918da513d0 | 30,868 | py | Python | src/run_classifier.py | iki-taichi/bert-japanese | a4f170577a63bff8eb9899076dd587599f277150 | [
"Apache-2.0"
] | 1 | 2019-04-07T07:37:43.000Z | 2019-04-07T07:37:43.000Z | src/run_classifier.py | iki-taichi/bert-japanese | a4f170577a63bff8eb9899076dd587599f277150 | [
"Apache-2.0"
] | null | null | null | src/run_classifier.py | iki-taichi/bert-japanese | a4f170577a63bff8eb9899076dd587599f277150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# This file is based on https://github.com/google-research/bert/blob/master/run_classifier.py.
# It is changed to use SentencePiece tokenizer and https://www.rondhuit.com/download/ldcc-20140209.tar.gz.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import configparser
import csv
import json
import os
import sys
import tempfile
import tensorflow as tf
import utils
CURDIR = os.path.dirname(os.path.abspath(__file__))
CONFIGPATH = os.path.join(CURDIR, os.pardir, 'config.ini')
config = configparser.ConfigParser()
config.read(CONFIGPATH)
bert_config_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', suffix='.json')
bert_config_file.write(json.dumps({k:utils.str_to_value(v) for k,v in config['BERT-CONFIG'].items()}))
bert_config_file.seek(0)
import tokenization_sentencepiece
import tokenization_sp_mod
tokenization = getattr(sys.modules[__name__], config['TOKENIZER']['PACKAGE'])
sys.path.append(os.path.join(CURDIR, os.pardir, 'bert'))
import modeling
import optimization
flags = tf.flags
FLAGS = flags.FLAGS
# Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("model_file", None,
"The model file that the SentencePiece model was trained on.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
# Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class LivedoorProcessor(DataProcessor):
"""Processor for the livedoor data set (see https://www.rondhuit.com/download.html)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ['dokujo-tsushin', 'it-life-hack', 'kaden-channel', 'livedoor-homme', 'movie-enter', 'peachy', 'smax', 'sports-watch', 'topic-news']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
idx_text = line.index('text')
idx_label = line.index('label')
else:
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[idx_text])
label = tokenization.convert_to_unicode(line[idx_label])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
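# Note added for clarity: LivedoorProcessor expects tab-separated files
# (train.tsv, dev.tsv, test.tsv) whose header row contains columns named
# "text" and "label"; every following row supplies one example, e.g.
#   text<TAB>label
#   <article body ...><TAB>it-life-hack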
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
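# Worked example (added for clarity): with len(tokens_a) = 10, len(tokens_b) = 4
# and max_length = 8, the loop pops from tokens_a only (it remains the longer
# sequence) until the lengths are 4 and 4, so the longer input absorbs all of
# the truncation.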
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"livedoor": LivedoorProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(bert_config_file.name)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
model_file=FLAGS.model_file, vocab_file=FLAGS.vocab_file,
do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("model_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 35.97669 | 143 | 0.688253 |
9ef652da234338c9f04234097652c242f540401e | 680 | py | Python | main.py | wd0/hq-intel | 53a3aeba91f7a5eac0729ca1502b1d593cf1f8b4 | [
"MIT"
] | null | null | null | main.py | wd0/hq-intel | 53a3aeba91f7a5eac0729ca1502b1d593cf1f8b4 | [
"MIT"
] | null | null | null | main.py | wd0/hq-intel | 53a3aeba91f7a5eac0729ca1502b1d593cf1f8b4 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from intel import *
QUIZ_DELAY = 35 # Seconds to wait for the next HQ question to happen.
SEARCH_DELAY = 0.5 # Seconds until we search SCREENSHOT_PATH for more files.
SCREENSHOT_PATH = '/home/mike/hq-intel/shots' # Pathname where screenshots appear.
def main():
os.chdir(SCREENSHOT_PATH)
seen = []
unseen = os.listdir()
while True:
for f in unseen:
if is_quiz_file(f):
quiz = Quiz(f)
quiz.run()
time.sleep(QUIZ_DELAY)
unseen = [f for f in os.listdir() if f not in seen]
seen += unseen
time.sleep(SEARCH_DELAY)
if __name__ == "__main__":
main()
| 27.2 | 82 | 0.607353 |
cd009e128d5097e9ebc89b35035ff8caf010dba6 | 1,230 | py | Python | frappe_metrc/frappe_metrc/doctype/room/room.py | samjaninf/frappe_metrc | a1968c14199421017e0f45b2497960223e598abf | [
"MIT"
] | null | null | null | frappe_metrc/frappe_metrc/doctype/room/room.py | samjaninf/frappe_metrc | a1968c14199421017e0f45b2497960223e598abf | [
"MIT"
] | null | null | null | frappe_metrc/frappe_metrc/doctype/room/room.py | samjaninf/frappe_metrc | a1968c14199421017e0f45b2497960223e598abf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Neil Lasrado and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import requests
from frappe.model.document import Document
from frappe_metrc.utils import get_metrc
metrc = get_metrc("room")
class Room(Document):
def validate(self):
self.create_or_update_room()
self.check_room()
def after_rename(self, old, new, merge=False):
self.create_or_update_room()
def create_or_update_room(self):
data = [
{
"Name": self.room_name
}
]
if not self.room_id:
# Create Room in Metrc and assign ID
#metrc.post("/rooms/v1/create", data)
doc = frappe.get_doc({
"doctype": "Room",
"Name": self.room_name
})
doc.room_name = sel.room_name
doc.insert()
else:
# use the update API to update the object if room id exists
data[0].update({"Id": self.room_id})
metrc.post("/rooms/v1/update", data)
def check_room(self):
# Try to find if the room id was assigned
rooms = metrc.get("/rooms/v1/active")
for room in rooms:
if room.get("Name") == self.room_name:
self.room_id = room.get("Id")
def on_trash(self):
metrc.delete("/rooms/v1/" + self.room_id)
| 23.653846 | 62 | 0.689431 |
08804f971cc4d60b2275a6105d3a1e39d6f97430 | 1,768 | py | Python | audiomentations/augmentations/high_pass_filter.py | Cangonin/audiomentations | fd1c0fd9bcfb9f62fa961938191e13d050752450 | [
"MIT"
] | null | null | null | audiomentations/augmentations/high_pass_filter.py | Cangonin/audiomentations | fd1c0fd9bcfb9f62fa961938191e13d050752450 | [
"MIT"
] | null | null | null | audiomentations/augmentations/high_pass_filter.py | Cangonin/audiomentations | fd1c0fd9bcfb9f62fa961938191e13d050752450 | [
"MIT"
] | null | null | null | from audiomentations.augmentations.base_butterword_filter import BaseButterworthFilter
class HighPassFilter(BaseButterworthFilter):
"""
Apply high-pass filtering to the input audio of parametrized filter steepness (6/12/18... dB / octave).
Can also be set for zero-phase filtering (will result in a 6db drop at cutoff).
"""
supports_multichannel = True
def __init__(
self,
min_cutoff_freq=20,
max_cutoff_freq=2400,
min_rolloff=12,
max_rolloff=24,
zero_phase=False,
p: float = 0.5,
):
"""
:param min_cutoff_freq: Minimum cutoff frequency in hertz
:param max_cutoff_freq: Maximum cutoff frequency in hertz
:param min_rolloff: Minimum filter roll-off (in db/octave).
Must be a multiple of 6
:param max_rolloff: Maximum filter roll-off (in db/octave)
Must be a multiple of 6
:param zero_phase: Whether filtering should be zero phase.
When this is set to `true` it will not affect the phase of the
input signal but will sound 3db lower at the cutoff frequency
compared to the non-zero phase case (6db vs 3db). Additionally,
it is 2X times slower than in the non-zero phase case. If you
absolutely want no phase distortions (e.g. want to augment a
drum track), set this to `true`.
:param p: The probability of applying this transform
"""
super().__init__(
min_cutoff_freq=min_cutoff_freq,
max_cutoff_freq=max_cutoff_freq,
min_rolloff=min_rolloff,
max_rolloff=max_rolloff,
zero_phase=zero_phase,
p=p,
filter_type="highpass",
)
| 38.434783 | 107 | 0.636312 |
1922b44bbe5d8d354f3fcb44e3a42a66e7d5e979 | 3,936 | py | Python | python/apps/exercise_renderer.py | jessepinnell/buph | 66e3ce670ee6ff360368fa677f3bd833fbf1f05f | [
"MIT"
] | null | null | null | python/apps/exercise_renderer.py | jessepinnell/buph | 66e3ce670ee6ff360368fa677f3bd833fbf1f05f | [
"MIT"
] | 2 | 2017-11-10T15:11:20.000Z | 2017-12-10T14:02:03.000Z | python/apps/exercise_renderer.py | jessepinnell/xrsrv | 66e3ce670ee6ff360368fa677f3bd833fbf1f05f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2017 Jesse Pinnell
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Simple application for generating HTML routine """
import sys
import argparse
import json
from xrsrv import routine_engine
from xrsrv import exercise_rendering
from xrsrv import type_factories
from xrsrv import exercise_database
from xrsrv.routine_generators.generator_exception import GeneratorException
EXERCISE_DATABASE_NAME = "exercise.db"
# pylint: disable=too-few-public-methods
# pylint: disable=no-self-argument
def generate_and_render():
""" Generates the list of exercises and renders the HTML """
app_args_parser = argparse.ArgumentParser()
app_args_parser.add_argument("--generator", type=str, help="generator to use", default="debug")
app_args_parser.add_argument("--fixtures", type=str, help="fixtures JSON file name", required=False)
app_args_parser.add_argument("--rigs", type=str, help="fixtures JSON file name", required=False)
app_args_parser.add_argument("--exercisedb", type=str, help="exercise db file name",\
default=EXERCISE_DATABASE_NAME)
app_args_parser.add_argument("--json", help="JSON genrator arguments filename", required=False)
app_args_parser.add_argument("--args", help="generator arguments", nargs=argparse.REMAINDER)
app_args = app_args_parser.parse_args()
exercise_db = exercise_database.SQLiteConnection(app_args.exercisedb)
engine = routine_engine.RoutineEngine(exercise_db)
user_fixtures = []
if app_args.fixtures is not None:
with open(app_args.fixtures) as json_file:
user_fixtures = list(map(type_factories.UserFixture._make, json.load(json_file)))
user_rigs = []
if app_args.rigs is not None:
with open(app_args.rigs) as json_file:
user_rigs = list(map(type_factories.UserRig._make, json.load(json_file)))
json_args = {}
if app_args.json is not None:
with open(app_args.json) as json_file:
json_args = json.load(json_file)
# XXX this is cheesy. Maybe add a subparser per generator? Need to read from engine.
generator_args = {}
if app_args.args is not None:
split_args = [arg.split("=") for arg in app_args.args]
generator_args = {val[0]: val[1] for val in split_args}
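    # Example invocation (a sketch; the JSON file names and the key=value pairs
    # below are hypothetical, since valid values depend on the generators and
    # data available to the routine engine):
    #
    #   python exercise_renderer.py --exercisedb exercise.db \
    #       --fixtures fixtures.json --rigs rigs.json \
    #       --generator debug --args n_days=3 n_exercises=12
    #
    # Values given via --args are split on "=" and passed to the chosen
    # generator as keyword arguments; entries from the --json file are merged
    # last, so they take precedence on key collisions.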
engine.set_user_exercise_environment(user_fixtures, user_rigs)
plan = engine.generate_plan(app_args.generator, **{**generator_args, **json_args})
basic_html_renderer = exercise_rendering.BasicHTMLRenderer()
if plan:
sys.stdout.write(basic_html_renderer.render("exercise_renderer output", plan))
if __name__ == "__main__":
try:
generate_and_render()
except GeneratorException as ex:
exit("Generator exception: {0}".format(ex))
except routine_engine.EngineException as ex:
exit("Engine exception: {0}".format(ex))
| 41.87234 | 104 | 0.736026 |
a911a9febe7b833b0faa8e3fc79d332fc594d90b | 87,900 | py | Python | sympy/polys/subresultants_qq_zz.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | [
"BSD-3-Clause"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | sympy/polys/subresultants_qq_zz.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | [
"BSD-3-Clause"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | sympy/polys/subresultants_qq_zz.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | [
"BSD-3-Clause"
] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z | """
This module contains functions for the computation
of Euclidean, (generalized) Sturmian, (modified) subresultant
polynomial remainder sequences (prs's) of two polynomials;
included are also three functions for the computation of the
resultant of two polynomials.
Except for the function res_z(), which computes the resultant
of two polynomials, the pseudo-remainder function prem()
of sympy is _not_ used by any of the functions in the module.
Instead of prem() we use the function
rem_z().
Included is also the function quo_z().
An explanation of why we avoid prem() can be found in the
references stated in the docstring of rem_z().
1. Theoretical background:
==========================
Consider the polynomials f, g in Z[x] of degrees deg(f) = n and
deg(g) = m with n >= m.
Definition 1:
=============
The sign sequence of a polynomial remainder sequence (prs) is the
sequence of signs of the leading coefficients of its polynomials.
Sign sequences can be computed with the function:
sign_seq(poly_seq, x)
Definition 2:
=============
A polynomial remainder sequence (prs) is called complete if the
degree difference between any two consecutive polynomials is 1;
otherwise, it is called incomplete.
It is understood that f, g belong to the sequences mentioned in
the two definitions above.
1A. Euclidean and subresultant prs's:
=====================================
The subresultant prs of f, g is a sequence of polynomials in Z[x]
analogous to the Euclidean prs, the sequence obtained by applying
on f, g Euclid's algorithm for polynomial greatest common divisors
(gcd) in Q[x].
The subresultant prs differs from the Euclidean prs in that the
coefficients of each polynomial in the former sequence are determinants
--- also referred to as subresultants --- of appropriately selected
sub-matrices of sylvester1(f, g, x), Sylvester's matrix of 1840 of
dimensions (n + m) * (n + m).
Recall that the determinant of sylvester1(f, g, x) itself is
called the resultant of f, g and serves as a criterion of whether
the two polynomials have common roots or not.
In sympy the resultant is computed with the function
resultant(f, g, x). This function does _not_ evaluate the
determinant of sylvester(f, g, x, 1); instead, it returns
the last member of the subresultant prs of f, g, multiplied
(if needed) by an appropriate power of -1; see the caveat below.
In this module we use three functions to compute the
resultant of f, g:
a) res(f, g, x) computes the resultant by evaluating
the determinant of sylvester(f, g, x, 1);
b) res_q(f, g, x) computes the resultant recursively, by
performing polynomial divisions in Q[x] with the function rem();
c) res_z(f, g, x) computes the resultant recursively, by
performing polynomial divisions in Z[x] with the function prem().
Caveat: If Df = degree(f, x) and Dg = degree(g, x), then:
resultant(f, g, x) = (-1)**(Df*Dg) * resultant(g, f, x).
For complete prs's the sign sequence of the Euclidean prs of f, g
is identical to the sign sequence of the subresultant prs of f, g
and the coefficients of one sequence are easily computed from the
coefficients of the other.
For incomplete prs's the polynomials in the subresultant prs generally
differ in sign from those of the Euclidean prs, and --- unlike the
case of complete prs's --- it is not at all obvious how to compute
the coefficients of one sequence from the coefficients of the other.
1B. Sturmian and modified subresultant prs's:
=============================================
For the same polynomials f, g in Z[x] mentioned above, their ``modified''
subresultant prs is a sequence of polynomials similar to the Sturmian
prs, the sequence obtained by applying in Q[x] Sturm's algorithm on f, g.
The two sequences differ in that the coefficients of each polynomial
in the modified subresultant prs are the determinants --- also referred
to as modified subresultants --- of appropriately selected sub-matrices
of sylvester2(f, g, x), Sylvester's matrix of 1853 of dimensions 2n x 2n.
The determinant of sylvester2 itself is called the modified resultant
of f, g and it also can serve as a criterion of whether the two
polynomials have common roots or not.
For complete prs's the sign sequence of the Sturmian prs of f, g is
identical to the sign sequence of the modified subresultant prs of
f, g and the coefficients of one sequence are easily computed from
the coefficients of the other.
For incomplete prs's the polynomials in the modified subresultant prs
generally differ in sign from those of the Sturmian prs, and --- unlike
the case of complete prs's --- it is not at all obvious how to compute
the coefficients of one sequence from the coefficients of the other.
As Sylvester pointed out, the coefficients of the polynomial remainders
obtained as (modified) subresultants are the smallest possible without
introducing rationals and without computing (integer) greatest common
divisors.
1C. On terminology:
===================
Whence the terminology? Well, generalized Sturmian prs's are
``modifications'' of Euclidean prs's; the hint came from the title
of the Pell-Gordon paper of 1917.
In the literature one also encounters the name ``non signed'' and
``signed'' prs for Euclidean and Sturmian prs respectively.
Likewise ``non signed'' and ``signed'' subresultant prs for
subresultant and modified subresultant prs respectively.
2. Functions in the module:
===========================
No function utilizes sympy's function prem().
2A. Matrices:
=============
The functions sylvester(f, g, x, method=1) and
sylvester(f, g, x, method=2) compute either Sylvester matrix.
They can be used to compute (modified) subresultant prs's by
direct determinant evaluation.
The function bezout(f, g, x, method='prs') provides a matrix of
smaller dimensions than either Sylvester matrix. It is the function
of choice for computing (modified) subresultant prs's by direct
determinant evaluation.
sylvester(f, g, x, method=1)
sylvester(f, g, x, method=2)
bezout(f, g, x, method='prs')
The following identity holds:
bezout(f, g, x, method='prs') =
backward_eye(deg(f))*bezout(f, g, x, method='bz')*backward_eye(deg(f))
2B. Subresultant and modified subresultant prs's by
===================================================
determinant evaluations:
=======================
We use the Sylvester matrices of 1840 and 1853 to
compute, respectively, subresultant and modified
subresultant polynomial remainder sequences. However,
for large matrices this approach takes a lot of time.
Instead of utilizing the Sylvester matrices, we can
employ the Bezout matrix which is of smaller dimensions.
subresultants_sylv(f, g, x)
modified_subresultants_sylv(f, g, x)
subresultants_bezout(f, g, x)
modified_subresultants_bezout(f, g, x)
2C. Subresultant prs's by ONE determinant evaluation:
=====================================================
All three functions in this section evaluate one determinant
per remainder polynomial; this is the determinant of an
appropriately selected sub-matrix of sylvester1(f, g, x),
Sylvester's matrix of 1840.
To compute the remainder polynomials the function
subresultants_rem(f, g, x) employs rem(f, g, x).
By contrast, the other two functions implement Van Vleck's ideas
of 1900 and compute the remainder polynomials by triangularizing
sylvester2(f, g, x), Sylvester's matrix of 1853.
subresultants_rem(f, g, x)
subresultants_vv(f, g, x)
subresultants_vv_2(f, g, x).
2E. Euclidean, Sturmian prs's in Q[x]:
======================================
euclid_q(f, g, x)
sturm_q(f, g, x)
2F. Euclidean, Sturmian and (modified) subresultant prs's P-G:
==============================================================
All functions in this section are based on the Pell-Gordon (P-G)
theorem of 1917.
Computations are done in Q[x], employing the function rem(f, g, x)
for the computation of the remainder polynomials.
euclid_pg(f, g, x)
sturm pg(f, g, x)
subresultants_pg(f, g, x)
modified_subresultants_pg(f, g, x)
2G. Euclidean, Sturmian and (modified) subresultant prs's A-M-V:
================================================================
All functions in this section are based on the Akritas-Malaschonok-
Vigklas (A-M-V) theorem of 2015.
Computations are done in Z[x], employing the function rem_z(f, g, x)
for the computation of the remainder polynomials.
euclid_amv(f, g, x)
sturm_amv(f, g, x)
subresultants_amv(f, g, x)
modified_subresultants_amv(f, g, x)
2Ga. Exception:
===============
subresultants_amv_q(f, g, x)
This function employs rem(f, g, x) for the computation of
the remainder polynomials, despite the fact that it implements
the A-M-V Theorem.
It is included in our module in order to show that theorems P-G
and A-M-V can be implemented utilizing either the function
rem(f, g, x) or the function rem_z(f, g, x).
For clearly historical reasons --- since the Collins-Brown-Traub
coefficients-reduction factor beta_i was not available in 1917 ---
we have implemented the Pell-Gordon theorem with the function
rem(f, g, x) and the A-M-V Theorem with the function rem_z(f, g, x).
2H. Resultants:
===============
res(f, g, x)
res_q(f, g, x)
res_z(f, g, x)
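
3. A minimal usage sketch:
==========================

The example below was added for illustration only; the values were
checked by hand for these particular inputs.

>>> from sympy import degree
>>> from sympy.abc import x
>>> from sympy.polys.subresultants_qq_zz import sylvester, res
>>> from sympy.polys.subresultants_qq_zz import subresultants_amv
>>> res(x**2 + 1, x - 1, x) == sylvester(x**2 + 1, x - 1, x, 1).det()
True
>>> [degree(p, x) for p in subresultants_amv(x**3 - 7*x + 7, 3*x**2 - 7, x)]
[3, 2, 1, 0]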
"""
from sympy import (Abs, degree, expand, eye, floor, LC, Matrix, nan, Poly, pprint)
from sympy import (QQ, pquo, quo, prem, rem, S, sign, simplify, summation, var, zeros)
from sympy.polys.polyerrors import PolynomialError
def sylvester(f, g, x, method = 1):
'''
The input polynomials f, g are in Z[x] or in Q[x]. Let m = degree(f, x),
n = degree(g, x) and mx = max( m , n ).
a. If method = 1 (default), computes sylvester1, Sylvester's matrix of 1840
of dimension (m + n) x (m + n). The determinants of properly chosen
submatrices of this matrix (a.k.a. subresultants) can be
used to compute the coefficients of the Euclidean PRS of f, g.
b. If method = 2, computes sylvester2, Sylvester's matrix of 1853
of dimension (2*mx) x (2*mx). The determinants of properly chosen
submatrices of this matrix (a.k.a. ``modified'' subresultants) can be
used to compute the coefficients of the Sturmian PRS of f, g.
Applications of these Matrices can be found in the references below.
Especially, for applications of sylvester2, see the first reference!!
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences. Serdica Journal of Computing,
Vol. 7, No 4, 101-134, 2013.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
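
    For example (a small sketch, checked by hand for these inputs):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import sylvester
    >>> sylvester(x**2 + 1, x - 1, x, 1).shape
    (3, 3)
    >>> sylvester(x**2 + 1, x - 1, x, 1).det()
    2
    >>> sylvester(x**2 + 1, x - 1, x, 2).shape
    (4, 4)
    >>> sylvester(x**2 + 1, x - 1, x, 2).det()
    -2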
'''
# obtain degrees of polys
m, n = degree( Poly(f, x), x), degree( Poly(g, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
fp = Poly(f, x).all_coeffs()
gp = Poly(g, x).all_coeffs()
# Sylvester's matrix of 1840 (default; a.k.a. sylvester1)
if method <= 1:
M = zeros(m + n)
k = 0
for i in range(n):
j = k
for coeff in fp:
M[i, j] = coeff
j = j + 1
k = k + 1
k = 0
for i in range(n, m + n):
j = k
for coeff in gp:
M[i, j] = coeff
j = j + 1
k = k + 1
return M
# Sylvester's matrix of 1853 (a.k.a sylvester2)
if method >= 2:
if len(fp) < len(gp):
h = []
for i in range(len(gp) - len(fp)):
h.append(0)
fp[ : 0] = h
else:
h = []
for i in range(len(fp) - len(gp)):
h.append(0)
gp[ : 0] = h
mx = max(m, n)
dim = 2*mx
M = zeros( dim )
k = 0
for i in range( mx ):
j = k
for coeff in fp:
M[2*i, j] = coeff
j = j + 1
j = k
for coeff in gp:
M[2*i + 1, j] = coeff
j = j + 1
k = k + 1
return M
def process_matrix_output(poly_seq, x):
"""
poly_seq is a polynomial remainder sequence computed either by
(modified_)subresultants_bezout or by (modified_)subresultants_sylv.
This function removes from poly_seq all zero polynomials as well
as all those whose degree is equal to the degree of a preceding
polynomial in poly_seq, as we scan it from left to right.
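
    For example (a small sketch; the zero polynomial at index 2 is removed):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import process_matrix_output
    >>> process_matrix_output([x**2 + 1, x - 1, 0, 2], x)
    [x**2 + 1, x - 1, 2]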
"""
L = poly_seq[:] # get a copy of the input sequence
d = degree(L[1], x)
i = 2
while i < len(L):
d_i = degree(L[i], x)
if d_i < 0: # zero poly
L.remove(L[i])
i = i - 1
if d == d_i: # poly degree equals degree of previous poly
L.remove(L[i])
i = i - 1
if d_i >= 0:
d = d_i
i = i + 1
return L
def subresultants_sylv(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x]. It is assumed
that deg(f) >= deg(g).
Computes the subresultant polynomial remainder sequence (prs)
of f, g by evaluating determinants of appropriately selected
submatrices of sylvester(f, g, x, 1). The dimensions of the
latter are (deg(f) + deg(g)) x (deg(f) + deg(g)).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of sylvester(f, g, x, 1).
If the subresultant prs is complete, then the output coincides
with the Euclidean sequence of the polynomials f, g.
References:
===========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
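
    For example (checked by hand for these inputs; the last element is the
    resultant of the two polynomials):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import subresultants_sylv
    >>> subresultants_sylv(x**2 + 1, x - 1, x)
    [x**2 + 1, x - 1, 2]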
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
# form matrix sylvester(f, g, x, 1)
S = sylvester(f, g, x, 1)
# pick appropriate submatrices of S
# and form subresultant polys
j = m - 1
while j > 0:
Sp = S[:, :] # copy of S
# delete last j rows of coeffs of g
for ind in range(m + n - j, m + n):
Sp.row_del(m + n - j)
# delete last j rows of coeffs of f
for ind in range(m - j, m):
Sp.row_del(m - j)
# evaluate determinants and form coefficients list
coeff_L, k, l = [], Sp.rows, 0
while l <= j:
coeff_L.append(Sp[ : , 0 : k].det())
Sp.col_swap(k - 1, k + l)
l += 1
# form poly and append to SP_L
SR_L.append(Poly(coeff_L, x).as_expr())
j -= 1
# j = 0
SR_L.append(S.det())
return process_matrix_output(SR_L, x)
def modified_subresultants_sylv(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x]. It is assumed
that deg(f) >= deg(g).
Computes the modified subresultant polynomial remainder sequence (prs)
of f, g by evaluating determinants of appropriately selected
submatrices of sylvester(f, g, x, 2). The dimensions of the
latter are (2*deg(f)) x (2*deg(f)).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of sylvester(f, g, x, 2).
If the modified subresultant prs is complete, then the output coincides
with the Sturmian sequence of the polynomials f, g.
References:
===========
1. A. G. Akritas,G.I. Malaschonok and P.S. Vigklas:
Sturm Sequences and Modified Subresultant Polynomial Remainder
Sequences. Serdica Journal of Computing, Vol. 8, No 1, 29--46, 2014.
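
    For example (checked by hand for these inputs; here the output coincides
    with the Sturmian sequence of the two polynomials):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import modified_subresultants_sylv
    >>> modified_subresultants_sylv(x**2 + 1, x - 1, x)
    [x**2 + 1, x - 1, -2]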
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # modified subresultant list
# form matrix sylvester(f, g, x, 2)
S = sylvester(f, g, x, 2)
# pick appropriate submatrices of S
# and form modified subresultant polys
j = m - 1
while j > 0:
# delete last 2*j rows of pairs of coeffs of f, g
Sp = S[0:2*n - 2*j, :] # copy of first 2*n - 2*j rows of S
# evaluate determinants and form coefficients list
coeff_L, k, l = [], Sp.rows, 0
while l <= j:
coeff_L.append(Sp[ : , 0 : k].det())
Sp.col_swap(k - 1, k + l)
l += 1
# form poly and append to SP_L
SR_L.append(Poly(coeff_L, x).as_expr())
j -= 1
# j = 0
SR_L.append(S.det())
return process_matrix_output(SR_L, x)
def res(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed by evaluating
the determinant of the matrix sylvester(f, g, x, 1).
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
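
    For example (checked by hand):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import res
    >>> res(x**2 + 1, x - 1, x)
    2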
"""
if f == 0 or g == 0:
raise PolynomialError("The resultant of %s and %s is not defined" % (f, g))
else:
return sylvester(f, g, x, 1).det()
def res_q(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed recursively
by polynomial divisions in Q[x], using the function rem.
See Cohen's book p. 281.
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
"""
m = degree(f, x)
n = degree(g, x)
if m < n:
return (-1)**(m*n) * res_q(g, f, x)
elif n == 0: # g is a constant
return g**m
else:
r = rem(f, g, x)
if r == 0:
return 0
else:
s = degree(r, x)
l = LC(g, x)
return (-1)**(m*n) * l**(m-s)*res_q(g, r, x)
def res_z(f, g, x):
"""
The input polynomials f, g are in Z[x] or in Q[x].
The output is the resultant of f, g computed recursively
by polynomial divisions in Z[x], using the function prem().
See Cohen's book p. 283.
References:
===========
1. J. S. Cohen: Computer Algebra and Symbolic Computation
- Mathematical Methods. A. K. Peters, 2003.
"""
m = degree(f, x)
n = degree(g, x)
if m < n:
return (-1)**(m*n) * res_z(g, f, x)
elif n == 0: # g is a constant
return g**m
else:
r = prem(f, g, x)
if r == 0:
return 0
else:
delta = m - n + 1
w = (-1)**(m*n) * res_z(g, r, x)
s = degree(r, x)
l = LC(g, x)
k = delta * n - m + s
return quo(w, l**k, x)
def sign_seq(poly_seq, x):
"""
Given a sequence of polynomials poly_seq, it returns
the sequence of signs of the leading coefficients of
the polynomials in poly_seq.
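
    For example:

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import sign_seq
    >>> sign_seq([x**3, -2*x + 5, 7], x)
    [1, -1, 1]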
"""
return [sign(LC(poly_seq[i], x)) for i in range(len(poly_seq))]
def bezout(p, q, x, method='bz'):
"""
The input polynomials p, q are in Z[x] or in Q[x]. Let
mx = max( degree(p, x) , degree(q, x) ).
The default option bezout(p, q, x, method='bz') returns Bezout's
symmetric matrix of p and q, of dimensions (mx) x (mx). The
determinant of this matrix is equal to the determinant of sylvester2,
Sylvester's matrix of 1853, whose dimensions are (2*mx) x (2*mx);
however the subresultants of these two matrices may differ.
The other option, bezout(p, q, x, 'prs'), is of interest to us
in this module because it returns a matrix equivalent to sylvester2.
In this case all subresultants of the two matrices are identical.
Both the subresultant polynomial remainder sequence (prs) and
the modified subresultant prs of p and q can be computed by
evaluating determinants of appropriately selected submatrices of
bezout(p, q, x, 'prs') --- one determinant per coefficient of the
remainder polynomials.
The matrices bezout(p, q, x, 'bz') and bezout(p, q, x, 'prs')
are related by the formula
bezout(p, q, x, 'prs') =
backward_eye(deg(p)) * bezout(p, q, x, 'bz') * backward_eye(deg(p)),
where backward_eye() is the backward identity function.
References
==========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
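
    For example (checked by hand; as stated above, the determinant of the
    Bezout matrix equals the determinant of sylvester2):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import bezout, sylvester
    >>> bezout(x**2 + 1, x - 1, x, 'prs').det()
    -2
    >>> sylvester(x**2 + 1, x - 1, x, 2).det()
    -2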
"""
# obtain degrees of polys
m, n = degree( Poly(p, x), x), degree( Poly(q, x), x)
# Special cases:
# A:: case m = n < 0 (i.e. both polys are 0)
if m == n and n < 0:
return Matrix([])
# B:: case m = n = 0 (i.e. both polys are constants)
if m == n and n == 0:
return Matrix([])
# C:: m == 0 and n < 0 or m < 0 and n == 0
# (i.e. one poly is constant and the other is 0)
if m == 0 and n < 0:
return Matrix([])
elif m < 0 and n == 0:
return Matrix([])
# D:: m >= 1 and n < 0 or m < 0 and n >=1
# (i.e. one poly is of degree >=1 and the other is 0)
if m >= 1 and n < 0:
return Matrix([0])
elif m < 0 and n >= 1:
return Matrix([0])
y = var('y')
# expr is 0 when x = y
expr = p * q.subs({x:y}) - p.subs({x:y}) * q
# hence expr is exactly divisible by x - y
poly = Poly( quo(expr, x-y), x, y)
# form Bezout matrix and store them in B as indicated to get
# the LC coefficient of each poly either in the first position
# of each row (method='prs') or in the last (method='bz').
mx = max(m, n)
B = zeros(mx)
for i in range(mx):
for j in range(mx):
if method == 'prs':
B[mx - 1 - i, mx - 1 - j] = poly.nth(i, j)
else:
B[i, j] = poly.nth(i, j)
return B
def backward_eye(n):
'''
Returns the backward identity matrix of dimensions n x n.
Needed to "turn" the Bezout matrices
so that the leading coefficients are first.
See docstring of the function bezout(p, q, x, method='bz').
'''
M = eye(n) # identity matrix of order n
for i in range(int(M.rows / 2)):
M.row_swap(0 + i, M.rows - 1 - i)
return M
def subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
bezout(p, q, x, 'prs) is used instead of sylvester(p, q, x, 1),
Sylvester's matrix of 1840, because the dimensions of the latter
are (deg(p) + deg(q)) x (deg(p) + deg(q)).
If the subresultant prs is complete, then the output coincides
with the Euclidean sequence of the polynomials p, q.
References
==========
1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
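
    For example (a sketch, checked by hand; the prs below is complete and its
    last element is a nonzero constant, so p, q have no common root):

    >>> from sympy import degree
    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import subresultants_bezout
    >>> prs = subresultants_bezout(x**3 - 7*x + 7, 3*x**2 - 7, x)
    >>> [degree(r, x) for r in prs]
    [3, 2, 1, 0]
    >>> prs[-1]
    -49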
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
F = LC(f, x)**(degF - degG)
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
# apply Theorem 2.1 in the paper by Toca & Vega 2004
# to get correct signs
SR_L.append(int((-1)**(j*(j-1)/2)) * (Poly(coeff_L, x) / F).as_expr())
j = j + 1
return process_matrix_output(SR_L, x)
def modified_subresultants_bezout(p, q, x):
"""
The input polynomials p, q are in Z[x] or in Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant polynomial remainder sequence
of p, q by evaluating determinants of appropriately selected
submatrices of bezout(p, q, x, 'prs'). The dimensions of the
latter are deg(p) x deg(p).
Each coefficient is computed by evaluating the determinant of the
corresponding submatrix of bezout(p, q, x, 'prs').
bezout(p, q, x, 'prs') is used instead of sylvester(p, q, x, 2),
Sylvester's matrix of 1853, because the dimensions of the latter
are 2*deg(p) x 2*deg(p).
If the modified subresultant prs is complete, and LC( p ) > 0, the output
coincides with the (generalized) Sturm's sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
2. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants
and Their Applications. Appl. Algebra in Engin., Communic. and Comp.,
Vol. 15, 233-266, 2004.
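
    For example (checked by hand; since LC( p ) > 0 and the sequence is
    complete, the output coincides with the Sturm sequence of p, q):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import modified_subresultants_bezout
    >>> modified_subresultants_bezout(x**2 + 1, x - 1, x)
    [x**2 + 1, x - 1, -2]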
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
f, g = p, q
n = degF = degree(f, x)
m = degG = degree(g, x)
# make sure proper degrees
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, degF, degG, f, g = m, n, degG, degF, g, f
if n > 0 and m == 0:
return [f, g]
SR_L = [f, g] # subresultant list
# form the bezout matrix
B = bezout(f, g, x, 'prs')
# pick appropriate submatrices of B
# and form subresultant polys
if degF > degG:
j = 2
if degF == degG:
j = 1
while j <= degF:
M = B[0:j, :]
k, coeff_L = j - 1, []
while k <= degF - 1:
coeff_L.append(M[: ,0 : j].det())
if k < degF - 1:
M.col_swap(j - 1, k + 1)
k = k + 1
## Theorem 2.1 in the paper by Toca & Vega 2004 is _not needed_
## in this case since
## the bezout matrix is equivalent to sylvester2
SR_L.append(( Poly(coeff_L, x)).as_expr())
j = j + 1
return process_matrix_output(SR_L, x)
def sturm_pg(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the sequence
are (in absolute value) ``modified'' subresultants, which for non-monic
polynomials are greater than the coefficients of the corresponding
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, the coefficients
of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
    coincides with the ``modified'' subresultant prs of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become (``modified'')
subresultants with the help of the Pell-Gordon Theorem of 1917.
See also the function euclid_pg(p, q, x).
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
    the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
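
    For example (a small sketch, checked by hand; the sequence below is
    complete and LC( p ) > 0, so with the default method=0 its coefficients
    are ``modified'' subresultants):

    >>> from sympy.abc import x
    >>> from sympy.polys.subresultants_qq_zz import sturm_pg
    >>> sturm_pg(x**3 - 7*x + 7, 3*x**2 - 7, x)
    [x**3 - 7*x + 7, 3*x**2 - 7, 42*x - 63, 49]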
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
lcf = LC(p, x)**(d0 - d1) # lcf * subr = modified subr
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
del0 = d0 - d1 # degree difference
rho1 = LC(a1, x) # leading coeff of a1
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2,x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# append accordingly
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
# main loop
deg_diff_old = deg_diff_new
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
# update variables and append
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
if method == 0:
sturm_seq.append( simplify(lcf * a2 * Abs(mul_fac_old)))
else:
sturm_seq.append( simplify( a2 * Abs(mul_fac_old)))
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
def sturm_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Sturm sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant prs, (references 1, 2)
or in
(b) the subresultant prs (reference 3).
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
    the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2 Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
sturm_seq = [a0, a1] # the output list
a2 = -rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
sturm_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = -rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
sturm_seq.append( a2 )
if flag: # change the sign of the sequence
sturm_seq = [-i for i in sturm_seq]
# gcd is of degree > 0 ?
m = len(sturm_seq)
if sturm_seq[m - 1] == nan or sturm_seq[m - 1] == 0:
sturm_seq.pop(m - 1)
return sturm_seq
def sturm_amv(p, q, x, method=0):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the (generalized) Sturm sequence of p and q in Z[x] or Q[x].
If q = diff(p, x, 1) it is the usual Sturm sequence.
A. If method == 0, default, the remainder coefficients of the
sequence are (in absolute value) ``modified'' subresultants, which
for non-monic polynomials are greater than the coefficients of the
corresponding subresultants by the factor Abs(LC(p)**( deg(p)- deg(q))).
B. If method == 1, the remainder coefficients of the sequence are (in
absolute value) subresultants, which for non-monic polynomials are
smaller than the coefficients of the corresponding ``modified''
subresultants by the factor Abs( LC(p)**( deg(p)- deg(q)) ).
If the Sturm sequence is complete, method=0 and LC( p ) > 0, then the
coefficients of the polynomials in the sequence are ``modified'' subresultants.
That is, they are determinants of appropriately selected submatrices of
sylvester2, Sylvester's matrix of 1853. In this case the Sturm sequence
    coincides with the ``modified'' subresultant prs of the polynomials
p, q.
If the Sturm sequence is incomplete and method=0 then the signs of the
coefficients of the polynomials in the sequence may differ from the signs
of the coefficients of the corresponding polynomials in the ``modified''
subresultant prs; however, the absolute values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, we first compute the euclidean sequence of p and q using
euclid_amv(p, q, x) and then: (a) change the signs of the remainders in the
Euclidean sequence according to the pattern "-, -, +, +, -, -, +, +,..."
(see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference)
and (b) if method=0, assuming deg(p) > deg(q), we multiply the remainder
coefficients of the Euclidean sequence times the factor
Abs( LC(p)**( deg(p)- deg(q)) ) to make them modified subresultants.
See also the function sturm_pg(p, q, x).
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing 9(2) (2015), 123-138.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# compute the euclidean sequence
prs = euclid_amv(p, q, x)
# defensive
if prs == [] or len(prs) == 2:
return prs
# the coefficients in prs are subresultants and hence are smaller
# than the corresponding subresultants by the factor
# Abs( LC(prs[0])**( deg(prs[0]) - deg(prs[1])) ); Theorem 2, 2nd reference.
lcf = Abs( LC(prs[0])**( degree(prs[0], x) - degree(prs[1], x) ) )
# the signs of the first two polys in the sequence stay the same
sturm_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
# and multiply times lcf if needed
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
sturm_seq.append( - prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( - prs[i] )
i = i + 1
flag = 1
elif flag == 1:
sturm_seq.append( prs[i] )
i = i + 1
if i == m:
break
sturm_seq.append( prs[i] )
i = i + 1
flag = 0
# subresultants or modified subresultants?
if method == 0 and lcf > 1:
aux_seq = [sturm_seq[0], sturm_seq[1]]
for i in range(2, m):
aux_seq.append(simplify(sturm_seq[i] * lcf ))
sturm_seq = aux_seq
return sturm_seq
def euclid_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
In this case the Euclidean sequence coincides with the subresultant prs
of the polynomials p, q.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the Euclidean sequence, no determinant evaluation takes place.
We first compute the (generalized) Sturm sequence of p and q using
sturm_pg(p, q, x, 1), in which case the coefficients are (in absolute value)
equal to subresultants. Then we change the signs of the remainders in the
Sturm sequence according to the pattern "-, -, +, +, -, -, +, +,..." ;
see Lemma 1 in the 1st reference or Theorem 3 in the 2nd reference as well as
the function sturm_pg(p, q, x).
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.'' Serdica
Journal of Computing 9(2) (2015), 123-138.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# compute the sturmian sequence using the Pell-Gordon (or AMV) theorem
# with the coefficients in the prs being (in absolute value) subresultants
prs = sturm_pg(p, q, x, 1) ## any other method would do
# defensive
if prs == [] or len(prs) == 2:
return prs
# the signs of the first two polys in the sequence stay the same
euclid_seq = [prs[0], prs[1]]
# change the signs according to "-, -, +, +, -, -, +, +,..."
flag = 0
m = len(prs)
i = 2
while i <= m-1:
if flag == 0:
euclid_seq.append(- prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(- prs[i] )
i = i + 1
flag = 1
elif flag == 1:
euclid_seq.append(prs[i] )
i = i + 1
if i == m:
break
euclid_seq.append(prs[i] )
i = i + 1
flag = 0
return euclid_seq
def euclid_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the Euclidean sequence of p and q in Q[x].
Polynomial divisions in Q[x] are performed, using the function rem(p, q, x).
The coefficients of the polynomials in the Euclidean sequence can be uniquely
determined from the corresponding coefficients of the polynomials found
either in:
(a) the ``modified'' subresultant polynomial remainder sequence,
(references 1, 2)
or in
(b) the subresultant polynomial remainder sequence (references 3).
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
    the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# make sure LC(p) > 0
flag = 0
if LC(p,x) < 0:
flag = 1
p = -p
q = -q
# initialize
a0, a1 = p, q # the input polys
euclid_seq = [a0, a1] # the output list
a2 = rem(a0, a1, domain=QQ) # first remainder
d2 = degree(a2, x) # degree of a2
euclid_seq.append( a2 )
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
a2 = rem(a0, a1, domain=QQ) # new remainder
d2 = degree(a2, x) # actual degree of a2
euclid_seq.append( a2 )
if flag: # change the sign of the sequence
euclid_seq = [-i for i in euclid_seq]
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
def euclid_amv(f, g, x):
"""
f, g are polynomials in Z[x] or Q[x]. It is assumed
that degree(f, x) >= degree(g, x).
    Computes the Euclidean sequence of f and g in Z[x] or Q[x].
If the Euclidean sequence is complete the coefficients of the polynomials
in the sequence are subresultants. That is, they are determinants of
appropriately selected submatrices of sylvester1, Sylvester's matrix of 1840.
    In this case the Euclidean sequence coincides with the subresultant prs
    of the polynomials f, g.
If the Euclidean sequence is incomplete the signs of the coefficients of the
polynomials in the sequence may differ from the signs of the coefficients of
the corresponding polynomials in the subresultant prs; however, the absolute
values are the same.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(f, g, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Collins-Brown-Traub formula for coefficient reduction.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
    Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
euclid_seq = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# compute the first polynomial of the prs
i = 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# main loop
while d2 >= 1:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( (c**(deg_dif_p1 - 1)) * sigma0 )
euclid_seq.append( a2 )
d2 = degree(a2, x) # actual degree of a2
# gcd is of degree > 0 ?
m = len(euclid_seq)
if euclid_seq[m - 1] == nan or euclid_seq[m - 1] == 0:
euclid_seq.pop(m - 1)
return euclid_seq
def modified_subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the ``modified'' subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
``modified'' subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
To compute the coefficients, no determinant evaluation takes place. Instead,
polynomial divisions in Q[x] are performed, using the function rem(p, q, x);
the coefficients of the remainders computed this way become ``modified''
subresultants with the help of the Pell-Gordon Theorem of 1917.
If the ``modified'' subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm sequence of the polynomials p, q.
References
==========
1. Pell A. J., R. L. Gordon. The Modified Remainders Obtained in Finding
    the Highest Common Factor of Two Polynomials. Annals of Mathematics,
Second Series, 18 (1917), No. 4, 188-193.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p,x)
d1 = degree(q,x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p,q]
# initialize
k = var('k') # index in summation formula
u_list = [] # of elements (-1)**u_i
subres_l = [p, q] # mod. subr. prs output list
a0, a1 = p, q # the input polys
del0 = d0 - d1 # degree difference
degdif = del0 # save it
rho_1 = LC(a0) # lead. coeff (a0)
# Initialize Pell-Gordon variables
rho_list_minus_1 = sign( LC(a0, x)) # sign of LC(a0)
rho1 = LC(a1, x) # leading coeff of a1
rho_list = [ sign(rho1)] # of signs
p_list = [del0] # of degree differences
u = summation(k, (k, 1, p_list[0])) # value of u
u_list.append(u) # of u values
v = sum(p_list) # v value
# first remainder
exp_deg = d1 - 1 # expected degree of a2
a2 = - rem(a0, a1, domain=QQ) # first remainder
rho2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# mul_fac is the factor by which a2 is multiplied to
# get integer coefficients
mul_fac_old = rho1**(del0 + del1 - deg_diff_new)
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator of fraction
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sg(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
deg_diff_old=deg_diff_new
# main loop
while d2 > 0:
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
del0 = del1 # update degree difference
exp_deg = d1 - 1 # new expected degree
a2 = - rem(a0, a1, domain=QQ) # new remainder
rho3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
deg_diff_new = exp_deg - d2 # expected - actual degree
del1 = d1 - d2 # degree difference
# take into consideration the power
# rho1**deg_diff_old that was "left out"
expo_old = deg_diff_old # rho1 raised to this power
expo_new = del0 + del1 - deg_diff_new # rho2 raised to this power
mul_fac_new = rho2**(expo_new) * rho1**(expo_old) * mul_fac_old
# update variables
deg_diff_old, mul_fac_old = deg_diff_new, mul_fac_new
rho1, rho2 = rho2, rho3
# update Pell-Gordon variables
p_list.append(1 + deg_diff_new) # deg_diff_new is 0 for complete seq
# apply Pell-Gordon formula (7) in second reference
num = 1 # numerator
for k in range(len(u_list)):
num *= (-1)**u_list[k]
num = num * (-1)**v
# denominator depends on complete / incomplete seq
if deg_diff_new == 0: # complete seq
den = 1
for k in range(len(rho_list)):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
else: # incomplete seq
den = 1
for k in range(len(rho_list)-1):
den *= rho_list[k]**(p_list[k] + p_list[k + 1])
den = den * rho_list_minus_1
expo = (p_list[len(rho_list) - 1] + p_list[len(rho_list)] - deg_diff_new)
den = den * rho_list[len(rho_list) - 1]**expo
# the sign of the determinant depends on sg(num / den)
if sign(num / den) > 0:
subres_l.append( simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
else:
subres_l.append(- simplify(rho_1**degdif*a2* Abs(mul_fac_old) ) )
# update Pell-Gordon variables
k = var('k')
rho_list.append( sign(rho2))
u = summation(k, (k, 1, p_list[len(p_list) - 1]))
u_list.append(u)
v = sum(p_list)
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
# LC( p ) < 0
m = len(subres_l) # list may be shorter now due to deg(gcd ) > 0
if LC( p ) < 0:
aux_seq = [subres_l[0], subres_l[1]]
for i in range(2, m):
aux_seq.append(simplify(subres_l[i] * (-1) ))
subres_l = aux_seq
return subres_l
def subresultants_pg(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x], from
the modified subresultant prs of p and q.
The coefficients of the polynomials in these two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: "On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials."
Serdica Journal of Computing 9(2) (2015), 123-138.
"""
# compute the modified subresultant prs
lst = modified_subresultants_pg(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are modified subresultants and, hence, are
# greater than those of the corresponding subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2 in reference.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2 in reference.
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2 in reference.
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k], divided by lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append(-lst_s[k] / lcf)
else:
subr_seq.append(lst_s[k] / lcf)
return subr_seq
def subresultants_amv_q(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Q[x] are performed, using the
function rem(p, q, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
    Remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
d0 = degree(p, x)
d1 = degree(q, x)
if d0 == 0 and d1 == 0:
return [p, q]
if d1 > d0:
d0, d1 = d1, d0
p, q = q, p
if d0 > 0 and d1 == 0:
return [p, q]
# initialize
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
subres_l = [p, q] # subresultant prs output list
a0, a1 = p, q # the input polys
sigma1 = LC(a1, x) # leading coeff of a1
p0 = d0 - d1 # degree difference
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
mul_fac = 1
d2 = d1
# main loop
while d2 > 0:
i += 1
a2 = rem(a0, a1, domain= QQ) # new remainder
if i == 1:
sigma2 = LC(a2, x)
else:
sigma3 = LC(a2, x)
sigma1, sigma2 = sigma2, sigma3
d2 = degree(a2, x)
p1 = d1 - d2
psi = i + phi + p_odd_index_sum
# new mul_fac
mul_fac = sigma1**(p0 + 1) * mul_fac
## compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sign(mul_fac)
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( simplify(expand(a2* Abs(mul_fac))))
else:
subres_l.append(- simplify(expand(a2* Abs(mul_fac))))
## bring into mul_fac the missing power of sigma if there was a degree gap
if p1 - 1 > 0:
mul_fac = mul_fac * sigma1**(p1 - 1)
# update AMV variables
a0, a1, d0, d1 = a1, a2, d1, d2
p0 = p1
if p0 % 2 ==1:
s += 1
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p0 # p_i has odd index
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
def compute_sign(base, expo):
'''
base != 0 and expo >= 0 are integers;
returns the sign of base**expo without
evaluating the power itself!
'''
sb = sign(base)
if sb == 1:
return 1
pe = expo % 2
if pe == 0:
return -sb
else:
return sb
def rem_z(p, q, x):
'''
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the remainder will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function rem(p, q, x).
By contrast the function prem(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
This results not only in ``messing up the signs'' of the Euclidean and
Sturmian prs's as mentioned in the second reference,
but also in violation of the main results of the first and third
references --- Theorem 4 and Theorem 1 respectively. Theorems 4 and 1
establish a one-to-one correspondence between the Euclidean and the
Sturmian prs of p, q, on one hand, and the subresultant prs of p, q,
on the other.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials.''
Serdica Journal of Computing, 9(2) (2015), 123-138.
2. http://planetMath.org/sturmstheorem
3. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result on
the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
'''
if (p.as_poly().is_univariate and q.as_poly().is_univariate and
p.as_poly().gens == q.as_poly().gens):
delta = (degree(p, x) - degree(q, x) + 1)
return rem(Abs(LC(q, x))**delta * p, q, x)
else:
return prem(p, q, x)
def quo_z(p, q, x):
"""
Intended mainly for p, q polynomials in Z[x] so that,
on dividing p by q, the quotient will also be in Z[x]. (However,
it also works fine for polynomials in Q[x].) It is assumed
that degree(p, x) >= degree(q, x).
It premultiplies p by the _absolute_ value of the leading coefficient
of q, raised to the power deg(p) - deg(q) + 1 and then performs
polynomial division in Q[x], using the function quo(p, q, x).
By contrast the function pquo(p, q, x) does _not_ use the absolute
value of the leading coefficient of q.
See also function rem_z(p, q, x) for additional comments and references.
"""
if (p.as_poly().is_univariate and q.as_poly().is_univariate and
p.as_poly().gens == q.as_poly().gens):
delta = (degree(p, x) - degree(q, x) + 1)
return quo(Abs(LC(q, x))**delta * p, q, x)
else:
return pquo(p, q, x)
def subresultants_amv(f, g, x):
"""
    f, g are polynomials in Z[x] or Q[x]. It is assumed
    that degree(f, x) >= degree(g, x).
    Computes the subresultant prs of f and g in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
To compute the coefficients, no determinant evaluation takes place.
Instead, polynomial divisions in Z[x] or Q[x] are performed, using
the function rem_z(p, q, x); the coefficients of the remainders
computed this way become subresultants with the help of the
Akritas-Malaschonok-Vigklas Theorem of 2015 and the Collins-Brown-
Traub formula for coefficient reduction.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``A Basic Result
on the Theory of Subresultants.'' Serdica Journal of Computing 10 (2016), No.1, 31-48.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Subresultant Polynomial
remainder Sequences Obtained by Polynomial Divisions in Q[x] or in Z[x].''
Serdica Journal of Computing 10 (2016), No.3-4, 197-217.
"""
# make sure neither f nor g is 0
if f == 0 or g == 0:
return [f, g]
# make sure proper degrees
d0 = degree(f, x)
d1 = degree(g, x)
if d0 == 0 and d1 == 0:
return [f, g]
if d1 > d0:
d0, d1 = d1, d0
f, g = g, f
if d0 > 0 and d1 == 0:
return [f, g]
# initialize
a0 = f
a1 = g
subres_l = [a0, a1]
deg_dif_p1, c = degree(a0, x) - degree(a1, x) + 1, -1
# initialize AMV variables
sigma1 = LC(a1, x) # leading coeff of a1
i, s = 0, 0 # counters for remainders & odd elements
p_odd_index_sum = 0 # contains the sum of p_1, p_3, etc
p0 = deg_dif_p1 - 1
if p0 % 2 == 1:
s += 1
phi = floor( (s + 1) / 2 )
# compute the first polynomial of the prs
i += 1
a2 = rem_z(a0, a1, x) / Abs( (-1)**deg_dif_p1 ) # first remainder
sigma2 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
# sgn_den is the factor, the denominator 1st fraction of (9),
# by which a2 is multiplied to get integer coefficients
sgn_den = compute_sign( sigma1, p0 + 1 )
## compute sign of the 1st fraction in formula (9) of the paper
# numerator
psi = i + phi + p_odd_index_sum
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign(num / den) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
if p1 % 2 == 1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# main loop
while d2 >= 1:
phi = floor( (s + 1) / 2 )
if i%2 == 1:
p_odd_index_sum += p1 # p_i has odd index
a0, a1, d0, d1 = a1, a2, d1, d2 # update polys and degrees
p0 = p1 # update degree difference
i += 1
sigma0 = -LC(a0)
c = (sigma0**(deg_dif_p1 - 1)) / (c**(deg_dif_p1 - 2))
deg_dif_p1 = degree(a0, x) - d2 + 1
a2 = rem_z(a0, a1, x) / Abs( (c**(deg_dif_p1 - 1)) * sigma0 )
sigma3 = LC(a2, x) # leading coeff of a2
d2 = degree(a2, x) # actual degree of a2
p1 = d1 - d2 # degree difference
psi = i + phi + p_odd_index_sum
# update variables
sigma1, sigma2 = sigma2, sigma3
# new sgn_den
sgn_den = compute_sign( sigma1, p0 + 1 ) * sgn_den
# compute the sign of the first fraction in formula (9) of the paper
# numerator
num = (-1)**psi
# denominator
den = sgn_den
# the sign of the determinant depends on sign( num / den ) != 0
if sign(num / den) > 0:
subres_l.append( a2 )
else:
subres_l.append( -a2 )
# update AMV variable
if p1 % 2 ==1:
s += 1
# bring in the missing power of sigma if there was gap
if p1 - 1 > 0:
sgn_den = sgn_den * compute_sign( sigma1, p1 - 1 )
# gcd is of degree > 0 ?
m = len(subres_l)
if subres_l[m - 1] == nan or subres_l[m - 1] == 0:
subres_l.pop(m - 1)
return subres_l
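# Editor's note: minimal usage sketch for subresultants_amv(), added for clarity
# and not part of the original module.  It only checks two properties of any
# polynomial remainder sequence: the list starts with the inputs and the degrees
# of the remaining members strictly decrease.
def _demo_subresultants_amv():
    from sympy.abc import x
    f = x**3 - 2*x + 5
    g = x**2 + x + 1
    prs = subresultants_amv(f, g, x)
    assert prs[0] == f and prs[1] == g
    degs = [degree(t, x) for t in prs]
    assert all(degs[i] > degs[i + 1] for i in range(1, len(degs) - 1))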
def modified_subresultants_amv(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the modified subresultant prs of p and q in Z[x] or Q[x],
from the subresultant prs of p and q.
The coefficients of the polynomials in the two sequences differ only
in sign and the factor LC(p)**( deg(p)- deg(q)) as stated in
Theorem 2 of the reference.
The coefficients of the polynomials in the output sequence are
modified subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester2, Sylvester's matrix of 1853.
If the modified subresultant prs is complete, and LC( p ) > 0, it coincides
with the (generalized) Sturm's sequence of the polynomials p, q.
References
==========
1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: "On the Remainders
Obtained in Finding the Greatest Common Divisor of Two Polynomials."
    Serdica Journal of Computing, 9(2) (2015), 123-138.
"""
# compute the subresultant prs
lst = subresultants_amv(p,q,x) ## any other method would do
# defensive
if lst == [] or len(lst) == 2:
return lst
# the coefficients in lst are subresultants and, hence, smaller than those
# of the corresponding modified subresultants by the factor
# LC(lst[0])**( deg(lst[0]) - deg(lst[1])); see Theorem 2.
lcf = LC(lst[0])**( degree(lst[0], x) - degree(lst[1], x) )
# Initialize the modified subresultant prs list
subr_seq = [lst[0], lst[1]]
# compute the degree sequences m_i and j_i of Theorem 2
deg_seq = [degree(Poly(poly, x), x) for poly in lst]
deg = deg_seq[0]
deg_seq_s = deg_seq[1:-1]
m_seq = [m-1 for m in deg_seq_s]
j_seq = [deg - m for m in m_seq]
# compute the AMV factors of Theorem 2
fact = [(-1)**( j*(j-1)/S(2) ) for j in j_seq]
# shortened list without the first two polys
lst_s = lst[2:]
# poly lst_s[k] is multiplied times fact[k] and times lcf
# and appended to the subresultant prs list
m = len(fact)
for k in range(m):
if sign(fact[k]) == -1:
subr_seq.append( simplify(-lst_s[k] * lcf) )
else:
subr_seq.append( simplify(lst_s[k] * lcf) )
return subr_seq
def correct_sign(deg_f, deg_g, s1, rdel, cdel):
"""
Used in various subresultant prs algorithms.
Evaluates the determinant, (a.k.a. subresultant) of a properly selected
submatrix of s1, Sylvester's matrix of 1840, to get the correct sign
and value of the leading coefficient of a given polynomial remainder.
deg_f, deg_g are the degrees of the original polynomials p, q for which the
matrix s1 = sylvester(p, q, x, 1) was constructed.
rdel denotes the expected degree of the remainder; it is the number of
rows to be deleted from each group of rows in s1 as described in the
reference below.
cdel denotes the expected degree minus the actual degree of the remainder;
it is the number of columns to be deleted --- starting with the last column
forming the square matrix --- from the matrix resulting after the row deletions.
References
==========
Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences
and Modified Subresultant Polynomial Remainder Sequences.''
Serdica Journal of Computing, Vol. 8, No 1, 29-46, 2014.
"""
M = s1[:, :] # copy of matrix s1
# eliminate rdel rows from the first deg_g rows
for i in range(M.rows - deg_f - 1, M.rows - deg_f - rdel - 1, -1):
M.row_del(i)
# eliminate rdel rows from the last deg_f rows
for i in range(M.rows - 1, M.rows - rdel - 1, -1):
M.row_del(i)
# eliminate cdel columns
for i in range(cdel):
M.col_del(M.rows - 1)
# define submatrix
Md = M[:, 0: M.rows]
return Md.det()
def subresultants_rem(p, q, x):
"""
p, q are polynomials in Z[x] or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p and q in Z[x] or Q[x];
the coefficients of the polynomials in the sequence are
subresultants. That is, they are determinants of appropriately
selected submatrices of sylvester1, Sylvester's matrix of 1840.
    To compute the coefficients, polynomial divisions in Q[x] are
performed, using the function rem(p, q, x). The coefficients
of the remainders computed this way become subresultants by evaluating
one subresultant per remainder --- that of the leading coefficient.
This way we obtain the correct sign and value of the leading coefficient
of the remainder and we easily ``force'' the rest of the coefficients
to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
# main loop
while deg_g > 0:
r = rem(p, q, x)
d = degree(r, x)
if d < 0:
return sr_list
# make coefficients subresultants evaluating ONE determinant
exp_deg = deg_g - 1 # expected degree
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
r = simplify((r / LC(r, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(r)
# update degrees and polys
deg_f, deg_g = deg_g, d
p, q = q, r
# gcd is of degree > 0 ?
m = len(sr_list)
if sr_list[m - 1] == nan or sr_list[m - 1] == 0:
sr_list.pop(m - 1)
return sr_list
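# Editor's note: illustrative cross-check added by the editor (not original code).
# For a pair whose prs is complete, the docstrings above state that both
# subresultants_rem() and subresultants_amv() coincide with the Euclidean
# sequence of f, g, so the two lists are expected to agree element-wise.
def _demo_subresultants_rem():
    from sympy.abc import x
    f = x**3 - 2*x + 5
    g = x**2 + x + 1          # the prs of this pair is complete
    lst_rem = subresultants_rem(f, g, x)
    lst_amv = subresultants_amv(f, g, x)
    assert len(lst_rem) == len(lst_amv)
    assert all(simplify(a - b) == 0 for a, b in zip(lst_rem, lst_amv))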
def pivot(M, i, j):
'''
M is a matrix, and M[i, j] specifies the pivot element.
All elements below M[i, j], in the j-th column, will
be zeroed, if they are not already 0, according to
Dodgson-Bareiss' integer preserving transformations.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
'''
ma = M[:, :] # copy of matrix M
rs = ma.rows # No. of rows
cs = ma.cols # No. of cols
for r in range(i+1, rs):
if ma[r, j] != 0:
for c in range(j + 1, cs):
ma[r, c] = ma[i, j] * ma[r, c] - ma[i, c] * ma[r, j]
ma[r, j] = 0
return ma
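# Editor's note: concrete illustration of one Dodgson-Bareiss step (an editor
# addition, not part of the original module).  Every entry below the pivot
# M[0, 0] is zeroed while the arithmetic stays in the integers.
def _demo_pivot():
    M = Matrix([[2, 1, 3], [4, 5, 6]])
    assert pivot(M, 0, 0) == Matrix([[2, 1, 3], [0, 6, 0]])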
def rotate_r(L, k):
'''
Rotates right by k. L is a row of a matrix or a list.
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(len(ll) - 1)
ll.insert(0, el)
return ll if type(L) is list else Matrix([ll])
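# Editor's note: tiny illustration (editor addition).  Rotating right by one moves
# the last entry to the front; a list input gives a list back, while a matrix row
# comes back as a 1 x n Matrix.
def _demo_rotate_r():
    assert rotate_r([1, 2, 3, 4], 1) == [4, 1, 2, 3]
    assert rotate_r(Matrix([[1, 2, 3, 4]]), 1) == Matrix([[4, 1, 2, 3]])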
def rotate_l(L, k):
'''
Rotates left by k. L is a row of a matrix or a list.
'''
ll = list(L)
if ll == []:
return []
for i in range(k):
el = ll.pop(0)
ll.insert(len(ll) - 1, el)
return ll if type(L) is list else Matrix([ll])
def row2poly(row, deg, x):
'''
Converts the row of a matrix to a poly of degree deg and variable x.
Some entries at the beginning and/or at the end of the row may be zero.
'''
k = 0
poly = []
leng = len(row)
# find the beginning of the poly ; i.e. the first
# non-zero element of the row
while row[k] == 0:
k = k + 1
# append the next deg + 1 elements to poly
for j in range( deg + 1):
if k + j <= leng:
poly.append(row[k + j])
return Poly(poly, x)
def create_ma(deg_f, deg_g, row1, row2, col_num):
'''
Creates a ``small'' matrix M to be triangularized.
    deg_f, deg_g are the degrees of the dividend and of the
    divisor polynomials respectively, with deg_f >= deg_g.
    The coefficients of the dividend poly are the elements
in row2 and those of the divisor poly are the elements
in row1.
col_num defines the number of columns of the matrix M.
'''
if deg_g - deg_f >= 1:
print('Reverse degrees')
return
m = zeros(deg_f - deg_g + 2, col_num)
for i in range(deg_f - deg_g + 1):
m[i, :] = rotate_r(row1, i)
m[deg_f - deg_g + 1, :] = row2
return m
def find_degree(M, deg_f):
'''
Finds the degree of the poly corresponding (after triangularization)
to the _last_ row of the ``small'' matrix M, created by create_ma().
    deg_f is the degree of the dividend poly.
If _last_ row is all 0's returns None.
'''
j = deg_f
for i in range(0, M.cols):
if M[M.rows - 1, i] == 0:
j = j - 1
else:
return j if j >= 0 else 0
def final_touches(s2, r, deg_g):
"""
s2 is sylvester2, r is the row pointer in s2,
deg_g is the degree of the poly last inserted in s2.
After a gcd of degree > 0 has been found with Van Vleck's
method, and was inserted into s2, if its last term is not
in the last column of s2, then it is inserted as many
times as needed, rotated right by one each time, until
the condition is met.
"""
R = s2.row(r-1)
# find the first non zero term
for i in range(s2.cols):
if R[0,i] == 0:
continue
else:
break
# missing rows until last term is in last column
mr = s2.cols - (i + deg_g + 1)
# insert them by replacing the existing entries in the row
i = 0
while mr != 0 and r + i < s2.rows :
s2[r + i, : ] = rotate_r(R, i + 1)
i += 1
mr -= 1
return s2
def subresultants_vv(p, q, x, method = 0):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method. With each remainder,
sylvester2 gets updated and is prepared to be printed if requested.
If sylvester2 has small dimensions and you want to see the final,
triangularized matrix use this version with method=1; otherwise,
use either this version with method=0 (default) or the faster version,
subresultants_vv_2(p, q, x), where sylvester2 is used implicitly.
Sylvester's matrix sylvester1 is also used to compute one
subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
force the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
If the final, triangularized matrix s2 is printed, then:
(a) if deg(p) - deg(q) > 1 or deg( gcd(p, q) ) > 0, several
of the last rows in s2 will remain unprocessed;
(b) if deg(p) - deg(q) == 0, p will not appear in the final matrix.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
3. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
s2 = sylvester(f, g, x, 2)
sr_list = [f, g]
col_num = 2 * n # columns in s2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# row pointer for deg_f - deg_g == 1; may be reset below
r = 2
# modify first rows of s2 matrix depending on poly degrees
if deg_f - deg_g > 1:
r = 1
# replacing the existing entries in the rows of s2,
# insert row0 (deg_f - deg_g - 1) times, rotated each time
for i in range(deg_f - deg_g - 1):
s2[r + i, : ] = rotate_r(row0, i + 1)
r = r + deg_f - deg_g - 1
# insert row1 (deg_f - deg_g) times, rotated each time
for i in range(deg_f - deg_g):
s2[r + i, : ] = rotate_r(row1, r + i)
r = r + deg_f - deg_g
if deg_f - deg_g == 0:
r = 0
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it;
M = create_ma(deg_f, deg_g, row1, row0, col_num)
# will need only the first and last rows of M
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
break
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
temp2 = LC(poly, x)
poly = simplify((poly / temp2) * sign_value)
# update s2 by inserting first row of M as needed
row0 = M[0, :]
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row0, r + i)
r = r + deg_g - d
# update s2 by inserting last row of M as needed
row1 = rotate_l(M[M.rows - 1, :], deg_f - d)
row1 = (row1 / temp2) * sign_value
for i in range(deg_g - d):
s2[r + i, :] = rotate_r(row1, r + i)
r = r + deg_g - d
# update degrees
deg_f, deg_g = deg_g, d
# append poly with subresultant coeffs
sr_list.append(poly)
# final touches to print the s2 matrix
if method != 0 and s2.rows > 2:
s2 = final_touches(s2, r, deg_g)
pprint(s2)
elif method != 0 and s2.rows == 2:
s2[1, :] = rotate_r(s2.row(1), 1)
pprint(s2)
return sr_list
def subresultants_vv_2(p, q, x):
"""
p, q are polynomials in Z[x] (intended) or Q[x]. It is assumed
that degree(p, x) >= degree(q, x).
Computes the subresultant prs of p, q by triangularizing,
in Z[x] or in Q[x], all the smaller matrices encountered in the
process of triangularizing sylvester2, Sylvester's matrix of 1853;
see references 1 and 2 for Van Vleck's method.
If the sylvester2 matrix has big dimensions use this version,
where sylvester2 is used implicitly. If you want to see the final,
triangularized matrix sylvester2, then use the first version,
subresultants_vv(p, q, x, 1).
sylvester1, Sylvester's matrix of 1840, is also used to compute
one subresultant per remainder; namely, that of the leading
coefficient, in order to obtain the correct sign and to
``force'' the remainder coefficients to become subresultants.
If the subresultant prs is complete, then it coincides with the
Euclidean sequence of the polynomials p, q.
References
==========
1. Akritas, A. G.: ``A new method for computing polynomial greatest
common divisors and polynomial remainder sequences.''
    Numerische Mathematik 52, 119-127, 1988.
2. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``On a Theorem
by Van Vleck Regarding Sturm Sequences.''
Serdica Journal of Computing, 7, No 4, 101-134, 2013.
3. Akritas, A. G.:``Three New Methods for Computing Subresultant
Polynomial Remainder Sequences (PRS's).'' Serdica Journal of Computing 9(1) (2015), 1-26.
"""
# make sure neither p nor q is 0
if p == 0 or q == 0:
return [p, q]
# make sure proper degrees
f, g = p, q
n = deg_f = degree(f, x)
m = deg_g = degree(g, x)
if n == 0 and m == 0:
return [f, g]
if n < m:
n, m, deg_f, deg_g, f, g = m, n, deg_g, deg_f, g, f
if n > 0 and m == 0:
return [f, g]
# initialize
s1 = sylvester(f, g, x, 1)
sr_list = [f, g] # subresultant list
col_num = 2 * n # columns in sylvester2
# make two rows (row0, row1) of poly coefficients
row0 = Poly(f, x, domain = QQ).all_coeffs()
leng0 = len(row0)
for i in range(col_num - leng0):
row0.append(0)
row0 = Matrix([row0])
row1 = Poly(g,x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
# main loop
while deg_g > 0:
# create a small matrix M, and triangularize it
M = create_ma(deg_f, deg_g, row1, row0, col_num)
for i in range(deg_f - deg_g + 1):
M1 = pivot(M, i, i)
M = M1[:, :]
# treat last row of M as poly; find its degree
d = find_degree(M, deg_f)
if d is None:
return sr_list
exp_deg = deg_g - 1
# evaluate one determinant & make coefficients subresultants
sign_value = correct_sign(n, m, s1, exp_deg, exp_deg - d)
poly = row2poly(M[M.rows - 1, :], d, x)
poly = simplify((poly / LC(poly, x)) * sign_value)
# append poly with subresultant coeffs
sr_list.append(poly)
# update degrees and rows
deg_f, deg_g = deg_g, d
row0 = row1
row1 = Poly(poly, x, domain = QQ).all_coeffs()
leng1 = len(row1)
for i in range(col_num - leng1):
row1.append(0)
row1 = Matrix([row1])
return sr_list
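# Editor's note: closing usage sketch added by the editor; it is not part of the
# original module.  It exercises the Van Vleck variant with sylvester2 kept
# implicit; subresultants_vv(f, g, x, 1) would additionally pretty-print the
# triangularized sylvester2 matrix, as described in its docstring.
def _demo_subresultants_vv_2():
    from sympy.abc import x
    f = x**3 - 2*x + 5
    g = x**2 + x + 1
    prs = subresultants_vv_2(f, g, x)
    assert prs[0] == f and prs[1] == g
    assert [degree(t, x) for t in prs] == [3, 2, 1, 0]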
| 34.484111 | 93 | 0.592912 |
f49fdd082669444d021d7e83c1b3aa2abdda7d99 | 8,189 | py | Python | paasta_tools/paastaapi/model/instance_bounce_status.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,711 | 2015-11-10T18:04:56.000Z | 2022-03-23T08:53:16.000Z | paasta_tools/paastaapi/model/instance_bounce_status.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 1,689 | 2015-11-10T17:59:04.000Z | 2022-03-31T20:46:46.000Z | paasta_tools/paastaapi/model/instance_bounce_status.py | sobolevn/paasta | 8b87e0b13816c09b3d063b6d3271e6c7627fd264 | [
"Apache-2.0"
] | 267 | 2015-11-10T19:17:16.000Z | 2022-02-08T20:59:52.000Z | # coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class InstanceBounceStatus(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('deploy_status',): {
'RUNNING': "Running",
'DEPLOYING': "Deploying",
'STOPPED': "Stopped",
'DELAYED': "Delayed",
'WAITING': "Waiting",
'NOTRUNNING': "NotRunning",
},
('desired_state',): {
'START': "start",
'STOP': "stop",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'active_shas': ([[str, none_type]],), # noqa: E501
'app_count': (int,), # noqa: E501
'deploy_status': (str,), # noqa: E501
'desired_state': (str,), # noqa: E501
'expected_instance_count': (int,), # noqa: E501
'running_instance_count': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'active_shas': 'active_shas', # noqa: E501
'app_count': 'app_count', # noqa: E501
'deploy_status': 'deploy_status', # noqa: E501
'desired_state': 'desired_state', # noqa: E501
'expected_instance_count': 'expected_instance_count', # noqa: E501
'running_instance_count': 'running_instance_count', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""InstanceBounceStatus - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
active_shas ([[str, none_type]]): List of git/config SHAs running.. [optional] # noqa: E501
app_count (int): The number of different running versions of the same service (0 for stopped, 1 for running and 1+ for bouncing). [optional] # noqa: E501
deploy_status (str): Deploy status of a Kubernetes service. [optional] # noqa: E501
desired_state (str): Desired state of a service, for Kubernetes. [optional] # noqa: E501
expected_instance_count (int): The number of desired instances of the service. [optional] # noqa: E501
running_instance_count (int): The number of actual running instances of the service. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 41.358586 | 166 | 0.585664 |
89f55b907ee67dc13469b8adda756499498135d5 | 1,387 | py | Python | examples/predict_SNe_BAO.py | Guo-Jian-Wang/ecopann | 934108d22e4d5ba9489fcfe1d3cc82f7e847b42b | [
"MIT"
] | 13 | 2020-05-15T02:23:21.000Z | 2021-09-01T01:22:49.000Z | examples/predict_SNe_BAO.py | Guo-Jian-Wang/ecopann | 934108d22e4d5ba9489fcfe1d3cc82f7e847b42b | [
"MIT"
] | null | null | null | examples/predict_SNe_BAO.py | Guo-Jian-Wang/ecopann | 934108d22e4d5ba9489fcfe1d3cc82f7e847b42b | [
"MIT"
] | 2 | 2020-11-30T15:18:14.000Z | 2021-03-11T07:59:35.000Z | # -*- coding: utf-8 -*-
import sys
sys.path.append('..')
import ecopann.ann as ann
import ecopann.coplot.plot_contours as plc
import ecopann.cosmic_params as cosmic_params
import simulator
import matplotlib.pyplot as plt
import numpy as np
#%% observational data
fid_params = [-1, 0.3]
sim_mu = simulator.sim_SNe(fid_params=fid_params)
sim_Hz, sim_DA = simulator.sim_BAO(fid_params=fid_params)
z_SNe = sim_mu[:,0]
z_BAO = sim_Hz[:,0]
param_names = ['w', 'omm']
params_dict = {'omm' : [r'$\Omega_m$', 0.3, 0.0, 1.0],
'w' : [r'$w$', -1, np.nan, np.nan]}
# %% estimate parameters using ECoPANN
randn_num = '1.14058'; steps_n = 8 #train1k, epoch1k
predictor = ann.RePredict([sim_mu, sim_Hz, sim_DA], cov_matrix=None, path='SNe_BAO',
randn_num=randn_num, steps_n=steps_n,
params_dict=simulator.params_dict)
predictor.from_chain()
# predictor.from_net()
chain_ann = predictor.chain_ann
predictor.plot_steps()
predictor.plot_contours(fill_contours=False, show_titles=True)
predictor.save_steps()
predictor.save_contours()
#%%
labels = cosmic_params.ParamsProperty(param_names, params_dict=params_dict).labels
plc.Contours(chain_ann).plot(bins=100,smooth=5,labels=labels,fill_contours=False,show_titles=True,
best_values=fid_params,show_best_value_lines=True)
#%%
plt.show()
| 26.673077 | 98 | 0.69863 |
bc95804f44f4fc89e280031961d2529ac018964c | 1,841 | py | Python | shop/migrations/0001_initial.py | javierpinya/myshop | 7a8f9cce40ffb980d80b74d4d461bbb5c1b49a52 | [
"MIT"
] | null | null | null | shop/migrations/0001_initial.py | javierpinya/myshop | 7a8f9cce40ffb980d80b74d4d461bbb5c1b49a52 | [
"MIT"
] | null | null | null | shop/migrations/0001_initial.py | javierpinya/myshop | 7a8f9cce40ffb980d80b74d4d461bbb5c1b49a52 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-25 08:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Category')),
],
options={
'ordering': ('name',),
'index_together': {('id', 'slug')},
},
),
]
| 38.354167 | 138 | 0.548072 |
9a6c06cce0d7fc389e1187af430919724412e81d | 161 | py | Python | bracket/info.py | bracketing/bracket | a4f8927a2ba4bc06e29bbba1b0ae2249d33fb0c4 | [
"MIT"
] | null | null | null | bracket/info.py | bracketing/bracket | a4f8927a2ba4bc06e29bbba1b0ae2249d33fb0c4 | [
"MIT"
] | null | null | null | bracket/info.py | bracketing/bracket | a4f8927a2ba4bc06e29bbba1b0ae2249d33fb0c4 | [
"MIT"
] | 1 | 2020-12-30T11:25:57.000Z | 2020-12-30T11:25:57.000Z | # Package Version (https://github.com/bracketing/bracket/releases/)
__version__ = "0.1.2"
# Update Log (https://github.com/bracketing/bracket/tags)
logs = []
| 20.125 | 67 | 0.714286 |
0991fe871c7deef569d4599d5f1fa095061f0fbf | 8,011 | py | Python | Views/EditVAS_ui.py | acadianshadow237/BA_MDI1 | 73e0e87c15ff083ce860f7a09fa2de3a3c71c215 | [
"MIT"
] | null | null | null | Views/EditVAS_ui.py | acadianshadow237/BA_MDI1 | 73e0e87c15ff083ce860f7a09fa2de3a3c71c215 | [
"MIT"
] | null | null | null | Views/EditVAS_ui.py | acadianshadow237/BA_MDI1 | 73e0e87c15ff083ce860f7a09fa2de3a3c71c215 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'EditVAS_ui.ui'
##
## Created by: Qt User Interface Compiler version 6.1.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import * # type: ignore
from PySide6.QtGui import * # type: ignore
from PySide6.QtWidgets import * # type: ignore
class Ui_EditVAS_AS_Dialog(QWidget):
def setupUi(self, EditVAS_AS_Dialog):
if not EditVAS_AS_Dialog.objectName():
EditVAS_AS_Dialog.setObjectName(u"EditVAS_AS_Dialog")
EditVAS_AS_Dialog.resize(961, 623)
self.verticalLayout_3 = QVBoxLayout(EditVAS_AS_Dialog)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.verticalLayout_2 = QVBoxLayout()
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName(u"verticalLayout")
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.label_7 = QLabel(EditVAS_AS_Dialog)
self.label_7.setObjectName(u"label_7")
font = QFont()
font.setPointSize(12)
self.label_7.setFont(font)
self.label_7.setAlignment(Qt.AlignCenter)
self.horizontalLayout.addWidget(self.label_7)
self.textEditID = QTextEdit(EditVAS_AS_Dialog)
self.textEditID.setObjectName(u"textEditID")
self.textEditID.setMaximumSize(QSize(16777215, 35))
self.textEditID.setFont(font)
self.textEditID.setReadOnly(True)
self.horizontalLayout.addWidget(self.textEditID)
self.label = QLabel(EditVAS_AS_Dialog)
self.label.setObjectName(u"label")
self.label.setMaximumSize(QSize(60, 16777215))
self.label.setFont(font)
self.label.setAlignment(Qt.AlignCenter)
self.horizontalLayout.addWidget(self.label)
self.textEditName = QTextEdit(EditVAS_AS_Dialog)
self.textEditName.setObjectName(u"textEditName")
self.textEditName.setMaximumSize(QSize(100, 35))
self.textEditName.setFont(font)
self.textEditName.setAutoFillBackground(False)
self.textEditName.setReadOnly(True)
self.horizontalLayout.addWidget(self.textEditName)
self.label_2 = QLabel(EditVAS_AS_Dialog)
self.label_2.setObjectName(u"label_2")
self.label_2.setMaximumSize(QSize(98, 16777215))
self.label_2.setFont(font)
self.label_2.setAlignment(Qt.AlignCenter)
self.horizontalLayout.addWidget(self.label_2)
self.textEditRoadName = QTextEdit(EditVAS_AS_Dialog)
self.textEditRoadName.setObjectName(u"textEditRoadName")
self.textEditRoadName.setMaximumSize(QSize(100, 35))
self.textEditRoadName.setFont(font)
self.textEditRoadName.setAutoFillBackground(False)
self.textEditRoadName.setReadOnly(True)
self.horizontalLayout.addWidget(self.textEditRoadName)
self.label_5 = QLabel(EditVAS_AS_Dialog)
self.label_5.setObjectName(u"label_5")
self.label_5.setMaximumSize(QSize(200, 16777215))
self.label_5.setFont(font)
self.label_5.setAlignment(Qt.AlignCenter)
self.horizontalLayout.addWidget(self.label_5)
self.textEditpasid = QTextEdit(EditVAS_AS_Dialog)
self.textEditpasid.setObjectName(u"textEditpasid")
self.textEditpasid.setMaximumSize(QSize(110, 35))
self.textEditpasid.setFont(font)
self.textEditpasid.setAutoFillBackground(False)
self.textEditpasid.setReadOnly(True)
self.horizontalLayout.addWidget(self.textEditpasid)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
self.label_3 = QLabel(EditVAS_AS_Dialog)
self.label_3.setObjectName(u"label_3")
self.label_3.setMaximumSize(QSize(16777215, 16777215))
self.label_3.setFont(font)
self.label_3.setAlignment(Qt.AlignCenter)
self.horizontalLayout_2.addWidget(self.label_3)
self.textEditFrom = QTextEdit(EditVAS_AS_Dialog)
self.textEditFrom.setObjectName(u"textEditFrom")
self.textEditFrom.setMaximumSize(QSize(100, 35))
self.textEditFrom.setFont(font)
self.textEditFrom.setAutoFillBackground(False)
self.textEditFrom.setReadOnly(True)
self.horizontalLayout_2.addWidget(self.textEditFrom)
self.label_4 = QLabel(EditVAS_AS_Dialog)
self.label_4.setObjectName(u"label_4")
self.label_4.setMaximumSize(QSize(16777215, 16777215))
self.label_4.setFont(font)
self.label_4.setAlignment(Qt.AlignCenter)
self.horizontalLayout_2.addWidget(self.label_4)
self.textEditTo = QTextEdit(EditVAS_AS_Dialog)
self.textEditTo.setObjectName(u"textEditTo")
self.textEditTo.setMaximumSize(QSize(100, 35))
self.textEditTo.setFont(font)
self.textEditTo.setAutoFillBackground(True)
self.textEditTo.setReadOnly(True)
self.horizontalLayout_2.addWidget(self.textEditTo)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.tableWidget = QTableWidget(EditVAS_AS_Dialog)
self.tableWidget.setObjectName(u"tableWidget")
self.tableWidget.setFont(font)
self.tableWidget.setStyleSheet(u"QHeaderView::section {background-color: rgb(211,211,211); color: rgb(0, 0, 0);}\n"
"QHeaderView::section:horizontal{ border-top: 1px solid #fffff8;}")
self.verticalLayout_2.addWidget(self.tableWidget)
self.horizontalLayout_3 = QHBoxLayout()
self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
self.horizontalSpacer = QSpacerItem(500, 17, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(self.horizontalSpacer)
self.pushButtonEdit = QPushButton(EditVAS_AS_Dialog)
self.pushButtonEdit.setObjectName(u"pushButtonEdit")
self.pushButtonEdit.setFont(font)
self.horizontalLayout_3.addWidget(self.pushButtonEdit)
self.pushButtonCancel = QPushButton(EditVAS_AS_Dialog)
self.pushButtonCancel.setObjectName(u"pushButtonCancel")
self.pushButtonCancel.setFont(font)
self.horizontalLayout_3.addWidget(self.pushButtonCancel)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.verticalLayout_2.setStretch(0, 2)
self.verticalLayout_2.setStretch(1, 30)
self.verticalLayout_2.setStretch(2, 1)
self.verticalLayout_3.addLayout(self.verticalLayout_2)
self.retranslateUi(EditVAS_AS_Dialog)
QMetaObject.connectSlotsByName(EditVAS_AS_Dialog)
# setupUi
def retranslateUi(self, EditVAS_AS_Dialog):
EditVAS_AS_Dialog.setWindowTitle(QCoreApplication.translate("EditVAS_AS_Dialog", u"Edit Analysis Sections", None))
self.label_7.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"ID:", None))
self.label.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"Name:", None))
self.label_2.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"RoadName:", None))
self.label_5.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"pvmt_analysis_section_id:", None))
self.label_3.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"From:", None))
self.label_4.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"To:", None))
self.pushButtonEdit.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"Edit", None))
self.pushButtonCancel.setText(QCoreApplication.translate("EditVAS_AS_Dialog", u"Cancel", None))
# retranslateUi
| 40.872449 | 123 | 0.707652 |
f6943f29c5aa2c04aba14839165bc4bc38d8bbeb | 23,268 | py | Python | src/python/utilities/PDFTree.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | 3 | 2015-09-24T23:12:57.000Z | 2021-04-12T07:07:01.000Z | src/python/utilities/PDFTree.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | null | null | null | src/python/utilities/PDFTree.py | plewis/phycas | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | [
"MIT"
] | 1 | 2015-11-23T10:35:43.000Z | 2015-11-23T10:35:43.000Z | import math, types
from phycas.pdfgen import *
from phycas.utilities.CommonFunctions import CommonFunctions
from phycas.utilities.GlobalState import readFile
from phycas.phylogeny import Tree
class PDFTree(CommonFunctions):
def __init__(self):
CommonFunctions.__init__(self, None)
self.pdf_splits_to_plot = None
# Variables associated with PDF tree drawing (used in pdftree() function)
# The 14 standard fonts guaranteed to be available in all PDF consumer applications:
# Times-Roman Helvetica Courier Symbol
# Times-Bold Helvetica-Bold Courier-Bold ZapfDingbats
# Times-Italic Helvetica-Oblique Courier-Oblique
# Times-BoldItalic Helvetica-BoldOblique Courier-BoldOblique
self.pdf_filename = 'trees.pdf' # Set to desired name of pdf file to create
self.pdf_edge_support_file = None # File containing PAUP* output with table of support values; if specified, the support values will be shown on trees plotted
self.pdf_tip_label_font = 'Times-Italic' # Font used for tip node names; should be one of the 14 standard fonts listed above
self.pdf_tip_label_height = 12 # Height in points of tip node name font
self.pdf_plot_label_font = 'Helvetica' # Font used for plot axis labels; should be one of the 14 standard fonts listed above
self.pdf_plot_label_height = 12 # Height in points of plot axis label font
self.pdf_title_font = 'Helvetica' # Font used for scalebar text; should be one of the 14 standard fonts listed above
self.pdf_title_height = 14 # Height in points of scalebar text font
self.pdf_scalebar_position = 'bottom' # Valid values are 'top', 'bottom' or None
self.pdf_scalebar_label_font = 'Helvetica' # Font used for scalebar text; should be one of the 14 standard fonts listed above
self.pdf_scalebar_label_height = 10 # Height in points of scalebar text font
self.pdf_support_label_font = 'Times-Roman' # Font used for edge support values; should be one of the 14 standard fonts listed above
self.pdf_support_label_height = 8 # Height in points of edge support font
self.pdf_support_as_percent = True # If True, support values will be shown as percentages (e.g. 93.1) rather than proportions (e.g. 0.931)
self.pdf_support_decimals = 1 # The number of decimal places shown in support values (e.g. to get 93.7, specify 1; to round up to 94, specify 0)
self.pdf_ladderize = 'right' # Valid values are 'right', 'left' or None
self.pdf_page_width = 8.5 # Page width in inches
self.pdf_page_height = 11.0 # Page length in inches
self.pdf_line_width = 1.0 # Width of lines representing edges in the tree
self.pdf_left_margin = 1.0 # Left margin in inches (1 inch = 72 points)
self.pdf_right_margin = 1.0 # Right margin in inches (1 inch = 72 points)
self.pdf_top_margin = 1.0 # Top margin in inches (1 inch = 72 points)
self.pdf_bottom_margin = 1.0 # Bottom margin in inches (1 inch = 72 points)
self.keep_xy_proportional = True # If True, vertical dimension of each tree in a collection will be kept proportional to its horizontal dimension
self.keep_tip_labels_proportional = True # If True, tip label height will be kept commensurate with size of tree for each tree in a printed collection (smaller trees will have smaller tip labels)
self.pdf_treefile = None # Set to tree file name if you want to make one pdf file with each tree from tree file on a separate page
self.pdf_newick = None # Set to the tree description to print if only want to save one tree to a pdf file
self.pdf_outgroup_taxon = None # Set to taxon name of tip serving as the outgroup for display rooting purposes (note: at this time outgroup can consist of just one taxon)
def pdftree(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Creates a PDF file containing a single tree (if pdf_newick is
specified) or a collection of trees (if pdf_treefile is specified).
If a collection of trees is specified, scales all trees the same (i.e
the scalebar is identical in size for all trees plotted).
"""
#complex_outgroup = type(self.pdf_outgroup_taxon) in (types.ListType,types.TupleType)
simple_outgroup = type(self.pdf_outgroup_taxon) == types.StringType
self.phycassert(simple_outgroup, 'Phycas cannot yet deal with pdf_outgroup_taxon containing more than one outgroup taxon')
self.phycassert((self.pdf_treefile and not self.pdf_newick) or (self.pdf_newick and not self.pdf_treefile), 'set either pdf_newick or pdf_treefile, but not both')
# If pdf_edge_support_file has been specified, read splits table from the file
# and store the splits in the pdf_splits_to_plot dictionary
if self.pdf_edge_support_file and os.path.exists(self.pdf_edge_support_file):
# Read splits file and store all splits found along with their frequencies
contents_of_file = open(self.pdf_edge_support_file,'r').read()
regex = re.compile('([*.]+)\s+([0-9.]+)', re.M)
matches = regex.findall(contents_of_file)
self.phycassert(matches, 'could not find any splits defined in the pdf_edge_support_file named %s' % self.pdf_edge_support_file)
self.pdf_splits_to_plot = {}
for p,f in matches:
self.pdf_splits_to_plot[p] = float(f)
# Fork depending on whether user wants to print just one tree (pdf_newick specified)
# or an entire collection of trees (pdf_treefile specified)
if self.pdf_newick:
# Build tree the newick description of which is in self.newick
tree = self.pdf_newick.buildTree()
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
self.phycassert(num is not None, 'could not root tree using specified outgroup: no tip having name "%s" could be found' % self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
if self.pdf_ladderize:
if self.pdf_ladderize == 'right':
tree.ladderizeRight()
else:
tree.ladderizeLeft()
# Save tree in PDF
pdf = PDFGenerator(self.pdf_page_width, self.pdf_page_height)
pdf.overwrite = True
pdf.newPage()
self.tree2pdf(pdf, tree)
pdf.saveDocument(self.pdf_filename)
else:
# Open pdf_treefile and read trees therein
self.tree_file_name = self.pdf_treefile
contents = readFile(self.pdf_treefile)
# Build each tree and determine its height
tree = Tree()
max_height = 0.0
for tree_def in contents.trees:
tree_def.buildTree(tree)
tree.rectifyNames(contents.taxon_labels)
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
self.phycassert(num is not None, 'could not root tree using specified outgroup: no tip having name "%s" could be found' % self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
h = tree.calcTotalHeight()
if h > max_height:
max_height = h
#tlen = tree.edgeLenSum()
#print 'tlen =',tlen,', height =',h
# Build each tree again and save in PDF file
pdf = PDFGenerator(self.pdf_page_width, self.pdf_page_height)
pdf.overwrite = True
for tree_def in contents.trees:
tree_def.buildTree(tree)
tree.rectifyNames(contents.taxon_labels)
if self.pdf_outgroup_taxon:
num = tree.findTipByName(self.pdf_outgroup_taxon)
tree.rerootAtTip(num)
if self.pdf_ladderize:
if self.pdf_ladderize == 'right':
tree.ladderizeRight()
else:
tree.ladderizeLeft()
tree.rectifyNames(contents.taxon_labels)
pdf.newPage()
self.tree2pdf(pdf, tree, None, max_height)
pdf.saveDocument(self.pdf_filename)
# Prevent unintentional spillover
self.pdf_splits_to_plot = None
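    # Editor's note: hedged usage sketch (an editor addition, not original code).
    # A typical call might look like the following; the file and taxon names are
    # placeholders:
    #
    #     pt = PDFTree()
    #     pt.pdf_treefile = 'mytrees.t'        # one page per tree in this file
    #     pt.pdf_outgroup_taxon = 'Taxon A'    # optional display rooting
    #     pt.pdf_filename = 'mytrees.pdf'
    #     pt.pdftree()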
def tree2pdf(self, pdf, tree, title = None, xscalemax = 0.0, show_support = False):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Prints tree on a pdf object (instance of class PDFGenerator). If title
is specified, the supplied string will be centered at the top of the
page. The optional argument xscalemax represents the maximum height
of a group of trees being printed on separate pages in the same pdf
document. If xscalemax is left unspecified, each tree will be scaled
to fit the page and the scalebar will be adjusted accordingly. If
xscalemax is specified, it will be used to determine the scalebar, and
the scalebar will remain the same size for all trees printed with the
same xcalemax value.
"""
# TODO: max_label_points should be calculated outside this function and passed in as an argument
inch = 72.0
spacer = 5.0
max_label_points = 0.0
rooted_tree = tree.isRooted()
nodes = []
# Perform a preorder traversal:
# 1) for each node, set x-value to height above root (in units of edge length)
# 2) for each tip, set y-value to tip index, with root tip being 0, and other
# tips being numbered from left to right
# 3) find the length of the longest taxon label as it will be rendered in the
# PDF file so that the margin calculations can be made
# 4) for each internal, just set y-value to 0.0 for now; these internal y-values
# will be calculated on the subsequent postorder traversal
if self.pdf_splits_to_plot:
tree.recalcAllSplits(tree.getNObservables())
# Record information about the tip serving as the root
nd = tree.getFirstPreorder()
assert nd.isRoot(), 'first preorder node should be the root'
if not rooted_tree:
nodes.append(nd)
subroot = nd.getLeftChild()
height = subroot.getEdgeLen()
nd.setX(height)
if self.pdf_ladderize and self.pdf_ladderize == 'left':
last_tip_index = float(tree.getNObservables() - 1)
nd.setY(last_tip_index) #--> Y is irrelevant if rooted
ntips = 0.0
else:
nd.setY(0.0)
if rooted_tree:
ntips = 0.0
else:
ntips = 1.0
max_height = height
# Determine the width (in points) occupied by the longest taxon label
if self.pdf_tip_label_font and not rooted_tree:
taxon_label = nd.getNodeName()
label_width = float(self.pdf_tip_label_height)*pdf.calcStringWidth(self.pdf_tip_label_font, taxon_label)
if label_width > max_label_points:
max_label_points = label_width
# Record information about the internal node serving as the subroot
nd = nd.getNextPreorder()
assert nd.getParent().isRoot(), 'second preorder node should be the subroot'
nodes.append(nd)
nd.setX(0.0)
nd.setY(0.0)
subroot = nd
# Record information about the remaining nodes in the tree
while True:
nd = nd.getNextPreorder()
if not nd:
break
else:
ndpar = nd.getParent()
nodes.append(nd)
height = nd.getEdgeLen() + ndpar.getX()
nd.setX(height)
if height > max_height:
max_height = height
if nd.isTip():
nd.setY(ntips)
ntips += 1.0
if self.pdf_tip_label_font:
taxon_label = nd.getNodeName()
label_width = float(self.pdf_tip_label_height)*pdf.calcStringWidth(self.pdf_tip_label_font, taxon_label)
if label_width > max_label_points:
max_label_points = label_width
else:
nd.setY(0.0)
# Compute length represented by scale bar. For example,
# xscalemax = 0.00275
# log_xscalemax = -2.56
# ten_to_power = 10^floor(-2.56)
# = 10^{-3}
# = 0.001
# scalebar = 0.001*floor(0.00275/0.001)
# = 0.001*floor(2.75)
# = 0.002
# ndecimals = -floor(-2.56)
# = 3.0
if xscalemax == 0.0:
xscalemax = max_height
half_xscalemax = xscalemax/2.0
log_xscalemax = math.log10(half_xscalemax)
ten_to_power = 10**math.floor(log_xscalemax)
scalebar = ten_to_power*math.floor(half_xscalemax/ten_to_power)
ndecimals = -int(math.floor(log_xscalemax))
if ndecimals < 0:
ndecimals = 0
format_str = '%%.%df' % (ndecimals)
scalebar_str = format_str % scalebar
scalebar_str_extent = float(self.pdf_scalebar_label_height)*pdf.calcStringWidth(self.pdf_scalebar_label_font, scalebar_str)
scalebar_height = float(self.pdf_scalebar_label_height) + 2*spacer + self.pdf_line_width
# Find xscaler (amount by which branch lengths must be multiplied to give x-coordinate)
# and yscaler (amount by which the tip position must be multiplied to give y-coordinate).
xheight = 0.0
if self.pdf_tip_label_font:
xheight = float(self.pdf_tip_label_height)*pdf.getXHeight(self.pdf_tip_label_font)
half_xheight = xheight/2.0
ntips = tree.getNObservables()
label_width = max_label_points + spacer
right_margin = self.pdf_right_margin*inch
left_margin = self.pdf_left_margin*inch
top_margin = self.pdf_top_margin*inch
bottom_margin = self.pdf_bottom_margin*inch
plot_right = self.pdf_page_width*inch
plot_width = plot_right - left_margin - right_margin
plot_top = self.pdf_page_height*inch
plot_height = plot_top - top_margin - bottom_margin
tree_width = plot_width - label_width
tree_height = plot_height
if self.pdf_scalebar_position:
tree_height -= scalebar_height
if title:
tree_height -= 3.0*float(self.pdf_title_height)
tree_x0 = left_margin
tree_y0 = bottom_margin + scalebar_height
xscaler = tree_width/xscalemax
yscaler = tree_height/float(ntips - 1)
#pdf.addRectangle(left_margin, bottom_margin, plot_width, plot_height, 1, 'dotted')
if title and self.pdf_title_height > 0:
# Draw title centered at top of page
title_str_extent = float(self.pdf_title_height)*pdf.calcStringWidth(self.pdf_title_font, title)
title_x = left_margin + (plot_width - title_str_extent)/2.0
title_y = tree_y0 + tree_height + 2.0*float(self.pdf_title_height)
pdf.addText(title_x, title_y, self.pdf_title_font, self.pdf_title_height, title)
if self.pdf_scalebar_position:
if self.pdf_scalebar_position == 'top':
# Draw scalebar horizontally starting at top left corner
scalebar_width = scalebar*xscaler
scalebar_y = tree_x0 + tree_height - scalebar_height + spacer
pdf.addLine(left_margin, scalebar_y, left_margin + scalebar_width, scalebar_y, self.pdf_line_width)
# Draw scalebar text centered above the scalebar
scalebar_x = left_margin + (scalebar_width - scalebar_str_extent)/2.0
scalebar_y = tree_x0 + tree_height - float(self.pdf_scalebar_label_height)
pdf.addText(scalebar_x, scalebar_y, self.pdf_scalebar_label_font, self.pdf_scalebar_label_height, scalebar_str)
else:
# Draw scalebar horizontally starting at bottom left corner
scalebar_width = scalebar*xscaler
pdf.addLine(left_margin, bottom_margin, left_margin + scalebar_width, bottom_margin, self.pdf_line_width)
# Draw scalebar text centered above the scalebar
scalebar_x = left_margin + (scalebar_width - scalebar_str_extent)/2.0
scalebar_y = bottom_margin + spacer
pdf.addText(scalebar_x, scalebar_y, self.pdf_scalebar_label_font, self.pdf_scalebar_label_height, scalebar_str)
# add enough to left margin to center smaller trees horizontally
left_margin += (xscaler*(xscalemax - max_height) + label_width*(1.0 - max_height/xscalemax))/2.0
# add enough to the top margin to center smaller trees vertically
top_margin += (tree_height*(1.0 - max_height/xscalemax))/2.0
#top_margin += (plot_height*(1.0 - max_height/xscalemax))/2.0
# adjust yscaler to keep vertical tree dimension proportional to its horizontal dimension
if self.keep_xy_proportional:
yscaler *= max_height/xscalemax
# adjust tip label height (in points) to make size of tip labels commensurate with size of tree
if self.keep_tip_labels_proportional:
tip_font_points = self.pdf_tip_label_height*max_height/xscalemax
else:
tip_font_points = self.pdf_tip_label_height
# Perform a postorder traversal:
# 1) scale each x-value
# 2) calculate y-value of each internal node as the average y-value of its children
# 3) scale each y-value
# 4) plot each edge
# 5) plot names of tips
# 6) for each internal node, draw shoulder from leftmost child to rightmost
nodes.reverse()
for nd in nodes:
node_x = left_margin + nd.getX()*xscaler
if nd.isTip():
node_y = tree_y0 + tree_height - nd.getY()*yscaler
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
node_y -= scalebar_height
brlen = nd.isRoot() and xscaler*nd.getX() or xscaler*nd.getEdgeLen()
# draw tip node name
if self.pdf_tip_label_font:
pdf.addText(node_x + spacer, node_y - half_xheight, self.pdf_tip_label_font, tip_font_points, nd.getNodeName())
# draw line representing edge leading to tip node
pdf.addLine(node_x, node_y, node_x - brlen, node_y, self.pdf_line_width)
else:
nchildren = 1.0
child = nd.getLeftChild()
left_child = right_child = child
childY = child.getY()
while True:
child = child.getRightSib()
if child:
right_child = child
childY += child.getY()
nchildren += 1.0
else:
break
if (not rooted_tree) and (nd is subroot):
if self.pdf_ladderize and self.pdf_ladderize == 'left':
right_child = nd.getParent()
else:
left_child = nd.getParent()
else:
nd.setY(childY/nchildren)
node_y = tree_y0 + tree_height - childY*yscaler/nchildren
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
node_y -= scalebar_height
brlen = xscaler*nd.getEdgeLen()
# draw line representing edge leading to internal node
pdf.addLine(node_x, node_y, node_x - brlen, node_y, self.pdf_line_width)
# draw line representing shoulders of internal node
left_y = tree_y0 + tree_height - left_child.getY()*yscaler
right_y = tree_y0 + tree_height - right_child.getY()*yscaler
if self.pdf_scalebar_position and self.pdf_scalebar_position == 'top':
left_y -= scalebar_height
right_y -= scalebar_height
pdf.addLine(node_x, left_y, node_x, right_y, self.pdf_line_width)
# if specified, plot support value
if show_support and self.pdf_splits_to_plot:
for p in self.pdf_splits_to_plot.keys():
s = Split()
s.setOnSymbol('*')
s.setOffSymbol('.')
s.createFromPattern(p)
if s.equals(nd.getSplit()):
support_x = node_x + spacer
support_y = (left_y + right_y)/2.0 - half_xheight
support_str = '%.1f' % self.pdf_splits_to_plot[p]
pdf.addText(support_x, support_y, self.pdf_support_label_font, self.pdf_support_label_height, support_str)
break
elif show_support and nd is not subroot:
# Expecting each node's support data member to be set already
support_format = '%%.%df' % self.pdf_support_decimals
if self.pdf_support_as_percent:
support_str = support_format % (100.0*nd.getSupport(),)
else:
support_str = support_format % (nd.getSupport(),)
support_str_extent = float(self.pdf_support_label_height)*pdf.calcStringWidth(self.pdf_support_label_font, support_str)
support_x = node_x - (brlen + support_str_extent)/2.0
support_y = (left_y + right_y)/2.0 + half_xheight
pdf.addText(support_x, support_y, self.pdf_support_label_font, self.pdf_support_label_height, support_str)
| 57.029412 | 214 | 0.589393 |
a3adae1efbc2afc4d9426308678f385d764bedec | 1,809 | py | Python | .ipynb_checkpoints/create_tables-checkpoint.py | samson-arita/postgres-data-modeling | 8c73fdf3973e3ad05a37333243076d80b5f796a9 | ["MIT"] | null | null | null |
.ipynb_checkpoints/create_tables-checkpoint.py | samson-arita/postgres-data-modeling | 8c73fdf3973e3ad05a37333243076d80b5f796a9 | ["MIT"] | null | null | null |
.ipynb_checkpoints/create_tables-checkpoint.py | samson-arita/postgres-data-modeling | 8c73fdf3973e3ad05a37333243076d80b5f796a9 | ["MIT"] | null | null | null |
import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
"""
- Creates and connects to the sparkifydb
- Returns the connection and cursor to sparkifydb
"""
# connect to default database
conn = psycopg2.connect("host=127.0.0.1 dbname=studentdb user=student password=student")
conn.set_session(autocommit=True)
cur = conn.cursor()
# create sparkify database with UTF8 encoding
cur.execute("DROP DATABASE IF EXISTS sparkifydb")
cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
# close connection to default database
conn.close()
# connect to sparkify database
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
return cur, conn
def drop_tables(cur, conn):
"""
Drops each table using the queries in `drop_table_queries` list.
"""
for query in drop_table_queries:
print(query)
cur.execute(query)
conn.commit()
def create_tables(cur, conn):
"""
Creates each table using the queries in `create_table_queries` list.
"""
for query in create_table_queries:
print(query)
cur.execute(query)
conn.commit()
def main():
"""
- Drops (if exists) and Creates the sparkify database.
- Establishes connection with the sparkify database and gets
cursor to it.
- Drops all the tables.
- Creates all tables needed.
- Finally, closes the connection.
"""
cur, conn = create_database()
print(cur, conn)
#drop_tables(cur, conn)
create_tables(cur, conn)
#drop_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main()
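# Illustrative sketch (not part of the original file): the script above imports
# `create_table_queries` and `drop_table_queries` from a sibling `sql_queries.py`
# module that is not shown in this record. A minimal, hypothetical version of that
# module would just be two lists of SQL strings, e.g.:
#
#   songplay_table_create = "CREATE TABLE IF NOT EXISTS songplays (songplay_id SERIAL PRIMARY KEY, start_time TIMESTAMP, user_id INT);"
#   songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
#
#   create_table_queries = [songplay_table_create]
#   drop_table_queries = [songplay_table_drop]
#
# Table names and columns here are assumptions for demonstration only.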
| 24.12 | 93 | 0.656164 |
c49e1f3b41f0e15e50714a19caaad04614490d6e | 389 | py | Python | backend/naki/naki/view/spec.py | iimcz/emod | 432094c020247597a94e95f76cc524c20b68b685 | ["MIT"] | null | null | null |
backend/naki/naki/view/spec.py | iimcz/emod | 432094c020247597a94e95f76cc524c20b68b685 | ["MIT"] | 6 | 2021-03-08T23:32:15.000Z | 2022-02-26T08:11:38.000Z |
backend/naki/naki/view/spec.py | iimcz/emod | 432094c020247597a94e95f76cc524c20b68b685 | ["MIT"] | null | null | null |
from cornice import Service
from cornice.service import get_services
from cornice_swagger import CorniceSwagger
swagger = Service(name='OpenAPI',
path='/api/v1/spec',
description="OpenAPI documentation")
@swagger.get()
def openAPI_spec(request):
doc = CorniceSwagger(get_services())
my_spec = doc.generate('NAKI', '1.0.0')
    return my_spec
 | 25.933333 | 54 | 0.686375 |
a249432f26c10055223e9fcd75027e9153f2c137 | 160 | py | Python | Examples/get_sldies_slide_image.py | rizwanniazigroupdocs/aspose-slides-cloud-python | f692a7082387350f80f0b389c1914e33b800a76f | ["MIT"] | null | null | null |
Examples/get_sldies_slide_image.py | rizwanniazigroupdocs/aspose-slides-cloud-python | f692a7082387350f80f0b389c1914e33b800a76f | ["MIT"] | null | null | null |
Examples/get_sldies_slide_image.py | rizwanniazigroupdocs/aspose-slides-cloud-python | f692a7082387350f80f0b389c1914e33b800a76f | ["MIT"] | null | null | null |
from slides_configuration import *
request=GetSlidesSlideImagesRequest("test.pptx", 1)
response = images_api.get_slides_slide_images(request)
print(response)
 | 22.857143 | 54 | 0.8375 |
b901fb85c07de737692a220c490079dd5238a866 | 5,340 | py | Python | data/scripts/hardcoreFastCommands.py | starpirate2203/BROODY-S-LAST-SCRIPT | 7b5a9eab02c782ebf39b0f3edf69536fae8289c6 | ["MIT"] | 2 | 2021-09-07T16:04:30.000Z | 2021-09-16T03:30:16.000Z |
data/scripts/hardcoreFastCommands.py | starpirate2203/BROODY-S-LAST-SCRIPT | 7b5a9eab02c782ebf39b0f3edf69536fae8289c6 | ["MIT"] | null | null | null |
data/scripts/hardcoreFastCommands.py | starpirate2203/BROODY-S-LAST-SCRIPT | 7b5a9eab02c782ebf39b0f3edf69536fae8289c6 | ["MIT"] | 1 | 2021-09-21T12:42:28.000Z | 2021-09-21T12:42:28.000Z |
# -*- coding: utf-8 -*-
import bs
import bsUI
import bsInternal
# written by drov.drov
gPopupWindowColor = (0.45, 0.4, 0.55)
commands = ['/kick','/ban','/frozen','/flex', \
'/dance','/dance2','/admin','/vip','/df','/rise','/curse','/head','/skin']
skins = ['delete', 'bunny','bear','pixie','santa','tnt',\
'shard','invincible','bones','pirate','frosty','agent',\
'taobao','grumbledorf','penguin','shadow','cyborg','zoe',\
'spaz','kronk','mel','warrior','lee','zola','butch',\
'oldlady','middleman','gladiator','alien','wrestler',\
'gretel','robot','witch','mcburton']
commands_account_needed = ['/kick','/ban','/admin','/vip','/df','/skin']
def get_number(clientID):
roster, activity = bsInternal._getGameRoster(), bsInternal._getForegroundHostActivity()
choices = []
if len(roster) > 0:
players_ids = []
my_ids = [i['players'] for i in roster if i['clientID'] == clientID]
        my_ids = [i['id'] for i in my_ids[0]] if len(my_ids) > 0 else []
dt = [[c["id"] for c in i["players"]] for i in roster]
for i in dt:
for d in i:
players_ids.append(d)
players_ids.sort()
if len(my_ids) > 0: choices = [players_ids.index(i) for i in my_ids]
elif activity is not None and hasattr(activity, 'players') and len(activity.players) > 0:
for i in activity.players:
if i.exists() and hasattr(i, 'getInputDevice') and i.getInputDevice().getClientID() == clientID:
choices.append(activity.players.index(i))
return choices
def get_account(clientID):
roster, activity = bsInternal._getGameRoster(), bsInternal._getForegroundHostActivity()
account = None
if len(roster) > 0:
for i in roster:
if i['clientID'] == clientID:
account = i['displayString'].decode('utf-8')
break
elif activity is not None and hasattr(activity, 'players') and len(activity.players) > 0:
for i in activity.players:
if i.exists() and hasattr(i, 'getInputDevice') and i.getInputDevice().getClientID() == clientID:
account = i.getInputDevice()._getAccountName(True)
break
return account
def _popupWindow(self, choices=[]):
return bsUI.PopupMenuWindow(position=getattr(self, 'popupMenuPosition', (0,0)),
scale=2.3 if bsUI.gSmallUI else 1.65 if bsUI.gMedUI else 1.23,
choices=choices,
choicesDisplay=[bs.Lstr(value=i) for i in choices],
currentChoice=None,
color=gPopupWindowColor,
delegate=self)
def _onPartyMemberPress(self, clientID, isHost, widget):
if bsInternal._getForegroundHostSession() is not None: choicesDisplay = [bs.Lstr(resource='kickText')]
else:
if bsInternal._getConnectionToHostInfo().get('buildNumber', 0) < 14248: return
choicesDisplay = [bs.Lstr(resource='kickVoteText')]
choices = ['kick'] + commands
for i in commands: choicesDisplay.append(bs.Lstr(value=i))
self.popupMenuPosition = widget.getScreenSpaceCenter()
bsUI.PopupMenuWindow(position=self.popupMenuPosition,
scale=2.3 if bsUI.gSmallUI else 1.65 if bsUI.gMedUI else 1.23,
choices=choices,
choicesDisplay=choicesDisplay,
currentChoice=None,
color=gPopupWindowColor,
delegate=self)
self._popupType = 'commands'
self._popupPartyMemberClientID = clientID
self._popupPartyMemberIsHost = isHost
popupMenuOld = bsUI.PartyWindow.popupMenuSelectedChoice
def popupMenuSelectedChoice(self, popupWindow, choice):
cmd = self._popupType == 'commands'
if cmd and choice == 'kick':
self._popupType = 'partyMemberPress'
popupMenuOld(self, popupWindow=popupWindow, choice=choice)
elif cmd:
bs.textWidget(edit=self._textField, text='')
if choice in ['/skin']:
account = get_account(self._popupPartyMemberClientID)
if account is not None:
self._popupType = {'skins': account}
self._popupWindow(choices=skins)
elif choice in commands_account_needed:
account = get_account(self._popupPartyMemberClientID)
if account is not None: bs.textWidget(edit=self._textField, text=choice+' '+account)
elif choice in commands:
result = get_number(self._popupPartyMemberClientID)
if len(result) > 0:
self._popupType = 'number'
bs.textWidget(edit=self._textField, text=choice)
if len(result) > 1: self._popupWindow(choices=result)
else: choice = str(result[0])
else: bs.textWidget(edit=self._textField, text='')
if self._popupType == 'number': bs.textWidget(edit=self._textField, text=(bs.textWidget(query=self._textField)+' '+choice))
elif isinstance(self._popupType, dict) and 'skins' in self._popupType and choice != '/skin':
bs.textWidget(edit=self._textField, text=('/skin '+choice+' '+self._popupType.values()[0]))
else: popupMenuOld(self, popupWindow=popupWindow, choice=choice)
bsUI.PartyWindow.popupMenuSelectedChoice = popupMenuSelectedChoice
bsUI.PartyWindow._onPartyMemberPress = _onPartyMemberPress
bsUI.PartyWindow._popupWindow = _popupWindow
| 45.641026 | 127 | 0.640824 |
dbab8b16798c266427fd1a3ab798cc4d15b67979 | 11,305 | py | Python | classy/pl_modules/hf/generation.py | sunglasses-ai/classy | c166490a30d8ba6d7c25f70ce707b7a2ddcfb53f | ["Apache-2.0"] | 26 | 2021-10-17T08:32:53.000Z | 2022-03-30T10:57:13.000Z |
classy/pl_modules/hf/generation.py | sunglasses-ai/classy | c166490a30d8ba6d7c25f70ce707b7a2ddcfb53f | ["Apache-2.0"] | 8 | 2021-11-02T20:57:44.000Z | 2022-03-13T09:42:29.000Z |
classy/pl_modules/hf/generation.py | sunglasses-ai/classy | c166490a30d8ba6d7c25f70ce707b7a2ddcfb53f | ["Apache-2.0"] | null | null | null |
import re
from typing import Dict, Iterator, List, Optional, Tuple
import omegaconf
import torch
from omegaconf import OmegaConf
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from classy.data.data_drivers import GenerationSample
from classy.pl_modules.base import ClassificationOutput, ClassyPLModule
from classy.pl_modules.mixins.task import GenerationTask
class HFGenerationPLModule(GenerationTask, ClassyPLModule):
def training_step(self, batch: dict, batch_idx: int) -> torch.Tensor:
""" """
forward_output = self.forward(**batch)
self.log("loss", forward_output.loss)
self.log("ppl", torch.exp(forward_output.loss))
return forward_output.loss
def validation_step(self, batch: dict, batch_idx: int) -> None:
""" """
forward_output = self.forward(**batch)
self.log("val_loss", forward_output.loss)
self.log(
"val_ppl",
torch.exp(forward_output.loss),
prog_bar=True,
on_step=False,
on_epoch=True,
)
return forward_output.loss
def test_step(self, batch: dict, batch_idx: int) -> None:
""" """
forward_output = self.forward(**batch)
self.log("test_loss", forward_output.loss)
self.log(
"test_ppl",
torch.exp(forward_output.loss),
prog_bar=True,
on_step=False,
on_epoch=True,
)
return forward_output.loss
class BartGenerativeModule(HFGenerationPLModule):
def __init__(
self,
transformer_model: str,
decoding_skip_special_tokens: bool,
decoding_clean_up_tokenization_spaces: bool,
optim_conf: omegaconf.DictConfig,
additional_special_tokens: Optional[List[str]] = None,
):
super().__init__(vocabulary=None, optim_conf=optim_conf)
self.tokenizer = AutoTokenizer.from_pretrained(
transformer_model,
additional_special_tokens=list(additional_special_tokens)
if additional_special_tokens is not None
else None,
use_fast=True,
)
self.model = AutoModelForSeq2SeqLM.from_pretrained(transformer_model)
if additional_special_tokens is not None and len(additional_special_tokens) > 0:
self.model.resize_token_embeddings(len(self.tokenizer))
self.decoding_skip_special_tokens = decoding_skip_special_tokens
self.decoding_clean_up_tokenization_spaces = (
decoding_clean_up_tokenization_spaces
)
self.forced_bos_token_id = self.tokenizer.bos_token_id
self.generation_params = {}
def load_prediction_params(self, prediction_params: Dict):
self.generation_params = prediction_params
def forward(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
labels: Optional[torch.Tensor],
decoder_attention_mask: Optional[torch.Tensor],
**kwargs,
) -> ClassificationOutput:
bart_out = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
decoder_attention_mask=decoder_attention_mask,
)
return ClassificationOutput(
loss=bart_out.loss,
logits=bart_out.logits,
probabilities=bart_out.logits.softmax(dim=-1),
predictions=bart_out.logits.argmax(dim=-1),
)
def batch_predict(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
decoder_start_token_id: torch.Tensor,
**kwargs,
) -> Iterator[GenerationSample]:
assert len(set(decoder_start_token_id.squeeze(-1).tolist())) == 1
# generate
bart_out = self.model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_start_token_id=decoder_start_token_id[0][0],
forced_bos_token_id=self.forced_bos_token_id,
**self.generation_params,
)
# decode
decoded_bart_out = self.tokenizer.batch_decode(
bart_out,
skip_special_tokens=self.decoding_skip_special_tokens,
clean_up_tokenization_spaces=self.decoding_clean_up_tokenization_spaces,
)
# handle num sequences
num_sequences = int(len(decoded_bart_out) / input_ids.shape[0])
grouped_decoded_bart_out = []
for i in range(0, len(decoded_bart_out), num_sequences):
grouped_decoded_bart_out.append(decoded_bart_out[i : i + num_sequences])
# postprocess
samples = kwargs.get("samples")
for sample, prediction in zip(samples, grouped_decoded_bart_out):
sample.predicted_annotation = prediction[0]
if num_sequences > 1:
sample.predicted_annotation_group = prediction
yield sample
class MBartGenerativeModule(BartGenerativeModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.forced_bos_token_id = None
class T5GenerativeModule(HFGenerationPLModule):
def __init__(
self,
transformer_model: str,
decoding_skip_special_tokens: bool,
decoding_clean_up_tokenization_spaces: bool,
optim_conf: omegaconf.DictConfig,
additional_special_tokens: Optional[List[str]] = None,
):
super().__init__(vocabulary=None, optim_conf=optim_conf)
self.tokenizer = AutoTokenizer.from_pretrained(
transformer_model,
additional_special_tokens=list(additional_special_tokens)
if additional_special_tokens is not None
else None,
use_fast=True,
)
self.model = AutoModelForSeq2SeqLM.from_pretrained(transformer_model)
if additional_special_tokens is not None and len(additional_special_tokens) > 0:
self.model.resize_token_embeddings(len(self.tokenizer))
self.decoding_skip_special_tokens = decoding_skip_special_tokens
self.decoding_clean_up_tokenization_spaces = (
decoding_clean_up_tokenization_spaces
)
self.generation_params = {}
def load_prediction_params(self, prediction_params: Dict):
self.generation_params = prediction_params
def forward(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
labels: Optional[torch.Tensor],
decoder_attention_mask: Optional[torch.Tensor],
**kwargs,
) -> ClassificationOutput:
t5_out = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
decoder_attention_mask=decoder_attention_mask,
)
return ClassificationOutput(
loss=t5_out.loss,
logits=t5_out.logits,
probabilities=t5_out.logits.softmax(dim=-1),
predictions=t5_out.logits.argmax(dim=-1),
)
def batch_predict(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs,
) -> Iterator[GenerationSample]:
# generate
t5_out = self.model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
**self.generation_params,
)
# decode
decoded_t5_out = self.tokenizer.batch_decode(
t5_out,
skip_special_tokens=self.decoding_skip_special_tokens,
clean_up_tokenization_spaces=self.decoding_clean_up_tokenization_spaces,
)
# handle num sequences
num_sequences = int(len(decoded_t5_out) / input_ids.shape[0])
grouped_decoded_t5_out = []
for i in range(0, len(decoded_t5_out), num_sequences):
grouped_decoded_t5_out.append(decoded_t5_out[i : i + num_sequences])
# postprocess
samples = kwargs.get("samples")
for sample, prediction in zip(samples, grouped_decoded_t5_out):
sample.predicted_annotation = prediction[0]
if num_sequences > 1:
sample.predicted_annotation_group = prediction
yield sample
class GPT2GenerativeModule(HFGenerationPLModule):
def __init__(
self,
transformer_model: str,
decoding_skip_special_tokens: bool,
decoding_clean_up_tokenization_spaces: bool,
optim_conf: omegaconf.DictConfig,
additional_special_tokens: Optional[List[str]] = None,
):
super().__init__(vocabulary=None, optim_conf=optim_conf)
self.tokenizer = AutoTokenizer.from_pretrained(
transformer_model,
additional_special_tokens=list(additional_special_tokens)
if additional_special_tokens is not None
else None,
use_fast=True,
add_prefix_space=True,
)
self.model = AutoModelForCausalLM.from_pretrained(transformer_model)
if additional_special_tokens is not None and len(additional_special_tokens) > 0:
            self.model.resize_token_embeddings(len(self.tokenizer))
self.decoding_skip_special_tokens = decoding_skip_special_tokens
self.decoding_clean_up_tokenization_spaces = (
decoding_clean_up_tokenization_spaces
)
self.generation_params = {}
def load_prediction_params(self, prediction_params: Dict):
self.generation_params = prediction_params
def forward(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
labels: Optional[torch.Tensor],
**kwargs,
) -> ClassificationOutput:
gpt_out = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
)
return ClassificationOutput(
loss=gpt_out.loss,
logits=gpt_out.logits,
probabilities=gpt_out.logits.softmax(dim=-1),
predictions=gpt_out.logits.argmax(dim=-1),
)
def batch_predict(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs,
) -> Iterator[Tuple[GenerationSample, str]]:
# generate
gpt_out = self.model.generate(
input_ids=input_ids,
attention_mask=attention_mask,
**self.generation_params,
)
# decode
decoded_gpt_out = self.tokenizer.batch_decode(
gpt_out,
skip_special_tokens=self.decoding_skip_special_tokens,
clean_up_tokenization_spaces=self.decoding_clean_up_tokenization_spaces,
)
# handle num sequences
num_sequences = int(len(decoded_gpt_out) / input_ids.shape[0])
grouped_decoded_gpt_out = []
for i in range(0, len(decoded_gpt_out), num_sequences):
grouped_decoded_gpt_out.append(decoded_gpt_out[i : i + num_sequences])
# postprocess
samples = kwargs.get("samples")
        for sample, prediction in zip(samples, grouped_decoded_gpt_out):
sample.predicted_annotation = prediction[0]
if num_sequences > 1:
sample.predicted_annotation_group = prediction
yield sample
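# Illustrative usage sketch (not part of the original module): `load_prediction_params`
# simply stores a dict that is later forwarded as keyword arguments to HuggingFace's
# `model.generate`, so standard generation kwargs apply. The model name, optim_conf,
# and parameter values below are assumptions for demonstration only.
#
#   module = BartGenerativeModule(
#       transformer_model="facebook/bart-base",
#       decoding_skip_special_tokens=True,
#       decoding_clean_up_tokenization_spaces=True,
#       optim_conf=optim_conf,  # an omegaconf.DictConfig built elsewhere
#   )
#   module.load_prediction_params(
#       {"max_length": 64, "num_beams": 4, "num_return_sequences": 2}
#   )
#   # batch_predict() will then decode `num_return_sequences` outputs per sample and
#   # expose them via sample.predicted_annotation / sample.predicted_annotation_group.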
| 37.065574 | 88 | 0.648651 |
6deb71aea645154f8b7cc5e053bc1572dc2e1b89 | 152 | py | Python | enseignant/admin.py | sandratraJovanie/torolalagna | 5984b2ef0ff1537ae7ce2385306783ae7a1c15e0 | ["Apache-2.0"] | null | null | null |
enseignant/admin.py | sandratraJovanie/torolalagna | 5984b2ef0ff1537ae7ce2385306783ae7a1c15e0 | ["Apache-2.0"] | null | null | null |
enseignant/admin.py | sandratraJovanie/torolalagna | 5984b2ef0ff1537ae7ce2385306783ae7a1c15e0 | ["Apache-2.0"] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(enseignants)
admin.site.register(responsables)
admin.site.register(suivis)
| 19 | 33 | 0.815789 |
76fbcdba11e112a2fa091f8718af4a77aa5afd4c | 158,199 | py | Python | salt/config/__init__.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | ["Apache-2.0"] | null | null | null |
salt/config/__init__.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | ["Apache-2.0"] | null | null | null |
salt/config/__init__.py | guoxiaod/salt | 2cd6c03b40932be137e6e8a672967b59025a2d34 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
All salt configuration loading and defaults should be in this module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals, generators
import os
import re
import sys
import glob
import time
import codecs
import logging
import types
from copy import deepcopy
# pylint: disable=import-error,no-name-in-module
from salt.ext import six
from salt.ext.six.moves.urllib.parse import urlparse
# pylint: enable=import-error,no-name-in-module
# Import salt libs
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.user
import salt.utils.validate.path
import salt.utils.xdg
import salt.utils.yaml
import salt.utils.zeromq
import salt.syspaths
import salt.exceptions
import salt.defaults.exitcodes
try:
import psutil
if not hasattr(psutil, 'virtual_memory'):
raise ImportError('Version of psutil too old.')
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
_DFLT_LOG_DATEFMT = '%H:%M:%S'
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
_DFLT_LOG_FMT_LOGFILE = (
'%(asctime)s,%(msecs)03d [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(process)d] %(message)s'
)
_DFLT_LOG_FMT_JID = "[JID: %(jid)s]"
_DFLT_REFSPECS = ['+refs/heads/*:refs/remotes/origin/*', '+refs/tags/*:refs/tags/*']
DEFAULT_INTERVAL = 60
if salt.utils.platform.is_windows():
# Since an 'ipc_mode' of 'ipc' will never work on Windows due to lack of
# support in ZeroMQ, we want the default to be something that has a
# chance of working.
_DFLT_IPC_MODE = 'tcp'
_MASTER_TRIES = -1
# This needs to be SYSTEM in order for salt-master to run as a Service
# Otherwise, it will not respond to CLI calls
_MASTER_USER = 'SYSTEM'
else:
_DFLT_IPC_MODE = 'ipc'
_MASTER_TRIES = 1
_MASTER_USER = salt.utils.user.get_user()
def _gather_buffer_space():
'''
Gather some system data and then calculate
buffer space.
Result is in bytes.
'''
if HAS_PSUTIL and psutil.version_info >= (0, 6, 0):
# Oh good, we have psutil. This will be quick.
total_mem = psutil.virtual_memory().total
else:
# Avoid loading core grains unless absolutely required
import platform
import salt.grains.core
# We need to load up ``mem_total`` grain. Let's mimic required OS data.
os_data = {'kernel': platform.system()}
grains = salt.grains.core._memdata(os_data)
total_mem = grains['mem_total']
# Return the higher number between 5% of the system memory and 10MiB
return max([total_mem * 0.05, 10 << 20])
# For the time being this will be a fixed calculation
# TODO: Allow user configuration
_DFLT_IPC_WBUFFER = _gather_buffer_space() * .5
# TODO: Reserved for future use
_DFLT_IPC_RBUFFER = _gather_buffer_space() * .5
FLO_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'daemons', 'flo')
VALID_OPTS = {
# The address of the salt master. May be specified as IP address or hostname
'master': (six.string_types, list),
# The TCP/UDP port of the master to connect to in order to listen to publications
'master_port': (six.string_types, int),
# The behaviour of the minion when connecting to a master. Can specify 'failover',
# 'disable', 'distributed', or 'func'. If 'func' is specified, the 'master' option should be
# set to an exec module function to run to determine the master hostname. If 'disable' is
# specified the minion will run, but will not try to connect to a master. If 'distributed'
    # is specified, the minion will try to deterministically pick a master based on its id.
'master_type': six.string_types,
# Specify the format in which the master address will be specified. Can
# specify 'default' or 'ip_only'. If 'ip_only' is specified, then the
# master address will not be split into IP and PORT.
'master_uri_format': six.string_types,
    # The following options refer to the Minion only, and they specify
    # the details of the source address / port to be used when connecting to
    # the Master. This is useful when dealing with machines where, due to firewall
    # rules, you are restricted to use a certain IP/port combination only.
'source_interface_name': six.string_types,
'source_address': six.string_types,
'source_ret_port': (six.string_types, int),
'source_publish_port': (six.string_types, int),
# The fingerprint of the master key may be specified to increase security. Generate
# a master fingerprint with `salt-key -F master`
'master_finger': six.string_types,
# Deprecated in Fluorine. Use 'random_master' instead.
# Do not remove! Keep as an alias for usability.
'master_shuffle': bool,
    # When in multi-master mode, temporarily remove a master from the list if a connection
# is interrupted and try another master in the list.
'master_alive_interval': int,
# When in multi-master failover mode, fail back to the first master in the list if it's back
# online.
'master_failback': bool,
# When in multi-master mode, and master_failback is enabled ping the top master with this
# interval.
'master_failback_interval': int,
# The name of the signing key-pair
'master_sign_key_name': six.string_types,
# Sign the master auth-replies with a cryptographic signature of the masters public key.
'master_sign_pubkey': bool,
# Enables verification of the master-public-signature returned by the master in auth-replies.
# Must also set master_sign_pubkey for this to work
'verify_master_pubkey_sign': bool,
# If verify_master_pubkey_sign is enabled, the signature is only verified, if the public-key of
# the master changes. If the signature should always be verified, this can be set to True.
'always_verify_signature': bool,
# The name of the file in the masters pki-directory that holds the pre-calculated signature of
# the masters public-key
'master_pubkey_signature': six.string_types,
# Instead of computing the signature for each auth-reply, use a pre-calculated signature.
# The master_pubkey_signature must also be set for this.
'master_use_pubkey_signature': bool,
    # Enable master stats events to be fired; these events will contain information about
# what commands the master is processing and what the rates are of the executions
'master_stats': bool,
'master_stats_event_iter': int,
# The key fingerprint of the higher-level master for the syndic to verify it is talking to the
# intended master
'syndic_finger': six.string_types,
# The caching mechanism to use for the PKI key store. Can substantially decrease master publish
# times. Available types:
    # 'maint': Runs on a schedule as a part of the maintenance process.
# '': Disable the key cache [default]
'key_cache': six.string_types,
# The user under which the daemon should run
'user': six.string_types,
# The root directory prepended to these options: pki_dir, cachedir,
# sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
# key_logfile, pidfile:
'root_dir': six.string_types,
# The directory used to store public key data
'pki_dir': six.string_types,
# A unique identifier for this daemon
'id': six.string_types,
# Use a module function to determine the unique identifier. If this is
# set and 'id' is not set, it will allow invocation of a module function
# to determine the value of 'id'. For simple invocations without function
# arguments, this may be a string that is the function name. For
# invocations with function arguments, this may be a dictionary with the
# key being the function name, and the value being an embedded dictionary
# where each key is a function argument name and each value is the
# corresponding argument value.
'id_function': (dict, six.string_types),
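    # A minimal, illustrative minion-config example of the two forms described
    # above (the function names and arguments shown are assumptions):
    #   id_function: network.get_hostname
    # or, with arguments:
    #   id_function:
    #     grains.get:
    #       key: fqdn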
# The directory to store all cache files.
'cachedir': six.string_types,
# Append minion_id to these directories. Helps with
# multiple proxies and minions running on the same machine.
# Allowed elements in the list: pki_dir, cachedir, extension_modules, pidfile
'append_minionid_config_dirs': list,
# Flag to cache jobs locally.
'cache_jobs': bool,
# The path to the salt configuration file
'conf_file': six.string_types,
# The directory containing unix sockets for things like the event bus
'sock_dir': six.string_types,
    # The pool size of unix sockets; it is necessary to avoid blocking while waiting for zeromq and tcp communications.
'sock_pool_size': int,
# Specifies how the file server should backup files, if enabled. The backups
# live in the cache dir.
'backup_mode': six.string_types,
# A default renderer for all operations on this host
'renderer': six.string_types,
# Renderer whitelist. The only renderers from this list are allowed.
'renderer_whitelist': list,
    # Renderer blacklist. Renderers from this list are disallowed even if specified in the whitelist.
'renderer_blacklist': list,
# A flag indicating that a highstate run should immediately cease if a failure occurs.
'failhard': bool,
# A flag to indicate that highstate runs should force refresh the modules prior to execution
'autoload_dynamic_modules': bool,
# Force the minion into a single environment when it fetches files from the master
'saltenv': (type(None), six.string_types),
# Prevent saltenv from being overridden on the command line
'lock_saltenv': bool,
# Force the minion into a single pillar root when it fetches pillar data from the master
'pillarenv': (type(None), six.string_types),
# Make the pillarenv always match the effective saltenv
'pillarenv_from_saltenv': bool,
# Allows a user to provide an alternate name for top.sls
'state_top': six.string_types,
'state_top_saltenv': (type(None), six.string_types),
# States to run when a minion starts up
'startup_states': six.string_types,
# List of startup states
'sls_list': list,
# Configuration for snapper in the state system
'snapper_states': bool,
'snapper_states_config': six.string_types,
# A top file to execute if startup_states == 'top'
'top_file': six.string_types,
# Location of the files a minion should look for. Set to 'local' to never ask the master.
'file_client': six.string_types,
'local': bool,
# When using a local file_client, this parameter is used to allow the client to connect to
# a master for remote execution.
'use_master_when_local': bool,
# A map of saltenvs and fileserver backend locations
'file_roots': dict,
# A map of saltenvs and fileserver backend locations
'pillar_roots': dict,
# The external pillars permitted to be used on-demand using pillar.ext
'on_demand_ext_pillar': list,
# A map of glob paths to be used
'decrypt_pillar': list,
# Delimiter to use in path expressions for decrypt_pillar
'decrypt_pillar_delimiter': six.string_types,
# Default renderer for decrypt_pillar
'decrypt_pillar_default': six.string_types,
# List of renderers available for decrypt_pillar
'decrypt_pillar_renderers': list,
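    # A minimal, illustrative master-config example for the decrypt_pillar options
    # above (the pillar paths shown are assumptions):
    #   decrypt_pillar:
    #     - 'cloud': gpg
    #     - 'secrets:vault'
    #   decrypt_pillar_delimiter: ':'
    #   decrypt_pillar_default: gpg
    #   decrypt_pillar_renderers:
    #     - gpg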
# The type of hashing algorithm to use when doing file comparisons
'hash_type': six.string_types,
# Refuse to load these modules
'disable_modules': list,
# Refuse to load these returners
'disable_returners': list,
# Tell the loader to only load modules in this list
'whitelist_modules': list,
# A list of additional directories to search for salt modules in
'module_dirs': list,
# A list of additional directories to search for salt returners in
'returner_dirs': list,
# A list of additional directories to search for salt states in
'states_dirs': list,
# A list of additional directories to search for salt grains in
'grains_dirs': list,
# A list of additional directories to search for salt renderers in
'render_dirs': list,
# A list of additional directories to search for salt outputters in
'outputter_dirs': list,
# A list of additional directories to search for salt utilities in. (Used by the loader
# to populate __utils__)
'utils_dirs': list,
# salt cloud providers
'providers': dict,
# First remove all modules during any sync operation
'clean_dynamic_modules': bool,
# A flag indicating that a master should accept any minion connection without any authentication
'open_mode': bool,
# Whether or not processes should be forked when needed. The alternative is to use threading.
'multiprocessing': bool,
# Maximum number of concurrently active processes at any given point in time
'process_count_max': int,
# Whether or not the salt minion should run scheduled mine updates
'mine_enabled': bool,
# Whether or not scheduled mine updates should be accompanied by a job return for the job cache
'mine_return_job': bool,
# The number of minutes between mine updates.
'mine_interval': int,
# The ipc strategy. (i.e., sockets versus tcp, etc)
'ipc_mode': six.string_types,
# Enable ipv6 support for daemons
'ipv6': bool,
# The chunk size to use when streaming files with the file server
'file_buffer_size': int,
# The TCP port on which minion events should be published if ipc_mode is TCP
'tcp_pub_port': int,
# The TCP port on which minion events should be pulled if ipc_mode is TCP
'tcp_pull_port': int,
# The TCP port on which events for the master should be published if ipc_mode is TCP
'tcp_master_pub_port': int,
# The TCP port on which events for the master should be pulled if ipc_mode is TCP
'tcp_master_pull_port': int,
# The TCP port on which events for the master should pulled and then republished onto
# the event bus on the master
'tcp_master_publish_pull': int,
# The TCP port for mworkers to connect to on the master
'tcp_master_workers': int,
# The file to send logging data to
'log_file': six.string_types,
# The level of verbosity at which to log
'log_level': six.string_types,
# The log level to log to a given file
'log_level_logfile': (type(None), six.string_types),
# The format to construct dates in log files
'log_datefmt': six.string_types,
# The dateformat for a given logfile
'log_datefmt_logfile': six.string_types,
# The format for console logs
'log_fmt_console': six.string_types,
# The format for a given log file
'log_fmt_logfile': (tuple, six.string_types),
# A dictionary of logging levels
'log_granular_levels': dict,
# The maximum number of bytes a single log file may contain before
# it is rotated. A value of 0 disables this feature.
# Currently only supported on Windows. On other platforms, use an
# external tool such as 'logrotate' to manage log files.
'log_rotate_max_bytes': int,
# The number of backup files to keep when rotating log files. Only
# used if log_rotate_max_bytes is greater than 0.
# Currently only supported on Windows. On other platforms, use an
# external tool such as 'logrotate' to manage log files.
'log_rotate_backup_count': int,
# If an event is above this size, it will be trimmed before putting it on the event bus
'max_event_size': int,
# Enable old style events to be sent on minion_startup. Change default to False in Neon release
'enable_legacy_startup_events': bool,
# Always execute states with test=True if this flag is set
'test': bool,
# Tell the loader to attempt to import *.pyx cython files if cython is available
'cython_enable': bool,
# Tell the loader to attempt to import *.zip archives
'enable_zip_modules': bool,
# Tell the client to show minions that have timed out
'show_timeout': bool,
# Tell the client to display the jid when a job is published
'show_jid': bool,
# Ensure that a generated jid is always unique. If this is set, the jid
# format is different due to an underscore and process id being appended
# to the jid. WARNING: A change to the jid format may break external
# applications that depend on the original format.
'unique_jid': bool,
# Tells the highstate outputter to show successful states. False will omit successes.
'state_verbose': bool,
# Specify the format for state outputs. See highstate outputter for additional details.
'state_output': six.string_types,
# Tells the highstate outputter to only report diffs of states that changed
'state_output_diff': bool,
# When true, states run in the order defined in an SLS file, unless requisites re-order them
'state_auto_order': bool,
# Fire events as state chunks are processed by the state compiler
'state_events': bool,
# The number of seconds a minion should wait before retry when attempting authentication
'acceptance_wait_time': float,
# The number of seconds a minion should wait before giving up during authentication
'acceptance_wait_time_max': float,
# Retry a connection attempt if the master rejects a minion's public key
'rejected_retry': bool,
# The interval in which a daemon's main loop should attempt to perform all necessary tasks
# for normal operation
'loop_interval': float,
# Perform pre-flight verification steps before daemon startup, such as checking configuration
# files and certain directories.
'verify_env': bool,
# The grains dictionary for a minion, containing specific "facts" about the minion
'grains': dict,
# Allow a daemon to function even if the key directories are not secured
'permissive_pki_access': bool,
# The passphrase of the master's private key
'key_pass': (type(None), six.string_types),
# The passphrase of the master's private signing key
'signing_key_pass': (type(None), six.string_types),
# The path to a directory to pull in configuration file includes
'default_include': six.string_types,
# If a minion is running an esky build of salt, upgrades can be performed using the url
# defined here. See saltutil.update() for additional information
'update_url': (bool, six.string_types),
# If using update_url with saltutil.update(), provide a list of services to be restarted
# post-install
'update_restart_services': list,
# The number of seconds to sleep between retrying an attempt to resolve the hostname of a
# salt master
'retry_dns': float,
# In the case when the resolve of the salt master hostname fails, fall back to localhost
'resolve_dns_fallback': bool,
# set the zeromq_reconnect_ivl option on the minion.
# http://lists.zeromq.org/pipermail/zeromq-dev/2011-January/008845.html
'recon_max': float,
# If recon_randomize is set, this specifies the lower bound for the randomized period
'recon_default': float,
# Tells the minion to choose a bounded, random interval to have zeromq attempt to reconnect
# in the event of a disconnect event
'recon_randomize': bool,
'return_retry_timer': int,
'return_retry_timer_max': int,
    # Specify one or more returners to which all events will be sent. Requires that the returners
# in question have an event_return(event) function!
'event_return': (list, six.string_types),
# The number of events to queue up in memory before pushing them down the pipe to an event
# returner specified by 'event_return'
'event_return_queue': int,
# Only forward events to an event returner if it matches one of the tags in this list
'event_return_whitelist': list,
# Events matching a tag in this list should never be sent to an event returner.
'event_return_blacklist': list,
# default match type for filtering events tags: startswith, endswith, find, regex, fnmatch
'event_match_type': six.string_types,
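    # A minimal, illustrative master-config example for the event returner options
    # above (the returner name and tags are assumptions):
    #   event_return: mysql
    #   event_return_queue: 0
    #   event_return_whitelist:
    #     - salt/master/a_tag
    #     - salt/run/*/ret
    #   event_return_blacklist:
    #     - salt/master/not_this_tag
    #   event_match_type: startswith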
# This pidfile to write out to when a daemon starts
'pidfile': six.string_types,
# Used with the SECO range master tops system
'range_server': six.string_types,
# The tcp keepalive interval to set on TCP ports. This setting can be used to tune Salt
# connectivity issues in messy network environments with misbehaving firewalls
'tcp_keepalive': bool,
# Sets zeromq TCP keepalive idle. May be used to tune issues with minion disconnects
'tcp_keepalive_idle': float,
# Sets zeromq TCP keepalive count. May be used to tune issues with minion disconnects
'tcp_keepalive_cnt': float,
# Sets zeromq TCP keepalive interval. May be used to tune issues with minion disconnects.
'tcp_keepalive_intvl': float,
# The network interface for a daemon to bind to
'interface': six.string_types,
# The port for a salt master to broadcast publications on. This will also be the port minions
# connect to to listen for publications.
'publish_port': int,
# TODO unknown option!
'auth_mode': int,
# listen queue size / backlog
'zmq_backlog': int,
# Set the zeromq high water mark on the publisher interface.
# http://api.zeromq.org/3-2:zmq-setsockopt
'pub_hwm': int,
# IPC buffer size
# Refs https://github.com/saltstack/salt/issues/34215
'ipc_write_buffer': int,
# The number of MWorker processes for a master to startup. This number needs to scale up as
# the number of connected minions increases.
'worker_threads': int,
# The port for the master to listen to returns on. The minion needs to connect to this port
# to send returns.
'ret_port': int,
# The number of hours to keep jobs around in the job cache on the master
'keep_jobs': int,
# If the returner supports `clean_old_jobs`, then at cleanup time,
# archive the job data before deleting it.
'archive_jobs': bool,
# A master-only copy of the file_roots dictionary, used by the state compiler
'master_roots': dict,
# Add the proxymodule LazyLoader object to opts. This breaks many things
# but this was the default pre 2015.8.2. This should default to
# False in 2016.3.0
'add_proxymodule_to_opts': bool,
# Merge pillar data into configuration opts.
# As multiple proxies can run on the same server, we may need different
# configuration options for each, while there's one single configuration file.
# The solution is merging the pillar data of each proxy minion into the opts.
'proxy_merge_pillar_in_opts': bool,
# Deep merge of pillar data into configuration opts.
# Evaluated only when `proxy_merge_pillar_in_opts` is True.
'proxy_deep_merge_pillar_in_opts': bool,
# The strategy used when merging pillar into opts.
# Considered only when `proxy_merge_pillar_in_opts` is True.
'proxy_merge_pillar_in_opts_strategy': six.string_types,
# Allow enabling mine details using pillar data.
'proxy_mines_pillar': bool,
# In some particular cases, always alive proxies are not beneficial.
# This option can be used in those less dynamic environments:
# the user can request the connection
# always alive, or init-shutdown per command.
'proxy_always_alive': bool,
# Poll the connection state with the proxy minion
# If enabled, this option requires the function `alive`
# to be implemented in the proxy module
'proxy_keep_alive': bool,
# Frequency of the proxy_keep_alive, in minutes
'proxy_keep_alive_interval': int,
# Update intervals
'roots_update_interval': int,
'azurefs_update_interval': int,
'gitfs_update_interval': int,
'hgfs_update_interval': int,
'minionfs_update_interval': int,
's3fs_update_interval': int,
'svnfs_update_interval': int,
# NOTE: git_pillar_base, git_pillar_branch, git_pillar_env, and
# git_pillar_root omitted here because their values could conceivably be
# loaded as non-string types, which is OK because git_pillar will normalize
# them to strings. But rather than include all the possible types they
# could be, we'll just skip type-checking.
'git_pillar_ssl_verify': bool,
'git_pillar_global_lock': bool,
'git_pillar_user': six.string_types,
'git_pillar_password': six.string_types,
'git_pillar_insecure_auth': bool,
'git_pillar_privkey': six.string_types,
'git_pillar_pubkey': six.string_types,
'git_pillar_passphrase': six.string_types,
'git_pillar_refspecs': list,
'git_pillar_includes': bool,
'git_pillar_verify_config': bool,
# NOTE: gitfs_base, gitfs_mountpoint, and gitfs_root omitted here because
# their values could conceivably be loaded as non-string types, which is OK
# because gitfs will normalize them to strings. But rather than include all
# the possible types they could be, we'll just skip type-checking.
'gitfs_remotes': list,
'gitfs_insecure_auth': bool,
'gitfs_privkey': six.string_types,
'gitfs_pubkey': six.string_types,
'gitfs_passphrase': six.string_types,
'gitfs_env_whitelist': list,
'gitfs_env_blacklist': list,
'gitfs_saltenv_whitelist': list,
'gitfs_saltenv_blacklist': list,
'gitfs_ssl_verify': bool,
'gitfs_global_lock': bool,
'gitfs_saltenv': list,
'gitfs_ref_types': list,
'gitfs_refspecs': list,
'gitfs_disable_saltenv_mapping': bool,
'hgfs_remotes': list,
'hgfs_mountpoint': six.string_types,
'hgfs_root': six.string_types,
'hgfs_base': six.string_types,
'hgfs_branch_method': six.string_types,
'hgfs_env_whitelist': list,
'hgfs_env_blacklist': list,
'hgfs_saltenv_whitelist': list,
'hgfs_saltenv_blacklist': list,
'svnfs_remotes': list,
'svnfs_mountpoint': six.string_types,
'svnfs_root': six.string_types,
'svnfs_trunk': six.string_types,
'svnfs_branches': six.string_types,
'svnfs_tags': six.string_types,
'svnfs_env_whitelist': list,
'svnfs_env_blacklist': list,
'svnfs_saltenv_whitelist': list,
'svnfs_saltenv_blacklist': list,
'minionfs_env': six.string_types,
'minionfs_mountpoint': six.string_types,
'minionfs_whitelist': list,
'minionfs_blacklist': list,
# Specify a list of external pillar systems to use
'ext_pillar': list,
# Reserved for future use to version the pillar structure
'pillar_version': int,
# Whether or not a copy of the master opts dict should be rendered into minion pillars
'pillar_opts': bool,
# Cache the master pillar to disk to avoid having to pass through the rendering system
'pillar_cache': bool,
# Pillar cache TTL, in seconds. Has no effect unless `pillar_cache` is True
'pillar_cache_ttl': int,
# Pillar cache backend. Defaults to `disk` which stores caches in the master cache
'pillar_cache_backend': six.string_types,
'pillar_safe_render_error': bool,
# When creating a pillar, there are several strategies to choose from when
# encountering duplicate values
'pillar_source_merging_strategy': six.string_types,
# Recursively merge lists by aggregating them instead of replacing them.
'pillar_merge_lists': bool,
# If True, values from included pillar SLS targets will override
'pillar_includes_override_sls': bool,
# How to merge multiple top files from multiple salt environments
# (saltenvs); can be 'merge' or 'same'
'top_file_merging_strategy': six.string_types,
# The ordering for salt environment merging, when top_file_merging_strategy
# is set to 'same'
'env_order': list,
# The salt environment which provides the default top file when
# top_file_merging_strategy is set to 'same'; defaults to 'base'
'default_top': six.string_types,
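    # A minimal, illustrative master-config example for the three top-file options
    # above (the environment names are assumptions):
    #   top_file_merging_strategy: same
    #   env_order: ['base', 'dev', 'qa']
    #   default_top: base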
'ping_on_rotate': bool,
'peer': dict,
'preserve_minion_cache': bool,
'syndic_master': (six.string_types, list),
    # The behaviour of the multi-master syndic when the connection to a master of masters fails. Can
    # specify 'random' (default) or 'ordered'. If set to 'random', masters will be iterated in random
    # order; if 'ordered', the configured order will be used.
'syndic_failover': six.string_types,
'syndic_forward_all_events': bool,
'runner_dirs': list,
'client_acl_verify': bool,
'publisher_acl': dict,
'publisher_acl_blacklist': dict,
'sudo_acl': bool,
'external_auth': dict,
'token_expire': int,
'token_expire_user_override': (bool, dict),
'file_recv': bool,
'file_recv_max_size': int,
'file_ignore_regex': (list, six.string_types),
'file_ignore_glob': (list, six.string_types),
'fileserver_backend': list,
'fileserver_followsymlinks': bool,
'fileserver_ignoresymlinks': bool,
'fileserver_limit_traversal': bool,
'fileserver_verify_config': bool,
    # Optionally apply '*' permissions to any user. By default '*' is a fallback case that is
    # applied only if the user isn't matched by other matchers.
'permissive_acl': bool,
# Optionally enables keeping the calculated user's auth list in the token file.
'keep_acl_in_token': bool,
# Auth subsystem module to use to get authorized access list for a user. By default it's the
# same module used for external authentication.
'eauth_acl_module': six.string_types,
# Subsystem to use to maintain eauth tokens. By default, tokens are stored on the local
# filesystem
'eauth_tokens': six.string_types,
# The number of open files a daemon is allowed to have open. Frequently needs to be increased
# higher than the system default in order to account for the way zeromq consumes file handles.
'max_open_files': int,
# Automatically accept any key provided to the master. Implies that the key will be preserved
# so that subsequent connections will be authenticated even if this option has later been
# turned off.
'auto_accept': bool,
'autosign_timeout': int,
# A mapping of external systems that can be used to generate topfile data.
'master_tops': dict,
# Whether or not matches from master_tops should be executed before or
# after those from the top file(s).
'master_tops_first': bool,
# A flag that should be set on a top-level master when it is ordering around subordinate masters
# via the use of a salt syndic
'order_masters': bool,
# Whether or not to cache jobs so that they can be examined later on
'job_cache': bool,
# Define a returner to be used as an external job caching storage backend
'ext_job_cache': six.string_types,
# Specify a returner for the master to use as a backend storage system to cache jobs returns
# that it receives
'master_job_cache': six.string_types,
# Specify whether the master should store end times for jobs as returns come in
'job_cache_store_endtime': bool,
# The minion data cache is a cache of information about the minions stored on the master.
# This information is primarily the pillar and grains data. The data is cached in the master
# cachedir under the name of the minion and used to predetermine what minions are expected to
# reply from executions.
'minion_data_cache': bool,
# The number of seconds between AES key rotations on the master
'publish_session': int,
# Defines a salt reactor. See http://docs.saltstack.com/en/latest/topics/reactor/
'reactor': list,
# The TTL for the cache of the reactor configuration
'reactor_refresh_interval': int,
# The number of workers for the runner/wheel in the reactor
'reactor_worker_threads': int,
# The queue size for workers in the reactor
'reactor_worker_hwm': int,
# Defines engines. See https://docs.saltstack.com/en/latest/topics/engines/
'engines': list,
# Whether or not to store runner returns in the job cache
'runner_returns': bool,
'serial': six.string_types,
'search': six.string_types,
# A compound target definition.
# See: http://docs.saltstack.com/en/latest/topics/targeting/nodegroups.html
'nodegroups': (dict, list),
# List-only nodegroups for salt-ssh. Each group must be formed as either a
# comma-separated list, or a YAML list.
'ssh_list_nodegroups': dict,
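    # A minimal, illustrative master-config example for nodegroups and the
    # salt-ssh list-only variant (the group contents shown are assumptions):
    #   nodegroups:
    #     group1: 'L@minion1,minion2 or bl*.domain.com'
    #     group2: 'G@os:Debian and foo.domain.com'
    #   ssh_list_nodegroups:
    #     groupA: minion1,minion2
    #     groupB: [minion1, minion3]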
# By default, salt-ssh uses its own specially-generated RSA key to auth
    # against minions. If this is set to True, salt-ssh will look
    # for a key at ~/.ssh/id_rsa, and fall back to using its own specially-
# generated RSA key if that file doesn't exist.
'ssh_use_home_key': bool,
# The logfile location for salt-key
'key_logfile': six.string_types,
# The upper bound for the random number of seconds that a minion should
    # delay when starting up before it connects to a master. This can be
# used to mitigate a thundering-herd scenario when many minions start up
# at once and attempt to all connect immediately to the master
'random_startup_delay': int,
# The source location for the winrepo sls files
# (used by win_pkg.py, minion only)
'winrepo_source_dir': six.string_types,
'winrepo_dir': six.string_types,
'winrepo_dir_ng': six.string_types,
'winrepo_cachefile': six.string_types,
# NOTE: winrepo_branch omitted here because its value could conceivably be
# loaded as a non-string type, which is OK because winrepo will normalize
# them to strings. But rather than include all the possible types it could
# be, we'll just skip type-checking.
'winrepo_cache_expire_max': int,
'winrepo_cache_expire_min': int,
'winrepo_remotes': list,
'winrepo_remotes_ng': list,
'winrepo_ssl_verify': bool,
'winrepo_user': six.string_types,
'winrepo_password': six.string_types,
'winrepo_insecure_auth': bool,
'winrepo_privkey': six.string_types,
'winrepo_pubkey': six.string_types,
'winrepo_passphrase': six.string_types,
'winrepo_refspecs': list,
# Set a hard limit for the amount of memory modules can consume on a minion.
'modules_max_memory': int,
# The number of minutes between the minion refreshing its cache of grains
'grains_refresh_every': int,
# Use lspci to gather system data for grains on a minion
'enable_lspci': bool,
# The number of seconds for the salt client to wait for additional syndics to
# check in with their lists of expected minions before giving up
'syndic_wait': int,
# Override Jinja environment option defaults for all templates except sls templates
'jinja_env': dict,
# Set Jinja environment options for sls templates
'jinja_sls_env': dict,
# If this is set to True leading spaces and tabs are stripped from the start
# of a line to a block.
'jinja_lstrip_blocks': bool,
# If this is set to True the first newline after a Jinja block is removed
'jinja_trim_blocks': bool,
# Cache minion ID to file
'minion_id_caching': bool,
# Always generate minion id in lowercase.
'minion_id_lowercase': bool,
# If set, the master will sign all publications before they are sent out
'sign_pub_messages': bool,
# The size of key that should be generated when creating new keys
'keysize': int,
# The transport system for this daemon. (i.e. zeromq, raet, etc)
'transport': six.string_types,
# The number of seconds to wait when the client is requesting information about running jobs
'gather_job_timeout': int,
# The number of seconds to wait before timing out an authentication request
'auth_timeout': int,
# The number of attempts to authenticate to a master before giving up
'auth_tries': int,
# The number of attempts to connect to a master before giving up.
# Set this to -1 for unlimited attempts. This allows for a master to have
# downtime and the minion to reconnect to it later when it comes back up.
# In 'failover' mode, it is the number of attempts for each set of masters.
# In this mode, it will cycle through the list of masters for each attempt.
'master_tries': int,
# Never give up when trying to authenticate to a master
'auth_safemode': bool,
# Selects a random master when starting a minion up in multi-master mode or
# when starting a minion with salt-call. ``master`` must be a list.
'random_master': bool,
# An upper bound for the amount of time for a minion to sleep before attempting to
# reauth after a restart.
'random_reauth_delay': int,
# The number of seconds for a syndic to poll for new messages that need to be forwarded
'syndic_event_forward_timeout': float,
# The length that the syndic event queue must hit before events are popped off and forwarded
'syndic_jid_forward_cache_hwm': int,
# Salt SSH configuration
'ssh_passwd': six.string_types,
'ssh_port': six.string_types,
'ssh_sudo': bool,
'ssh_sudo_user': six.string_types,
'ssh_timeout': float,
'ssh_user': six.string_types,
'ssh_scan_ports': six.string_types,
'ssh_scan_timeout': float,
'ssh_identities_only': bool,
'ssh_log_file': six.string_types,
'ssh_config_file': six.string_types,
'ssh_merge_pillar': bool,
# Enable ioflo verbose logging. Warning! Very verbose!
'ioflo_verbose': int,
'ioflo_period': float,
# Set ioflo to realtime. Useful only for testing/debugging to simulate many ioflo periods very
# quickly
'ioflo_realtime': bool,
# Location for ioflo logs
'ioflo_console_logdir': six.string_types,
# The port to bind to when bringing up a RAET daemon
'raet_port': int,
'raet_alt_port': int,
'raet_mutable': bool,
'raet_main': bool,
'raet_clear_remotes': bool,
'raet_clear_remote_masters': bool,
'raet_road_bufcnt': int,
'raet_lane_bufcnt': int,
'cluster_mode': bool,
'cluster_masters': list,
'sqlite_queue_dir': six.string_types,
'queue_dirs': list,
# Instructs the minion to ping its master(s) every n number of minutes. Used
# primarily as a mitigation technique against minion disconnects.
'ping_interval': int,
# Instructs the salt CLI to print a summary of a minion responses before returning
'cli_summary': bool,
# The maximum number of minion connections allowed by the master. Can have performance
# implications in large setups.
'max_minions': int,
'username': (type(None), six.string_types),
'password': (type(None), six.string_types),
    # Use zmq.SUBSCRIBE to limit listening sockets to only process messages bound for them
'zmq_filtering': bool,
# Connection caching. Can greatly speed up salt performance.
'con_cache': bool,
'rotate_aes_key': bool,
# Cache ZeroMQ connections. Can greatly improve salt performance.
'cache_sreqs': bool,
# Can be set to override the python_shell=False default in the cmd module
'cmd_safe': bool,
# Used strictly for performance testing in RAET.
'dummy_publisher': bool,
# Used by salt-api for master requests timeout
'rest_timeout': int,
# If set, all minion exec module actions will be rerouted through sudo as this user
'sudo_user': six.string_types,
# HTTP connection timeout in seconds. Applied for tornado http fetch functions like cp.get_url
# should be greater than overall download time
'http_connect_timeout': float,
# HTTP request timeout in seconds. Applied for tornado http fetch functions like cp.get_url
# should be greater than overall download time
'http_request_timeout': float,
# HTTP request max file content size.
'http_max_body': int,
# Delay in seconds before executing bootstrap (Salt Cloud)
'bootstrap_delay': int,
# If a proxymodule has a function called 'grains', then call it during
# regular grains loading and merge the results with the proxy's grains
# dictionary. Otherwise it is assumed that the module calls the grains
# function in a custom way and returns the data elsewhere
#
# Default to False for 2016.3 and 2016.11. Switch to True for 2017.7.0
'proxy_merge_grains_in_module': bool,
# Command to use to restart salt-minion
'minion_restart_command': list,
# Whether or not a minion should send the results of a command back to the master
# Useful when a returner is the source of truth for a job result
'pub_ret': bool,
# HTTP proxy settings. Used in tornado fetch functions, apt-key etc
'proxy_host': six.string_types,
'proxy_username': six.string_types,
'proxy_password': six.string_types,
'proxy_port': int,
# Exclude list of hostnames from proxy
'no_proxy': list,
# Minion de-dup jid cache max size
'minion_jid_queue_hwm': int,
    # Minion data cache driver (one of the salt.cache.* modules)
'cache': six.string_types,
# Enables a fast in-memory cache booster and sets the expiration time.
'memcache_expire_seconds': int,
# Set a memcache limit in items (bank + key) per cache storage (driver + driver_opts).
'memcache_max_items': int,
    # Each time the cache storage gets full, clean up all expired items, not just the oldest one.
'memcache_full_cleanup': bool,
    # Enable collecting memcache stats and log them at the `debug` log level.
'memcache_debug': bool,
# Thin and minimal Salt extra modules
'thin_extra_mods': six.string_types,
'min_extra_mods': six.string_types,
# Default returners minion should use. List or comma-delimited string
'return': (six.string_types, list),
# TLS/SSL connection options. This could be set to a dictionary containing arguments
# corresponding to python ssl.wrap_socket method. For details see:
# http://www.tornadoweb.org/en/stable/tcpserver.html#tornado.tcpserver.TCPServer
# http://docs.python.org/2/library/ssl.html#ssl.wrap_socket
# Note: to set enum arguments values like `cert_reqs` and `ssl_version` use constant names
# without ssl module prefix: `CERT_REQUIRED` or `PROTOCOL_SSLv23`.
'ssl': (dict, bool, type(None)),
# Controls how a multi-function job returns its data. If this is False,
# it will return its data using a dictionary with the function name as
# the key. This is compatible with legacy systems. If this is True, it
# will return its data using an array in the same order as the input
# array of functions to execute. This allows for calling the same
# function multiple times in the same multi-function job.
'multifunc_ordered': bool,
# Controls whether beacons are set up before a connection
# to the master is attempted.
'beacons_before_connect': bool,
# Controls whether the scheduler is set up before a connection
# to the master is attempted.
'scheduler_before_connect': bool,
# Whitelist/blacklist specific modules to be synced
'extmod_whitelist': dict,
'extmod_blacklist': dict,
# django auth
'django_auth_path': six.string_types,
'django_auth_settings': six.string_types,
# Number of times to try to auth with the master on a reconnect with the
# tcp transport
'tcp_authentication_retries': int,
    # Permit or deny minions requesting revocation of their own keys
'allow_minion_key_revoke': bool,
# File chunk size for salt-cp
'salt_cp_chunk_size': int,
# Require that the minion sign messages it posts to the master on the event
# bus
'minion_sign_messages': bool,
# Have master drop messages from minions for which their signatures do
# not verify
'drop_messages_signature_fail': bool,
# Require that payloads from minions have a 'sig' entry
# (in other words, require that minions have 'minion_sign_messages'
# turned on)
'require_minion_sign_messages': bool,
# The list of config entries to be passed to external pillar function as
# part of the extra_minion_data param
# Subconfig entries can be specified by using the ':' notation (e.g. key:subkey)
'pass_to_ext_pillars': (six.string_types, list),
# Used by salt.modules.dockermod.compare_container_networks to specify which keys are compared
'docker.compare_container_networks': dict,
# SSDP discovery publisher description.
# Contains publisher configuration and minion mapping.
# Setting it to False disables discovery
'discovery': (dict, bool),
# Scheduler should be a dictionary
'schedule': dict,
# Whether to fire auth events
'auth_events': bool,
# Whether to fire Minion data cache refresh events
'minion_data_cache_events': bool,
# Enable calling ssh minions from the salt master
'enable_ssh_minions': bool,
# Thorium saltenv
'thoriumenv': (type(None), six.string_types),
# Thorium top file location
'thorium_top': six.string_types,
}
# default configurations
DEFAULT_MINION_OPTS = {
'interface': '0.0.0.0',
'master': 'salt',
'master_type': 'str',
'master_uri_format': 'default',
'source_interface_name': '',
'source_address': '',
'source_ret_port': 0,
'source_publish_port': 0,
'master_port': 4506,
'master_finger': '',
'master_shuffle': False,
'master_alive_interval': 0,
'master_failback': False,
'master_failback_interval': 0,
'verify_master_pubkey_sign': False,
'sign_pub_messages': False,
'always_verify_signature': False,
'master_sign_key_name': 'master_sign',
'syndic_finger': '',
'user': salt.utils.user.get_user(),
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
'id': '',
'id_function': {},
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
'append_minionid_config_dirs': [],
'cache_jobs': False,
'grains_cache': False,
'grains_cache_expiration': 300,
'grains_deep_merge': False,
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'sock_pool_size': 1,
'backup_mode': '',
'renderer': 'jinja|yaml',
'renderer_whitelist': [],
'renderer_blacklist': [],
'random_startup_delay': 0,
'failhard': False,
'autoload_dynamic_modules': True,
'saltenv': None,
'lock_saltenv': False,
'pillarenv': None,
'pillarenv_from_saltenv': False,
'pillar_opts': False,
'pillar_source_merging_strategy': 'smart',
'pillar_merge_lists': False,
'pillar_includes_override_sls': False,
# ``pillar_cache``, ``pillar_cache_ttl`` and ``pillar_cache_backend``
# are not used on the minion but are unavoidably in the code path
'pillar_cache': False,
'pillar_cache_ttl': 3600,
'pillar_cache_backend': 'disk',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'minion', 'extmods'),
'state_top': 'top.sls',
'state_top_saltenv': None,
'startup_states': '',
'sls_list': [],
'top_file': '',
'thoriumenv': None,
'thorium_top': 'top.sls',
'thorium_interval': 0.5,
'thorium_roots': {
'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR],
},
'file_client': 'remote',
'local': False,
'use_master_when_local': False,
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,
salt.syspaths.SPM_FORMULA_PATH]
},
'top_file_merging_strategy': 'merge',
'env_order': [],
'default_top': 'base',
'fileserver_limit_traversal': False,
'file_recv': False,
'file_recv_max_size': 100,
'file_ignore_regex': [],
'file_ignore_glob': [],
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
salt.syspaths.SPM_PILLAR_PATH]
},
'on_demand_ext_pillar': ['libvirt', 'virtkey'],
'decrypt_pillar': [],
'decrypt_pillar_delimiter': ':',
'decrypt_pillar_default': 'gpg',
'decrypt_pillar_renderers': ['gpg'],
# Update intervals
'roots_update_interval': DEFAULT_INTERVAL,
'azurefs_update_interval': DEFAULT_INTERVAL,
'gitfs_update_interval': DEFAULT_INTERVAL,
'hgfs_update_interval': DEFAULT_INTERVAL,
'minionfs_update_interval': DEFAULT_INTERVAL,
's3fs_update_interval': DEFAULT_INTERVAL,
'svnfs_update_interval': DEFAULT_INTERVAL,
'git_pillar_base': 'master',
'git_pillar_branch': 'master',
'git_pillar_env': '',
'git_pillar_root': '',
'git_pillar_ssl_verify': True,
'git_pillar_global_lock': True,
'git_pillar_user': '',
'git_pillar_password': '',
'git_pillar_insecure_auth': False,
'git_pillar_privkey': '',
'git_pillar_pubkey': '',
'git_pillar_passphrase': '',
'git_pillar_refspecs': _DFLT_REFSPECS,
'git_pillar_includes': True,
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'gitfs_saltenv_whitelist': [],
'gitfs_saltenv_blacklist': [],
'gitfs_global_lock': True,
'gitfs_ssl_verify': True,
'gitfs_saltenv': [],
'gitfs_ref_types': ['branch', 'tag', 'sha'],
'gitfs_refspecs': _DFLT_REFSPECS,
'gitfs_disable_saltenv_mapping': False,
'unique_jid': False,
'hash_type': 'sha256',
'disable_modules': [],
'disable_returners': [],
'whitelist_modules': [],
'module_dirs': [],
'returner_dirs': [],
'grains_dirs': [],
'states_dirs': [],
'render_dirs': [],
'outputter_dirs': [],
'utils_dirs': [],
'publisher_acl': {},
'publisher_acl_blacklist': {},
'providers': {},
'clean_dynamic_modules': True,
'open_mode': False,
'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': True,
'process_count_max': -1,
'mine_enabled': True,
'mine_return_job': False,
'mine_interval': 60,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
'tcp_pull_port': 4511,
'tcp_authentication_retries': 5,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'minion'),
'log_level': 'warning',
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_fmt_jid': _DFLT_LOG_FMT_JID,
'log_granular_levels': {},
'log_rotate_max_bytes': 0,
'log_rotate_backup_count': 0,
'max_event_size': 1048576,
'enable_legacy_startup_events': True,
'test': False,
'ext_job_cache': '',
'cython_enable': False,
'enable_zip_modules': False,
'state_verbose': True,
'state_output': 'full',
'state_output_diff': False,
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'snapper_states': False,
'snapper_states_config': 'root',
'acceptance_wait_time': 10,
'acceptance_wait_time_max': 0,
'rejected_retry': False,
'loop_interval': 1,
'verify_env': True,
'grains': {},
'permissive_pki_access': False,
'default_include': 'minion.d/*.conf',
'update_url': False,
'update_restart_services': [],
'retry_dns': 30,
'resolve_dns_fallback': True,
'recon_max': 10000,
'recon_default': 1000,
'recon_randomize': True,
'return_retry_timer': 5,
'return_retry_timer_max': 10,
'random_reauth_delay': 10,
'winrepo_source_dir': 'salt://win/repo-ng/',
'winrepo_dir': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
'winrepo_dir_ng': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo-ng'),
'winrepo_cachefile': 'winrepo.p',
'winrepo_cache_expire_max': 21600,
'winrepo_cache_expire_min': 0,
'winrepo_remotes': ['https://github.com/saltstack/salt-winrepo.git'],
'winrepo_remotes_ng': ['https://github.com/saltstack/salt-winrepo-ng.git'],
'winrepo_branch': 'master',
'winrepo_ssl_verify': True,
'winrepo_user': '',
'winrepo_password': '',
'winrepo_insecure_auth': False,
'winrepo_privkey': '',
'winrepo_pubkey': '',
'winrepo_passphrase': '',
'winrepo_refspecs': _DFLT_REFSPECS,
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-minion.pid'),
'range_server': 'range:80',
'reactor_refresh_interval': 60,
'reactor_worker_threads': 10,
'reactor_worker_hwm': 10000,
'engines': [],
'tcp_keepalive': True,
'tcp_keepalive_idle': 300,
'tcp_keepalive_cnt': -1,
'tcp_keepalive_intvl': -1,
'modules_max_memory': -1,
'grains_refresh_every': 0,
'minion_id_caching': True,
'minion_id_lowercase': False,
'keysize': 2048,
'transport': 'zeromq',
'auth_timeout': 5,
'auth_tries': 7,
'master_tries': _MASTER_TRIES,
'master_tops_first': False,
'auth_safemode': False,
'random_master': False,
'minion_floscript': os.path.join(FLO_DIR, 'minion.flo'),
'caller_floscript': os.path.join(FLO_DIR, 'caller.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.1,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4510,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': False,
'raet_clear_remotes': True,
'raet_clear_remote_masters': True,
'raet_road_bufcnt': 2,
'raet_lane_bufcnt': 100,
'cluster_mode': False,
'cluster_masters': [],
'restart_on_error': False,
'ping_interval': 0,
'username': None,
'password': None,
'zmq_filtering': False,
'zmq_monitor': False,
'cache_sreqs': True,
'cmd_safe': True,
'sudo_user': '',
'http_connect_timeout': 20.0, # tornado default - 20 seconds
'http_request_timeout': 1 * 60 * 60.0, # 1 hour
'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB
'event_match_type': 'startswith',
'minion_restart_command': [],
'pub_ret': True,
'proxy_host': '',
'proxy_username': '',
'proxy_password': '',
'proxy_port': 0,
'minion_jid_queue_hwm': 100,
'ssl': None,
'multifunc_ordered': False,
'beacons_before_connect': False,
'scheduler_before_connect': False,
'cache': 'localfs',
'salt_cp_chunk_size': 65536,
'extmod_whitelist': {},
'extmod_blacklist': {},
'minion_sign_messages': False,
'docker.compare_container_networks': {
'static': ['Aliases', 'Links', 'IPAMConfig'],
'automatic': ['IPAddress', 'Gateway',
'GlobalIPv6Address', 'IPv6Gateway'],
},
'discovery': False,
'schedule': {},
'ssh_merge_pillar': True
}
DEFAULT_MASTER_OPTS = {
'interface': '0.0.0.0',
'publish_port': 4505,
'zmq_backlog': 1000,
'pub_hwm': 1000,
'auth_mode': 1,
'user': _MASTER_USER,
'worker_threads': 5,
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
'sock_pool_size': 1,
'ret_port': 4506,
'timeout': 5,
'keep_jobs': 24,
'archive_jobs': False,
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'),
'key_cache': '',
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'),
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR,
salt.syspaths.SPM_FORMULA_PATH]
},
'master_roots': {
'base': [salt.syspaths.BASE_MASTER_ROOTS_DIR],
},
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR,
salt.syspaths.SPM_PILLAR_PATH]
},
'on_demand_ext_pillar': ['libvirt', 'virtkey'],
'decrypt_pillar': [],
'decrypt_pillar_delimiter': ':',
'decrypt_pillar_default': 'gpg',
'decrypt_pillar_renderers': ['gpg'],
'thoriumenv': None,
'thorium_top': 'top.sls',
'thorium_interval': 0.5,
'thorium_roots': {
'base': [salt.syspaths.BASE_THORIUM_ROOTS_DIR],
},
'top_file_merging_strategy': 'merge',
'env_order': [],
'saltenv': None,
'lock_saltenv': False,
'pillarenv': None,
'default_top': 'base',
'file_client': 'local',
'local': True,
# Update intervals
'roots_update_interval': DEFAULT_INTERVAL,
'azurefs_update_interval': DEFAULT_INTERVAL,
'gitfs_update_interval': DEFAULT_INTERVAL,
'hgfs_update_interval': DEFAULT_INTERVAL,
'minionfs_update_interval': DEFAULT_INTERVAL,
's3fs_update_interval': DEFAULT_INTERVAL,
'svnfs_update_interval': DEFAULT_INTERVAL,
'git_pillar_base': 'master',
'git_pillar_branch': 'master',
'git_pillar_env': '',
'git_pillar_root': '',
'git_pillar_ssl_verify': True,
'git_pillar_global_lock': True,
'git_pillar_user': '',
'git_pillar_password': '',
'git_pillar_insecure_auth': False,
'git_pillar_privkey': '',
'git_pillar_pubkey': '',
'git_pillar_passphrase': '',
'git_pillar_refspecs': _DFLT_REFSPECS,
'git_pillar_includes': True,
'git_pillar_verify_config': True,
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'gitfs_saltenv_whitelist': [],
'gitfs_saltenv_blacklist': [],
'gitfs_global_lock': True,
'gitfs_ssl_verify': True,
'gitfs_saltenv': [],
'gitfs_ref_types': ['branch', 'tag', 'sha'],
'gitfs_refspecs': _DFLT_REFSPECS,
'gitfs_disable_saltenv_mapping': False,
'hgfs_remotes': [],
'hgfs_mountpoint': '',
'hgfs_root': '',
'hgfs_base': 'default',
'hgfs_branch_method': 'branches',
'hgfs_env_whitelist': [],
'hgfs_env_blacklist': [],
'hgfs_saltenv_whitelist': [],
'hgfs_saltenv_blacklist': [],
'show_timeout': True,
'show_jid': False,
'unique_jid': False,
'svnfs_remotes': [],
'svnfs_mountpoint': '',
'svnfs_root': '',
'svnfs_trunk': 'trunk',
'svnfs_branches': 'branches',
'svnfs_tags': 'tags',
'svnfs_env_whitelist': [],
'svnfs_env_blacklist': [],
'svnfs_saltenv_whitelist': [],
'svnfs_saltenv_blacklist': [],
'max_event_size': 1048576,
'master_stats': False,
'master_stats_event_iter': 60,
'minionfs_env': 'base',
'minionfs_mountpoint': '',
'minionfs_whitelist': [],
'minionfs_blacklist': [],
'ext_pillar': [],
'pillar_version': 2,
'pillar_opts': False,
'pillar_safe_render_error': True,
'pillar_source_merging_strategy': 'smart',
'pillar_merge_lists': False,
'pillar_includes_override_sls': False,
'pillar_cache': False,
'pillar_cache_ttl': 3600,
'pillar_cache_backend': 'disk',
'ping_on_rotate': False,
'peer': {},
'preserve_minion_cache': False,
'syndic_master': 'masterofmasters',
'syndic_failover': 'random',
'syndic_forward_all_events': False,
'syndic_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'syndic'),
'syndic_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-syndic.pid'),
'outputter_dirs': [],
'runner_dirs': [],
'utils_dirs': [],
'client_acl_verify': True,
'publisher_acl': {},
'publisher_acl_blacklist': {},
'sudo_acl': False,
'external_auth': {},
'token_expire': 43200,
'token_expire_user_override': False,
'permissive_acl': False,
'keep_acl_in_token': False,
'eauth_acl_module': '',
'eauth_tokens': 'localfs',
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'extmods'),
'module_dirs': [],
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
'file_ignore_regex': [],
'file_ignore_glob': [],
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'fileserver_limit_traversal': False,
'fileserver_verify_config': True,
'max_open_files': 100000,
'hash_type': 'sha256',
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'master'),
'open_mode': False,
'auto_accept': False,
'renderer': 'jinja|yaml',
'renderer_whitelist': [],
'renderer_blacklist': [],
'failhard': False,
'state_top': 'top.sls',
'state_top_saltenv': None,
'master_tops': {},
'master_tops_first': False,
'order_masters': False,
'job_cache': True,
'ext_job_cache': '',
'master_job_cache': 'local_cache',
'job_cache_store_endtime': False,
'minion_data_cache': True,
'enforce_mine_cache': False,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'tcp_master_pub_port': 4512,
'tcp_master_pull_port': 4513,
'tcp_master_publish_pull': 4514,
'tcp_master_workers': 4515,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'master'),
'log_level': 'warning',
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_fmt_jid': _DFLT_LOG_FMT_JID,
'log_granular_levels': {},
'log_rotate_max_bytes': 0,
'log_rotate_backup_count': 0,
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-master.pid'),
'publish_session': 86400,
'range_server': 'range:80',
'reactor': [],
'reactor_refresh_interval': 60,
'reactor_worker_threads': 10,
'reactor_worker_hwm': 10000,
'engines': [],
'event_return': '',
'event_return_queue': 0,
'event_return_whitelist': [],
'event_return_blacklist': [],
'event_match_type': 'startswith',
'runner_returns': True,
'serial': 'msgpack',
'test': False,
'state_verbose': True,
'state_output': 'full',
'state_output_diff': False,
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'search': '',
'loop_interval': 60,
'nodegroups': {},
'ssh_list_nodegroups': {},
'ssh_use_home_key': False,
'cython_enable': False,
'enable_gpu_grains': False,
# XXX: Remove 'key_logfile' support in 2014.1.0
'key_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'key'),
'verify_env': True,
'permissive_pki_access': False,
'key_pass': None,
'signing_key_pass': None,
'default_include': 'master.d/*.conf',
'winrepo_dir': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
'winrepo_dir_ng': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo-ng'),
'winrepo_cachefile': 'winrepo.p',
'winrepo_remotes': ['https://github.com/saltstack/salt-winrepo.git'],
'winrepo_remotes_ng': ['https://github.com/saltstack/salt-winrepo-ng.git'],
'winrepo_branch': 'master',
'winrepo_ssl_verify': True,
'winrepo_user': '',
'winrepo_password': '',
'winrepo_insecure_auth': False,
'winrepo_privkey': '',
'winrepo_pubkey': '',
'winrepo_passphrase': '',
'winrepo_refspecs': _DFLT_REFSPECS,
'syndic_wait': 5,
'jinja_env': {},
'jinja_sls_env': {},
'jinja_lstrip_blocks': False,
'jinja_trim_blocks': False,
'tcp_keepalive': True,
'tcp_keepalive_idle': 300,
'tcp_keepalive_cnt': -1,
'tcp_keepalive_intvl': -1,
'sign_pub_messages': True,
'keysize': 2048,
'transport': 'zeromq',
'gather_job_timeout': 10,
'syndic_event_forward_timeout': 0.5,
'syndic_jid_forward_cache_hwm': 100,
'regen_thin': False,
'ssh_passwd': '',
'ssh_priv_passwd': '',
'ssh_port': '22',
'ssh_sudo': False,
'ssh_sudo_user': '',
'ssh_timeout': 60,
'ssh_user': 'root',
'ssh_scan_ports': '22',
'ssh_scan_timeout': 0.01,
'ssh_identities_only': False,
'ssh_log_file': os.path.join(salt.syspaths.LOGS_DIR, 'ssh'),
'ssh_config_file': os.path.join(salt.syspaths.HOME_DIR, '.ssh', 'config'),
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.01,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4506,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': True,
'raet_clear_remotes': False,
'raet_clear_remote_masters': True,
'raet_road_bufcnt': 2,
'raet_lane_bufcnt': 100,
'cluster_mode': False,
'cluster_masters': [],
'sqlite_queue_dir': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'queues'),
'queue_dirs': [],
'cli_summary': False,
'max_minions': 0,
'master_sign_key_name': 'master_sign',
'master_sign_pubkey': False,
'master_pubkey_signature': 'master_pubkey_signature',
'master_use_pubkey_signature': False,
'zmq_filtering': False,
'zmq_monitor': False,
'con_cache': False,
'rotate_aes_key': True,
'cache_sreqs': True,
'dummy_pub': False,
'http_connect_timeout': 20.0, # tornado default - 20 seconds
'http_request_timeout': 1 * 60 * 60.0, # 1 hour
'http_max_body': 100 * 1024 * 1024 * 1024, # 100GB
'python2_bin': 'python2',
'python3_bin': 'python3',
'cache': 'localfs',
'memcache_expire_seconds': 0,
'memcache_max_items': 1024,
'memcache_full_cleanup': False,
'memcache_debug': False,
'thin_extra_mods': '',
'min_extra_mods': '',
'ssl': None,
'extmod_whitelist': {},
'extmod_blacklist': {},
'clean_dynamic_modules': True,
'django_auth_path': '',
'django_auth_settings': '',
'allow_minion_key_revoke': True,
'salt_cp_chunk_size': 98304,
'require_minion_sign_messages': False,
'drop_messages_signature_fail': False,
'discovery': False,
'schedule': {},
'auth_events': True,
'minion_data_cache_events': True,
'enable_ssh_minions': False,
}
# ----- Salt Proxy Minion Configuration Defaults ----------------------------------->
# These are merged with DEFAULT_MINION_OPTS since many of them also apply here.
DEFAULT_PROXY_MINION_OPTS = {
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'proxy'),
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'proxy'),
'add_proxymodule_to_opts': False,
'proxy_merge_grains_in_module': True,
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'proxy', 'extmods'),
'append_minionid_config_dirs': ['cachedir', 'pidfile', 'default_include', 'extension_modules'],
'default_include': 'proxy.d/*.conf',
'proxy_merge_pillar_in_opts': False,
'proxy_deep_merge_pillar_in_opts': False,
'proxy_merge_pillar_in_opts_strategy': 'smart',
'proxy_mines_pillar': True,
# By default, proxies will preserve the connection.
# If this option is set to False,
# the connection with the remote dumb device
# is closed after each command request.
'proxy_always_alive': True,
'proxy_keep_alive': True, # by default will try to keep alive the connection
'proxy_keep_alive_interval': 1, # frequency of the proxy keepalive in minutes
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'proxy'),
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'proxy'),
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'proxy'),
}
# ----- Salt Cloud Configuration Defaults ----------------------------------->
DEFAULT_CLOUD_OPTS = {
'verify_env': True,
'default_include': 'cloud.conf.d/*.conf',
# Global defaults
'ssh_auth': '',
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'cloud'),
'keysize': 4096,
'os': '',
'script': 'bootstrap-salt',
'start_action': None,
'enable_hard_maps': False,
'delete_sshkeys': False,
# Custom deploy scripts
'deploy_scripts_search_path': 'cloud.deploy.d',
# Logging defaults
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'cloud'),
'log_level': 'warning',
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_fmt_jid': _DFLT_LOG_FMT_JID,
'log_granular_levels': {},
'log_rotate_max_bytes': 0,
'log_rotate_backup_count': 0,
'bootstrap_delay': None,
'cache': 'localfs',
}
DEFAULT_API_OPTS = {
# ----- Salt master settings overridden by Salt-API --------------------->
'api_pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-api.pid'),
'api_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'api'),
'rest_timeout': 300,
# <---- Salt master settings overridden by Salt-API ----------------------
}
DEFAULT_SPM_OPTS = {
# ----- Salt master settings overridden by SPM --------------------->
'spm_conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'spm'),
'formula_path': salt.syspaths.SPM_FORMULA_PATH,
'pillar_path': salt.syspaths.SPM_PILLAR_PATH,
'reactor_path': salt.syspaths.SPM_REACTOR_PATH,
'spm_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'spm'),
'spm_default_include': 'spm.d/*.conf',
# spm_repos_config also includes a .d/ directory
'spm_repos_config': '/etc/salt/spm.repos',
'spm_cache_dir': os.path.join(salt.syspaths.CACHE_DIR, 'spm'),
'spm_build_dir': os.path.join(salt.syspaths.SRV_ROOT_DIR, 'spm_build'),
'spm_build_exclude': ['CVS', '.hg', '.git', '.svn'],
'spm_db': os.path.join(salt.syspaths.CACHE_DIR, 'spm', 'packages.db'),
'cache': 'localfs',
'spm_repo_dups': 'ignore',
# If set, spm_node_type will be either master or minion, but they should
# NOT be a default
'spm_node_type': '',
'spm_share_dir': os.path.join(salt.syspaths.SHARE_DIR, 'spm'),
# <---- Salt master settings overridden by SPM ----------------------
}
VM_CONFIG_DEFAULTS = {
'default_include': 'cloud.profiles.d/*.conf',
}
PROVIDER_CONFIG_DEFAULTS = {
'default_include': 'cloud.providers.d/*.conf',
}
# <---- Salt Cloud Configuration Defaults ------------------------------------
def _validate_file_roots(file_roots):
'''
    If an environment in the file_roots option has a value that is not a list
    or tuple, replace it with an empty list instead of erroring out; saltenv
    names are normalized to strings and globs in the paths are expanded.
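
    Illustrative sketch (the paths below are hypothetical):

    .. code-block:: python

        file_roots = {'base': ['/srv/salt'], 'dev': None}
        file_roots = _validate_file_roots(file_roots)
        # 'dev' becomes an empty list; globs in the 'base' paths are expanded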
'''
if not isinstance(file_roots, dict):
log.warning('The file_roots parameter is not properly formatted,'
' using defaults')
return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
for saltenv, dirs in six.iteritems(file_roots):
normalized_saltenv = six.text_type(saltenv)
if normalized_saltenv != saltenv:
file_roots[normalized_saltenv] = file_roots.pop(saltenv)
if not isinstance(dirs, (list, tuple)):
file_roots[normalized_saltenv] = []
file_roots[normalized_saltenv] = \
_expand_glob_path(file_roots[normalized_saltenv])
return file_roots
def _expand_glob_path(file_roots):
'''
Applies shell globbing to a set of directories and returns
the expanded paths
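
    For example (hypothetical paths):

    .. code-block:: python

        _expand_glob_path(['/srv/salt/base*', '/srv/formulas'])
        # -> every path matching '/srv/salt/base*' plus '/srv/formulas'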
'''
unglobbed_path = []
for path in file_roots:
try:
if glob.has_magic(path):
unglobbed_path.extend(glob.glob(path))
else:
unglobbed_path.append(path)
except Exception:
unglobbed_path.append(path)
return unglobbed_path
def _validate_opts(opts):
'''
    Check that all of the values passed into the config are of the correct
    type for their respective options
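
    A rough sketch of the behaviour (the bad value below is illustrative):

    .. code-block:: python

        opts = DEFAULT_MINION_OPTS.copy()
        opts['raet_port'] = 'not-a-port'
        _validate_opts(opts)  # logs a warning for 'raet_port', returns False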
'''
def format_multi_opt(valid_type):
try:
num_types = len(valid_type)
except TypeError:
# Bare type name won't have a length, return the name of the type
# passed.
return valid_type.__name__
else:
def get_types(types, type_tuple):
for item in type_tuple:
if isinstance(item, tuple):
get_types(types, item)
else:
try:
types.append(item.__name__)
except AttributeError:
log.warning(
'Unable to interpret type %s while validating '
'configuration', item
)
types = []
get_types(types, valid_type)
ret = ', '.join(types[:-1])
ret += ' or ' + types[-1]
return ret
errors = []
err = (
'Config option \'{0}\' with value {1} has an invalid type of {2}, a '
'{3} is required for this option'
)
for key, val in six.iteritems(opts):
if key in VALID_OPTS:
if val is None:
if VALID_OPTS[key] is None:
continue
else:
try:
if None in VALID_OPTS[key]:
continue
except TypeError:
# VALID_OPTS[key] is not iterable and not None
pass
if isinstance(val, VALID_OPTS[key]):
continue
# We don't know what data type sdb will return at run-time so we
# simply cannot check it for correctness here at start-time.
if isinstance(val, six.string_types) and val.startswith('sdb://'):
continue
if hasattr(VALID_OPTS[key], '__call__'):
try:
VALID_OPTS[key](val)
if isinstance(val, (list, dict)):
# We'll only get here if VALID_OPTS[key] is str or
# bool, and the passed value is a list/dict. Attempting
# to run int() or float() on a list/dict will raise an
# exception, but running str() or bool() on it will
# pass despite not being the correct type.
errors.append(
err.format(
key,
val,
type(val).__name__,
VALID_OPTS[key].__name__
)
)
except (TypeError, ValueError):
errors.append(
err.format(key,
val,
type(val).__name__,
VALID_OPTS[key].__name__)
)
continue
errors.append(
err.format(key,
val,
type(val).__name__,
format_multi_opt(VALID_OPTS[key]))
)
# Convert list to comma-delimited string for 'return' config option
if isinstance(opts.get('return'), list):
opts['return'] = ','.join(opts['return'])
# RAET on Windows uses 'win32file.CreateMailslot()' for IPC. Due to this,
# sock_dirs must start with '\\.\mailslot\' and not contain any colons.
# We don't expect the user to know this, so we will fix up their path for
# them if it isn't compliant.
if (salt.utils.platform.is_windows() and opts.get('transport') == 'raet' and
'sock_dir' in opts and
not opts['sock_dir'].startswith('\\\\.\\mailslot\\')):
opts['sock_dir'] = (
'\\\\.\\mailslot\\' + opts['sock_dir'].replace(':', ''))
for error in errors:
log.warning(error)
if errors:
return False
return True
def _validate_ssh_minion_opts(opts):
'''
Ensure we're not using any invalid ssh_minion_opts. We want to make sure
that the ssh_minion_opts does not override any pillar or fileserver options
inherited from the master config. To add other items, modify the if
statement in the for loop below.
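
    Sketch (the values below are illustrative):

    .. code-block:: python

        opts = {'ssh_minion_opts': {'gitfs_remotes': [], 'cmd_safe': True}}
        _validate_ssh_minion_opts(opts)
        # 'gitfs_remotes' is dropped with a warning, 'cmd_safe' is kept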
'''
ssh_minion_opts = opts.get('ssh_minion_opts', {})
if not isinstance(ssh_minion_opts, dict):
log.error('Invalidly-formatted ssh_minion_opts')
opts.pop('ssh_minion_opts')
for opt_name in list(ssh_minion_opts):
if re.match('^[a-z0-9]+fs_', opt_name, flags=re.IGNORECASE) \
or ('pillar' in opt_name and not 'ssh_merge_pillar' == opt_name) \
or opt_name in ('fileserver_backend',):
log.warning(
'\'%s\' is not a valid ssh_minion_opts parameter, ignoring',
opt_name
)
ssh_minion_opts.pop(opt_name)
def _append_domain(opts):
'''
Append a domain to the existing id if it doesn't already exist
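
    For example (hostnames are illustrative):

    .. code-block:: python

        _append_domain({'id': 'web01', 'append_domain': 'example.com'})
        # -> 'web01.example.com'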
'''
# Domain already exists
if opts['id'].endswith(opts['append_domain']):
return opts['id']
# Trailing dot should mean an FQDN that is terminated, leave it alone.
if opts['id'].endswith('.'):
return opts['id']
return '{0[id]}.{0[append_domain]}'.format(opts)
def _read_conf_file(path):
'''
Read in a config file from a given path and process it into a dictionary
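
    Sketch (the path is illustrative):

    .. code-block:: python

        conf_opts = _read_conf_file('/etc/salt/minion')
        # YAML mapping from the file; a numeric 'id' is coerced to a string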
'''
log.debug('Reading configuration from %s', path)
with salt.utils.files.fopen(path, 'r') as conf_file:
try:
conf_opts = salt.utils.yaml.safe_load(conf_file) or {}
except salt.utils.yaml.YAMLError as err:
message = 'Error parsing configuration file: {0} - {1}'.format(path, err)
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# only interpret documents as a valid conf, not things like strings,
# which might have been caused by invalid yaml syntax
if not isinstance(conf_opts, dict):
message = 'Error parsing configuration file: {0} - conf ' \
'should be a document, not {1}.'.format(path, type(conf_opts))
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
# allow using numeric ids: convert int to string
if 'id' in conf_opts:
if not isinstance(conf_opts['id'], six.string_types):
conf_opts['id'] = six.text_type(conf_opts['id'])
else:
conf_opts['id'] = salt.utils.data.decode(conf_opts['id'])
return conf_opts
def _absolute_path(path, relative_to=None):
'''
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
    not an absolute path, we try to prepend ``relative_to`` to ``path`` and if
that path exists, return that one
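
    For example (hypothetical files):

    .. code-block:: python

        _absolute_path('cloud.providers', relative_to='/etc/salt')
        # -> '/etc/salt/cloud.providers' if that file exists,
        #    otherwise 'cloud.providers' is returned unchanged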
'''
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path \'%s\' converted to existing absolute path '
'\'%s\'', path, _abspath
)
return _abspath
return path
def load_config(path, env_var, default_path=None, exit_on_config_errors=True):
'''
Returns configuration dict from parsing either the file described by
``path`` or the environment variable described by ``env_var`` as YAML.
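
    A minimal sketch, mirroring how the higher level helpers call it (the
    paths are illustrative):

    .. code-block:: python

        import salt.config

        overrides = salt.config.load_config(
            '/etc/salt/minion',
            'SALT_MINION_CONFIG',
            default_path='/etc/salt/minion')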
'''
if path is None:
# When the passed path is None, we just want the configuration
# defaults, not actually loading the whole configuration.
return {}
if default_path is None:
# This is most likely not being used from salt, i.e., could be salt-cloud
# or salt-api which have not yet migrated to the new default_path
# argument. Let's issue a warning message that the environ vars won't
# work.
import inspect
previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
log.warning(
"The function '%s()' defined in '%s' is not yet using the "
"new 'default_path' argument to `salt.config.load_config()`. "
"As such, the '%s' environment variable will be ignored",
previous_frame.function, previous_frame.filename, env_var
)
# In this case, maintain old behavior
default_path = DEFAULT_MASTER_OPTS['conf_file']
# Default to the environment variable path, if it exists
env_path = os.environ.get(env_var, path)
if not env_path or not os.path.isfile(env_path):
env_path = path
# If non-default path from `-c`, use that over the env variable
if path != default_path:
env_path = path
path = env_path
# If the configuration file is missing, attempt to copy the template,
# after removing the first header line.
if not os.path.isfile(path):
template = '{0}.template'.format(path)
if os.path.isfile(template):
log.debug('Writing %s based on %s', path, template)
with salt.utils.files.fopen(path, 'w') as out:
with salt.utils.files.fopen(template, 'r') as ifile:
ifile.readline() # skip first line
out.write(ifile.read())
opts = {}
if salt.utils.validate.path.is_readable(path):
try:
opts = _read_conf_file(path)
opts['conf_file'] = path
except salt.exceptions.SaltConfigurationError as error:
log.error(error)
if exit_on_config_errors:
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
log.debug('Missing configuration file: %s', path)
return opts
def include_config(include, orig_path, verbose, exit_on_config_errors=False):
'''
Parses extra configuration file(s) specified in an include list in the
main config file.
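
    Sketch of the typical call made right after ``load_config`` (paths are
    illustrative):

    .. code-block:: python

        overrides = load_config('/etc/salt/minion', 'SALT_MINION_CONFIG',
                                default_path='/etc/salt/minion')
        overrides.update(include_config(overrides.get('include', []),
                                        '/etc/salt/minion', verbose=True))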
'''
# Protect against empty option
if not include:
return {}
if orig_path is None:
# When the passed path is None, we just want the configuration
# defaults, not actually loading the whole configuration.
return {}
if isinstance(include, six.string_types):
include = [include]
configuration = {}
for path in include:
# Allow for includes like ~/foo
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(orig_path), path)
# Catch situation where user typos path in configuration; also warns
# for empty include directory (which might be by design)
if len(glob.glob(path)) == 0:
if verbose:
log.warning(
'Warning parsing configuration file: "include" path/glob '
"'%s' matches no files", path
)
for fn_ in sorted(glob.glob(path)):
log.debug('Including configuration from \'%s\'', fn_)
try:
opts = _read_conf_file(fn_)
except salt.exceptions.SaltConfigurationError as error:
log.error(error)
if exit_on_config_errors:
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
# Initialize default config if we wish to skip config errors
opts = {}
schedule = opts.get('schedule', {})
if schedule and 'schedule' in configuration:
configuration['schedule'].update(schedule)
include = opts.get('include', [])
if include:
opts.update(include_config(include, fn_, verbose))
salt.utils.dictupdate.update(configuration, opts, True, True)
return configuration
def prepend_root_dir(opts, path_options):
'''
    Prepends the value of the 'root_dir' option to the options that represent
    filesystem paths.
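
    Roughly, with illustrative values and assuming a default ROOT_DIR:

    .. code-block:: python

        opts = {'root_dir': '/tmp/salt', 'cachedir': '/var/cache/salt/minion'}
        prepend_root_dir(opts, ['cachedir'])
        # opts['cachedir'] -> '/tmp/salt/var/cache/salt/minion'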
'''
root_dir = os.path.abspath(opts['root_dir'])
def_root_dir = salt.syspaths.ROOT_DIR.rstrip(os.sep)
for path_option in path_options:
if path_option in opts:
path = opts[path_option]
tmp_path_def_root_dir = None
tmp_path_root_dir = None
# When running testsuite, salt.syspaths.ROOT_DIR is often empty
if path == def_root_dir or path.startswith(def_root_dir + os.sep):
# Remove the default root dir prefix
tmp_path_def_root_dir = path[len(def_root_dir):]
if root_dir and (path == root_dir or
path.startswith(root_dir + os.sep)):
# Remove the root dir prefix
tmp_path_root_dir = path[len(root_dir):]
if tmp_path_def_root_dir and not tmp_path_root_dir:
# Just the default root dir matched
path = tmp_path_def_root_dir
elif tmp_path_root_dir and not tmp_path_def_root_dir:
# Just the root dir matched
path = tmp_path_root_dir
elif tmp_path_def_root_dir and tmp_path_root_dir:
# In this case both the default root dir and the override root
# dir matched; this means that either
# def_root_dir is a substring of root_dir or vice versa
# We must choose the most specific path
if def_root_dir in root_dir:
path = tmp_path_root_dir
else:
path = tmp_path_def_root_dir
elif salt.utils.platform.is_windows() and not os.path.splitdrive(path)[0]:
# In windows, os.path.isabs resolves '/' to 'C:\\' or whatever
# the root drive is. This elif prevents the next from being
# hit, so that the root_dir is prefixed in cases where the
# drive is not prefixed on a config option
pass
elif os.path.isabs(path):
# Absolute path (not default or overridden root_dir)
# No prepending required
continue
# Prepending the root dir
opts[path_option] = salt.utils.path.join(root_dir, path)
def insert_system_path(opts, paths):
'''
    Inserts the given path(s) into the Python path, taking the 'root_dir'
    option into consideration.
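
    Sketch (the path is illustrative):

    .. code-block:: python

        insert_system_path(opts, '/usr/local/lib/salt-modules')
        # the directory (prefixed with root_dir when applicable) is pushed
        # onto sys.path if it exists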
'''
if isinstance(paths, six.string_types):
paths = [paths]
for path in paths:
path_options = {'path': path, 'root_dir': opts['root_dir']}
prepend_root_dir(path_options, path_options)
if (os.path.isdir(path_options['path'])
and path_options['path'] not in sys.path):
sys.path.insert(0, path_options['path'])
def minion_config(path,
env_var='SALT_MINION_CONFIG',
defaults=None,
cache_minion_id=False,
ignore_config_errors=True,
minion_id=None,
role='minion'):
'''
Reads in the minion configuration file and sets up special options
This is useful for Minion-side operations, such as the
:py:class:`~salt.client.Caller` class, and manually running the loader
interface.
.. code-block:: python
import salt.config
minion_opts = salt.config.minion_config('/etc/salt/minion')
'''
if defaults is None:
defaults = DEFAULT_MINION_OPTS.copy()
if not os.environ.get(env_var, None):
        # No valid setting was given using the environment variable.
        # Let's see if SALT_CONFIG_DIR is of any use
salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
if salt_config_dir:
env_config_file_path = os.path.join(salt_config_dir, 'minion')
if salt_config_dir and os.path.isfile(env_config_file_path):
# We can get a configuration file using SALT_CONFIG_DIR, let's
# update the environment with this information
os.environ[env_var] = env_config_file_path
overrides = load_config(path, env_var, DEFAULT_MINION_OPTS['conf_file'])
default_include = overrides.get('default_include',
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False,
exit_on_config_errors=not ignore_config_errors))
overrides.update(include_config(include, path, verbose=True,
exit_on_config_errors=not ignore_config_errors))
opts = apply_minion_config(overrides, defaults,
cache_minion_id=cache_minion_id,
minion_id=minion_id)
opts['__role'] = role
apply_sdb(opts)
_validate_opts(opts)
return opts
def proxy_config(path,
env_var='SALT_PROXY_CONFIG',
defaults=None,
cache_minion_id=False,
ignore_config_errors=True,
minion_id=None):
'''
Reads in the proxy minion configuration file and sets up special options
This is useful for Minion-side operations, such as the
:py:class:`~salt.client.Caller` class, and manually running the loader
interface.
.. code-block:: python
import salt.config
proxy_opts = salt.config.proxy_config('/etc/salt/proxy')
'''
if defaults is None:
defaults = DEFAULT_MINION_OPTS.copy()
defaults.update(DEFAULT_PROXY_MINION_OPTS)
if not os.environ.get(env_var, None):
        # No valid setting was given using the environment variable.
        # Let's see if SALT_CONFIG_DIR is of any use
salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
if salt_config_dir:
env_config_file_path = os.path.join(salt_config_dir, 'proxy')
if salt_config_dir and os.path.isfile(env_config_file_path):
# We can get a configuration file using SALT_CONFIG_DIR, let's
# update the environment with this information
os.environ[env_var] = env_config_file_path
overrides = load_config(path, env_var, DEFAULT_PROXY_MINION_OPTS['conf_file'])
default_include = overrides.get('default_include',
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False,
exit_on_config_errors=not ignore_config_errors))
overrides.update(include_config(include, path, verbose=True,
exit_on_config_errors=not ignore_config_errors))
opts = apply_minion_config(overrides, defaults,
cache_minion_id=cache_minion_id,
minion_id=minion_id)
apply_sdb(opts)
_validate_opts(opts)
return opts
def syndic_config(master_config_path,
minion_config_path,
master_env_var='SALT_MASTER_CONFIG',
minion_env_var='SALT_MINION_CONFIG',
minion_defaults=None,
master_defaults=None):
if minion_defaults is None:
minion_defaults = DEFAULT_MINION_OPTS
if master_defaults is None:
master_defaults = DEFAULT_MASTER_OPTS
opts = {}
master_opts = master_config(
master_config_path, master_env_var, master_defaults
)
minion_opts = minion_config(
minion_config_path, minion_env_var, minion_defaults
)
opts['_minion_conf_file'] = master_opts['conf_file']
opts['_master_conf_file'] = minion_opts['conf_file']
opts.update(master_opts)
opts.update(minion_opts)
syndic_opts = {
'__role': 'syndic',
'root_dir': opts.get('root_dir', salt.syspaths.ROOT_DIR),
'pidfile': opts.get('syndic_pidfile', 'salt-syndic.pid'),
'log_file': opts.get('syndic_log_file', 'salt-syndic.log'),
'log_level': master_opts['log_level'],
'id': minion_opts['id'],
'pki_dir': minion_opts['pki_dir'],
'master': opts['syndic_master'],
'interface': master_opts['interface'],
'master_port': int(
opts.get(
# The user has explicitly defined the syndic master port
'syndic_master_port',
opts.get(
# No syndic_master_port, grab master_port from opts
'master_port',
# No master_opts, grab from the provided minion defaults
minion_defaults.get(
'master_port',
# Not on the provided minion defaults, load from the
# static minion defaults
DEFAULT_MINION_OPTS['master_port']
)
)
)
),
'user': opts.get('syndic_user', opts['user']),
'sock_dir': os.path.join(
opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
),
'sock_pool_size': master_opts['sock_pool_size'],
'cachedir': master_opts['cachedir'],
}
opts.update(syndic_opts)
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'autosign_grains_dir'
]
for config_key in ('log_file', 'key_logfile', 'syndic_log_file'):
# If this is not a URI and instead a local path
if urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
return opts
def apply_sdb(opts, sdb_opts=None):
'''
    Recursively resolve sdb:// links in opts
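
    Sketch (the sdb profile name below is hypothetical):

    .. code-block:: python

        opts['mysql.pass'] = 'sdb://mysecrets/mysql_pass'
        opts = apply_sdb(opts)
        # every 'sdb://...' string is replaced by the value returned from
        # salt.utils.sdb.sdb_get()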
'''
# Late load of SDB to keep CLI light
import salt.utils.sdb
if sdb_opts is None:
sdb_opts = opts
if isinstance(sdb_opts, six.string_types) and sdb_opts.startswith('sdb://'):
return salt.utils.sdb.sdb_get(sdb_opts, opts)
elif isinstance(sdb_opts, dict):
for key, value in six.iteritems(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
elif isinstance(sdb_opts, list):
for key, value in enumerate(sdb_opts):
if value is None:
continue
sdb_opts[key] = apply_sdb(opts, value)
return sdb_opts
# ----- Salt Cloud Configuration Functions ---------------------------------->
def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
master_config_path=None, master_config=None,
providers_config_path=None, providers_config=None,
profiles_config_path=None, profiles_config=None):
'''
Read in the Salt Cloud config and return the dict
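
    Typical usage (the path is illustrative):

    .. code-block:: python

        import salt.config

        cloud_opts = salt.config.cloud_config('/etc/salt/cloud')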
'''
if path:
config_dir = os.path.dirname(path)
else:
config_dir = salt.syspaths.CONFIG_DIR
# Load the cloud configuration
overrides = load_config(
path,
env_var,
os.path.join(config_dir, 'cloud')
)
if defaults is None:
defaults = DEFAULT_CLOUD_OPTS.copy()
# Set defaults early to override Salt Master's default config values later
defaults.update(overrides)
overrides = defaults
# Load cloud configuration from any default or provided includes
overrides.update(
salt.config.include_config(overrides['default_include'], path, verbose=False)
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
# The includes have been evaluated, let's see if master, providers and
# profiles configuration settings have been included and if not, set the
# default value
if 'master_config' in overrides and master_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
master_config_path = overrides['master_config']
elif 'master_config' not in overrides and not master_config \
and not master_config_path:
        # The configuration setting is not being provided in the main cloud
        # configuration file, nor as an argument; fall back to the default path
master_config_path = os.path.join(config_dir, 'master')
# Convert relative to absolute paths if necessary
master_config_path = _absolute_path(master_config_path, config_dir)
if 'providers_config' in overrides and providers_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
providers_config_path = overrides['providers_config']
elif 'providers_config' not in overrides and not providers_config \
and not providers_config_path:
providers_config_path = os.path.join(config_dir, 'cloud.providers')
# Convert relative to absolute paths if necessary
providers_config_path = _absolute_path(providers_config_path, config_dir)
if 'profiles_config' in overrides and profiles_config_path is None:
# The configuration setting is being specified in the main cloud
# configuration file
profiles_config_path = overrides['profiles_config']
elif 'profiles_config' not in overrides and not profiles_config \
and not profiles_config_path:
profiles_config_path = os.path.join(config_dir, 'cloud.profiles')
# Convert relative to absolute paths if necessary
profiles_config_path = _absolute_path(profiles_config_path, config_dir)
# Prepare the deploy scripts search path
deploy_scripts_search_path = overrides.get(
'deploy_scripts_search_path',
defaults.get('deploy_scripts_search_path', 'cloud.deploy.d')
)
if isinstance(deploy_scripts_search_path, six.string_types):
deploy_scripts_search_path = [deploy_scripts_search_path]
    # Check the provided deploy scripts search path, removing any non-existent
    # entries.
for idx, entry in enumerate(deploy_scripts_search_path[:]):
if not os.path.isabs(entry):
            # Let's see whether prepending the directory of the provided path
            # turns the entry into a proper directory
entry = os.path.join(os.path.dirname(path), entry)
if os.path.isdir(entry):
# Path exists, let's update the entry (its path might have been
# made absolute)
deploy_scripts_search_path[idx] = entry
continue
# It's not a directory? Remove it from the search path
deploy_scripts_search_path.pop(idx)
# Add the built-in scripts directory to the search path (last resort)
deploy_scripts_search_path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'cloud',
'deploy'
)
)
)
# Let's make the search path a tuple and add it to the overrides.
overrides.update(
deploy_scripts_search_path=tuple(deploy_scripts_search_path)
)
# Grab data from the 4 sources
# 1st - Master config
if master_config_path is not None and master_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `master_config` or `master_config_path`, not both.'
)
elif master_config_path is None and master_config is None:
master_config = salt.config.master_config(
overrides.get(
# use the value from the cloud config file
'master_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'master')
)
)
elif master_config_path is not None and master_config is None:
master_config = salt.config.master_config(master_config_path)
# cloud config has a separate cachedir
del master_config['cachedir']
# 2nd - salt-cloud configuration which was loaded before so we could
# extract the master configuration file if needed.
    # Override the master configuration with the salt cloud config (current overrides)
master_config.update(overrides)
# We now set the overridden master_config as the overrides
overrides = master_config
if providers_config_path is not None and providers_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `providers_config` or `providers_config_path`, '
'not both.'
)
elif providers_config_path is None and providers_config is None:
providers_config_path = overrides.get(
# use the value from the cloud config file
'providers_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
if profiles_config_path is not None and profiles_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Only pass `profiles_config` or `profiles_config_path`, not both.'
)
elif profiles_config_path is None and profiles_config is None:
profiles_config_path = overrides.get(
# use the value from the cloud config file
'profiles_config',
# if not found, use the default path
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
)
# Apply the salt-cloud configuration
opts = apply_cloud_config(overrides, defaults)
# 3rd - Include Cloud Providers
if 'providers' in opts:
if providers_config is not None:
raise salt.exceptions.SaltCloudConfigError(
'Do not mix the old cloud providers configuration with '
                'a pre-configured providers configuration '
'dictionary.'
)
if providers_config_path is not None:
providers_confd = os.path.join(
os.path.dirname(providers_config_path),
'cloud.providers.d', '*'
)
if (os.path.isfile(providers_config_path) or
glob.glob(providers_confd)):
raise salt.exceptions.SaltCloudConfigError(
'Do not mix the old cloud providers configuration with '
'the new one. The providers configuration should now go '
'in the file `{0}` or a separate `*.conf` file within '
'`cloud.providers.d/` which is relative to `{0}`.'.format(
os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
)
# No exception was raised? It's the old configuration alone
providers_config = opts['providers']
elif providers_config_path is not None:
        # Load from the configuration file, even if that file does not exist since
# it will be populated with defaults.
providers_config = cloud_providers_config(providers_config_path)
# Let's assign back the computed providers configuration
opts['providers'] = providers_config
# 4th - Include VM profiles config
if profiles_config is None:
# Load profiles configuration from the provided file
profiles_config = vm_profiles_config(profiles_config_path,
providers_config)
opts['profiles'] = profiles_config
# recurse opts for sdb configs
apply_sdb(opts)
# prepend root_dir
prepend_root_dirs = ['cachedir']
if 'log_file' in opts and urlparse(opts['log_file']).scheme == '':
        prepend_root_dirs.append('log_file')
prepend_root_dir(opts, prepend_root_dirs)
# Return the final options
return opts
def apply_cloud_config(overrides, defaults=None):
'''
Return a cloud config
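
    Sketch of the provider normalization this performs (values are
    illustrative):

    .. code-block:: python

        overrides = {'providers': {'my-ec2': {'driver': 'ec2', 'id': 'ABC'}}}
        config = apply_cloud_config(overrides)
        # config['providers']['my-ec2']['ec2'] now holds the provider entry,
        # with 'provider' set to 'my-ec2:ec2'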
'''
if defaults is None:
defaults = DEFAULT_CLOUD_OPTS
config = defaults.copy()
if overrides:
config.update(overrides)
    # If the user defined providers in salt cloud's main configuration file, we
    # need to make sure they are in the proper and expected format.
if 'providers' in config:
# Keep a copy of the defined providers
providers = config['providers'].copy()
# Reset the providers dictionary
config['providers'] = {}
# Populate the providers dictionary
for alias, details in six.iteritems(providers):
if isinstance(details, list):
for detail in details:
if 'driver' not in detail:
raise salt.exceptions.SaltCloudConfigError(
'The cloud provider alias \'{0}\' has an entry '
'missing the required setting of \'driver\'.'.format(
alias
)
)
driver = detail['driver']
if ':' in driver:
# Weird, but...
alias, driver = driver.split(':')
if alias not in config['providers']:
config['providers'][alias] = {}
detail['provider'] = '{0}:{1}'.format(alias, driver)
config['providers'][alias][driver] = detail
elif isinstance(details, dict):
if 'driver' not in details:
raise salt.exceptions.SaltCloudConfigError(
'The cloud provider alias \'{0}\' has an entry '
'missing the required setting of \'driver\''.format(
alias
)
)
driver = details['driver']
if ':' in driver:
# Weird, but...
alias, driver = driver.split(':')
if alias not in config['providers']:
config['providers'][alias] = {}
details['provider'] = '{0}:{1}'.format(alias, driver)
config['providers'][alias][driver] = details
# Migrate old configuration
config = old_to_new(config)
return config
def old_to_new(opts):
providers = (
'AWS',
'CLOUDSTACK',
'DIGITALOCEAN',
'EC2',
'GOGRID',
'IBMSCE',
'JOYENT',
'LINODE',
'OPENSTACK',
        'PARALLELS',
        'RACKSPACE',
        'SALTIFY'
)
for provider in providers:
provider_config = {}
for opt, val in opts.items():
if provider in opt:
value = val
name = opt.split('.', 1)[1]
provider_config[name] = value
lprovider = provider.lower()
if provider_config:
provider_config['provider'] = lprovider
opts.setdefault('providers', {})
# provider alias
opts['providers'][lprovider] = {}
# provider alias, provider driver
opts['providers'][lprovider][lprovider] = provider_config
return opts
def vm_profiles_config(path,
providers,
env_var='SALT_CLOUDVM_CONFIG',
defaults=None):
'''
Read in the salt cloud VM config file
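
    Sketch (paths are illustrative; ``providers`` normally comes from
    ``cloud_providers_config``):

    .. code-block:: python

        providers = cloud_providers_config('/etc/salt/cloud.providers')
        profiles = vm_profiles_config('/etc/salt/cloud.profiles', providers)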
'''
if defaults is None:
defaults = VM_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_vm_profiles_config(providers, overrides, defaults)
def apply_vm_profiles_config(providers, overrides, defaults=None):
if defaults is None:
defaults = VM_CONFIG_DEFAULTS
config = defaults.copy()
if overrides:
config.update(overrides)
vms = {}
for key, val in six.iteritems(config):
if key in ('conf_file', 'include', 'default_include', 'user'):
continue
if not isinstance(val, dict):
raise salt.exceptions.SaltCloudConfigError(
'The VM profiles configuration found in \'{0[conf_file]}\' is '
'not in the proper format'.format(config)
)
val['profile'] = key
vms[key] = val
# Is any VM profile extending data!?
for profile, details in six.iteritems(vms.copy()):
if 'extends' not in details:
if ':' in details['provider']:
alias, driver = details['provider'].split(':')
if alias not in providers or driver not in providers[alias]:
log.trace(
'The profile \'%s\' is defining \'%s\' '
'as the provider. Since there is no valid '
'configuration for that provider, the profile will be '
'removed from the available listing',
profile, details['provider']
)
vms.pop(profile)
continue
if 'profiles' not in providers[alias][driver]:
providers[alias][driver]['profiles'] = {}
providers[alias][driver]['profiles'][profile] = details
if details['provider'] not in providers:
log.trace(
'The profile \'%s\' is defining \'%s\' as the '
'provider. Since there is no valid configuration for '
'that provider, the profile will be removed from the '
'available listing', profile, details['provider']
)
vms.pop(profile)
continue
driver = next(iter(list(providers[details['provider']].keys())))
providers[details['provider']][driver].setdefault(
'profiles', {}).update({profile: details})
details['provider'] = '{0[provider]}:{1}'.format(details, driver)
vms[profile] = details
continue
extends = details.pop('extends')
if extends not in vms:
log.error(
'The \'%s\' profile is trying to extend data from \'%s\' '
'though \'%s\' is not defined in the salt profiles loaded '
'data. Not extending and removing from listing!',
profile, extends, extends
)
vms.pop(profile)
continue
extended = deepcopy(vms.get(extends))
extended.pop('profile')
# Merge extended configuration with base profile
extended = salt.utils.dictupdate.update(extended, details)
if ':' not in extended['provider']:
if extended['provider'] not in providers:
log.trace(
'The profile \'%s\' is defining \'%s\' as the '
'provider. Since there is no valid configuration for '
'that provider, the profile will be removed from the '
'available listing', profile, extended['provider']
)
vms.pop(profile)
continue
driver = next(iter(list(providers[extended['provider']].keys())))
providers[extended['provider']][driver].setdefault(
'profiles', {}).update({profile: extended})
extended['provider'] = '{0[provider]}:{1}'.format(extended, driver)
else:
alias, driver = extended['provider'].split(':')
if alias not in providers or driver not in providers[alias]:
log.trace(
'The profile \'%s\' is defining \'%s\' as '
'the provider. Since there is no valid configuration '
'for that provider, the profile will be removed from '
'the available listing', profile, extended['provider']
)
vms.pop(profile)
continue
providers[alias][driver].setdefault('profiles', {}).update(
{profile: extended}
)
# Update the profile's entry with the extended data
vms[profile] = extended
return vms
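# Illustrative sketch, not part of the original module; profile names and
# values are made up. A profile using 'extends' inherits the parent profile's
# keys and overrides only what it redefines:
#
#   base_profile:                 small_profile:
#     provider: my-ec2:ec2          extends: base_profile
#     image: ami-123                size: t2.micro
#
# resolves 'small_profile' to {'provider': 'my-ec2:ec2', 'image': 'ami-123',
# 'size': 't2.micro', 'profile': 'small_profile'}.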
def cloud_providers_config(path,
env_var='SALT_CLOUD_PROVIDERS_CONFIG',
defaults=None):
'''
Read in the salt cloud providers configuration file
'''
if defaults is None:
defaults = PROVIDER_CONFIG_DEFAULTS
overrides = salt.config.load_config(
path, env_var, os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
)
default_include = overrides.get(
'default_include', defaults['default_include']
)
include = overrides.get('include', [])
overrides.update(
salt.config.include_config(default_include, path, verbose=False)
)
overrides.update(
salt.config.include_config(include, path, verbose=True)
)
return apply_cloud_providers_config(overrides, defaults)
def apply_cloud_providers_config(overrides, defaults=None):
'''
Apply the loaded cloud providers configuration.
'''
if defaults is None:
defaults = PROVIDER_CONFIG_DEFAULTS
config = defaults.copy()
if overrides:
config.update(overrides)
# Is the user still using the old format in the new configuration file?!
for name, settings in six.iteritems(config.copy()):
if '.' in name:
log.warning(
'Please switch to the new providers configuration syntax'
)
# Let's help out and migrate the data
config = old_to_new(config)
# old_to_new will migrate the old data into the 'providers' key of
# the config dictionary. Let's map it correctly
for prov_name, prov_settings in six.iteritems(config.pop('providers')):
config[prov_name] = prov_settings
break
providers = {}
ext_count = 0
for key, val in six.iteritems(config):
if key in ('conf_file', 'include', 'default_include', 'user'):
continue
if not isinstance(val, (list, tuple)):
val = [val]
else:
# Need to check for duplicate cloud provider entries per "alias" or
# we won't be able to properly reference it.
handled_providers = set()
for details in val:
if 'driver' not in details:
if 'extends' not in details:
log.error(
'Please check your cloud providers configuration. '
'There\'s no \'driver\' nor \'extends\' definition '
'referenced.'
)
continue
if details['driver'] in handled_providers:
log.error(
'You can only have one entry per cloud provider. For '
'example, if you have a cloud provider configuration '
'section named, \'production\', you can only have a '
'single entry for EC2, Joyent, Openstack, and so '
'forth.'
)
raise salt.exceptions.SaltCloudConfigError(
'The cloud provider alias \'{0}\' has multiple entries '
'for the \'{1[driver]}\' driver.'.format(key, details)
)
handled_providers.add(details['driver'])
for entry in val:
if 'driver' not in entry:
entry['driver'] = '-only-extendable-{0}'.format(ext_count)
ext_count += 1
if key not in providers:
providers[key] = {}
provider = entry['driver']
if provider not in providers[key]:
providers[key][provider] = entry
# Is any provider extending data!?
while True:
keep_looping = False
for provider_alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries):
# Set a holder for the defined profiles
providers[provider_alias][driver]['profiles'] = {}
if 'extends' not in details:
continue
extends = details.pop('extends')
if ':' in extends:
alias, provider = extends.split(':')
if alias not in providers:
raise salt.exceptions.SaltCloudConfigError(
'The \'{0}\' cloud provider entry in \'{1}\' is '
'trying to extend data from \'{2}\' though '
'\'{2}\' is not defined in the salt cloud '
'providers loaded data.'.format(
details['driver'],
provider_alias,
alias
)
)
if provider not in providers.get(alias):
raise salt.exceptions.SaltCloudConfigError(
'The \'{0}\' cloud provider entry in \'{1}\' is '
'trying to extend data from \'{2}:{3}\' though '
'\'{3}\' is not defined in \'{1}\''.format(
details['driver'],
provider_alias,
alias,
provider
)
)
details['extends'] = '{0}:{1}'.format(alias, provider)
# change provider details '-only-extendable-' to extended
# provider name
details['driver'] = provider
elif providers.get(extends):
raise salt.exceptions.SaltCloudConfigError(
'The \'{0}\' cloud provider entry in \'{1}\' is '
'trying to extend from \'{2}\' and no provider was '
'specified. Not extending!'.format(
details['driver'], provider_alias, extends
)
)
elif extends not in providers:
raise salt.exceptions.SaltCloudConfigError(
'The \'{0}\' cloud provider entry in \'{1}\' is '
'trying to extend data from \'{2}\' though \'{2}\' '
'is not defined in the salt cloud providers loaded '
'data.'.format(
details['driver'], provider_alias, extends
)
)
else:
if driver in providers.get(extends):
details['extends'] = '{0}:{1}'.format(extends, driver)
elif '-only-extendable-' in providers.get(extends):
details['extends'] = '{0}:{1}'.format(
extends, '-only-extendable-{0}'.format(ext_count)
)
else:
# We're still not aware of what we're trying to extend
# from. Let's try on next iteration
details['extends'] = extends
keep_looping = True
if not keep_looping:
break
while True:
# Merge provided extends
keep_looping = False
for alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries):
if 'extends' not in details:
# Extends resolved or non existing, continue!
continue
if 'extends' in details['extends']:
# Since there's a nested extends, resolve this one in the
# next iteration
keep_looping = True
continue
# Let's get a reference to what we're supposed to extend
extends = details.pop('extends')
# Split the setting in (alias, driver)
ext_alias, ext_driver = extends.split(':')
# Grab a copy of what should be extended
extended = providers.get(ext_alias).get(ext_driver).copy()
# Merge the data to extend with the details
extended = salt.utils.dictupdate.update(extended, details)
# Update the providers dictionary with the merged data
providers[alias][driver] = extended
# Update name of the driver, now that it's populated with extended information
if driver.startswith('-only-extendable-'):
providers[alias][ext_driver] = providers[alias][driver]
# Delete driver with old name to maintain dictionary size
del providers[alias][driver]
if not keep_looping:
break
# Now clean up any providers entry that was just used to be a data tree to
# extend from
for provider_alias, entries in six.iteritems(providers.copy()):
for driver, details in six.iteritems(entries.copy()):
if not driver.startswith('-only-extendable-'):
continue
log.info(
"There's at least one cloud driver under the '%s' "
'cloud provider alias which does not have the required '
"'driver' setting. Removing it from the available "
'providers listing.', provider_alias
)
providers[provider_alias].pop(driver)
if not providers[provider_alias]:
providers.pop(provider_alias)
return providers
def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
'''
Search and return a setting in a known order:
1. In the virtual machine's configuration
2. In the virtual machine's profile configuration
3. In the virtual machine's provider configuration
4. In the salt cloud configuration if global searching is enabled
5. Return the provided default
'''
# As a last resort, return the default
value = default
if search_global is True and opts.get(name, None) is not None:
# The setting name exists in the cloud(global) configuration
value = deepcopy(opts[name])
if vm_ and name:
# Let's get the value from the profile, if present
if 'profile' in vm_ and vm_['profile'] is not None:
if name in opts['profiles'][vm_['profile']]:
if isinstance(value, dict):
value.update(opts['profiles'][vm_['profile']][name].copy())
else:
value = deepcopy(opts['profiles'][vm_['profile']][name])
# Let's get the value from the provider, if present.
if ':' in vm_['driver']:
# The provider is defined as <provider-alias>:<driver-name>
alias, driver = vm_['driver'].split(':')
if alias in opts['providers'] and \
driver in opts['providers'][alias]:
details = opts['providers'][alias][driver]
if name in details:
if isinstance(value, dict):
value.update(details[name].copy())
else:
value = deepcopy(details[name])
elif len(opts['providers'].get(vm_['driver'], ())) > 1:
# The provider is NOT defined as <provider-alias>:<driver-name>
# and there's more than one entry under the alias.
# WARN the user!!!!
log.error(
"The '%s' cloud provider definition has more than one "
'entry. Your VM configuration should be specifying the '
"provider as 'driver: %s:<driver-engine>'. Since "
"it's not, we're returning the first definition which "
'might not be what you intended.',
vm_['driver'], vm_['driver']
)
if vm_['driver'] in opts['providers']:
# There's only one driver defined for this provider. This is safe.
alias_defs = opts['providers'].get(vm_['driver'])
provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]
if name in provider_driver_defs:
# The setting name exists in the VM's provider configuration.
# Return it!
if isinstance(value, dict):
value.update(provider_driver_defs[name].copy())
else:
value = deepcopy(provider_driver_defs[name])
if name and vm_ and name in vm_:
# The setting name exists in VM configuration.
if isinstance(vm_[name], types.GeneratorType):
value = next(vm_[name], '')
else:
if isinstance(value, dict) and isinstance(vm_[name], dict):
value.update(vm_[name].copy())
else:
value = deepcopy(vm_[name])
return value
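# Illustrative usage sketch, not part of the original module; the setting
# name and default below are made up:
#
#   size = get_cloud_config_value('size', vm_, opts, default='t2.micro')
#
# The lookup walks from the VM dict, to its profile, to its provider entry,
# to the global cloud configuration (when search_global is True), and falls
# back to the supplied default.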
def is_provider_configured(opts, provider, required_keys=(), log_message=True, aliases=()):
'''
Check and return the first matching and fully configured cloud provider
configuration.
'''
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
if log_message is True:
                    # There's at least one required configuration key which is not
# set.
log.warning(
"The required '%s' configuration setting is missing "
"from the '%s' driver, which is configured under the "
"'%s' alias.", key, provider, alias
)
return False
# If we reached this far, there's a properly configured provider.
# Return it!
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider and driver not in aliases:
continue
# If we reached this far, we have a matching provider, let's see if
# all required configuration keys are present and not None.
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
if log_message is True:
# This provider does not include all necessary keys,
# continue to next one.
log.warning(
"The required '%s' configuration setting is "
"missing from the '%s' driver, which is configured "
"under the '%s' alias.", key, provider, alias
)
skip_provider = True
break
if skip_provider:
continue
# If we reached this far, the provider included all required keys
return provider_details
# If we reached this point, the provider is not configured.
return False
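# Illustrative usage sketch, not part of the original module; the alias and
# required keys are made up:
#
#   ec2_conf = is_provider_configured(opts, 'ec2', required_keys=('id', 'key'))
#   if ec2_conf is False:
#       # no fully configured EC2 provider entry was found
#       ...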
def is_profile_configured(opts, provider, profile_name, vm_=None):
'''
Check if the requested profile contains the minimum required parameters for
a profile.
Required parameters include image and provider for all drivers, while some
drivers also require size keys.
.. versionadded:: 2015.8.0
'''
# Standard dict keys required by all drivers.
required_keys = ['provider']
alias, driver = provider.split(':')
# Most drivers need an image to be specified, but some do not.
non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']
# Most drivers need a size, but some do not.
non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',
'softlayer', 'softlayer_hw', 'vmware', 'vsphere',
'virtualbox', 'libvirt', 'oneandone', 'profitbricks']
provider_key = opts['providers'][alias][driver]
profile_key = opts['providers'][alias][driver]['profiles'][profile_name]
# If cloning on Linode, size and image are not necessary.
# They are obtained from the to-be-cloned VM.
if driver == 'linode' and profile_key.get('clonefrom', False):
non_image_drivers.append('linode')
non_size_drivers.append('linode')
elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):
non_image_drivers.append('gce')
# If cloning on VMware, specifying image is not necessary.
if driver == 'vmware' and 'image' not in list(profile_key.keys()):
non_image_drivers.append('vmware')
if driver not in non_image_drivers:
required_keys.append('image')
if driver == 'vmware':
required_keys.append('datastore')
elif driver in ['linode', 'virtualbox']:
required_keys.append('clonefrom')
elif driver == 'nova':
nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']
if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):
required_keys.extend(nova_image_keys)
if driver not in non_size_drivers:
required_keys.append('size')
# Check if required fields are supplied in the provider config. If they
# are present, remove it from the required_keys list.
for item in list(required_keys):
if item in provider_key:
required_keys.remove(item)
# If a vm_ dict was passed in, use that information to get any other configs
    # that we might have missed thus far, such as an option provided in a map file.
if vm_:
for item in list(required_keys):
if item in vm_:
required_keys.remove(item)
# Check for remaining required parameters in the profile config.
for item in required_keys:
if profile_key.get(item, None) is None:
# There's at least one required configuration item which is not set.
log.error(
"The required '%s' configuration setting is missing from "
"the '%s' profile, which is configured under the '%s' alias.",
item, profile_name, alias
)
return False
return True
def check_driver_dependencies(driver, dependencies):
'''
Check if the driver's dependencies are available.
.. versionadded:: 2015.8.0
driver
The name of the driver.
dependencies
The dictionary of dependencies to check.
'''
ret = True
for key, value in six.iteritems(dependencies):
if value is False:
            log.warning(
                "Missing dependency: '%s'. The %s driver requires "
                "'%s' to be installed.", key, driver, key
)
ret = False
return ret
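# Illustrative usage sketch, not part of the original module; 'HAS_LIBCLOUD'
# is a made-up flag that a driver module would set at import time:
#
#   if not check_driver_dependencies('ec2', {'libcloud': HAS_LIBCLOUD}):
#       # a warning was logged for every dependency whose flag is False
#       ...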
# <---- Salt Cloud Configuration Functions -----------------------------------
def _cache_id(minion_id, cache_file):
'''
Helper function, writes minion id to a cache file.
'''
path = os.path.dirname(cache_file)
try:
if not os.path.isdir(path):
os.makedirs(path)
except OSError as exc:
# Handle race condition where dir is created after os.path.isdir check
if os.path.isdir(path):
pass
else:
log.error('Failed to create dirs to minion_id file: %s', exc)
try:
with salt.utils.files.fopen(cache_file, 'w') as idf:
idf.write(minion_id)
except (IOError, OSError) as exc:
log.error('Could not cache minion ID: %s', exc)
def call_id_function(opts):
'''
Evaluate the function that determines the ID if the 'id_function'
option is set and return the result
'''
if opts.get('id'):
return opts['id']
# Import 'salt.loader' here to avoid a circular dependency
import salt.loader as loader
if isinstance(opts['id_function'], six.string_types):
mod_fun = opts['id_function']
fun_kwargs = {}
elif isinstance(opts['id_function'], dict):
mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
if fun_kwargs is None:
fun_kwargs = {}
else:
log.error('\'id_function\' option is neither a string nor a dictionary')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# split module and function and try loading the module
mod, fun = mod_fun.split('.')
if not opts.get('grains'):
# Get grains for use by the module
opts['grains'] = loader.grains(opts)
try:
id_mod = loader.raw_mod(opts, mod, fun)
if not id_mod:
raise KeyError
# we take whatever the module returns as the minion ID
newid = id_mod[mod_fun](**fun_kwargs)
if not isinstance(newid, six.string_types) or not newid:
log.error(
'Function %s returned value "%s" of type %s instead of string',
mod_fun, newid, type(newid)
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated minion ID from module: %s', mod_fun)
return newid
except TypeError:
log.error(
'Function arguments %s are incorrect for function %s',
fun_kwargs, mod_fun
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
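# Illustrative sketch, not part of the original module: 'id_function' may be
# a plain "module.function" string or a single-entry dict carrying keyword
# arguments. The module/function names below are made up.
#
#   opts['id_function'] = 'network.get_hostname'
#   opts['id_function'] = {'grains.get': {'key': 'fqdn'}}
#
# Either form is split into module and function, loaded through
# salt.loader.raw_mod(), and the returned string becomes the minion id.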
def get_id(opts, cache_minion_id=False):
'''
Guess the id of the minion.
If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
If no minion id is configured, use multiple sources to find a FQDN.
If no FQDN is found you may get an ip address.
Returns two values: the detected ID, and a boolean value noting whether or
not an IP address is being used for the ID.
'''
if opts['root_dir'] is None:
root_dir = salt.syspaths.ROOT_DIR
else:
root_dir = opts['root_dir']
config_dir = salt.syspaths.CONFIG_DIR
if config_dir.startswith(salt.syspaths.ROOT_DIR):
config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1]
# Check for cached minion ID
id_cache = os.path.join(root_dir,
config_dir.lstrip(os.path.sep),
'minion_id')
if opts.get('minion_id_caching', True):
try:
with salt.utils.files.fopen(id_cache) as idf:
name = salt.utils.stringutils.to_unicode(idf.readline().strip())
bname = salt.utils.stringutils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1))
if name and name != 'localhost':
log.debug('Using cached minion ID from %s: %s', id_cache, name)
return name, False
except (IOError, OSError):
pass
if '__role' in opts and opts.get('__role') == 'minion':
log.debug(
'Guessing ID. The id can be explicitly set in %s',
os.path.join(salt.syspaths.CONFIG_DIR, 'minion')
)
if opts.get('id_function'):
newid = call_id_function(opts)
else:
newid = salt.utils.network.generate_minion_id()
if opts.get('minion_id_lowercase'):
newid = newid.lower()
log.debug('Changed minion id %s to lowercase.', newid)
if '__role' in opts and opts.get('__role') == 'minion':
if opts.get('id_function'):
log.debug(
'Found minion id from external function %s: %s',
opts['id_function'], newid
)
else:
log.debug('Found minion id from generate_minion_id(): %s', newid)
if cache_minion_id and opts.get('minion_id_caching', True):
_cache_id(newid, id_cache)
is_ipv4 = salt.utils.network.is_ipv4(newid)
return newid, is_ipv4
def _update_ssl_config(opts):
'''
Resolves string names to integer constant in ssl configuration.
'''
if opts['ssl'] in (None, False):
opts['ssl'] = None
return
if opts['ssl'] is True:
opts['ssl'] = {}
return
import ssl
for key, prefix in (('cert_reqs', 'CERT_'),
('ssl_version', 'PROTOCOL_')):
val = opts['ssl'].get(key)
if val is None:
continue
if not isinstance(val, six.string_types) or not val.startswith(prefix) or not hasattr(ssl, val):
message = 'SSL option \'{0}\' must be set to one of the following values: \'{1}\'.' \
.format(key, '\', \''.join([val for val in dir(ssl) if val.startswith(prefix)]))
log.error(message)
raise salt.exceptions.SaltConfigurationError(message)
opts['ssl'][key] = getattr(ssl, val)
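# Illustrative sketch, not part of the original module: string names in the
# 'ssl' option are resolved to the matching constants of the standard
# library ssl module, e.g.
#
#   {'ssl_version': 'PROTOCOL_TLSv1_2', 'cert_reqs': 'CERT_REQUIRED'}
#   # -> {'ssl_version': ssl.PROTOCOL_TLSv1_2, 'cert_reqs': ssl.CERT_REQUIRED}
#
# Any value that does not start with the expected prefix or does not exist
# in the ssl module raises SaltConfigurationError.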
def _adjust_log_file_override(overrides, default_log_file):
'''
Adjusts the log_file based on the log_dir override
'''
if overrides.get('log_dir'):
# Adjust log_file if a log_dir override is introduced
if overrides.get('log_file'):
if not os.path.isabs(overrides['log_file']):
# Prepend log_dir if log_file is relative
overrides['log_file'] = os.path.join(overrides['log_dir'],
overrides['log_file'])
else:
# Create the log_file override
overrides['log_file'] = \
os.path.join(overrides['log_dir'],
os.path.basename(default_log_file))
def apply_minion_config(overrides=None,
defaults=None,
cache_minion_id=False,
minion_id=None):
'''
Returns minion configurations dict.
'''
if defaults is None:
defaults = DEFAULT_MINION_OPTS
if overrides is None:
overrides = {}
opts = defaults.copy()
opts['__role'] = 'minion'
_adjust_log_file_override(overrides, defaults['log_file'])
if overrides:
opts.update(overrides)
if 'environment' in opts:
if opts['saltenv'] is not None:
log.warning(
'The \'saltenv\' and \'environment\' minion config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom module is
            # referencing __opts__['environment']
opts['environment'] = opts['saltenv']
else:
log.warning(
'The \'environment\' minion config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts['saltenv'] = opts['environment']
for idx, val in enumerate(opts['fileserver_backend']):
if val in ('git', 'hg', 'svn', 'minion'):
new_val = val + 'fs'
log.debug(
'Changed %s to %s in minion opts\' fileserver_backend list',
val, new_val
)
opts['fileserver_backend'][idx] = new_val
opts['__cli'] = salt.utils.stringutils.to_unicode(
os.path.basename(sys.argv[0])
)
# No ID provided. Will getfqdn save us?
using_ip_for_id = False
if not opts.get('id'):
if minion_id:
opts['id'] = minion_id
else:
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=cache_minion_id)
# it does not make sense to append a domain to an IP based id
if not using_ip_for_id and 'append_domain' in opts:
opts['id'] = _append_domain(opts)
for directory in opts.get('append_minionid_config_dirs', []):
if directory in ('pki_dir', 'cachedir', 'extension_modules'):
newdirectory = os.path.join(opts[directory], opts['id'])
opts[directory] = newdirectory
elif directory == 'default_include' and directory in opts:
include_dir = os.path.dirname(opts[directory])
new_include_dir = os.path.join(include_dir,
opts['id'],
os.path.basename(opts[directory]))
opts[directory] = new_include_dir
# pidfile can be in the list of append_minionid_config_dirs, but pidfile
# is the actual path with the filename, not a directory.
if 'pidfile' in opts.get('append_minionid_config_dirs', []):
newpath_list = os.path.split(opts['pidfile'])
opts['pidfile'] = os.path.join(newpath_list[0], 'salt', opts['id'], newpath_list[1])
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
# Enabling open mode requires that the value be set to True, and
# nothing else!
opts['open_mode'] = opts['open_mode'] is True
opts['file_roots'] = _validate_file_roots(opts['file_roots'])
opts['pillar_roots'] = _validate_file_roots(opts['pillar_roots'])
# Make sure ext_mods gets set if it is an untrue value
# (here to catch older bad configs)
opts['extension_modules'] = (
opts.get('extension_modules') or
os.path.join(opts['cachedir'], 'extmods')
)
# Set up the utils_dirs location from the extension_modules location
opts['utils_dirs'] = (
opts.get('utils_dirs') or
[os.path.join(opts['extension_modules'], 'utils')]
)
# Insert all 'utils_dirs' directories to the system path
insert_system_path(opts, opts['utils_dirs'])
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'sock_dir', 'extension_modules', 'pidfile',
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('log_file', 'key_logfile'):
if urlparse(opts.get(config_key, '')).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
# if there is no beacons option yet, add an empty beacons dict
if 'beacons' not in opts:
opts['beacons'] = {}
if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
# Make sure hash_type is lowercase
opts['hash_type'] = opts['hash_type'].lower()
# Check and update TLS/SSL configuration
_update_ssl_config(opts)
_update_discovery_config(opts)
return opts
def _update_discovery_config(opts):
'''
Update discovery config for all instances.
:param opts:
:return:
'''
if opts.get('discovery') not in (None, False):
if opts['discovery'] is True:
opts['discovery'] = {}
discovery_config = {'attempts': 3, 'pause': 5, 'port': 4520, 'match': 'any', 'mapping': {}}
for key in opts['discovery']:
if key not in discovery_config:
raise salt.exceptions.SaltConfigurationError('Unknown discovery option: {0}'.format(key))
if opts.get('__role') != 'minion':
for key in ['attempts', 'pause', 'match']:
del discovery_config[key]
opts['discovery'] = salt.utils.dictupdate.update(discovery_config, opts['discovery'], True, True)
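# Illustrative sketch, not part of the original module: with 'discovery: True'
# in a minion config, the defaults above are applied unchanged, i.e.
#
#   {'attempts': 3, 'pause': 5, 'port': 4520, 'match': 'any', 'mapping': {}}
#
# while any key not present in the defaults raises SaltConfigurationError.
# For non-minion roles the 'attempts', 'pause' and 'match' keys are dropped.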
def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_config_errors=False):
'''
Reads in the master configuration file and sets up default options
This is useful for running the actual master daemon. For running
Master-side client interfaces that need the master opts see
:py:func:`salt.client.client_config`.
'''
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
if not os.environ.get(env_var, None):
# No valid setting was given using the configuration variable.
        # Let's see if SALT_CONFIG_DIR is of any use
salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
if salt_config_dir:
env_config_file_path = os.path.join(salt_config_dir, 'master')
if salt_config_dir and os.path.isfile(env_config_file_path):
# We can get a configuration file using SALT_CONFIG_DIR, let's
# update the environment with this information
os.environ[env_var] = env_config_file_path
overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file'])
default_include = overrides.get('default_include',
defaults['default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False,
exit_on_config_errors=exit_on_config_errors))
overrides.update(include_config(include, path, verbose=True,
exit_on_config_errors=exit_on_config_errors))
opts = apply_master_config(overrides, defaults)
_validate_ssh_minion_opts(opts)
_validate_opts(opts)
# If 'nodegroups:' is uncommented in the master config file, and there are
# no nodegroups defined, opts['nodegroups'] will be None. Fix this by
# reverting this value to the default, as if 'nodegroups:' was commented
# out or not present.
if opts.get('nodegroups') is None:
opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
if salt.utils.data.is_dictlist(opts['nodegroups']):
opts['nodegroups'] = salt.utils.data.repack_dictlist(opts['nodegroups'])
if opts.get('transport') == 'raet' and 'aes' in opts:
opts.pop('aes')
apply_sdb(opts)
return opts
def apply_master_config(overrides=None, defaults=None):
'''
Returns master configurations dict.
'''
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
if overrides is None:
overrides = {}
opts = defaults.copy()
opts['__role'] = 'master'
_adjust_log_file_override(overrides, defaults['log_file'])
if overrides:
opts.update(overrides)
opts['__cli'] = salt.utils.stringutils.to_unicode(
os.path.basename(sys.argv[0])
)
if 'environment' in opts:
if opts['saltenv'] is not None:
log.warning(
'The \'saltenv\' and \'environment\' master config options '
'cannot both be used. Ignoring \'environment\' in favor of '
'\'saltenv\'.',
)
# Set environment to saltenv in case someone's custom runner is
            # referencing __opts__['environment']
opts['environment'] = opts['saltenv']
else:
log.warning(
'The \'environment\' master config option has been renamed '
'to \'saltenv\'. Using %s as the \'saltenv\' config value.',
opts['environment']
)
opts['saltenv'] = opts['environment']
if six.PY2 and 'rest_cherrypy' in opts:
# CherryPy is not unicode-compatible
opts['rest_cherrypy'] = salt.utils.data.encode(opts['rest_cherrypy'])
for idx, val in enumerate(opts['fileserver_backend']):
if val in ('git', 'hg', 'svn', 'minion'):
new_val = val + 'fs'
log.debug(
'Changed %s to %s in master opts\' fileserver_backend list',
val, new_val
)
opts['fileserver_backend'][idx] = new_val
if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')
# Make sure ext_mods gets set if it is an untrue value
# (here to catch older bad configs)
opts['extension_modules'] = (
opts.get('extension_modules') or
os.path.join(opts['cachedir'], 'extmods')
)
# Set up the utils_dirs location from the extension_modules location
opts['utils_dirs'] = (
opts.get('utils_dirs') or
[os.path.join(opts['extension_modules'], 'utils')]
)
# Insert all 'utils_dirs' directories to the system path
insert_system_path(opts, opts['utils_dirs'])
if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
using_ip_for_id = False
append_master = False
if not opts.get('id'):
opts['id'], using_ip_for_id = get_id(
opts,
cache_minion_id=None)
append_master = True
# it does not make sense to append a domain to an IP based id
if not using_ip_for_id and 'append_domain' in opts:
opts['id'] = _append_domain(opts)
if append_master:
opts['id'] += '_master'
# Prepend root_dir to other paths
prepend_root_dirs = [
'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
'autosign_file', 'autoreject_file', 'token_dir', 'syndic_dir',
'sqlite_queue_dir', 'autosign_grains_dir'
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('log_file', 'key_logfile', 'ssh_log_file'):
log_setting = opts.get(config_key, '')
if log_setting is None:
continue
if urlparse(log_setting).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
# Enabling open mode requires that the value be set to True, and
# nothing else!
opts['open_mode'] = opts['open_mode'] is True
opts['auto_accept'] = opts['auto_accept'] is True
opts['file_roots'] = _validate_file_roots(opts['file_roots'])
opts['pillar_roots'] = _validate_file_roots(opts['pillar_roots'])
if opts['file_ignore_regex']:
# If file_ignore_regex was given, make sure it's wrapped in a list.
# Only keep valid regex entries for improved performance later on.
if isinstance(opts['file_ignore_regex'], six.string_types):
ignore_regex = [opts['file_ignore_regex']]
elif isinstance(opts['file_ignore_regex'], list):
ignore_regex = opts['file_ignore_regex']
opts['file_ignore_regex'] = []
for regex in ignore_regex:
try:
# Can't store compiled regex itself in opts (breaks
# serialization)
re.compile(regex)
opts['file_ignore_regex'].append(regex)
except Exception:
log.warning(
'Unable to parse file_ignore_regex. Skipping: %s',
regex
)
if opts['file_ignore_glob']:
# If file_ignore_glob was given, make sure it's wrapped in a list.
if isinstance(opts['file_ignore_glob'], six.string_types):
opts['file_ignore_glob'] = [opts['file_ignore_glob']]
# Let's make sure `worker_threads` does not drop below 3 which has proven
# to make `salt.modules.publish` not work under the test-suite.
if opts['worker_threads'] < 3 and opts.get('peer', None):
log.warning(
"The 'worker_threads' setting in '%s' cannot be lower than "
'3. Resetting it to the default value of 3.', opts['conf_file']
)
opts['worker_threads'] = 3
opts.setdefault('pillar_source_merging_strategy', 'smart')
# Make sure hash_type is lowercase
opts['hash_type'] = opts['hash_type'].lower()
# Check and update TLS/SSL configuration
_update_ssl_config(opts)
_update_discovery_config(opts)
return opts
def client_config(path, env_var='SALT_CLIENT_CONFIG', defaults=None):
'''
Load Master configuration data
Usage:
.. code-block:: python
import salt.config
master_opts = salt.config.client_config('/etc/salt/master')
Returns a dictionary of the Salt Master configuration file with necessary
options needed to communicate with a locally-running Salt Master daemon.
This function searches for client specific configurations and adds them to
the data from the master configuration.
This is useful for master-side operations like
:py:class:`~salt.client.LocalClient`.
'''
if defaults is None:
defaults = DEFAULT_MASTER_OPTS
xdg_dir = salt.utils.xdg.xdg_config_dir()
if os.path.isdir(xdg_dir):
client_config_dir = xdg_dir
saltrc_config_file = 'saltrc'
else:
client_config_dir = os.path.expanduser('~')
saltrc_config_file = '.saltrc'
# Get the token file path from the provided defaults. If not found, specify
# our own, sane, default
opts = {
'token_file': defaults.get(
'token_file',
os.path.join(client_config_dir, 'salt_token')
)
}
# Update options with the master configuration, either from the provided
# path, salt's defaults or provided defaults
opts.update(
master_config(path, defaults=defaults)
)
# Update with the users salt dot file or with the environment variable
saltrc_config = os.path.join(client_config_dir, saltrc_config_file)
opts.update(
load_config(
saltrc_config,
env_var,
saltrc_config
)
)
# Make sure we have a proper and absolute path to the token file
if 'token_file' in opts:
opts['token_file'] = os.path.abspath(
os.path.expanduser(
opts['token_file']
)
)
# If the token file exists, read and store the contained token
if os.path.isfile(opts['token_file']):
# Make sure token is still valid
expire = opts.get('token_expire', 43200)
if os.stat(opts['token_file']).st_mtime + expire > time.mktime(time.localtime()):
with salt.utils.files.fopen(opts['token_file']) as fp_:
opts['token'] = fp_.read().strip()
# On some platforms, like OpenBSD, 0.0.0.0 won't catch a master running on localhost
if opts['interface'] == '0.0.0.0':
opts['interface'] = '127.0.0.1'
# Make sure the master_uri is set
if 'master_uri' not in opts:
opts['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=salt.utils.zeromq.ip_bracket(opts['interface']),
port=opts['ret_port']
)
# Return the client options
_validate_opts(opts)
return opts
def api_config(path):
'''
Read in the Salt Master config file and add additional configs that
need to be stubbed out for salt-api
'''
# Let's grab a copy of salt-api's required defaults
opts = DEFAULT_API_OPTS
# Let's override them with salt's master opts
opts.update(client_config(path, defaults=DEFAULT_MASTER_OPTS))
# Let's set the pidfile and log_file values in opts to api settings
opts.update({
'pidfile': opts.get('api_pidfile', DEFAULT_API_OPTS['api_pidfile']),
'log_file': opts.get('api_logfile', DEFAULT_API_OPTS['api_logfile']),
})
prepend_root_dir(opts, [
'api_pidfile',
'api_logfile',
'log_file',
'pidfile'
])
return opts
def spm_config(path):
'''
Read in the salt master config file and add additional configs that
need to be stubbed out for spm
.. versionadded:: 2015.8.0
'''
# Let's grab a copy of salt's master default opts
defaults = DEFAULT_MASTER_OPTS.copy()
# Let's override them with spm's required defaults
defaults.update(DEFAULT_SPM_OPTS)
overrides = load_config(path, 'SPM_CONFIG', DEFAULT_SPM_OPTS['spm_conf_file'])
default_include = overrides.get('spm_default_include',
defaults['spm_default_include'])
include = overrides.get('include', [])
overrides.update(include_config(default_include, path, verbose=False))
overrides.update(include_config(include, path, verbose=True))
defaults = apply_master_config(overrides, defaults)
defaults = apply_spm_config(overrides, defaults)
return client_config(path, env_var='SPM_CONFIG', defaults=defaults)
def apply_spm_config(overrides, defaults):
'''
Returns the spm configurations dict.
.. versionadded:: 2015.8.1
'''
opts = defaults.copy()
_adjust_log_file_override(overrides, defaults['log_file'])
if overrides:
opts.update(overrides)
# Prepend root_dir to other paths
prepend_root_dirs = [
'formula_path', 'pillar_path', 'reactor_path',
'spm_cache_dir', 'spm_build_dir'
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('spm_logfile',):
log_setting = opts.get(config_key, '')
if log_setting is None:
continue
if urlparse(log_setting).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
return opts
| 37.44355 | 130 | 0.632166 |
fb910efa52bb28c5873820f5eadad5e007efc280 | 661 | py | Python | utils/routers/__init__.py | HeroCTF/ctfd-whale | 4885675b539fbe5630c6cfb0aa3b12177f897a60 | [
"MIT"
] | null | null | null | utils/routers/__init__.py | HeroCTF/ctfd-whale | 4885675b539fbe5630c6cfb0aa3b12177f897a60 | [
"MIT"
] | null | null | null | utils/routers/__init__.py | HeroCTF/ctfd-whale | 4885675b539fbe5630c6cfb0aa3b12177f897a60 | [
"MIT"
] | null | null | null | from CTFd.utils import get_config
from .frp import FrpRouter
from .trp import TrpRouter
_routers = {
"frp": FrpRouter,
"trp": TrpRouter,
}
def instanciate(cls):
return cls()
@instanciate
class Router:
_name = ""
_router = None
def __getattr__(self, name: str):
router_conftype = get_config("whale:router_type", "frp")
if Router._name != router_conftype:
Router._router = _routers[router_conftype]()
Router._name = router_conftype
return getattr(Router._router, name)
@staticmethod
def reset():
Router._name = ""
Router._router = None
__all__ = ["Router"]
| 18.885714 | 64 | 0.633888 |
6da7b1d1d1227b47eacfa58655ae06c939bd099d | 1,406 | py | Python | handler/AbsenteesAdminHandler.py | Videl/absentees-blackboard | 35658c14253340c34ef7dac98322306c7c555df1 | [
"MIT"
] | null | null | null | handler/AbsenteesAdminHandler.py | Videl/absentees-blackboard | 35658c14253340c34ef7dac98322306c7c555df1 | [
"MIT"
] | null | null | null | handler/AbsenteesAdminHandler.py | Videl/absentees-blackboard | 35658c14253340c34ef7dac98322306c7c555df1 | [
"MIT"
] | null | null | null | __author__ = 'Mael Beuget, Pierre Monnin & Thibaut Smith'
from handler.BaseHandler import *
from model.Absentees import *
class AbsenteesAdminHandler(BaseHandler):
def __init__(self, request=None, response=None):
super(AbsenteesAdminHandler, self).__init__()
self.initialize(request, response)
self.page_name = "administration"
def get(self):
if self.is_connected() and get_is_admin_from_id(self.request.cookies.get('user_id').split('|')[0]):
absentees = get_all_absentees()
self.render("administration_absentees.html", absentees=absentees)
else:
self.render("message.html", title="Access forbidden",
text="It seems you're not an administrator nor a connected user")
def post(self):
if self.is_connected() and get_is_admin_from_id(self.request.cookies.get('user_id').split('|')[0]):
date = self.request.get('date')
class_title = self.request.get('class_title')
student_name = self.request.get('student_name')
absentees = get_absentees_from_criteria(date, class_title, student_name)
self.render("administration_absentees.html", absentees=absentees)
else:
self.render("message.html", title="Access forbidden",
text="It seems you're not an administrator nor a connected user") | 41.352941 | 107 | 0.657895 |
d1aad3ccc3909d316a723c7c6daf1e807b524b34 | 1,540 | py | Python | typewise_alert.py | clean-code-craft-tcq-2/coverage-in-py-Venkatesha-Iyengar | 2d5aab207653139af4e791d42f841d91651ccc37 | [
"MIT"
] | null | null | null | typewise_alert.py | clean-code-craft-tcq-2/coverage-in-py-Venkatesha-Iyengar | 2d5aab207653139af4e791d42f841d91651ccc37 | [
"MIT"
] | null | null | null | typewise_alert.py | clean-code-craft-tcq-2/coverage-in-py-Venkatesha-Iyengar | 2d5aab207653139af4e791d42f841d91651ccc37 | [
"MIT"
] | null | null | null | coolingTypes ={
'PASSIVE_COOLING' : [0, 35],
'HI_ACTIVE_COOLING' : [0, 45],
'MED_ACTIVE_COOLING' : [0, 40],
}
email_content = {
'TOO_LOW' : 'Hi, the temperature is too low',
'TOO_HIGH' : 'Hi, the temperature is too high',
}
def infer_breach(value, lowerLimit, upperLimit):
if value < lowerLimit:
return 'TOO_LOW'
if value > upperLimit:
return 'TOO_HIGH'
return 'NORMAL'
def define_temperature_breach_limits(coolingType):
return (coolingTypes.get(coolingType, [0,0]))
def classify_temperature_breach(coolingType, temperatureInC):
lowerLimit, upperLimit = define_temperature_breach_limits(coolingType)
return infer_breach(temperatureInC, lowerLimit, upperLimit)
def alertTargets(alertTarget, breachType):
return {
'TO_CONTROLLER': lambda: send_to_controller(breachType),
'TO_EMAIL': lambda: send_to_email(breachType),
}.get(alertTarget,lambda: 'Not Valid')()
def check_and_alert(alertTarget, batteryChar, temperatureInC):
flag_alerted = False
breachType =\
classify_temperature_breach(batteryChar['coolingType'], temperatureInC)
flag_alerted = alertTargets(alertTarget, breachType)
return flag_alerted
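# Illustrative usage sketch, not part of the original module; the battery
# characteristics dict is made up:
#
#   check_and_alert('TO_EMAIL', {'coolingType': 'PASSIVE_COOLING'}, 50)
#
# 50C exceeds the PASSIVE_COOLING upper limit of 35, so the breach is
# classified as 'TOO_HIGH', the e-mail body is printed and True is returned.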
def send_to_controller(breachType):
header = 0xfeed
print(f'{header}, {breachType}')
return True
def email_body(breachType, recepient):
return f'To: {recepient} \n{email_content.get(breachType, "Invalid Breach Type")}'
def send_to_email(breachType):
recepient = "a.b@c.com"
print(email_body(breachType, recepient))
return True
| 30.196078 | 86 | 0.731169 |
803451d45d927f4796156572418b6cc7b9046c32 | 9,686 | py | Python | components/auto_annotation.py | masc-it/CVLAB | 9c6e25a800b532a440c660e10ea001c699da68f1 | [
"MIT"
] | null | null | null | components/auto_annotation.py | masc-it/CVLAB | 9c6e25a800b532a440c660e10ea001c699da68f1 | [
"MIT"
] | null | null | null | components/auto_annotation.py | masc-it/CVLAB | 9c6e25a800b532a440c660e10ea001c699da68f1 | [
"MIT"
] | null | null | null | from .data import *
import imgui
from .projects import Project
from yolov5 import detect
import os
import threading
from . import annotation
from .file_selector import file_selector
from custom_utils import save_img_annotations
def start_inference(frame_data, exp: Experiment):
predictions = detect.run(weights=exp.model_path, imgsz=[1280, 1280], conf_thres=exp.threshold_conf, iou_thres=exp.threshold_iou, save_conf=True,
exist_ok=True, save_txt=True, source=exp.data_path, project=exp.data_path + "/exp", name="predictions",)
frame_data["imgs_to_render"]["inference_preview"]["scale"] = 1
for _, (bboxes, img) in enumerate(predictions):
frame_data["imgs_to_render"]["inference_preview"]["name"] = img
name_ext = os.path.basename(img).rsplit('.')
img_info = ImageInfo(name_ext[0], name_ext[1], CollectionInfo(exp.exp_name, exp.exp_name, exp.data_path ))
# exp.imgs.append(img_info)
for bbox in bboxes:
bbox : BBox = BBox(bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"], bbox["class"], bbox["conf"])
img_info.add_bbox(bbox)
frame_data["imgs_to_render"]["inference_preview"]["img_info"] = img_info
exp.add_image(img_info)
save_img_annotations(img_info)
# print(img)
# frame_data["img"] = img
exp.progress += 0.1
if not exp.is_running:
break
frame_data["imgs_to_render"]["inference_preview"]["img_info"] = None
frame_data["imgs_to_render"]["inference_preview"]["texture"] = None
exp.is_running = False
exp.progress = 0
frame_data["done"] = True
frame_data["is_running"] = False
def auto_ann_content(frame_data):
_files_list(frame_data, "inference_preview")
if frame_data["is_running"]:
imgui.begin_child("i_progress")
imgui.progress_bar(
fraction=frame_data['experiment'].progress * 10 / frame_data["num_imgs"] ,
size=(-1, 0.0),
overlay=f"{int(frame_data['experiment'].progress * 10)}/{frame_data['num_imgs']}"
)
annotation._annotation_screen(frame_data, "inference_preview", allow_edit=False)
imgui.end_child()
else:
annotation._annotation_screen(frame_data, "inference_preview", allow_edit=False)
def header_auto_annotation(frame_data):
project : Project = frame_data["project"]
""" if frame_data["is_running"]:
imgui.internal.push_item_flag(imgui.internal.ITEM_DISABLED, True)
imgui.push_style_var(imgui.STYLE_ALPHA, imgui.get_style().alpha * 0.5) """
if imgui.button("New session"):
imgui.open_popup("Auto-annotation session")
imgui.set_next_window_size(700, 350)
frame_data["is_dialog_open"] = True
frame_data["experiment"] = Experiment("D:/Projects/python/pdf-toolbox/pdf_toolbox/backend/data/pdf_best_multi_nano.pt", "D:\\Projects\\python\\semantics\\project\\test_final\\imgs_exp2", "")
if imgui.begin_popup_modal("Auto-annotation session", flags=imgui.WINDOW_NO_RESIZE )[0]:
imgui.begin_child(label="auto_ann_content", height=250, border=False, )
imgui.columns(1, "header_2", False)
model_btn_title = "Choose model path..."
if imgui.button(model_btn_title):
imgui.open_popup("Choose model path...")
model_file = file_selector("Choose model path...", False)
if model_file is not None:
frame_data["experiment"].model_path = model_file
if frame_data["experiment"].model_path != "":
imgui.same_line()
imgui.text(frame_data["experiment"].model_path)
images_btn_title = "Choose images directory..."
if imgui.button(images_btn_title):
imgui.open_popup(images_btn_title)
images_path = file_selector(images_btn_title, True)
if images_path is not None:
frame_data["experiment"].data_path = images_path
if frame_data["experiment"].data_path != "":
imgui.same_line()
imgui.text(frame_data["experiment"].data_path)
_, frame_data["experiment"].exp_name = imgui.input_text("Name",frame_data["experiment"].exp_name, 128)
imgui.separator()
imgui.push_item_width(520)
_, frame_data["experiment"].threshold_conf = imgui.slider_float(
label="Confidence threshold",
value=frame_data["experiment"].threshold_conf,
min_value=0.0,
max_value=1.0,
format="%.2f",
)
_, frame_data["experiment"].threshold_iou = imgui.slider_float(
label="IoU threshold",
value=frame_data["experiment"].threshold_iou,
min_value=0.0,
max_value=1.0,
format="%.2f",
)
imgui.pop_item_width()
imgui.separator()
imgui.end_child()
if imgui.button("Start annotation"):
frame_data["experiment"].update_info()
frame_data["experiment"].is_running = True
frame_data["is_running"] = True
frame_data["experiment"].progress = 0
frame_data["done"] = False
frame_data["num_imgs"] = frame_data["experiment"].num_imgs
frame_data["project"].save_experiment(frame_data["experiment"])
thread = threading.Thread(target=start_inference, args=(frame_data, frame_data["experiment"]))
thread.start()
imgui.close_current_popup()
frame_data["is_dialog_open"] = False
imgui.same_line()
if imgui.button("Close"):
imgui.close_current_popup()
frame_data["is_dialog_open"] = False
imgui.end_popup()
""" if frame_data["is_running"]:
imgui.internal.pop_item_flag()
imgui.pop_style_var() """
imgui.columns(1)
if frame_data["is_running"]:
start_clicked = imgui.button("Stop analysis")
if start_clicked:
if frame_data["is_running"]:
frame_data["is_running"] = False
imgui.same_line()
scale_changed, frame_data["img_scale"] = imgui.slider_float(
label="Zoom",
value=frame_data["img_scale"],
min_value=0.5,
max_value=2.0,
format="%.1f",
)
if scale_changed:
frame_data["scale_changed"] = True
def _files_list(frame_data, img_render_id):
project : Project = frame_data["project"]
experiments : dict[str, Experiment] = project.experiments
img_data = frame_data["imgs_to_render"][img_render_id]
# add 20 more (scrollbar)
frame_data["x_offset"] = int(frame_data["viewport"][0] / 5) + 20
imgui.begin_child(label="files_list", width=frame_data["x_offset"] - 20, height=-1, border=False, )
for exp_id in experiments:
exp = experiments[exp_id]
if imgui.tree_node(exp.exp_name):
for i, img_info in enumerate(exp.imgs):
# img_info = project.imgs[k]
name = img_info.name
clicked, _ = imgui.selectable(
label=name, selected=(frame_data["selected_file"]["idx"] == i and frame_data["selected_file"]["collection"] == exp_id)
)
if clicked or frame_data["scale_changed"]:
img_data["scale"] = frame_data["img_scale"]
if clicked:
frame_data["scale_changed"] = True
base_p = name
img_data["name"] = name
img_data["img_info"] = img_info
frame_data["selected_file"]["collection"] = exp_id
frame_data["selected_file"]["idx"] = i
frame_data["selected_file"]["name"] = base_p
if frame_data["scale_changed"]:
frame_data["scale_changed"] = False
img_data["img_info"].change_scale(frame_data["img_scale"])
if frame_data["imgs_info"].get(frame_data["selected_file"]["name"]) is None:
frame_data["imgs_info"][frame_data["selected_file"]["name"]] = {}
frame_data["imgs_info"][frame_data["selected_file"]["name"]]["orig_size"] = [img_data["img_info"].w, img_data["img_info"].h]
frame_data["imgs_info"][frame_data["selected_file"]["name"]]["scaled_size"] = [img_data["img_info"].scaled_w, img_data["img_info"].scaled_h]
imgui.tree_pop()
imgui.end_child()
imgui.same_line(position=frame_data["x_offset"])
def inference_progress(frame_data):
img_data = frame_data["imgs_to_render"]["inference_preview"]
if frame_data["is_running"]:
imgui.columns(3,"progr", False)
imgui.next_column()
imgui.progress_bar(
fraction=frame_data['experiment'].progress * 10 / frame_data["num_imgs"] ,
size=(-1, 0.0),
overlay=f"{int(frame_data['experiment'].progress * 10)}/{frame_data['num_imgs']}"
)
imgui.columns(1)
imgui.spacing()
if img_data["texture"] is not None:
imgui.same_line((frame_data["viewport"][0] / 2) - (img_data["width"] / 2))
imgui.image(img_data["texture"], img_data["width"], img_data["height"])
| 40.190871 | 198 | 0.591472 |
4429a246e84e56a0e1d10afb43fed87a52cb4a97 | 1,121 | py | Python | tests/api/test_cargo_event_entity.py | V0RT3X4/python-sdk | 4cffae83b90a58a56f1a534057fa1ca1c8671e05 | [
"Apache-2.0"
] | 9 | 2019-11-13T17:14:55.000Z | 2019-11-18T16:06:13.000Z | tests/api/test_cargo_event_entity.py | VorTECHsa/python-sdk | d85aabd8d9843e4d04d857360492bea002c2b24b | [
"Apache-2.0"
] | 114 | 2020-01-08T11:08:24.000Z | 2022-03-30T16:42:23.000Z | tests/api/test_cargo_event_entity.py | V0RT3X4/python-sdk | 4cffae83b90a58a56f1a534057fa1ca1c8671e05 | [
"Apache-2.0"
] | 6 | 2020-05-28T00:09:02.000Z | 2022-03-14T03:52:44.000Z | from unittest import TestCase
import jsons
from vortexasdk.api.cargo_movement import CargoEvent
from vortexasdk.api.geography import GeographyEntity
class TestCargoEventEntity(TestCase):
def test_serialize(self):
with open("tests/api/examples/cargo_event_entity1.json", "r") as f:
serialized = f.read()
deserialized = jsons.loads(serialized, CargoEvent)
expected = CargoEvent(
event_type="cargo_port_unload_event",
location=[
GeographyEntity(
id="2aaad41b89dfad19e5668918018ae02695d7710bcbe5f2dc689234e8da492de3",
layer="country",
label="United Kingdom",
source="model",
probability=1,
)
],
probability=1,
pos=[-0.256674902984994, 53.74191566386998],
start_timestamp="2019-10-24T13:16:43+0000",
end_timestamp="2019-10-25T00:40:46+0000",
)
assert expected == deserialized
| 33.969697 | 94 | 0.557538 |
1da179f81752546f4f5510f24199b39b4efa6c54 | 5,436 | py | Python | clarifai/client/mime_util.py | camielv/conscious-bugs | 75ad8b3a64d7cd1df3deac6b18f5b535cf2d2704 | [
"BSD-3-Clause"
] | null | null | null | clarifai/client/mime_util.py | camielv/conscious-bugs | 75ad8b3a64d7cd1df3deac6b18f5b535cf2d2704 | [
"BSD-3-Clause"
] | null | null | null | clarifai/client/mime_util.py | camielv/conscious-bugs | 75ad8b3a64d7cd1df3deac6b18f5b535cf2d2704 | [
"BSD-3-Clause"
] | 1 | 2019-09-05T10:56:43.000Z | 2019-09-05T10:56:43.000Z | import sys
import urllib
from email.encoders import encode_noop
from email.message import Message
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from uuid import uuid4
if sys.version_info >= (3,0):
import urllib.request as urllib2
from urllib.parse import urlparse
from urllib.parse import quote
def iteritems(d):
return iter(d.items())
else:
import urllib2
from urlparse import urlparse
from urllib import quote
def iteritems(d):
return d.iteritems()
class RequestWithMethod(urllib2.Request):
"""Extend urllib2.Request to support methods beyond GET and POST."""
def __init__(self, url, method, data=None, headers={},
origin_req_host=None, unverifiable=False):
self.url = url
self._method = method
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
def __str__(self):
return '%s %s' % (self.get_method(), self.url)
def post_data_multipart(url, media=[], form_data={}, headers={}):
"""POST a multipart MIME request with encoded media.
Args:
url: where to send the request.
media: list of (encoded_data, filename) pairs.
form_data: dict of API params.
headers: dict of extra HTTP headers to send with the request.
"""
message = multipart_form_message(media, form_data)
response = post_multipart_request(url, message, headers=headers)
return response
def parse_url(url):
"""Return a host, port, path tuple from a url."""
parsed_url = urlparse(url)
port = parsed_url.port or 80
if url.startswith('https'):
port = 443
return parsed_url.hostname, port, parsed_url.path
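# Illustrative sketch, not part of the original module; the URL is made up:
#
#   parse_url('https://api.example.com/v1/tag')
#   # -> ('api.example.com', 443, '/v1/tag')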
def post_multipart_request(url, multipart_message, headers={}):
data, headers = message_as_post_data(multipart_message, headers)
req = RequestWithMethod(url, 'POST', data, headers)
f = urllib2.urlopen(req)
response = f.read()
f.close()
return response
def crlf_mixed_join(lines):
""" This handles the mix of 'str' and 'unicode' in the data,
encode 'unicode' lines into 'utf-8' so the lines will be joinable
otherwise, the non-unicode lines will be auto converted into unicode
and triggers exception because the MIME data is not unicode convertible
Also, Python3 makes this even more complicated.
"""
# set default encoding to 'utf-8'
encoding = 'utf-8'
post_data = bytearray()
idx = 0
for line in lines:
if sys.version_info < (3,0):
if isinstance(line, unicode):
line = line.encode(encoding)
# turn to bytearray
line_bytes = bytearray(line)
if sys.version_info >= (3,0):
if isinstance(line, str):
line_bytes = bytearray(line, encoding)
else:
line_bytes = bytearray(line)
if idx > 0:
post_data.extend(b'\r\n')
post_data.extend(line_bytes)
idx += 1
return post_data
def form_data_media(encoded_data, filename, field_name='encoded_data', headers={}):
"""From raw encoded media return a MIME part for POSTing as form data."""
message = MIMEApplication(encoded_data, 'application/octet-stream', encode_noop, **headers)
disposition_headers = {
'name': '%s' % field_name,
'filename': quote(filename.encode('utf-8')),
}
message.add_header('Content-Disposition', 'form-data', **disposition_headers)
# Django seems fussy and doesn't like the MIME-Version header in multipart POSTs.
del message['MIME-Version']
return message
def message_as_post_data(message, headers):
"""Return a string suitable for using as POST data, from a multipart MIME message."""
# The built-in mail generator outputs broken POST data for several reasons:
# * It breaks long header lines, and django doesn't like this. Can use Generator.
# * It uses newlines, not CRLF. There seems to be no easy fix in 2.7:
# http://stackoverflow.com/questions/3086860/how-do-i-generate-a-multipart-mime-message-with-correct-crlf-in-python
# * It produces the outermost multipart MIME headers, which would need to get stripped off
# as form data because the HTTP headers are used instead.
# So just generate what we need directly.
assert message.is_multipart()
# Simple way to get a boundary. urllib3 uses this approach.
boundary = uuid4().hex
lines = []
for part in message.get_payload():
lines.append('--' + boundary)
for k, v in part.items():
lines.append('%s: %s' % (k, v))
lines.append('')
data = part.get_payload(decode=True)
lines.append(data)
lines.append('--%s--' % boundary)
post_data = crlf_mixed_join(lines)
headers['Content-Length'] = str(len(post_data))
headers['Content-Type'] = 'multipart/form-data; boundary=%s' % boundary
return post_data, headers
def multipart_form_message(media, form_data={}):
"""Return a MIMEMultipart message to upload encoded media via an HTTP form POST request.
Args:
media: a list of (encoded_data, filename) tuples.
form_data: dict of name, value form fields.
"""
message = MIMEMultipart('form-data', None)
if form_data:
for (name, val) in iteritems(form_data):
part = Message()
part.add_header('Content-Disposition', 'form-data', name=name)
part.set_payload(val)
message.attach(part)
for im, filename in media:
message.attach(form_data_media(im, filename))
return message
| 33.146341 | 119 | 0.706034 |
a76e69af9d605017892b61ba8b3f82d97ff4ae85 | 2,541 | py | Python | src/dataset/get_songs.py | alexameen/artist-lyric-gen | 0372ec2e9f06783f7eca3490e4199c2fe4fc3044 | [
"MIT"
] | null | null | null | src/dataset/get_songs.py | alexameen/artist-lyric-gen | 0372ec2e9f06783f7eca3490e4199c2fe4fc3044 | [
"MIT"
] | null | null | null | src/dataset/get_songs.py | alexameen/artist-lyric-gen | 0372ec2e9f06783f7eca3490e4199c2fe4fc3044 | [
"MIT"
] | 1 | 2021-01-01T07:38:59.000Z | 2021-01-01T07:38:59.000Z | import argparse
import lyricsgenius
import pandas as pd
import time
import sys
from dataset_utils import loop_and_process, name_to_file_name, read_list_from_file
from genius import GENIUS_ACCESS_TOKEN
raw_songs_dir = 'RAW_SONGS_DONT_DELETE'
artist_song_split_token = ' | '
artist_lyric_dir = 'raw_artist_lyrics'
def instantiate_genius():
genius = lyricsgenius.Genius(GENIUS_ACCESS_TOKEN, timeout=30)
genius.excluded_terms = ["Remix", "Live", "Intro", "Outro", "Freestyle", "Demo", "Interlude", "Snippet", "Excerpts", "Medley", "MTV", "Radio", "Edit", "Skit", "Discography"]
return genius
def get_songs(name=None, csv=None):
artists = pd.DataFrame([], columns=['Artist'])
if csv is not None:
print("\n Getting lyrics for all artists in {}".format(csv))
with open(csv) as openfile:
artists = openfile.readlines()
artists = [artist.strip() for artist in artists]
elif name is not None:
print("\n Getting lyrics for {}".format(name))
        artists = [name]  # keep a plain list (not a DataFrame) so the removal logic below works
else:
print("No Input Artists")
while len(artists) > 0:
try:
genius = instantiate_genius()
# functions
def process_artist(name, bar):
artist = genius.search_artist(name)
songs = artist.songs
def process_song(song, bar):
return {
'title': song.title,
'artist': song.artist,
'lyrics': song.lyrics,
'featured_artists': [a['name'] for a in song.featured_artists]
}
def get_song_name(song):
return song.artist + artist_song_split_token + song.title
loop_and_process(songs, process_song, "Song", get_song_name, raw_songs_dir)
return None
def get_artist_name(name):
return name
loop_and_process(
artists,
process_artist,
"Artist",
get_artist_name,
artist_lyric_dir,
)
        except Exception as e:
            print(e)
finally:
completed_artists = read_list_from_file("{}/{}".format(artist_lyric_dir, "_LIST"))
for artist in completed_artists:
if artist in artists:
artists.remove(artist)
if __name__ == "__main__":
get_songs(csv='get_artists.csv')
| 36.3 | 177 | 0.573003 |
7d469c9c6b2178642940433433e6e5aa80e18372 | 1,049 | py | Python | examples/new_framework_test.py | abs428/mltrace | 750b4d69e4a665a3d85ceeff61a1b6c0614feb77 | [
"Apache-2.0"
] | 328 | 2021-04-26T17:22:12.000Z | 2022-03-30T08:52:33.000Z | examples/new_framework_test.py | abs428/mltrace | 750b4d69e4a665a3d85ceeff61a1b6c0614feb77 | [
"Apache-2.0"
] | 94 | 2021-04-14T19:39:51.000Z | 2022-03-26T00:43:46.000Z | examples/new_framework_test.py | abs428/mltrace | 750b4d69e4a665a3d85ceeff61a1b6c0614feb77 | [
"Apache-2.0"
] | 20 | 2021-05-10T15:24:27.000Z | 2022-03-30T00:14:27.000Z | """
examples/new_framework_test.py

This file contains one component, a function that generates some fake
data, and runs that component once with a fake input, so that the run
and its output can be traced in the UI for the component defined below.
"""
from examples.full_pipeline_example.components import PreprocessingComponent
import pandas as pd
import numpy as np
import random
import string
_identifier = "".join(random.choice(string.ascii_lowercase) for i in range(10))
c = PreprocessingComponent("aditi")
@c.run(input_vars=["type", "n"], output_vars=["testOutput"])
def gen_fake_data(
type: str,
n: int = 1000,
):
df = pd.DataFrame(
np.random.normal(1.0, 1.0, n)
if type == "normal"
else np.random.wald(1.0, 1.0, n),
columns=["rando"],
)
testOutput = "hello world!"
print(testOutput)
return testOutput
if __name__ == "__main__":
# Run the tiny function with some fake inputs and outputs
gen_fake_data("wald")
| 24.395349 | 79 | 0.696854 |
e1a4119c728ba16b6a1af52d892680ef56171dfb | 24,402 | py | Python | resolwe/flow/models/utils.py | JureZmrzlikar/resolwe | 2c967b5fa06b6b7daeee88b3fca4cd19d10d99c3 | [
"Apache-2.0"
] | null | null | null | resolwe/flow/models/utils.py | JureZmrzlikar/resolwe | 2c967b5fa06b6b7daeee88b3fca4cd19d10d99c3 | [
"Apache-2.0"
] | null | null | null | resolwe/flow/models/utils.py | JureZmrzlikar/resolwe | 2c967b5fa06b6b7daeee88b3fca4cd19d10d99c3 | [
"Apache-2.0"
] | null | null | null | """Resolwe models utils."""
import copy
import json
import os
import re
import jsonschema
from django.contrib.staticfiles import finders
from django.core.exceptions import ValidationError
from resolwe.flow.utils import dict_dot, iterate_dict, iterate_fields, iterate_schema
class DirtyError(ValidationError):
"""Error raised when required fields missing."""
def validation_schema(name):
"""Return json schema for json validation."""
schemas = {
"processor": "processSchema.json",
"descriptor": "descriptorSchema.json",
"field": "fieldSchema.json",
"type": "typeSchema.json",
}
if name not in schemas:
raise ValueError()
field_schema_file = finders.find("flow/{}".format(schemas["field"]), all=True)[0]
with open(field_schema_file, "r") as fn:
field_schema = fn.read()
if name == "field":
return json.loads(field_schema.replace("{{PARENT}}", ""))
schema_file = finders.find("flow/{}".format(schemas[name]), all=True)[0]
with open(schema_file, "r") as fn:
schema = fn.read()
return json.loads(
schema.replace("{{FIELD}}", field_schema).replace("{{PARENT}}", "/field")
)
TYPE_SCHEMA = validation_schema("type")
def validate_schema(
instance, schema, test_required=True, data_location=None, skip_missing_data=False
):
"""Check if DictField values are consistent with our data types.
Perform basic JSON schema validation and our custom validations:
* check that required fields are given (if `test_required` is set
to ``True``)
* check if ``basic:file:`` and ``list:basic:file`` fields match
regex given in schema (only if ``validate_regex`` is defined in
      schema for corresponding fields) and exist (only if
``data_location`` is given)
* check if directories referenced in ``basic:dir:`` and
``list:basic:dir``fields exist (only if ``data_location`` is
given)
* check that referenced ``Data`` objects (in ``data:<data_type>``
and ``list:data:<data_type>`` fields) exists and are of type
``<data_type>``
* check that referenced ``Storage`` objects (in ``basic:json``
fields) exists
:param list instance: Instance to be validated
:param list schema: Schema for validation
:param bool test_required: Flag for testing if all required fields
        are present. It is useful if validation is run before the ``Data``
        object is finished and some fields are still missing
        (default: ``True``)
:param :class:`~resolwe.flow.models.data.DataLocation` data_location:
data location used for checking if files and directories exist
(default: ``None``)
:param bool skip_missing_data: Don't raise an error if referenced
``Data`` object does not exist
:rtype: None
:raises ValidationError: if ``instance`` doesn't match schema
defined in ``schema``
"""
from .storage import Storage # Prevent circular import.
path_prefix = None
if data_location:
path_prefix = data_location.get_path()
def validate_refs(field):
"""Validate reference paths."""
for ref_filename in field.get("refs", []):
ref_path = os.path.join(path_prefix, ref_filename)
if not os.path.exists(ref_path):
raise ValidationError(
"Path referenced in `refs` ({}) does not exist.".format(ref_path)
)
if not (os.path.isfile(ref_path) or os.path.isdir(ref_path)):
raise ValidationError(
"Path referenced in `refs` ({}) is neither a file or directory.".format(
ref_path
)
)
def validate_file(field, regex):
"""Validate file name (and check that it exists)."""
filename = field["file"]
if regex and not re.search(regex, filename):
raise ValidationError(
"File name {} does not match regex {}".format(filename, regex)
)
if path_prefix:
path = os.path.join(path_prefix, filename)
if not os.path.exists(path):
raise ValidationError(
"Referenced path ({}) does not exist.".format(path)
)
if not os.path.isfile(path):
raise ValidationError(
"Referenced path ({}) is not a file.".format(path)
)
validate_refs(field)
def validate_dir(field):
"""Check that dirs and referenced files exists."""
dirname = field["dir"]
if path_prefix:
path = os.path.join(path_prefix, dirname)
if not os.path.exists(path):
raise ValidationError(
"Referenced path ({}) does not exist.".format(path)
)
if not os.path.isdir(path):
raise ValidationError(
"Referenced path ({}) is not a directory.".format(path)
)
validate_refs(field)
def validate_data(data_pk, type_):
"""Check that `Data` objects exist and is of right type."""
from .data import Data # prevent circular import
data_qs = Data.objects.filter(pk=data_pk).values("process__type")
if not data_qs.exists():
if skip_missing_data:
return
raise ValidationError(
"Referenced `Data` object does not exist (id:{})".format(data_pk)
)
data = data_qs.first()
if not data["process__type"].startswith(type_):
raise ValidationError(
"Data object of type `{}` is required, but type `{}` is given. "
"(id:{})".format(type_, data["process__type"], data_pk)
)
def validate_range(value, interval, name):
"""Check that given value is inside the specified range."""
if not interval:
return
if value < interval[0] or value > interval[1]:
raise ValidationError(
"Value of field '{}' is out of range. It should be between {} and {}.".format(
name, interval[0], interval[1]
)
)
is_dirty = False
dirty_fields = []
for _schema, _fields, _ in iterate_schema(instance, schema):
name = _schema["name"]
is_required = _schema.get("required", True)
if test_required and is_required and name not in _fields:
is_dirty = True
dirty_fields.append(name)
if name in _fields:
field = _fields[name]
type_ = _schema.get("type", "")
# Treat None as if the field is missing.
if not is_required and field is None:
continue
try:
jsonschema.validate([{"type": type_, "value": field}], TYPE_SCHEMA)
except jsonschema.exceptions.ValidationError as ex:
raise ValidationError(ex.message)
choices = [choice["value"] for choice in _schema.get("choices", [])]
allow_custom_choice = _schema.get("allow_custom_choice", False)
if choices and not allow_custom_choice and field not in choices:
raise ValidationError(
"Value of field '{}' must match one of predefined choices. "
"Current value: {}".format(name, field)
)
if type_ == "basic:file:":
validate_file(field, _schema.get("validate_regex"))
elif type_ == "list:basic:file:":
for obj in field:
validate_file(obj, _schema.get("validate_regex"))
elif type_ == "basic:dir:":
validate_dir(field)
elif type_ == "list:basic:dir:":
for obj in field:
validate_dir(obj)
elif (
type_ == "basic:json:" and not Storage.objects.filter(pk=field).exists()
):
raise ValidationError(
"Referenced `Storage` object does not exist (id:{})".format(field)
)
elif type_.startswith("data:"):
validate_data(field, type_)
elif type_.startswith("list:data:"):
for data_id in field:
validate_data(data_id, type_[5:]) # remove `list:` from type
elif type_ == "basic:integer:" or type_ == "basic:decimal:":
validate_range(field, _schema.get("range"), name)
elif type_ == "list:basic:integer:" or type_ == "list:basic:decimal:":
for obj in field:
validate_range(obj, _schema.get("range"), name)
try:
# Check that schema definitions exist for all fields
for _, _ in iterate_fields(instance, schema):
pass
except KeyError as ex:
raise ValidationError(str(ex))
if is_dirty:
dirty_fields = ['"{}"'.format(field) for field in dirty_fields]
raise DirtyError(
"Required fields {} not given.".format(", ".join(dirty_fields))
)
def _hydrate_values(output, output_schema, data):
"""Hydrate basic:file and basic:json values.
Find fields with basic:file type and assign a full path to the file.
Find fields with basic:json type and assign a JSON object from storage.
"""
def hydrate_path(file_name):
"""Hydrate file paths."""
from resolwe.flow.managers import manager
class HydratedPath(str):
"""String wrapper, which also stores the original filename."""
__slots__ = ("data_id", "file_name")
def __new__(cls, value=""):
"""Initialize hydrated path."""
hydrated = str.__new__(cls, value)
hydrated.data_id = data.id
hydrated.file_name = file_name
return hydrated
return HydratedPath(manager.get_executor().resolve_data_path(data, file_name))
def hydrate_storage(storage_id):
"""Hydrate storage fields."""
from .storage import LazyStorageJSON # Prevent circular import.
return LazyStorageJSON(pk=storage_id)
for field_schema, fields in iterate_fields(output, output_schema):
name = field_schema["name"]
value = fields[name]
if "type" in field_schema:
if field_schema["type"].startswith("basic:file:"):
value["file"] = hydrate_path(value["file"])
value["refs"] = [hydrate_path(ref) for ref in value.get("refs", [])]
elif field_schema["type"].startswith("list:basic:file:"):
for obj in value:
obj["file"] = hydrate_path(obj["file"])
obj["refs"] = [hydrate_path(ref) for ref in obj.get("refs", [])]
if field_schema["type"].startswith("basic:dir:"):
value["dir"] = hydrate_path(value["dir"])
value["refs"] = [hydrate_path(ref) for ref in value.get("refs", [])]
elif field_schema["type"].startswith("list:basic:dir:"):
for obj in value:
obj["dir"] = hydrate_path(obj["dir"])
obj["refs"] = [hydrate_path(ref) for ref in obj.get("refs", [])]
elif field_schema["type"].startswith("basic:json:"):
fields[name] = hydrate_storage(value)
elif field_schema["type"].startswith("list:basic:json:"):
fields[name] = [hydrate_storage(storage_id) for storage_id in value]
def hydrate_input_references(input_, input_schema, hydrate_values=True):
"""Hydrate ``input_`` with linked data.
Find fields with complex data:<...> types in ``input_``.
Assign an output of corresponding data object to those fields.
"""
from .data import Data # prevent circular import
for field_schema, fields in iterate_fields(input_, input_schema):
name = field_schema["name"]
value = fields[name]
if "type" in field_schema:
if field_schema["type"].startswith("data:"):
if value is None:
continue
try:
data = Data.objects.get(id=value)
except Data.DoesNotExist:
fields[name] = {}
continue
output = copy.deepcopy(data.output)
hydrate_input_references(output, data.process.output_schema)
if hydrate_values:
_hydrate_values(output, data.process.output_schema, data)
output["__id"] = data.id
output["__type"] = data.process.type
output["__descriptor"] = data.descriptor
output["__name"] = getattr(data, "name", None)
output["__entity_name"] = getattr(data.entity, "name", None)
output["__output_schema"] = data.process.output_schema
fields[name] = output
elif field_schema["type"].startswith("list:data:"):
outputs = []
for val in value:
if val is None:
continue
try:
data = Data.objects.get(id=val)
except Data.DoesNotExist:
outputs.append({})
continue
output = copy.deepcopy(data.output)
hydrate_input_references(output, data.process.output_schema)
if hydrate_values:
_hydrate_values(output, data.process.output_schema, data)
output["__id"] = data.id
output["__type"] = data.process.type
output["__descriptor"] = data.descriptor
output["__name"] = getattr(data, "name", None)
output["__entity_name"] = getattr(data.entity, "name", None)
output["__output_schema"] = data.process.output_schema
outputs.append(output)
fields[name] = outputs
def hydrate_input_uploads(input_, input_schema, hydrate_values=True):
"""Hydrate input basic:upload types with upload location.
Find basic:upload fields in input.
Add the upload location for relative paths.
"""
from resolwe.flow.managers import manager
files = []
for field_schema, fields in iterate_fields(input_, input_schema):
name = field_schema["name"]
value = fields[name]
if "type" in field_schema:
if field_schema["type"] == "basic:file:":
files.append(value)
elif field_schema["type"] == "list:basic:file:":
files.extend(value)
urlregex = re.compile(
r"^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]"
)
for value in files:
if "file_temp" in value:
if isinstance(value["file_temp"], str):
# If file_temp not url, hydrate path.
if not urlregex.search(value["file_temp"]):
value["file_temp"] = manager.get_executor().resolve_upload_path(
value["file_temp"]
)
else:
# Something very strange happened.
value["file_temp"] = "Invalid value for file_temp in DB"
def hydrate_size(data, force=False):
"""Add file and dir sizes.
Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
and ``list:basic:dir:`` fields.
``force`` parameter is used to recompute file sizes also on objects
that already have these values, e.g. in migrations.
"""
from .data import Data # prevent circular import
def get_dir_size(path):
"""Get directory size."""
total_size = 0
for dirpath, _, filenames in os.walk(path):
for file_name in filenames:
file_path = os.path.join(dirpath, file_name)
if not os.path.isfile(
file_path
): # Skip all "not normal" files (links, ...)
continue
total_size += os.path.getsize(file_path)
return total_size
def get_refs_size(obj, obj_path):
"""Calculate size of all references of ``obj``.
:param dict obj: Data object's output field (of type file/dir).
:param str obj_path: Path to ``obj``.
"""
total_size = 0
for ref in obj.get("refs", []):
ref_path = data.location.get_path(filename=ref)
if ref_path in obj_path:
# It is a common case that ``obj['file']`` is also contained in
                # one of obj['ref']. In that case, we need to make sure that its
# size is not counted twice:
continue
if os.path.isfile(ref_path):
total_size += os.path.getsize(ref_path)
elif os.path.isdir(ref_path):
total_size += get_dir_size(ref_path)
return total_size
def add_file_size(obj):
"""Add file size to the basic:file field."""
if (
data.status in [Data.STATUS_DONE, Data.STATUS_ERROR]
and "size" in obj
and not force
):
return
path = data.location.get_path(filename=obj["file"])
if not os.path.isfile(path):
raise ValidationError("Referenced file does not exist ({})".format(path))
obj["size"] = os.path.getsize(path)
obj["total_size"] = obj["size"] + get_refs_size(obj, path)
def add_dir_size(obj):
"""Add directory size to the basic:dir field."""
if (
data.status in [Data.STATUS_DONE, Data.STATUS_ERROR]
and "size" in obj
and not force
):
return
path = data.location.get_path(filename=obj["dir"])
if not os.path.isdir(path):
raise ValidationError("Referenced dir does not exist ({})".format(path))
obj["size"] = get_dir_size(path)
obj["total_size"] = obj["size"] + get_refs_size(obj, path)
data_size = 0
for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
name = field_schema["name"]
value = fields[name]
if "type" in field_schema:
if field_schema["type"].startswith("basic:file:"):
add_file_size(value)
data_size += value.get("total_size", 0)
elif field_schema["type"].startswith("list:basic:file:"):
for obj in value:
add_file_size(obj)
data_size += obj.get("total_size", 0)
elif field_schema["type"].startswith("basic:dir:"):
add_dir_size(value)
data_size += value.get("total_size", 0)
elif field_schema["type"].startswith("list:basic:dir:"):
for obj in value:
add_dir_size(obj)
data_size += obj.get("total_size", 0)
data.size = data_size
def render_descriptor(data):
"""Render data descriptor.
The rendering is based on descriptor schema and input context.
:param data: data instance
:type data: :class:`resolwe.flow.models.Data` or :class:`dict`
"""
if not data.descriptor_schema:
return
# Set default values
for field_schema, field, path in iterate_schema(
data.descriptor, data.descriptor_schema.schema, "descriptor"
):
if "default" in field_schema and field_schema["name"] not in field:
dict_dot(data, path, field_schema["default"])
def render_template(process, template_string, context):
"""Render template using the specified expression engine."""
from resolwe.flow.managers import manager
# Get the appropriate expression engine. If none is defined, do not evaluate
# any expressions.
expression_engine = process.requirements.get("expression-engine", None)
if not expression_engine:
return template_string
return manager.get_expression_engine(expression_engine).evaluate_block(
template_string, context
)
def json_path_components(path):
"""Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components
"""
if isinstance(path, str):
path = path.split(".")
return list(path)
def validate_process_subtype(supertype_name, supertype, subtype_name, subtype):
"""Perform process subtype validation.
:param supertype_name: Supertype name
:param supertype: Supertype schema
:param subtype_name: Subtype name
:param subtype: Subtype schema
:return: A list of validation error strings
"""
errors = []
for item in supertype:
# Ensure that the item exists in subtype and has the same schema.
for subitem in subtype:
if item["name"] != subitem["name"]:
continue
for key in set(item.keys()) | set(subitem.keys()):
if key in ("label", "description"):
# Label and description can differ.
continue
elif key == "required":
# A non-required item can be made required in subtype, but not the
# other way around.
item_required = item.get("required", True)
subitem_required = subitem.get("required", False)
if item_required and not subitem_required:
errors.append(
"Field '{}' is marked as required in '{}' and optional in '{}'.".format(
item["name"], supertype_name, subtype_name,
)
)
elif item.get(key, None) != subitem.get(key, None):
errors.append(
"Schema for field '{}' in type '{}' does not match supertype '{}'.".format(
item["name"], subtype_name, supertype_name
)
)
break
else:
errors.append(
"Schema for type '{}' is missing supertype '{}' field '{}'.".format(
subtype_name, supertype_name, item["name"]
)
)
return errors
def validate_process_types(queryset=None):
"""Perform process type validation.
:param queryset: Optional process queryset to validate
:return: A list of validation error strings
"""
if not queryset:
from .process import Process
queryset = Process.objects.all()
processes = {}
for process in queryset:
dict_dot(
processes,
process.type.replace(":", ".") + "__schema__",
process.output_schema,
)
errors = []
for path, key, value in iterate_dict(
processes, exclude=lambda key, value: key == "__schema__"
):
if "__schema__" not in value:
continue
# Validate with any parent types.
for length in range(len(path), 0, -1):
parent_type = ".".join(path[:length] + ["__schema__"])
try:
parent_schema = dict_dot(processes, parent_type)
except KeyError:
continue
errors += validate_process_subtype(
supertype_name=":".join(path[:length]),
supertype=parent_schema,
subtype_name=":".join(path + [key]),
subtype=value["__schema__"],
)
return errors
def fill_with_defaults(process_input, input_schema):
"""Fill empty optional fields in input with default values."""
for field_schema, fields, path in iterate_schema(
process_input, input_schema, include_groups=True
):
if "group" in field_schema and field_schema["name"] not in fields:
dict_dot(process_input, path, {})
if "default" in field_schema and field_schema["name"] not in fields:
dict_dot(process_input, path, field_schema["default"])
| 36.3125 | 100 | 0.568765 |
9573d3bff5e51d8cc62b349dccb2e08f78632c56 | 1,130 | py | Python | .sample_configs/param_handlers/import_data_video_action_recognition_sample.py | dizcology/python-aiplatform | 1a135775966c8a2303ded529eba514dcf9db7205 | [
"Apache-2.0"
] | 180 | 2020-09-23T17:21:15.000Z | 2022-03-30T17:25:47.000Z | .sample_configs/param_handlers/import_data_video_action_recognition_sample.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | 601 | 2020-09-23T16:23:44.000Z | 2022-03-31T19:08:23.000Z | .sample_configs/param_handlers/import_data_video_action_recognition_sample.py | pompipo/python-aiplatform | 3612b05c62dfb46822cd2c1798fd47349dba33bc | [
"Apache-2.0"
] | 109 | 2020-09-23T16:22:04.000Z | 2022-03-28T21:18:29.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The imports below are assumed to be needed for the type annotations in this
# sample fragment (they are not part of the original file).
import typing

import google.cloud.aiplatform_v1beta1


def make_name(name: str) -> str:
# Sample function parameter name in import_data_video_action_recognition_sample
name = name
return name
def make_import_configs(
gcs_source_uri: str,
) -> typing.Sequence[google.cloud.aiplatform_v1beta1.types.dataset.ImportDataConfig]:
import_configs = [
{
"gcs_source": {"uris": [gcs_source_uri]},
"import_schema_uri": "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_action_recognition_io_format_1.0.0.yaml",
}
]
return import_configs
| 33.235294 | 134 | 0.730088 |
e9683638a75eb7aca317b12802d49349b458e8a5 | 1,218 | bzl | Python | haiku/_src/build_defs.bzl | qsays/dm-haiku | 5f4a4011d666f6bdb8266797c26cc7daa1684bb4 | [
"Apache-2.0"
] | 7 | 2020-03-01T11:47:50.000Z | 2020-07-30T08:35:23.000Z | haiku/_src/build_defs.bzl | marload/dm-haiku | 8190b42cc6d9e7fae21c7d738bd3d719c144ab8a | [
"Apache-2.0"
] | null | null | null | haiku/_src/build_defs.bzl | marload/dm-haiku | 8190b42cc6d9e7fae21c7d738bd3d719c144ab8a | [
"Apache-2.0"
] | null | null | null | """Haiku specific build rules."""
def hk_py_library(name, **kwargs):
"""Proxy for py_library.
Internally we override this to enable type checking via PyType (more
information at https://github.com/google/pytype).
Args:
name: library name.
**kwargs: keyword args passed straight to py_library.
"""
native.py_library(name = name, **kwargs)
def hk_py_test(
name,
deps = [],
tags = [],
main = None,
gpu = True,
tpu = True,
**kwargs):
"""Runs a py_test.
Args:
name: test target name to generate suffixed with `test`.
deps: additional dependencies for the test targets.
tags: tags to be assigned to the different test targets.
main: main script to be run for the test.
    gpu: Whether the test can be run on GPU. Note: ignored by this rule.
    tpu: Whether the test can be run on TPU. Note: ignored by this rule.
**kwargs: extra keyword arguments to the test.
"""
if main == None:
main = name + ".py"
native.py_test(
name = name,
deps = deps,
tags = tags,
main = main,
python_version = "PY3",
**kwargs
)
| 27.066667 | 72 | 0.578818 |
fcdaab15e25c39e8dc9c5ca0f0d6ce2659318260 | 9,820 | py | Python | Toolbox/Reservoir_system_simulation/Res_sys_sim.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | 1 | 2021-05-25T13:12:09.000Z | 2021-05-25T13:12:09.000Z | Toolbox/Reservoir_system_simulation/Res_sys_sim.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | null | null | null | Toolbox/Reservoir_system_simulation/Res_sys_sim.py | AndresPenuela/MHA-Workshop | 69ce4cedc1396e8ee57ccde3b2eea194c58599ea | [
"MIT"
] | 1 | 2020-05-27T01:43:01.000Z | 2020-05-27T01:43:01.000Z | # -*- coding: utf-8 -*-
"""
This function implements the reservoir simulation model (Res_sys_sim). First,
it extracts and processes the regulated flows contained in the Qreg variable.
This makes the regulated flow data readable by the mass balance function
(Mass_bal_func). Then, Mass_bal_func links all the key variables that represent
the reservoir dynamics (inflow, storage and outflows).
@author: Andres Peñuela
"""
import numpy as np
from numba import njit
### Mass balance function ###
@njit(parallel = False) # Numba decorator to speed-up the function below
def Mass_bal_func(I, e,
s_0, s_min, s_max,
env_min,
Qreg_inf, Qreg_rel,
s_frac,
Policy_inf, Policy_rel):
"""
The mathematical model (Mass_bal_func) of the reservoir essentially consists
of a water balance equation, where the storage (s) at a future time step
(for example, at the beginning of the next week) is predicted from the storage
at the current time (the beginning of the this week) by adding and subtracting
the inflows and outflows that will occur during the temporal interval ahead:
s(t+1) = s(t) + I(t) + Qreg_inf – E(t) – env(t) - spill(t) – Qreg_rel(t)
Where
s(t) = reservoir storage at time-step t, in Vol (for example: ML)
I(t) = reservoir inflows in the interval [t,t+1], in Vol/time (for example:
ML/week). This is usually provided by a flow forecasting system or
assumed by looking, for example, at historical inflow records for the
relevant time of year
E(t) = evaporation from the reservoir surface area in the interval [t,t+1], in
Vol/time (for example: ML/week). This is calculated internally to the
model, by multipling the evaporation rate for unit surface area
(e(t)) by the reservoir surface area (which is derived from the storage
S given that the reservoir geometry is known)
env(t) = environmental compensation flow in the interval [t,t+1], in Vol/time
(for example: ML/week). This is usually set to the value that was
             agreed upon with the environmental regulator
spill(t) = outflow through spillways (if any) in the interval [t,t+1], in
Vol/time (for example: ML/week). This is calculated internally to
the model, and is equal to the excess volume with respect to the
maximum reservoir capacity (so most of the time spill(t) is
equal to 0 as the maximum capacity is not reached, but it
occasionally is >0 so that the capacity is never exceeded)
Qreg_inf(t) = reservoir regulated inflows in the interval [t,t+1], in Vol/time
(for example: ML/week). This is a completely free variable that
the reservoir operator will need to specify
Qreg_rel(t) = reservoir regulated release for water supply in the interval
[t,t+1], in Vol/time (for example: ML/week). This is a completely
free variable that the reservoir operator will need to specify
Policy related inputs: s_frac, Policy_inf, Policy_rel
"""
T = I.shape[0]
### Declare output variables ###
# Reservoir storage
s = np.zeros(T+1)
# Environmental flow
env = np.zeros(T)
# Spillage
spill = np.zeros(T)
# Evaporation
E = np.zeros(T)
### Initial conditions ###
s[0] = s_0 # initial storage
for t in range(T): # Loop for each time-step
if not np.isnan(Policy_inf[0]):
Qreg_inf[t] = np.interp(s[t]/s_max, s_frac, Policy_inf)
if not np.isnan(Policy_rel[0]):
Qreg_rel[t] = np.interp(s[t]/s_max, s_frac, Policy_rel)
### Evaporation volume ###
# (E) = evaporation depth * water surface area (A)
        # By default we assume A = 1 km2, but it should be modified according
        # to your reservoir characteristics and to take into account the
        # variation of the water surface area as a function of the water
        # surface elevation
A = 1 # in km2.
E[t] = e[t] * A # in ML (= mm * km2)
# If at week t the inflow (I) is lower than the required environmental compensation (env_min),
# then the environmental compensation (Qenv) = total inflows (natural + regulated). Otherwise Qenv = env_min.
if env_min[t] >= I[t] + Qreg_inf[t] :
env[t] = I[t] + Qreg_inf[t]
else:
env[t] = env_min[t]
# If the required environmental compensation is higher than the water resource available (s + I - E)
# then the environmental compensation is equal to the higher value between 0 and the resource available
if env_min[t] >= s[t] - s_min + I[t] + Qreg_inf[t] - E[t]:
env[t] = np.array([0,s[t] - s_min + I[t] + Qreg_inf[t] - E[t]]).max()
else:
env[t] = env_min[t]
# If the regulated release (Qreg_rel) is higher than the water resource available (s + I - E - Qenv)
# then the release is equal to the lower value between the resource available and the pre-defined release (Qreg_rel)
Qreg_rel[t] = np.array([Qreg_rel[t], np.array([0,s[t] - s_min + I[t] + Qreg_inf[t] - E[t] - env[t]]).max()]).min()
# The spillage is equal to the higher value between 0 and the resource available exceeding the reservoir capacity
spill[t] = np.array([0,s[t] + I[t] + Qreg_inf[t] - Qreg_rel[t] - env[t] - E[t] - s_max]).max()
# The final storage (initial storage in the next step) is equal to the storage + inflow - outflows
s[t+1] = np.array([s_min,s[t] + I[t] + Qreg_inf[t] - Qreg_rel[t] - env[t] - E[t] - spill[t]]).max()
return env, spill, Qreg_rel, Qreg_inf, s, E
def Res_sys_sim(I, e, s_0, s_min, s_max, env_min, d, Qreg):
"""
The function extracts both regulated inflows (Qreg_inf) and regulated
    releases (Qreg_rel) from Qreg. Both Qreg_inf and Qreg_rel are processed
    either as empty variables, as a time series, i.e. an array of values for
    each time step, or as a policy function, i.e. a function that can be used
to determine the release conditional on the state of the reservoir system
in the current time-step. This step essentially makes inputs readable by
the mass balance function (Mass_bal_func).
    Comment: if the release scheduling is not predefined, the model will
    automatically assume the releases are equal to the water demand (Qreg_rel
    = d)
"""
# Time length
T = I.shape[0]
# Required environmental compensation flow
env_min = env_min + np.zeros(T)
# Required demand
d = d + np.zeros(T)
# Regulated releases + inflows
if Qreg['rel_inf'] == []:
Qreg_rel = np.zeros(T)
Qreg_inf = np.zeros(T)
elif isinstance(Qreg['rel_inf'],(dict)):
exec('from '+Qreg['rel_inf']['file_name']+' import '+Qreg['rel_inf']['function'])
Qreg_rel = np.zeros(T)
Qreg_inf = np.zeros(T)
# Regulated water release
if Qreg['releases'] == []:
Qreg_rel = d # releases = demand
elif isinstance(Qreg['releases'],(np.ndarray)): # a release scheduling is provided as an input
Qreg_rel = Qreg['releases'] + np.zeros(T)
elif isinstance(Qreg['releases'],(dict)):
exec('from '+Qreg['releases']['file_name']+' import '+Qreg['releases']['function'])
# Regulated inflows
if Qreg['inflows'] == []:
Qreg_inf = np.zeros(T) # No regulated inflows
elif isinstance(Qreg['inflows'],(np.ndarray)): # a regulated inflows scheduling is provided as an input
Qreg_inf = Qreg['inflows'] + np.zeros(T)
elif isinstance(Qreg['releases'],(dict)):
exec('from '+Qreg['inflows']['file_name']+' import '+Qreg['inflows']['function'])
### Operating policy ###
s_step = 0.01
s_frac = np.arange(0,1+s_step,s_step) # storage fraction
Policy_rel = np.zeros(len(s_frac)) + np.nan # Regulated release policy
Policy_inf = np.zeros(len(s_frac)) + np.nan # Regulated inflow policy
for i in np.arange(len(s_frac)):
if isinstance(Qreg['rel_inf'],(dict)): # a dictionary with: the name of the function,
# file name where it is contained and the parameters of the function
exec('Policy_rel[i], Policy_inf[i] = '+Qreg['rel_inf']['function']+'('+str(Qreg['rel_inf']['param'])+','+str(s_frac[i])+')')
if isinstance(Qreg['releases'],(dict)): # a dictionary with: the name of the function,
# file name where it is contained and the parameters of the function
exec('Policy_rel[i] = '+Qreg['releases']['function']+'('+str(Qreg['releases']['param'])+','+str(s_frac[i])+')')
if isinstance(Qreg['inflows'],(dict)): # a dictionary with: the name of the function,
# file name where it is contained and the parameters of the function
exec('Policy_inf[i] = '+Qreg['inflows']['function']+'('+str(Qreg['inflows']['param'])+','+str(s_frac[i])+')')
### Run mass balance function ###
env, spill, Qreg_rel, Qreg_inf, s, E = Mass_bal_func(I, e,
s_0, s_min, s_max,
env_min,
Qreg_inf, Qreg_rel,
s_frac,
Policy_inf,Policy_rel)
return env, spill, Qreg_rel, Qreg_inf, s, E
| 48.613861 | 136 | 0.602444 |
3e36aee5fc03462c3c48fa7ded71eaf77f1d7959 | 4,303 | py | Python | tests/unit/awslambda/test_awslambda.py | Yurzs/boto | d739d6c52877699206e69b9901bbe92ea437ba5d | [
"MIT"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | tests/unit/awslambda/test_awslambda.py | Yurzs/boto | d739d6c52877699206e69b9901bbe92ea437ba5d | [
"MIT"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | tests/unit/awslambda/test_awslambda.py | Yurzs/boto | d739d6c52877699206e69b9901bbe92ea437ba5d | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import tempfile
import shutil
import os
import socket
from boto.compat import json
from boto.awslambda.layer1 import AWSLambdaConnection
from tests.unit import AWSMockServiceTestCase
from tests.compat import mock
class TestAWSLambda(AWSMockServiceTestCase):
connection_class = AWSLambdaConnection
def default_body(self):
return b'{}'
def test_upload_function_binary(self):
self.set_http_response(status_code=201)
function_data = b'This is my file'
self.service_connection.upload_function(
function_name='my-function',
function_zip=function_data,
role='myrole',
handler='myhandler',
mode='event',
runtime='nodejs'
)
self.assertEqual(self.actual_request.body, function_data)
self.assertEqual(
self.actual_request.headers['Content-Length'],
str(len(function_data))
)
self.assertEqual(
self.actual_request.path,
'/2014-11-13/functions/my-function?Handler=myhandler&Mode'
'=event&Role=myrole&Runtime=nodejs'
)
def test_upload_function_file(self):
self.set_http_response(status_code=201)
rootdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, rootdir)
filename = 'test_file'
function_data = b'This is my file'
full_path = os.path.join(rootdir, filename)
with open(full_path, 'wb') as f:
f.write(function_data)
with open(full_path, 'rb') as f:
self.service_connection.upload_function(
function_name='my-function',
function_zip=f,
role='myrole',
handler='myhandler',
mode='event',
runtime='nodejs'
)
self.assertEqual(self.actual_request.body.read(),
function_data)
self.assertEqual(
self.actual_request.headers['Content-Length'],
str(len(function_data))
)
self.assertEqual(
self.actual_request.path,
'/2014-11-13/functions/my-function?Handler=myhandler&Mode'
'=event&Role=myrole&Runtime=nodejs'
)
def test_upload_function_unseekable_file_no_tell(self):
sock = socket.socket()
with self.assertRaises(TypeError):
self.service_connection.upload_function(
function_name='my-function',
function_zip=sock,
role='myrole',
handler='myhandler',
mode='event',
runtime='nodejs'
)
def test_upload_function_unseekable_file_cannot_tell(self):
mock_file = mock.Mock()
mock_file.tell.side_effect = IOError
with self.assertRaises(TypeError):
self.service_connection.upload_function(
function_name='my-function',
function_zip=mock_file,
role='myrole',
handler='myhandler',
mode='event',
runtime='nodejs'
)
| 36.466102 | 76 | 0.630723 |
33303243d0c3796bfdf45b0f75b98ce8f57c4f49 | 158 | py | Python | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_NoCycle_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_NoCycle_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_NoCycle_NoAR.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['Lag1Trend'] , ['NoCycle'] , ['NoAR'] ); | 39.5 | 80 | 0.746835 |
625aea500a5cd1059b1f7afba8de05909bd0eb98 | 2,971 | py | Python | Datasets/listdataset.py | doudoulaile/RL-GAN-Net | 9c221223d1878bc24f0f39ad34928c1bb2974ae3 | [
"MIT"
] | 112 | 2019-03-13T00:52:17.000Z | 2022-03-30T07:42:27.000Z | Datasets/listdataset.py | ANABUR920/RL-GAN-Net | 9c221223d1878bc24f0f39ad34928c1bb2974ae3 | [
"MIT"
] | 20 | 2019-04-25T02:31:41.000Z | 2022-03-11T23:52:06.000Z | Datasets/listdataset.py | ANABUR920/RL-GAN-Net | 9c221223d1878bc24f0f39ad34928c1bb2974ae3 | [
"MIT"
] | 37 | 2019-04-10T05:18:35.000Z | 2022-03-31T02:51:28.000Z | import torch.utils.data as data
import os
import os.path
#from plyfile import PlyData, PlyElement
from Datasets.plyfile.plyfile import PlyData
import numpy as np
#import main import args as args
def load_ply(dir,file_name, with_faces=False, with_color=False):
path = os.path.join(dir,file_name)
ply_data = PlyData.read(path)
points = ply_data['vertex']
points = np.vstack([points['x'], points['y'], points['z']]).T
ret_val = [points]
if with_faces:
faces = np.vstack(ply_data['face']['vertex_indices'])
ret_val.append(faces)
if with_color:
r = np.vstack(ply_data['vertex']['red'])
g = np.vstack(ply_data['vertex']['green'])
b = np.vstack(ply_data['vertex']['blue'])
color = np.hstack((r, g, b))
ret_val.append(color)
if len(ret_val) == 1: # Unwrap the list
ret_val = ret_val[0]
return ret_val
def npy_loader(dir,file_name):
path = os.path.join(dir,file_name)
output = np.load(path)
return output
class ListDataset(data.Dataset):
def __init__(self, input_root,target_root, path_list, net_name, co_transforms = None, input_transforms = None, target_transforms = None,args=None,mode=None,give_name = False):
self.input_root = input_root
if net_name=='auto_encoder' : # As target root is same as input root for auto encoder
self.target_root = input_root
else:
self.target_root = target_root
self.path_list = path_list
self.net_name = net_name
if(self.net_name=='GAN'):
self.loader = npy_loader
else:
self.loader = load_ply
self.input_transforms = input_transforms
self.target_transforms = target_transforms
self.co_transforms = co_transforms
self.args = args
self.mode = mode
self.give_name =give_name
def __getitem__(self,index):
inputs_list,targets_list = self.path_list[index]
input_name = inputs_list[0]
input_name = input_name[:-4]
target_name = targets_list[0]
target_name = target_name[:-4]
inputs = self.loader(self.input_root,inputs_list[0])
targets = self.loader(self.target_root,targets_list[0])
if self.mode == 'train':
if self.co_transforms is not None:
if self.net_name=='GAN': # No target transform on GFV
inputs = self.co_transforms(inputs)
else:
inputs,targets = self.co_transforms(inputs,targets)
if self.input_transforms is not None:
inputs = self.input_transforms(inputs)
# if self.target_transforms is not None:
# targets = self.target_transforms(targets)
if(self.give_name==True):
return inputs, input_name
else:
return inputs
def __len__(self):
return len(self.path_list)
| 31.946237 | 179 | 0.621003 |
59a018e214d4067e9af8eaf656d4771f6a41c780 | 2,422 | py | Python | assets/ckeditor/dev/samplesvalidator/samplesvalidator.py | mirwansyahs/siabanks | 3ad789291587639fea875fd59297b67cb6a04cfd | [
"MIT"
] | 2 | 2021-04-09T14:59:45.000Z | 2021-04-18T07:27:07.000Z | assets/ckeditor/dev/samplesvalidator/samplesvalidator.py | mirwansyahs/siabanks | 3ad789291587639fea875fd59297b67cb6a04cfd | [
"MIT"
] | null | null | null | assets/ckeditor/dev/samplesvalidator/samplesvalidator.py | mirwansyahs/siabanks | 3ad789291587639fea875fd59297b67cb6a04cfd | [
"MIT"
] | null | null | null | # Copyright (c) 2003-2019, CKSource - Frederico Knabben. All rights reserved.
# For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
# Validates HTML files in a directory with W3C validator.
# To use this script simply call:
# python samplesvalidator.py
#
# By default this script validates samples directory ( 'project/samples' ).
# To validate some other directory an environmental variable must be set:
# export CKSAMPLESPATH=/home/me/some/path/to/be/validated
#
# To change validation service url you can also set an environmental variable:
# export CKSAMPLESURL='http://my.validation.servi.ce'
#
# To revert (unset) either of these variables:
# unset VARIABLE
import urllib, urllib2
import json
import os
import re
pathEnvVar = 'CKSAMPLESPATH'
urlEnvVar = 'CKSAMPLESURL'
scriptPath = os.path.dirname( os.path.realpath( __file__ ) )
# Let's move to the desired directory.
# \-> Look for ENV variable or use default samples path.
if pathEnvVar in os.environ:
path = os.environ[ pathEnvVar ]
else:
path = os.path.abspath( os.path.join( scriptPath, '../../samples/' ) )
os.chdir( path )
# Let's determine validator url.
# \-> Look for ENV variable or use default url.
url = os.environ[ urlEnvVar ] if urlEnvVar in os.environ else 'http://validator.w3.org/check'
# Find all HTML files in path.
directoryFiles = os.listdir( '.' )
htmlRegex = re.compile( '.html$', re.IGNORECASE )
htmlFiles = filter( htmlRegex.search, directoryFiles )
# Iterate over HTML files.
for index, fileName in enumerate( htmlFiles ):
# Determine the full path of the file.
filePath = os.path.join( path, fileName )
print '(%(index)s/%(total)s) Validating %(filePath)s...' % {
'filePath': filePath,
'total': len( htmlFiles ),
'index': index + 1
}
# Open the file.
fileHandler = open( filePath, 'r' )
# Prepare POST request.
postData = {
'fragment': fileHandler.read(),
'charset': 'utf-8',
'output': 'json'
}
# Close file.
fileHandler.close()
# Do the request. Keep the response.
data = urllib.urlencode( postData )
request = urllib2.Request( url, data )
response = json.loads( urllib2.urlopen( request ).read() )
# Print validation messages.
for message in response[ 'messages' ]:
message[ 'type' ] = message[ 'type' ].upper()
message[ 'message' ] = message[ 'message' ].title()
print '\t* %(type)s (Last line: %(lastLine)s, Last column: %(lastColumn)s): %(message)s' % message
| 30.658228 | 100 | 0.70768 |
e220de8c61333f25e54d5c3ff2dd63240afc5e7f | 30,279 | py | Python | pyscf/pbc/scf/khf.py | gkclab/pyscf | 2bbf2c11ca51986307331194192574f454fbf7c0 | [
"Apache-2.0"
] | null | null | null | pyscf/pbc/scf/khf.py | gkclab/pyscf | 2bbf2c11ca51986307331194192574f454fbf7c0 | [
"Apache-2.0"
] | 36 | 2018-08-22T19:44:03.000Z | 2020-05-09T10:02:36.000Z | pyscf/pbc/scf/khf.py | gkclab/pyscf | 2bbf2c11ca51986307331194192574f454fbf7c0 | [
"Apache-2.0"
] | 4 | 2018-02-14T16:28:28.000Z | 2019-08-12T16:40:30.000Z | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Garnet Chan <gkc1000@gmail.com>
# Timothy Berkelbach <tim.berkelbach@gmail.com>
# Qiming Sun <osirpt.sun@gmail.com>
#
'''
Hartree-Fock for periodic systems with k-point sampling
See Also:
hf.py : Hartree-Fock for periodic systems at a single k-point
'''
import sys
import time
from functools import reduce
import numpy as np
import scipy.linalg
import h5py
from pyscf.pbc.scf import hf as pbchf
from pyscf import lib
from pyscf.scf import hf as mol_hf
from pyscf.lib import logger
from pyscf.pbc.gto import ecp
from pyscf.pbc.scf import addons
from pyscf.pbc.scf import chkfile # noqa
from pyscf.pbc import tools
from pyscf.pbc import df
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'pbc_scf_analyze_with_meta_lowdin', True)
PRE_ORTH_METHOD = getattr(__config__, 'pbc_scf_analyze_pre_orth_method', 'ANO')
CHECK_COULOMB_IMAG = getattr(__config__, 'pbc_scf_check_coulomb_imag', True)
def get_ovlp(mf, cell=None, kpts=None):
'''Get the overlap AO matrices at sampled k-points.
Args:
kpts : (nkpts, 3) ndarray
Returns:
ovlp_kpts : (nkpts, nao, nao) ndarray
'''
if cell is None: cell = mf.cell
if kpts is None: kpts = mf.kpts
# Avoid pbcopt's prescreening in the lattice sum, for better accuracy
s = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=kpts,
pbcopt=lib.c_null_ptr())
cond = np.max(lib.cond(s))
if cond * cell.precision > 1e2:
prec = 1e2 / cond
rmin = max([cell.bas_rcut(ib, prec) for ib in range(cell.nbas)])
if cell.rcut < rmin:
logger.warn(cell, 'Singularity detected in overlap matrix. '
                        'Integral accuracy may not be enough.\n '
'You can adjust cell.precision or cell.rcut to '
'improve accuracy. Recommended values are\n '
'cell.precision = %.2g or smaller.\n '
'cell.rcut = %.4g or larger.', prec, rmin)
return lib.asarray(s)
def get_hcore(mf, cell=None, kpts=None):
'''Get the core Hamiltonian AO matrices at sampled k-points.
Args:
kpts : (nkpts, 3) ndarray
Returns:
hcore : (nkpts, nao, nao) ndarray
'''
if cell is None: cell = mf.cell
if kpts is None: kpts = mf.kpts
if cell.pseudo:
nuc = lib.asarray(mf.with_df.get_pp(kpts))
else:
nuc = lib.asarray(mf.with_df.get_nuc(kpts))
if len(cell._ecpbas) > 0:
nuc += lib.asarray(ecp.ecp_int(cell, kpts))
t = lib.asarray(cell.pbc_intor('int1e_kin', 1, 1, kpts))
return nuc + t
def get_j(mf, cell, dm_kpts, kpts, kpts_band=None):
'''Get the Coulomb (J) AO matrix at sampled k-points.
Args:
dm_kpts : (nkpts, nao, nao) ndarray or a list of (nkpts,nao,nao) ndarray
Density matrix at each k-point. If a list of k-point DMs, eg,
UHF alpha and beta DM, the alpha and beta DMs are contracted
separately. It needs to be Hermitian.
Kwargs:
kpts_band : (k,3) ndarray
A list of arbitrary "band" k-points at which to evalute the matrix.
Returns:
vj : (nkpts, nao, nao) ndarray
or list of vj if the input dm_kpts is a list of DMs
'''
return df.FFTDF(cell).get_jk(dm_kpts, kpts, kpts_band, with_k=False)[0]
def get_jk(mf, cell, dm_kpts, kpts, kpts_band=None, with_j=True, with_k=True,
omega=None, **kwargs):
'''Get the Coulomb (J) and exchange (K) AO matrices at sampled k-points.
Args:
dm_kpts : (nkpts, nao, nao) ndarray
Density matrix at each k-point. It needs to be Hermitian.
Kwargs:
kpts_band : (3,) ndarray
A list of arbitrary "band" k-point at which to evalute the matrix.
Returns:
vj : (nkpts, nao, nao) ndarray
vk : (nkpts, nao, nao) ndarray
or list of vj and vk if the input dm_kpts is a list of DMs
'''
return df.FFTDF(cell).get_jk(dm_kpts, kpts, kpts_band, with_j, with_k,
omega, exxdiv=mf.exxdiv)
def get_fock(mf, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1, diis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
h1e_kpts, s_kpts, vhf_kpts, dm_kpts = h1e, s1e, vhf, dm
if h1e_kpts is None: h1e_kpts = mf.get_hcore()
if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)
f_kpts = h1e_kpts + vhf_kpts
if cycle < 0 and diis is None: # Not inside the SCF iteration
return f_kpts
if diis_start_cycle is None:
diis_start_cycle = mf.diis_start_cycle
if level_shift_factor is None:
level_shift_factor = mf.level_shift
if damp_factor is None:
damp_factor = mf.damp
if s_kpts is None: s_kpts = mf.get_ovlp()
if dm_kpts is None: dm_kpts = mf.make_rdm1()
if 0 <= cycle < diis_start_cycle-1 and abs(damp_factor) > 1e-4:
f_kpts = [mol_hf.damping(s1e, dm_kpts[k] * 0.5, f_kpts[k], \
damp_factor) for k, s1e in enumerate(s_kpts)]
if diis and cycle >= diis_start_cycle:
f_kpts = diis.update(s_kpts, dm_kpts, f_kpts, mf, h1e_kpts, vhf_kpts)
if abs(level_shift_factor) > 1e-4:
f_kpts = [mol_hf.level_shift(s, dm_kpts[k], f_kpts[k], level_shift_factor)
for k, s in enumerate(s_kpts)]
return lib.asarray(f_kpts)
def get_fermi(mf, mo_energy_kpts=None, mo_occ_kpts=None):
'''Fermi level
'''
if mo_energy_kpts is None: mo_energy_kpts = mf.mo_energy
if mo_occ_kpts is None: mo_occ_kpts = mf.mo_occ
# mo_energy_kpts and mo_occ_kpts are k-point RHF quantities
assert(mo_energy_kpts[0].ndim == 1)
assert(mo_occ_kpts[0].ndim == 1)
    # occ arrays in mo_occ_kpts may have different sizes. See issue #250
nocc = sum(mo_occ.sum() for mo_occ in mo_occ_kpts) / 2
    # nocc may not be a perfect integer when smearing is enabled
nocc = int(nocc.round(3))
fermi = np.sort(np.hstack(mo_energy_kpts))[nocc-1]
for k, mo_e in enumerate(mo_energy_kpts):
mo_occ = mo_occ_kpts[k]
if mo_occ[mo_e > fermi].sum() > 1.:
logger.warn(mf, 'Occupied band above Fermi level: \n'
'k=%d, mo_e=%s, mo_occ=%s', k, mo_e, mo_occ)
return fermi
def get_occ(mf, mo_energy_kpts=None, mo_coeff_kpts=None):
'''Label the occupancies for each orbital for sampled k-points.
This is a k-point version of scf.hf.SCF.get_occ
'''
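    # Illustration (made-up numbers): with 2 k-points, 3 orbitals per k-point
    # and 2 electrons in the unit cell, nocc = 2, so the two lowest energies
    # across all k-points are doubly occupied, e.g.
    #     mo_energy_kpts = [[-0.5, 0.3, 0.9], [-0.4, 0.5, 1.1]]
    #     -> mo_occ_kpts  = [[2., 0., 0.], [2., 0., 0.]]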
if mo_energy_kpts is None: mo_energy_kpts = mf.mo_energy
nkpts = len(mo_energy_kpts)
nocc = mf.cell.tot_electrons(nkpts) // 2
mo_energy = np.sort(np.hstack(mo_energy_kpts))
fermi = mo_energy[nocc-1]
mo_occ_kpts = []
for mo_e in mo_energy_kpts:
mo_occ_kpts.append((mo_e <= fermi).astype(np.double) * 2)
if nocc < mo_energy.size:
logger.info(mf, 'HOMO = %.12g LUMO = %.12g',
mo_energy[nocc-1], mo_energy[nocc])
if mo_energy[nocc-1]+1e-3 > mo_energy[nocc]:
logger.warn(mf, 'HOMO %.12g == LUMO %.12g',
mo_energy[nocc-1], mo_energy[nocc])
else:
logger.info(mf, 'HOMO = %.12g', mo_energy[nocc-1])
if mf.verbose >= logger.DEBUG:
np.set_printoptions(threshold=len(mo_energy))
logger.debug(mf, ' k-point mo_energy')
for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
logger.debug(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
k, kpt[0], kpt[1], kpt[2],
mo_energy_kpts[k][mo_occ_kpts[k]> 0],
mo_energy_kpts[k][mo_occ_kpts[k]==0])
np.set_printoptions(threshold=1000)
return mo_occ_kpts
def get_grad(mo_coeff_kpts, mo_occ_kpts, fock):
'''
    Returns a 1D array of gradients, like the non-k-point version.
    Note that occ and virt indices of different k-points now occur
    in sequential patches of the 1D array.
'''
nkpts = len(mo_occ_kpts)
grad_kpts = [mol_hf.get_grad(mo_coeff_kpts[k], mo_occ_kpts[k], fock[k])
for k in range(nkpts)]
return np.hstack(grad_kpts)
def make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs):
'''One particle density matrices for all k-points.
Returns:
dm_kpts : (nkpts, nao, nao) ndarray
'''
nkpts = len(mo_occ_kpts)
dm_kpts = [mol_hf.make_rdm1(mo_coeff_kpts[k], mo_occ_kpts[k])
for k in range(nkpts)]
return lib.asarray(dm_kpts)
def energy_elec(mf, dm_kpts=None, h1e_kpts=None, vhf_kpts=None):
'''Following pyscf.scf.hf.energy_elec()
'''
if dm_kpts is None: dm_kpts = mf.make_rdm1()
if h1e_kpts is None: h1e_kpts = mf.get_hcore()
if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)
nkpts = len(dm_kpts)
e1 = 1./nkpts * np.einsum('kij,kji', dm_kpts, h1e_kpts)
e_coul = 1./nkpts * np.einsum('kij,kji', dm_kpts, vhf_kpts) * 0.5
mf.scf_summary['e1'] = e1.real
mf.scf_summary['e2'] = e_coul.real
logger.debug(mf, 'E1 = %s E_coul = %s', e1, e_coul)
    if CHECK_COULOMB_IMAG and abs(e_coul.imag) > mf.cell.precision*10:
logger.warn(mf, "Coulomb energy has imaginary part %s. "
"Coulomb integrals (e-e, e-N) may not converge !",
e_coul.imag)
return (e1+e_coul).real, e_coul.real
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
'''Analyze the given SCF object: print orbital energies, occupancies;
print orbital coefficients; Mulliken population analysis; Dipole moment
'''
mf.dump_scf_summary(verbose)
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
ovlp_ao = mf.get_ovlp()
dm = mf.make_rdm1(mo_coeff, mo_occ)
if with_meta_lowdin:
return mf.mulliken_meta(mf.cell, dm, s=ovlp_ao, verbose=verbose)
else:
raise NotImplementedError
#return mf.mulliken_pop(mf.cell, dm, s=ovlp_ao, verbose=verbose)
def mulliken_meta(cell, dm_ao_kpts, verbose=logger.DEBUG,
pre_orth_method=PRE_ORTH_METHOD, s=None):
'''A modified Mulliken population analysis, based on meta-Lowdin AOs.
Note this function only computes the Mulliken population for the gamma
point density matrix.
'''
from pyscf.lo import orth
if s is None:
s = get_ovlp(cell)
log = logger.new_logger(cell, verbose)
log.note('Analyze output for *gamma point*')
log.info(' To include the contributions from k-points, transform to a '
'supercell then run the population analysis on the supercell\n'
' from pyscf.pbc.tools import k2gamma\n'
' k2gamma.k2gamma(mf).mulliken_meta()')
log.note("KRHF mulliken_meta")
dm_ao_gamma = dm_ao_kpts[0,:,:].real
s_gamma = s[0,:,:].real
c = orth.restore_ao_character(cell, pre_orth_method)
orth_coeff = orth.orth_ao(cell, 'meta_lowdin', pre_orth_ao=c, s=s_gamma)
c_inv = np.dot(orth_coeff.T, s_gamma)
dm = reduce(np.dot, (c_inv, dm_ao_gamma, c_inv.T.conj()))
log.note(' ** Mulliken pop on meta-lowdin orthogonal AOs **')
return mol_hf.mulliken_pop(cell, dm, np.eye(orth_coeff.shape[0]), log)
def canonicalize(mf, mo_coeff_kpts, mo_occ_kpts, fock=None):
if fock is None:
dm = mf.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
fock = mf.get_fock(dm=dm)
mo_coeff = []
mo_energy = []
for k, mo in enumerate(mo_coeff_kpts):
mo1 = np.empty_like(mo)
mo_e = np.empty_like(mo_occ_kpts[k])
occidx = mo_occ_kpts[k] == 2
viridx = ~occidx
for idx in (occidx, viridx):
if np.count_nonzero(idx) > 0:
orb = mo[:,idx]
f1 = reduce(np.dot, (orb.T.conj(), fock[k], orb))
e, c = scipy.linalg.eigh(f1)
mo1[:,idx] = np.dot(orb, c)
mo_e[idx] = e
mo_coeff.append(mo1)
mo_energy.append(mo_e)
return mo_energy, mo_coeff
def init_guess_by_chkfile(cell, chkfile_name, project=None, kpts=None):
'''Read the KHF results from checkpoint file, then project it to the
basis defined by ``cell``
Returns:
Density matrix, 3D ndarray
'''
from pyscf.pbc.scf import kuhf
dm = kuhf.init_guess_by_chkfile(cell, chkfile_name, project, kpts)
return dm[0] + dm[1]
def dip_moment(cell, dm_kpts, unit='Debye', verbose=logger.NOTE,
grids=None, rho=None, kpts=np.zeros((1,3))):
''' Dipole moment in the unit cell (is it well defined?).
Args:
cell : an instance of :class:`Cell`
dm_kpts (a list of ndarrays) : density matrices of k-points
Return:
A list: the dipole moment on x, y and z components
'''
from pyscf.pbc.dft import gen_grid
from pyscf.pbc.dft import numint
if grids is None:
grids = gen_grid.UniformGrids(cell)
if rho is None:
rho = numint.KNumInt().get_rho(cell, dm_kpts, grids, kpts, cell.max_memory)
return pbchf.dip_moment(cell, dm_kpts, unit, verbose, grids, rho, kpts)
def get_rho(mf, dm=None, grids=None, kpts=None):
'''Compute density in real space
'''
from pyscf.pbc.dft import gen_grid
from pyscf.pbc.dft import numint
if dm is None:
dm = mf.make_rdm1()
if getattr(dm[0], 'ndim', None) != 2: # KUHF
dm = dm[0] + dm[1]
if grids is None:
grids = gen_grid.UniformGrids(mf.cell)
if kpts is None:
kpts = mf.kpts
ni = numint.KNumInt()
return ni.get_rho(mf.cell, dm, grids, kpts, mf.max_memory)
def as_scanner(mf):
import copy
if isinstance(mf, lib.SinglePointScanner):
return mf
logger.info(mf, 'Create scanner for %s', mf.__class__)
class SCF_Scanner(mf.__class__, lib.SinglePointScanner):
def __init__(self, mf_obj):
self.__dict__.update(mf_obj.__dict__)
def __call__(self, cell_or_geom, **kwargs):
from pyscf.pbc import gto
if isinstance(cell_or_geom, gto.Cell):
cell = cell_or_geom
else:
cell = self.cell.set_geom_(cell_or_geom, inplace=False)
# Clean up intermediates associated with the previous mol object
self.reset(cell)
if 'dm0' in kwargs:
dm0 = kwargs.pop('dm0')
elif self.mo_coeff is None:
dm0 = None
elif self.chkfile and h5py.is_hdf5(self.chkfile):
dm0 = self.from_chk(self.chkfile)
else:
dm0 = self.make_rdm1()
# dm0 from the last calculation cannot be used in the current
# calculation if a completely different system is given.
# Obviously, the systems are very different if the number of
# basis functions is different.
# TODO: A robust check should include more comparison on
# various attributes between current `mol` and the `mol` in
# last calculation.
if dm0.shape[-1] != cell.nao_nr():
#TODO:
#from pyscf.scf import addons
#if numpy.any(last_mol.atom_charges() != mol.atom_charges()):
# dm0 = None
#elif non-relativistic:
# addons.project_dm_nr2nr(last_mol, dm0, last_mol)
#else:
# addons.project_dm_r2r(last_mol, dm0, last_mol)
dm0 = None
self.mo_coeff = None # To avoid last mo_coeff being used by SOSCF
e_tot = self.kernel(dm0=dm0, **kwargs)
return e_tot
return SCF_Scanner(mf)
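# A minimal usage sketch for the scanner API (assumes `cell1` and `cell2` are
# pre-built Cell objects; the names are illustrative only):
#   mf_scan = KRHF(cell1, kpts=cell1.make_kpts([2, 2, 2])).as_scanner()
#   e1 = mf_scan(cell1)
#   e2 = mf_scan(cell2)   # restarts from the previous density matrix when possible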
class KSCF(pbchf.SCF):
'''SCF base class with k-point sampling.
Compared to molecular SCF, some members such as mo_coeff, mo_occ
now have an additional first dimension for the k-points,
e.g. mo_coeff is a (nkpts, nao, nao) ndarray
Attributes:
kpts : (nks,3) ndarray
The sampling k-points in Cartesian coordinates, in units of 1/Bohr.
'''
conv_tol_grad = getattr(__config__, 'pbc_scf_KSCF_conv_tol_grad', None)
direct_scf = getattr(__config__, 'pbc_scf_SCF_direct_scf', False)
def __init__(self, cell, kpts=np.zeros((1,3)),
exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
if not cell._built:
sys.stderr.write('Warning: cell.build() is not called in input\n')
cell.build()
self.cell = cell
mol_hf.SCF.__init__(self, cell)
self.with_df = df.FFTDF(cell)
self.exxdiv = exxdiv
self.kpts = kpts
self.conv_tol = cell.precision * 10
self.exx_built = False
self._keys = self._keys.union(['cell', 'exx_built', 'exxdiv', 'with_df'])
@property
def kpts(self):
if 'kpts' in self.__dict__:
# To handle the attribute kpt loaded from chkfile
self.kpt = self.__dict__.pop('kpts')
return self.with_df.kpts
@kpts.setter
def kpts(self, x):
self.with_df.kpts = np.reshape(x, (-1,3))
@property
def mo_energy_kpts(self):
return self.mo_energy
@property
def mo_coeff_kpts(self):
return self.mo_coeff
@property
def mo_occ_kpts(self):
return self.mo_occ
def dump_flags(self, verbose=None):
mol_hf.SCF.dump_flags(self, verbose)
logger.info(self, '\n')
logger.info(self, '******** PBC SCF flags ********')
logger.info(self, 'N kpts = %d', len(self.kpts))
logger.debug(self, 'kpts = %s', self.kpts)
logger.info(self, 'Exchange divergence treatment (exxdiv) = %s', self.exxdiv)
# "vcut_ws" precomputing is triggered by pbc.tools.pbc.get_coulG
#if self.exxdiv == 'vcut_ws':
# if self.exx_built is False:
# self.precompute_exx()
# logger.info(self, 'WS alpha = %s', self.exx_alpha)
cell = self.cell
if ((cell.dimension >= 2 and cell.low_dim_ft_type != 'inf_vacuum') and
isinstance(self.exxdiv, str) and self.exxdiv.lower() == 'ewald'):
madelung = tools.pbc.madelung(cell, [self.kpts])
logger.info(self, ' madelung (= occupied orbital energy shift) = %s', madelung)
nkpts = len(self.kpts)
# FIXME: consider the fractional num_electron or not? This may
# relate to charged systems.
nelectron = float(self.cell.tot_electrons(nkpts)) / nkpts
logger.info(self, ' Total energy shift due to Ewald probe charge'
' = -1/2 * Nelec*madelung = %.12g',
madelung*nelectron * -.5)
logger.info(self, 'DF object = %s', self.with_df)
if not getattr(self.with_df, 'build', None):
# .dump_flags() is called in pbc.df.build function
self.with_df.dump_flags(verbose)
return self
def check_sanity(self):
mol_hf.SCF.check_sanity(self)
self.with_df.check_sanity()
if (isinstance(self.exxdiv, str) and self.exxdiv.lower() != 'ewald' and
isinstance(self.with_df, df.df.DF)):
logger.warn(self, 'exxdiv %s is not supported in DF or MDF',
self.exxdiv)
return self
def build(self, cell=None):
#if self.exxdiv == 'vcut_ws':
# self.precompute_exx()
if 'kpts' in self.__dict__:
# To handle the attribute kpts loaded from chkfile
self.kpts = self.__dict__.pop('kpts')
if self.verbose >= logger.WARN:
self.check_sanity()
return self
def get_init_guess(self, cell=None, key='minao'):
if cell is None:
cell = self.cell
dm_kpts = None
key = key.lower()
if key == '1e' or key == 'hcore':
dm_kpts = self.init_guess_by_1e(cell)
elif getattr(cell, 'natm', 0) == 0:
logger.info(self, 'No atom found in cell. Use 1e initial guess')
dm_kpts = self.init_guess_by_1e(cell)
elif key == 'atom':
dm = self.init_guess_by_atom(cell)
elif key[:3] == 'chk':
try:
dm_kpts = self.from_chk()
except (IOError, KeyError):
logger.warn(self, 'Fail to read %s. Use MINAO initial guess',
self.chkfile)
dm = self.init_guess_by_minao(cell)
else:
dm = self.init_guess_by_minao(cell)
if dm_kpts is None:
dm_kpts = lib.asarray([dm]*len(self.kpts))
ne = np.einsum('kij,kji->', dm_kpts, self.get_ovlp(cell)).real
# FIXME: consider the fractional num_electron or not? This may
# relate to charged systems.
nkpts = len(self.kpts)
nelectron = float(self.cell.tot_electrons(nkpts))
if abs(ne - nelectron) > 1e-7*nkpts:
logger.debug(self, 'Big error detected in the electron number '
'of initial guess density matrix (Ne/cell = %g)!\n'
' This can cause huge error in Fock matrix and '
'lead to instability in SCF for low-dimensional '
'systems.\n DM is normalized wrt the number '
'of electrons %s', ne/nkpts, nelectron/nkpts)
dm_kpts *= (nelectron / ne).reshape(-1,1,1)
return dm_kpts
def init_guess_by_1e(self, cell=None):
if cell is None: cell = self.cell
if cell.dimension < 3:
logger.warn(self, 'Hcore initial guess is not recommended in '
'the SCF of low-dimensional systems.')
return mol_hf.SCF.init_guess_by_1e(self, cell)
get_hcore = get_hcore
get_ovlp = get_ovlp
get_fock = get_fock
get_occ = get_occ
energy_elec = energy_elec
get_fermi = get_fermi
def get_j(self, cell=None, dm_kpts=None, hermi=1, kpts=None,
kpts_band=None, omega=None):
return self.get_jk(cell, dm_kpts, hermi, kpts, kpts_band,
with_k=False, omega=omega)[0]
def get_k(self, cell=None, dm_kpts=None, hermi=1, kpts=None,
kpts_band=None, omega=None):
return self.get_jk(cell, dm_kpts, hermi, kpts, kpts_band,
with_j=False, omega=omega)[1]
def get_jk(self, cell=None, dm_kpts=None, hermi=1, kpts=None, kpts_band=None,
with_j=True, with_k=True, omega=None, **kwargs):
if cell is None: cell = self.cell
if kpts is None: kpts = self.kpts
if dm_kpts is None: dm_kpts = self.make_rdm1()
cpu0 = (time.clock(), time.time())
vj, vk = self.with_df.get_jk(dm_kpts, hermi, kpts, kpts_band,
with_j, with_k, omega, exxdiv=self.exxdiv)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_veff(self, cell=None, dm_kpts=None, dm_last=0, vhf_last=0, hermi=1,
kpts=None, kpts_band=None):
'''Hartree-Fock potential matrix for the given density matrix.
See :func:`scf.hf.get_veff` and :func:`scf.hf.RHF.get_veff`
'''
vj, vk = self.get_jk(cell, dm_kpts, hermi, kpts, kpts_band)
return vj - vk * .5
def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
if verbose is None: verbose = self.verbose
return analyze(self, verbose, with_meta_lowdin, **kwargs)
def get_grad(self, mo_coeff_kpts, mo_occ_kpts, fock=None):
'''
Returns a 1D array of gradients, like the non-k-point version.
Note that the occupied and virtual indices of different k-points
now occur in sequential patches of the 1D array.
'''
if fock is None:
dm1 = self.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
fock = self.get_hcore(self.cell, self.kpts) + self.get_veff(self.cell, dm1)
return get_grad(mo_coeff_kpts, mo_occ_kpts, fock)
def eig(self, h_kpts, s_kpts):
nkpts = len(h_kpts)
eig_kpts = []
mo_coeff_kpts = []
for k in range(nkpts):
e, c = self._eigh(h_kpts[k], s_kpts[k])
eig_kpts.append(e)
mo_coeff_kpts.append(c)
return eig_kpts, mo_coeff_kpts
def make_rdm1(self, mo_coeff_kpts=None, mo_occ_kpts=None, **kwargs):
if mo_coeff_kpts is None:
# Note: this is actually "self.mo_coeff_kpts"
# which is stored in self.mo_coeff of the scf.hf.RHF superclass
mo_coeff_kpts = self.mo_coeff
if mo_occ_kpts is None:
# Note: this is actually "self.mo_occ_kpts"
# which is stored in self.mo_occ of the scf.hf.RHF superclass
mo_occ_kpts = self.mo_occ
return make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs)
def get_bands(self, kpts_band, cell=None, dm_kpts=None, kpts=None):
'''Get energy bands at the given (arbitrary) 'band' k-points.
Returns:
mo_energy : (nmo,) ndarray or a list of (nmo,) ndarray
Bands energies E_n(k)
mo_coeff : (nao, nmo) ndarray or a list of (nao,nmo) ndarray
Band orbitals psi_n(k)
'''
if cell is None: cell = self.cell
if dm_kpts is None: dm_kpts = self.make_rdm1()
if kpts is None: kpts = self.kpts
kpts_band = np.asarray(kpts_band)
single_kpt_band = (kpts_band.ndim == 1)
kpts_band = kpts_band.reshape(-1,3)
fock = self.get_hcore(cell, kpts_band)
fock = fock + self.get_veff(cell, dm_kpts, kpts=kpts, kpts_band=kpts_band)
s1e = self.get_ovlp(cell, kpts_band)
mo_energy, mo_coeff = self.eig(fock, s1e)
if single_kpt_band:
mo_energy = mo_energy[0]
mo_coeff = mo_coeff[0]
return mo_energy, mo_coeff
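# Band-structure sketch (illustrative; assumes a converged `mf` and an (N, 3)
# array `band_kpts` of k-points in 1/Bohr):
#   e_bands, c_bands = mf.get_bands(band_kpts)
#   # e_bands[i] holds the orbital energies E_n(k_i) at the i-th requested k-point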
def init_guess_by_chkfile(self, chk=None, project=None, kpts=None):
if chk is None: chk = self.chkfile
if kpts is None: kpts = self.kpts
return init_guess_by_chkfile(self.cell, chk, project, kpts)
def from_chk(self, chk=None, project=None, kpts=None):
return self.init_guess_by_chkfile(chk, project, kpts)
def dump_chk(self, envs):
if self.chkfile:
mol_hf.SCF.dump_chk(self, envs)
with h5py.File(self.chkfile, 'a') as fh5:
fh5['scf/kpts'] = self.kpts
return self
def mulliken_meta(self, cell=None, dm=None, verbose=logger.DEBUG,
pre_orth_method=PRE_ORTH_METHOD, s=None):
if cell is None: cell = self.cell
if dm is None: dm = self.make_rdm1()
if s is None: s = self.get_ovlp(cell)
return mulliken_meta(cell, dm, s=s, verbose=verbose,
pre_orth_method=pre_orth_method)
def mulliken_pop(self):
raise NotImplementedError
get_rho = get_rho
@lib.with_doc(dip_moment.__doc__)
def dip_moment(self, cell=None, dm=None, unit='Debye', verbose=logger.NOTE,
**kwargs):
rho = kwargs.pop('rho', None)
if rho is None:
rho = self.get_rho(dm)
if cell is None:
cell = self.cell
return dip_moment(cell, dm, unit, verbose, rho=rho, kpts=self.kpts, **kwargs)
canonicalize = canonicalize
def density_fit(self, auxbasis=None, with_df=None):
from pyscf.pbc.df import df_jk
return df_jk.density_fit(self, auxbasis, with_df=with_df)
def mix_density_fit(self, auxbasis=None, with_df=None):
from pyscf.pbc.df import mdf_jk
return mdf_jk.density_fit(self, auxbasis, with_df=with_df)
def stability(self,
internal=getattr(__config__, 'pbc_scf_KSCF_stability_internal', True),
external=getattr(__config__, 'pbc_scf_KSCF_stability_external', False),
verbose=None):
from pyscf.pbc.scf.stability import rhf_stability
return rhf_stability(self, internal, external, verbose)
def newton(self):
from pyscf.pbc.scf import newton_ah
return newton_ah.newton(self)
def sfx2c1e(self):
from pyscf.pbc.x2c import sfx2c1e
return sfx2c1e.sfx2c1e(self)
x2c = x2c1e = sfx2c1e
def to_rhf(self, mf):
'''Convert the input mean-field object to a KRHF/KROHF/KRKS/KROKS object'''
return addons.convert_to_rhf(mf)
def to_uhf(self, mf):
'''Convert the input mean-field object to a KUHF/KUKS object'''
return addons.convert_to_uhf(mf)
def to_ghf(self, mf):
'''Convert the input mean-field object to a KGHF/KGKS object'''
return addons.convert_to_ghf(mf)
as_scanner = as_scanner
class KRHF(KSCF, pbchf.RHF):
def check_sanity(self):
cell = self.cell
if cell.spin != 0 and len(self.kpts) % 2 != 0:
logger.warn(self, 'Problematic nelec %s and number of k-points %d '
'found in KRHF method.', cell.nelec, len(self.kpts))
return KSCF.check_sanity(self)
def convert_from_(self, mf):
'''Convert given mean-field object to KRHF'''
addons.convert_to_rhf(mf, self)
return self
def nuc_grad_method(self):
from pyscf.pbc.grad import krhf
return krhf.Gradients(self)
del(WITH_META_LOWDIN, PRE_ORTH_METHOD)
if __name__ == '__main__':
from pyscf.pbc import gto
cell = gto.Cell()
cell.atom = '''
He 0 0 1
He 1 0 1
'''
cell.basis = '321g'
cell.a = np.eye(3) * 3
cell.mesh = [11] * 3
cell.verbose = 5
cell.build()
mf = KRHF(cell, [2,1,1])
mf.kernel()
mf.analyze()
| 37.613665 | 94 | 0.611546 |
9473c0d4e0626cef58c23574f68559a6077d3f78 | 9,637 | py | Python | official/nlp/tasks/electra_task.py | mcasanova1445/models | 37be0fdb4abccca633bb3199a4e6f3f71cd174d9 | [
"Apache-2.0"
] | 1 | 2022-02-02T06:29:41.000Z | 2022-02-02T06:29:41.000Z | official/nlp/tasks/electra_task.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 8 | 2020-05-19T00:52:30.000Z | 2020-06-04T23:57:20.000Z | official/nlp/tasks/electra_task.py | mdsaifhaider/models | 7214e17eb425963ec3d0295be215d5d26deaeb32 | [
"Apache-2.0"
] | 2 | 2021-10-07T04:47:04.000Z | 2021-12-18T04:18:19.000Z | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ELECTRA pretraining task (Joint Masked LM and Replaced Token Detection)."""
import dataclasses
import tensorflow as tf
from official.core import base_task
from official.core import config_definitions as cfg
from official.core import task_factory
from official.modeling import tf_utils
from official.nlp.configs import bert
from official.nlp.configs import electra
from official.nlp.configs import encoders
from official.nlp.data import pretrain_dataloader
from official.nlp.modeling import layers
from official.nlp.modeling import models
@dataclasses.dataclass
class ElectraPretrainConfig(cfg.TaskConfig):
"""The model config."""
model: electra.ElectraPretrainerConfig = electra.ElectraPretrainerConfig(
cls_heads=[
bert.ClsHeadConfig(
inner_dim=768,
num_classes=2,
dropout_rate=0.1,
name='next_sentence')
])
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
def _build_pretrainer(
config: electra.ElectraPretrainerConfig) -> models.ElectraPretrainer:
"""Instantiates ElectraPretrainer from the config."""
generator_encoder_cfg = config.generator_encoder
discriminator_encoder_cfg = config.discriminator_encoder
# Copy discriminator's embeddings to generator for easier model serialization.
discriminator_network = encoders.build_encoder(discriminator_encoder_cfg)
if config.tie_embeddings:
embedding_layer = discriminator_network.get_embedding_layer()
generator_network = encoders.build_encoder(
generator_encoder_cfg, embedding_layer=embedding_layer)
else:
generator_network = encoders.build_encoder(generator_encoder_cfg)
generator_encoder_cfg = generator_encoder_cfg.get()
return models.ElectraPretrainer(
generator_network=generator_network,
discriminator_network=discriminator_network,
vocab_size=generator_encoder_cfg.vocab_size,
num_classes=config.num_classes,
sequence_length=config.sequence_length,
num_token_predictions=config.num_masked_tokens,
mlm_activation=tf_utils.get_activation(
generator_encoder_cfg.hidden_activation),
mlm_initializer=tf.keras.initializers.TruncatedNormal(
stddev=generator_encoder_cfg.initializer_range),
classification_heads=[
layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
],
disallow_correct=config.disallow_correct)
@task_factory.register_task_cls(ElectraPretrainConfig)
class ElectraPretrainTask(base_task.Task):
"""ELECTRA Pretrain Task (Masked LM + Replaced Token Detection)."""
def build_model(self):
return _build_pretrainer(self.task_config.model)
def build_losses(self,
labels,
model_outputs,
metrics,
aux_losses=None) -> tf.Tensor:
metrics = dict([(metric.name, metric) for metric in metrics])
# generator lm and (optional) nsp loss.
lm_prediction_losses = tf.keras.losses.sparse_categorical_crossentropy(
labels['masked_lm_ids'],
tf.cast(model_outputs['lm_outputs'], tf.float32),
from_logits=True)
lm_label_weights = labels['masked_lm_weights']
lm_numerator_loss = tf.reduce_sum(lm_prediction_losses * lm_label_weights)
lm_denominator_loss = tf.reduce_sum(lm_label_weights)
mlm_loss = tf.math.divide_no_nan(lm_numerator_loss, lm_denominator_loss)
metrics['lm_example_loss'].update_state(mlm_loss)
if 'next_sentence_labels' in labels:
sentence_labels = labels['next_sentence_labels']
sentence_outputs = tf.cast(
model_outputs['sentence_outputs'], dtype=tf.float32)
sentence_loss = tf.keras.losses.sparse_categorical_crossentropy(
sentence_labels, sentence_outputs, from_logits=True)
metrics['next_sentence_loss'].update_state(sentence_loss)
total_loss = mlm_loss + sentence_loss
else:
total_loss = mlm_loss
# discriminator replaced token detection (rtd) loss.
rtd_logits = model_outputs['disc_logits']
rtd_labels = tf.cast(model_outputs['disc_label'], tf.float32)
input_mask = tf.cast(labels['input_mask'], tf.float32)
rtd_ind_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=rtd_logits, labels=rtd_labels)
rtd_numerator = tf.reduce_sum(input_mask * rtd_ind_loss)
rtd_denominator = tf.reduce_sum(input_mask)
rtd_loss = tf.math.divide_no_nan(rtd_numerator, rtd_denominator)
metrics['discriminator_loss'].update_state(rtd_loss)
total_loss = total_loss + \
self.task_config.model.discriminator_loss_weight * rtd_loss
if aux_losses:
total_loss += tf.add_n(aux_losses)
metrics['total_loss'].update_state(total_loss)
return total_loss
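# Loss recap (a summary of the code above, not additional behavior):
#   total_loss = weighted-mean MLM loss (+ next-sentence loss, if labels exist)
#                + discriminator_loss_weight * weighted-mean RTD loss
# where both weighted means are normalized by their respective mask/weight sums.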
def build_inputs(self, params, input_context=None):
"""Returns tf.data.Dataset for pretraining."""
if params.input_path == 'dummy':
def dummy_data(_):
dummy_ids = tf.zeros((1, params.seq_length), dtype=tf.int32)
dummy_lm = tf.zeros((1, params.max_predictions_per_seq), dtype=tf.int32)
return dict(
input_word_ids=dummy_ids,
input_mask=dummy_ids,
input_type_ids=dummy_ids,
masked_lm_positions=dummy_lm,
masked_lm_ids=dummy_lm,
masked_lm_weights=tf.cast(dummy_lm, dtype=tf.float32),
next_sentence_labels=tf.zeros((1, 1), dtype=tf.int32))
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
return pretrain_dataloader.BertPretrainDataLoader(params).load(
input_context)
def build_metrics(self, training=None):
del training
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='masked_lm_accuracy'),
tf.keras.metrics.Mean(name='lm_example_loss'),
tf.keras.metrics.SparseCategoricalAccuracy(
name='discriminator_accuracy'),
]
if self.task_config.train_data.use_next_sentence_label:
metrics.append(
tf.keras.metrics.SparseCategoricalAccuracy(
name='next_sentence_accuracy'))
metrics.append(tf.keras.metrics.Mean(name='next_sentence_loss'))
metrics.append(tf.keras.metrics.Mean(name='discriminator_loss'))
metrics.append(tf.keras.metrics.Mean(name='total_loss'))
return metrics
def process_metrics(self, metrics, labels, model_outputs):
metrics = dict([(metric.name, metric) for metric in metrics])
if 'masked_lm_accuracy' in metrics:
metrics['masked_lm_accuracy'].update_state(labels['masked_lm_ids'],
model_outputs['lm_outputs'],
labels['masked_lm_weights'])
if 'next_sentence_accuracy' in metrics:
metrics['next_sentence_accuracy'].update_state(
labels['next_sentence_labels'], model_outputs['sentence_outputs'])
if 'discriminator_accuracy' in metrics:
disc_logits_expanded = tf.expand_dims(model_outputs['disc_logits'], -1)
discrim_full_logits = tf.concat(
[-1.0 * disc_logits_expanded, disc_logits_expanded], -1)
metrics['discriminator_accuracy'].update_state(
model_outputs['disc_label'], discrim_full_logits,
labels['input_mask'])
def train_step(self, inputs, model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer, metrics):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
with tf.GradientTape() as tape:
outputs = model(inputs, training=True)
# Computes per-replica loss.
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
optimizer.apply_gradients(list(zip(grads, tvars)))
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
def validation_step(self, inputs, model: tf.keras.Model, metrics):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
outputs = model(inputs, training=False)
loss = self.build_losses(
labels=inputs,
model_outputs=outputs,
metrics=metrics,
aux_losses=model.losses)
self.process_metrics(metrics, inputs, outputs)
return {self.loss: loss}
| 39.658436 | 80 | 0.713604 |
cf13c5a94f76a73897e661bdc37bafaacaa93c20 | 7,860 | py | Python | tools/refactor/parsetab_re.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | 1 | 2015-05-22T14:11:17.000Z | 2015-05-22T14:11:17.000Z | tools/extjs_cc/parsetab_re.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | 2 | 2021-09-02T20:01:35.000Z | 2022-01-26T19:47:35.000Z | tools/refactor/parsetab_re.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | null | null | null |
# parsetab_re.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'BACKSLASH DIVIDE ID_PART LSBRACKET LT RSBRACKET STAR UCHARre_lit : DIVIDE re_body DIVIDE re_flagsre_body : re_first_char re_charsre_chars : re_chars re_char\n |\n re_first_char : re_non_term_restrict1\n | re_backlash_seq\n | re_expr_class\n re_char : re_non_term_restrict2\n | re_backlash_seq\n | re_expr_class\n re_backlash_seq : BACKSLASH re_non_termre_non_term : UCHAR\n | LSBRACKET \n | RSBRACKET\n | STAR\n | DIVIDE\n | BACKSLASH\n | ID_PART\n re_non_term_restrict1 : UCHAR\n | RSBRACKET\n | ID_PART\n re_non_term_restrict2 : UCHAR\n | RSBRACKET\n | STAR\n | ID_PART\n re_non_term_restrict3 : UCHAR\n | LSBRACKET \n | STAR\n | DIVIDE\n | ID_PART\n re_expr_class : LSBRACKET re_class_chars RSBRACKET\n re_class_chars : re_class_chars re_class_char\n |\n re_class_char : re_non_term_restrict3\n | re_backlash_seq\n re_flags : re_flags ID_PART\n |\n '
_lr_action_items = {'DIVIDE':([0,3,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[2,13,-4,-5,-6,-7,-19,-20,-21,21,-33,-2,-17,-11,-12,-13,-14,-15,-16,-18,40,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),'$end':([1,13,24,42,],[0,-37,-1,-36,]),'UCHAR':([2,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[8,-4,-5,-6,-7,-19,-20,-21,17,-33,29,-17,-11,-12,-13,-14,-15,-16,-18,38,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),'RSBRACKET':([2,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[9,-4,-5,-6,-7,-19,-20,-21,19,-33,30,-17,-11,-12,-13,-14,-15,-16,-18,34,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),'ID_PART':([2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,],[10,-4,-5,-6,-7,-19,-20,-21,22,-33,-37,32,-17,-11,-12,-13,-14,-15,-16,-18,41,42,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,-36,]),'BACKSLASH':([2,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[11,-4,-5,-6,-7,-19,-20,-21,15,-33,11,-17,-11,-12,-13,-14,-15,-16,-18,11,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),'LSBRACKET':([2,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[12,-4,-5,-6,-7,-19,-20,-21,18,-33,12,-17,-11,-12,-13,-14,-15,-16,-18,33,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),'STAR':([4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,],[-4,-5,-6,-7,-19,-20,-21,20,-33,31,-17,-11,-12,-13,-14,-15,-16,-18,39,-3,-8,-9,-10,-22,-23,-24,-25,-27,-31,-32,-34,-35,-26,-28,-29,-30,]),}
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'re_lit':([0,],[1,]),'re_body':([2,],[3,]),'re_first_char':([2,],[4,]),'re_non_term_restrict1':([2,],[5,]),'re_backlash_seq':([2,14,23,],[6,27,37,]),'re_expr_class':([2,14,],[7,28,]),'re_chars':([4,],[14,]),'re_non_term':([11,],[16,]),'re_class_chars':([12,],[23,]),'re_flags':([13,],[24,]),'re_char':([14,],[25,]),'re_non_term_restrict2':([14,],[26,]),'re_class_char':([23,],[35,]),'re_non_term_restrict3':([23,],[36,]),}
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> re_lit","S'",1,None,None,None),
('re_lit -> DIVIDE re_body DIVIDE re_flags','re_lit',4,'p_re_lit','js_regexpr_parse.py',51),
('re_body -> re_first_char re_chars','re_body',2,'p_re_body','js_regexpr_parse.py',56),
('re_chars -> re_chars re_char','re_chars',2,'p_re_chars','js_regexpr_parse.py',61),
('re_chars -> <empty>','re_chars',0,'p_re_chars','js_regexpr_parse.py',62),
('re_first_char -> re_non_term_restrict1','re_first_char',1,'p_re_first_char','js_regexpr_parse.py',71),
('re_first_char -> re_backlash_seq','re_first_char',1,'p_re_first_char','js_regexpr_parse.py',72),
('re_first_char -> re_expr_class','re_first_char',1,'p_re_first_char','js_regexpr_parse.py',73),
('re_char -> re_non_term_restrict2','re_char',1,'p_re_char','js_regexpr_parse.py',79),
('re_char -> re_backlash_seq','re_char',1,'p_re_char','js_regexpr_parse.py',80),
('re_char -> re_expr_class','re_char',1,'p_re_char','js_regexpr_parse.py',81),
('re_backlash_seq -> BACKSLASH re_non_term','re_backlash_seq',2,'p_re_backlash_seq','js_regexpr_parse.py',87),
('re_non_term -> UCHAR','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',92),
('re_non_term -> LSBRACKET','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',93),
('re_non_term -> RSBRACKET','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',94),
('re_non_term -> STAR','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',95),
('re_non_term -> DIVIDE','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',96),
('re_non_term -> BACKSLASH','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',97),
('re_non_term -> ID_PART','re_non_term',1,'p_re_non_term','js_regexpr_parse.py',98),
('re_non_term_restrict1 -> UCHAR','re_non_term_restrict1',1,'p_re_non_term_restrict1','js_regexpr_parse.py',103),
('re_non_term_restrict1 -> RSBRACKET','re_non_term_restrict1',1,'p_re_non_term_restrict1','js_regexpr_parse.py',104),
('re_non_term_restrict1 -> ID_PART','re_non_term_restrict1',1,'p_re_non_term_restrict1','js_regexpr_parse.py',105),
('re_non_term_restrict2 -> UCHAR','re_non_term_restrict2',1,'p_re_non_term_restrict2','js_regexpr_parse.py',110),
('re_non_term_restrict2 -> RSBRACKET','re_non_term_restrict2',1,'p_re_non_term_restrict2','js_regexpr_parse.py',111),
('re_non_term_restrict2 -> STAR','re_non_term_restrict2',1,'p_re_non_term_restrict2','js_regexpr_parse.py',112),
('re_non_term_restrict2 -> ID_PART','re_non_term_restrict2',1,'p_re_non_term_restrict2','js_regexpr_parse.py',113),
('re_non_term_restrict3 -> UCHAR','re_non_term_restrict3',1,'p_re_non_term_restrict3','js_regexpr_parse.py',119),
('re_non_term_restrict3 -> LSBRACKET','re_non_term_restrict3',1,'p_re_non_term_restrict3','js_regexpr_parse.py',120),
('re_non_term_restrict3 -> STAR','re_non_term_restrict3',1,'p_re_non_term_restrict3','js_regexpr_parse.py',121),
('re_non_term_restrict3 -> DIVIDE','re_non_term_restrict3',1,'p_re_non_term_restrict3','js_regexpr_parse.py',122),
('re_non_term_restrict3 -> ID_PART','re_non_term_restrict3',1,'p_re_non_term_restrict3','js_regexpr_parse.py',123),
('re_expr_class -> LSBRACKET re_class_chars RSBRACKET','re_expr_class',3,'p_re_expr_class','js_regexpr_parse.py',128),
('re_class_chars -> re_class_chars re_class_char','re_class_chars',2,'p_re_class_chars','js_regexpr_parse.py',134),
('re_class_chars -> <empty>','re_class_chars',0,'p_re_class_chars','js_regexpr_parse.py',135),
('re_class_char -> re_non_term_restrict3','re_class_char',1,'p_re_class_char','js_regexpr_parse.py',144),
('re_class_char -> re_backlash_seq','re_class_char',1,'p_re_class_char','js_regexpr_parse.py',145),
('re_flags -> re_flags ID_PART','re_flags',2,'p_re_flags','js_regexpr_parse.py',151),
('re_flags -> <empty>','re_flags',0,'p_re_flags','js_regexpr_parse.py',152),
]
| 115.588235 | 1,890 | 0.659924 |
75da412ab7219dc94452ea5dc1ceb909643e5635 | 45,171 | py | Python | venv/Lib/site-packages/google/protobuf/descriptor_pool.py | parthpankajtiwary/keras-groundup | 0df0844e7d9dca741fad0965761a12f72ee51f07 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/google/protobuf/descriptor_pool.py | parthpankajtiwary/keras-groundup | 0df0844e7d9dca741fad0965761a12f72ee51f07 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/google/protobuf/descriptor_pool.py | parthpankajtiwary/keras-groundup | 0df0844e7d9dca741fad0965761a12f72ee51f07 | [
"MIT"
] | null | null | null | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides DescriptorPool to use as a container for proto2 descriptors.
The DescriptorPool is used in conjunction with a DescriptorDatabase to maintain
a collection of protocol buffer descriptors for use when dynamically creating
message types at runtime.
For most applications protocol buffers should be used via modules generated by
the protocol buffer compiler tool. This should only be used when the type of
protocol buffers used in an application or library cannot be predetermined.
Below is a straightforward example of how to use this class:
pool = DescriptorPool()
file_descriptor_protos = [ ... ]
for file_descriptor_proto in file_descriptor_protos:
pool.Add(file_descriptor_proto)
my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType')
The message descriptor can be used in conjunction with the message_factory
module in order to create a protocol buffer class that can be encoded and
decoded.
If you want to get a Python class for the specified proto, use the
helper functions inside google.protobuf.message_factory
directly instead of this class.
"""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import collections
import warnings
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import text_encoding
_USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access
def _NormalizeFullyQualifiedName(name):
"""Remove leading period from fully-qualified type name.
Due to b/13860351 in descriptor_database.py, types in the root namespace are
generated with a leading period. This function removes that prefix.
Args:
name: A str, the fully-qualified symbol name.
Returns:
A str, the normalized fully-qualified symbol name.
"""
return name.lstrip('.')
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL)
class DescriptorPool(object):
"""A collection of protobufs dynamically constructed by descriptor protos."""
if _USE_C_DESCRIPTORS:
def __new__(cls, descriptor_db=None):
# pylint: disable=protected-access
return descriptor._message.DescriptorPool(descriptor_db)
def __init__(self, descriptor_db=None):
"""Initializes a Pool of proto buffs.
The descriptor_db argument to the constructor is provided to allow
specialized file descriptor proto lookup code to be triggered on demand. An
example would be an implementation which will read and compile a file
specified in a call to FindFileByName() and not require the call to Add()
at all. Results from this database will be cached internally here as well.
Args:
descriptor_db: A secondary source of file descriptors.
"""
self._internal_db = descriptor_database.DescriptorDatabase()
self._descriptor_db = descriptor_db
self._descriptors = {}
self._enum_descriptors = {}
self._service_descriptors = {}
self._file_descriptors = {}
self._toplevel_extensions = {}
# TODO(jieluo): Remove _file_desc_by_toplevel_extension after
# maybe year 2020 for compatibility issue (with 3.4.1 only).
self._file_desc_by_toplevel_extension = {}
self._top_enum_values = {}
# We store extensions in two two-level mappings: The first key is the
# descriptor of the message being extended, the second key is the extension
# full name or its tag number.
self._extensions_by_name = collections.defaultdict(dict)
self._extensions_by_number = collections.defaultdict(dict)
def _CheckConflictRegister(self, desc, desc_name, file_name):
"""Check if the descriptor name conflicts with another of the same name.
Args:
desc: Descriptor of a message, enum, service, extension or enum value.
desc_name: the full name of desc.
file_name: The file name of descriptor.
"""
for register, descriptor_type in [
(self._descriptors, descriptor.Descriptor),
(self._enum_descriptors, descriptor.EnumDescriptor),
(self._service_descriptors, descriptor.ServiceDescriptor),
(self._toplevel_extensions, descriptor.FieldDescriptor),
(self._top_enum_values, descriptor.EnumValueDescriptor)]:
if desc_name in register:
old_desc = register[desc_name]
if isinstance(old_desc, descriptor.EnumValueDescriptor):
old_file = old_desc.type.file.name
else:
old_file = old_desc.file.name
if not isinstance(desc, descriptor_type) or (
old_file != file_name):
error_msg = ('Conflict register for file "' + file_name +
'": ' + desc_name +
' is already defined in file "' +
old_file + '". Please fix the conflict by adding '
'package name on the proto file, or use different '
'name for the duplication.')
if isinstance(desc, descriptor.EnumValueDescriptor):
error_msg += ('\nNote: enum values appear as '
'siblings of the enum type instead of '
'children of it.')
raise TypeError(error_msg)
return
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this pool.
Args:
file_desc_proto: The FileDescriptorProto to add.
"""
self._internal_db.Add(file_desc_proto)
def AddSerializedFile(self, serialized_file_desc_proto):
"""Adds the FileDescriptorProto and its types to this pool.
Args:
serialized_file_desc_proto: A bytes string, serialization of the
FileDescriptorProto to add.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
serialized_file_desc_proto)
self.Add(file_desc_proto)
def AddDescriptor(self, desc):
"""Adds a Descriptor to the pool, non-recursively.
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor.
"""
if not isinstance(desc, descriptor.Descriptor):
raise TypeError('Expected instance of descriptor.Descriptor.')
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
def AddEnumDescriptor(self, enum_desc):
"""Adds an EnumDescriptor to the pool.
This method also registers the FileDescriptor associated with the enum.
Args:
enum_desc: An EnumDescriptor.
"""
if not isinstance(enum_desc, descriptor.EnumDescriptor):
raise TypeError('Expected instance of descriptor.EnumDescriptor.')
file_name = enum_desc.file.name
self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name)
self._enum_descriptors[enum_desc.full_name] = enum_desc
# Top enum values need to be indexed.
# Count the number of dots to see whether the enum is toplevel or nested
# in a message. We cannot use enum_desc.containing_type at this stage.
if enum_desc.file.package:
top_level = (enum_desc.full_name.count('.')
- enum_desc.file.package.count('.') == 1)
else:
top_level = enum_desc.full_name.count('.') == 0
if top_level:
file_name = enum_desc.file.name
package = enum_desc.file.package
for enum_value in enum_desc.values:
full_name = _NormalizeFullyQualifiedName(
'.'.join((package, enum_value.name)))
self._CheckConflictRegister(enum_value, full_name, file_name)
self._top_enum_values[full_name] = enum_value
self._AddFileDescriptor(enum_desc.file)
def AddServiceDescriptor(self, service_desc):
"""Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
"""
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._CheckConflictRegister(service_desc, service_desc.full_name,
service_desc.file.name)
self._service_descriptors[service_desc.full_name] = service_desc
def AddExtensionDescriptor(self, extension):
"""Adds a FieldDescriptor describing an extension to the pool.
Args:
extension: A FieldDescriptor.
Raises:
AssertionError: when another extension with the same number extends the
same message.
TypeError: when the specified extension is not a
descriptor.FieldDescriptor.
"""
if not (isinstance(extension, descriptor.FieldDescriptor) and
extension.is_extension):
raise TypeError('Expected an extension descriptor.')
if extension.extension_scope is None:
self._toplevel_extensions[extension.full_name] = extension
try:
existing_desc = self._extensions_by_number[
extension.containing_type][extension.number]
except KeyError:
pass
else:
if extension is not existing_desc:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" '
'with field number %d.' %
(extension.full_name, existing_desc.full_name,
extension.containing_type.full_name, extension.number))
self._extensions_by_number[extension.containing_type][
extension.number] = extension
self._extensions_by_name[extension.containing_type][
extension.full_name] = extension
# Also register MessageSet extensions with the type name.
if _IsMessageSetExtension(extension):
self._extensions_by_name[extension.containing_type][
extension.message_type.full_name] = extension
def AddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
self._AddFileDescriptor(file_desc)
# TODO(jieluo): This is a temporary solution for FieldDescriptor.file.
# FieldDescriptor.file is added in code gen. Remove this solution after
# maybe 2020 for compatibility reason (with 3.4.1 only).
for extension in file_desc.extensions_by_name.values():
self._file_desc_by_toplevel_extension[
extension.full_name] = file_desc
def _AddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
if not isinstance(file_desc, descriptor.FileDescriptor):
raise TypeError('Expected instance of descriptor.FileDescriptor.')
self._file_descriptors[file_desc.name] = file_desc
def FindFileByName(self, file_name):
"""Gets a FileDescriptor by file name.
Args:
file_name: The path to the file to get a descriptor for.
Returns:
A FileDescriptor for the named file.
Raises:
KeyError: if the file cannot be found in the pool.
"""
try:
return self._file_descriptors[file_name]
except KeyError:
pass
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def FindFileContainingSymbol(self, symbol):
"""Gets the FileDescriptor for the file containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the pool.
"""
symbol = _NormalizeFullyQualifiedName(symbol)
try:
return self._InternalFindFileContainingSymbol(symbol)
except KeyError:
pass
try:
# Try fallback database. Build and find again if possible.
self._FindFileContainingSymbolInDb(symbol)
return self._InternalFindFileContainingSymbol(symbol)
except KeyError:
raise KeyError('Cannot find a file containing %s' % symbol)
def _InternalFindFileContainingSymbol(self, symbol):
"""Gets the already built FileDescriptor containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the pool.
"""
try:
return self._descriptors[symbol].file
except KeyError:
pass
try:
return self._enum_descriptors[symbol].file
except KeyError:
pass
try:
return self._service_descriptors[symbol].file
except KeyError:
pass
try:
return self._top_enum_values[symbol].type.file
except KeyError:
pass
try:
return self._file_desc_by_toplevel_extension[symbol]
except KeyError:
pass
# Try fields, enum values and nested extensions inside a message.
top_name, _, sub_name = symbol.rpartition('.')
try:
message = self.FindMessageTypeByName(top_name)
assert (sub_name in message.extensions_by_name or
sub_name in message.fields_by_name or
sub_name in message.enum_values_by_name)
return message.file
except (KeyError, AssertionError):
raise KeyError('Cannot find a file containing %s' % symbol)
def FindMessageTypeByName(self, full_name):
"""Loads the named descriptor from the pool.
Args:
full_name: The full name of the descriptor to load.
Returns:
The descriptor for the named type.
Raises:
KeyError: if the message cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._descriptors[full_name]
def FindEnumTypeByName(self, full_name):
"""Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type.
Raises:
KeyError: if the enum cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._enum_descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._enum_descriptors[full_name]
def FindFieldByName(self, full_name):
"""Loads the named field descriptor from the pool.
Args:
full_name: The full name of the field descriptor to load.
Returns:
The field descriptor for the named field.
Raises:
KeyError: if the field cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, field_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.fields_by_name[field_name]
def FindOneofByName(self, full_name):
"""Loads the named oneof descriptor from the pool.
Args:
full_name: The full name of the oneof descriptor to load.
Returns:
The oneof descriptor for the named oneof.
Raises:
KeyError: if the oneof cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, oneof_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.oneofs_by_name[oneof_name]
def FindExtensionByName(self, full_name):
"""Loads the named extension descriptor from the pool.
Args:
full_name: The full name of the extension descriptor to load.
Returns:
A FieldDescriptor, describing the named extension.
Raises:
KeyError: if the extension cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
try:
# The proto compiler does not give any link between the FileDescriptor
# and top-level extensions unless the FileDescriptorProto is added to
# the DescriptorDatabase, but this can impact memory usage.
# So we registered these extensions by name explicitly.
return self._toplevel_extensions[full_name]
except KeyError:
pass
message_name, _, extension_name = full_name.rpartition('.')
try:
# Most extensions are nested inside a message.
scope = self.FindMessageTypeByName(message_name)
except KeyError:
# Some extensions are defined at file scope.
scope = self._FindFileContainingSymbolInDb(full_name)
return scope.extensions_by_name[extension_name]
def FindExtensionByNumber(self, message_descriptor, number):
"""Gets the extension of the specified message with the specified number.
Extensions have to be registered to this pool by calling
AddExtensionDescriptor.
Args:
message_descriptor: descriptor of the extended message.
number: integer, number of the extension field.
Returns:
A FieldDescriptor describing the extension.
Raises:
KeyError: when no extension with the given number is known for the
specified message.
"""
try:
return self._extensions_by_number[message_descriptor][number]
except KeyError:
self._TryLoadExtensionFromDB(message_descriptor, number)
return self._extensions_by_number[message_descriptor][number]
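# Lookup sketch (assumes `ext_field` was previously registered through
# AddExtensionDescriptor on this pool):
#   ext = pool.FindExtensionByNumber(ext_field.containing_type, ext_field.number)
#   assert ext is ext_field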
def FindAllExtensions(self, message_descriptor):
"""Gets all the known extension of a given message.
Extensions have to be registered to this pool by calling
AddExtensionDescriptor.
Args:
message_descriptor: descriptor of the extended message.
Returns:
A list of FieldDescriptor describing the extensions.
"""
# Fallback to descriptor db if FindAllExtensionNumbers is provided.
if self._descriptor_db and hasattr(
self._descriptor_db, 'FindAllExtensionNumbers'):
full_name = message_descriptor.full_name
all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name)
for number in all_numbers:
if number in self._extensions_by_number[message_descriptor]:
continue
self._TryLoadExtensionFromDB(message_descriptor, number)
return list(self._extensions_by_number[message_descriptor].values())
def _TryLoadExtensionFromDB(self, message_descriptor, number):
"""Try to Load extensions from decriptor db.
Args:
message_descriptor: descriptor of the extended message.
number: the extension number that needs to be loaded.
"""
if not self._descriptor_db:
return
# Only supported when FindFileContainingExtension is provided.
if not hasattr(
self._descriptor_db, 'FindFileContainingExtension'):
return
full_name = message_descriptor.full_name
file_proto = self._descriptor_db.FindFileContainingExtension(
full_name, number)
if file_proto is None:
return
try:
file_desc = self._ConvertFileProtoToFileDescriptor(file_proto)
for extension in file_desc.extensions_by_name.values():
self._extensions_by_number[extension.containing_type][
extension.number] = extension
self._extensions_by_name[extension.containing_type][
extension.full_name] = extension
for message_type in file_desc.message_types_by_name.values():
for extension in message_type.extensions:
self._extensions_by_number[extension.containing_type][
extension.number] = extension
self._extensions_by_name[extension.containing_type][
extension.full_name] = extension
except:
warn_msg = ('Unable to load proto file %s for extension number %d.' %
(file_proto.name, number))
warnings.warn(warn_msg, RuntimeWarning)
def FindServiceByName(self, full_name):
"""Loads the named service descriptor from the pool.
Args:
full_name: The full name of the service descriptor to load.
Returns:
The service descriptor for the named service.
Raises:
KeyError: if the service cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._service_descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._service_descriptors[full_name]
def FindMethodByName(self, full_name):
"""Loads the named service method descriptor from the pool.
Args:
full_name: The full name of the method descriptor to load.
Returns:
The method descriptor for the service method.
Raises:
KeyError: if the method cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
service_name, _, method_name = full_name.rpartition('.')
service_descriptor = self.FindServiceByName(service_name)
return service_descriptor.methods_by_name[method_name]
def _FindFileContainingSymbolInDb(self, symbol):
"""Finds the file in descriptor DB containing the specified symbol.
Args:
symbol: The name of the symbol to search for.
Returns:
A FileDescriptor that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the descriptor database.
"""
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file containing %s' % symbol)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def _ConvertFileProtoToFileDescriptor(self, file_proto):
"""Creates a FileDescriptor from a proto or returns a cached copy.
This method also has the side effect of loading all the symbols found in
the file into the appropriate dictionaries in the pool.
Args:
file_proto: The proto to convert.
Returns:
A FileDescriptor matching the passed in proto.
"""
if file_proto.name not in self._file_descriptors:
built_deps = list(self._GetDeps(file_proto.dependency))
direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]
public_deps = [direct_deps[i] for i in file_proto.public_dependency]
file_descriptor = descriptor.FileDescriptor(
pool=self,
name=file_proto.name,
package=file_proto.package,
syntax=file_proto.syntax,
options=_OptionsOrNone(file_proto),
serialized_pb=file_proto.SerializeToString(),
dependencies=direct_deps,
public_dependencies=public_deps)
scope = {}
# This loop extracts all the message and enum types from all the
# dependencies of the file_proto. This is necessary to create the
# scope of available message types when defining the passed in
# file proto.
for dependency in built_deps:
scope.update(self._ExtractSymbols(
dependency.message_types_by_name.values()))
scope.update((_PrefixWithDot(enum.full_name), enum)
for enum in dependency.enum_types_by_name.values())
for message_type in file_proto.message_type:
message_desc = self._ConvertMessageDescriptor(
message_type, file_proto.package, file_descriptor, scope,
file_proto.syntax)
file_descriptor.message_types_by_name[message_desc.name] = (
message_desc)
for enum_type in file_proto.enum_type:
file_descriptor.enum_types_by_name[enum_type.name] = (
self._ConvertEnumDescriptor(enum_type, file_proto.package,
file_descriptor, None, scope, True))
for index, extension_proto in enumerate(file_proto.extension):
extension_desc = self._MakeFieldDescriptor(
extension_proto, file_proto.package, index, file_descriptor,
is_extension=True)
extension_desc.containing_type = self._GetTypeFromScope(
file_descriptor.package, extension_proto.extendee, scope)
self._SetFieldType(extension_proto, extension_desc,
file_descriptor.package, scope)
file_descriptor.extensions_by_name[extension_desc.name] = (
extension_desc)
self._file_desc_by_toplevel_extension[extension_desc.full_name] = (
file_descriptor)
for desc_proto in file_proto.message_type:
self._SetAllFieldTypes(file_proto.package, desc_proto, scope)
if file_proto.package:
desc_proto_prefix = _PrefixWithDot(file_proto.package)
else:
desc_proto_prefix = ''
for desc_proto in file_proto.message_type:
desc = self._GetTypeFromScope(
desc_proto_prefix, desc_proto.name, scope)
file_descriptor.message_types_by_name[desc_proto.name] = desc
for index, service_proto in enumerate(file_proto.service):
file_descriptor.services_by_name[service_proto.name] = (
self._MakeServiceDescriptor(service_proto, index, scope,
file_proto.package, file_descriptor))
self.Add(file_proto)
self._file_descriptors[file_proto.name] = file_descriptor
return self._file_descriptors[file_proto.name]
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
scope=None, syntax=None):
"""Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
syntax: string indicating syntax of the file ("proto2" or "proto3")
Returns:
The added descriptor.
"""
if package:
desc_name = '.'.join((package, desc_proto.name))
else:
desc_name = desc_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
if scope is None:
scope = {}
nested = [
self._ConvertMessageDescriptor(
nested, desc_name, file_desc, scope, syntax)
for nested in desc_proto.nested_type]
enums = [
self._ConvertEnumDescriptor(enum, desc_name, file_desc, None,
scope, False)
for enum in desc_proto.enum_type]
fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc)
for index, field in enumerate(desc_proto.field)]
extensions = [
self._MakeFieldDescriptor(extension, desc_name, index, file_desc,
is_extension=True)
for index, extension in enumerate(desc_proto.extension)]
oneofs = [
descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),
index, None, [], desc.options)
for index, desc in enumerate(desc_proto.oneof_decl)]
extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
if extension_ranges:
is_extendable = True
else:
is_extendable = False
desc = descriptor.Descriptor(
name=desc_proto.name,
full_name=desc_name,
filename=file_name,
containing_type=None,
fields=fields,
oneofs=oneofs,
nested_types=nested,
enum_types=enums,
extensions=extensions,
options=_OptionsOrNone(desc_proto),
is_extendable=is_extendable,
extension_ranges=extension_ranges,
file=file_desc,
serialized_start=None,
serialized_end=None,
syntax=syntax)
for nested in desc.nested_types:
nested.containing_type = desc
for enum in desc.enum_types:
enum.containing_type = desc
for field_index, field_desc in enumerate(desc_proto.field):
if field_desc.HasField('oneof_index'):
oneof_index = field_desc.oneof_index
oneofs[oneof_index].fields.append(fields[field_index])
fields[field_index].containing_oneof = oneofs[oneof_index]
scope[_PrefixWithDot(desc_name)] = desc
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._descriptors[desc_name] = desc
return desc
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
containing_type=None, scope=None, top_level=False):
"""Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
Args:
enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the enum descriptor.
containing_type: The type containing this enum.
scope: Scope containing available types.
top_level: If True, the enum is a top level symbol. If False, the enum
is defined inside a message.
Returns:
The added descriptor
"""
if package:
enum_name = '.'.join((package, enum_proto.name))
else:
enum_name = enum_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
values = [self._MakeEnumValueDescriptor(value, index)
for index, value in enumerate(enum_proto.value)]
desc = descriptor.EnumDescriptor(name=enum_proto.name,
full_name=enum_name,
filename=file_name,
file=file_desc,
values=values,
containing_type=containing_type,
options=_OptionsOrNone(enum_proto))
scope['.%s' % enum_name] = desc
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._enum_descriptors[enum_name] = desc
# Add top level enum values.
if top_level:
for value in values:
full_name = _NormalizeFullyQualifiedName(
'.'.join((package, value.name)))
self._CheckConflictRegister(value, full_name, file_name)
self._top_enum_values[full_name] = value
return desc
def _MakeFieldDescriptor(self, field_proto, message_name, index,
file_desc, is_extension=False):
"""Creates a field descriptor from a FieldDescriptorProto.
For message and enum type fields, this method will do a look up
in the pool for the appropriate descriptor for that type. If it
is unavailable, it will fall back to the _source function to
create it. If this type is still unavailable, construction will
fail.
Args:
field_proto: The proto describing the field.
message_name: The name of the containing message.
index: Index of the field
file_desc: The file containing the field descriptor.
is_extension: Indication that this field is for an extension.
Returns:
An initialized FieldDescriptor object
"""
if message_name:
full_name = '.'.join((message_name, field_proto.name))
else:
full_name = field_proto.name
return descriptor.FieldDescriptor(
name=field_proto.name,
full_name=full_name,
index=index,
number=field_proto.number,
type=field_proto.type,
cpp_type=None,
message_type=None,
enum_type=None,
containing_type=None,
label=field_proto.label,
has_default_value=False,
default_value=None,
is_extension=is_extension,
extension_scope=None,
options=_OptionsOrNone(field_proto),
file=file_desc)
def _SetAllFieldTypes(self, package, desc_proto, scope):
"""Sets all the descriptor's fields's types.
This method also sets the containing types on any extensions.
Args:
package: The current package of desc_proto.
desc_proto: The message descriptor to update.
scope: Enclosing scope of available types.
"""
package = _PrefixWithDot(package)
main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)
if package == '.':
nested_package = _PrefixWithDot(desc_proto.name)
else:
nested_package = '.'.join([package, desc_proto.name])
for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):
self._SetFieldType(field_proto, field_desc, nested_package, scope)
for extension_proto, extension_desc in (
zip(desc_proto.extension, main_desc.extensions)):
extension_desc.containing_type = self._GetTypeFromScope(
nested_package, extension_proto.extendee, scope)
self._SetFieldType(extension_proto, extension_desc, nested_package, scope)
for nested_type in desc_proto.nested_type:
self._SetAllFieldTypes(nested_package, nested_type, scope)
def _SetFieldType(self, field_proto, field_desc, package, scope):
"""Sets the field's type, cpp_type, message_type and enum_type.
Args:
field_proto: Data about the field in proto format.
      field_desc: The descriptor to modify.
package: The package the field's container is in.
scope: Enclosing scope of available types.
"""
if field_proto.type_name:
desc = self._GetTypeFromScope(package, field_proto.type_name, scope)
else:
desc = None
if not field_proto.HasField('type'):
if isinstance(desc, descriptor.Descriptor):
field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE
else:
field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM
field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(
field_proto.type)
if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE
or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):
field_desc.message_type = desc
if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.enum_type = desc
if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:
field_desc.has_default_value = False
field_desc.default_value = []
elif field_proto.HasField('default_value'):
field_desc.has_default_value = True
if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
field_desc.default_value = float(field_proto.default_value)
elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
field_desc.default_value = field_proto.default_value
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
field_desc.default_value = field_proto.default_value.lower() == 'true'
elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.default_value = field_desc.enum_type.values_by_name[
field_proto.default_value].number
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
field_desc.default_value = text_encoding.CUnescape(
field_proto.default_value)
elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
field_desc.default_value = None
else:
# All other types are of the "int" type.
field_desc.default_value = int(field_proto.default_value)
else:
field_desc.has_default_value = False
if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
field_desc.default_value = 0.0
elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
field_desc.default_value = u''
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
field_desc.default_value = False
elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.default_value = field_desc.enum_type.values[0].number
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
field_desc.default_value = b''
elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
field_desc.default_value = None
else:
# All other types are of the "int" type.
field_desc.default_value = 0
field_desc.type = field_proto.type
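  # For illustration (these values follow the branches above, not new
  # behavior): a repeated field ends up with has_default_value=False and an
  # empty list, an optional float/double with no explicit default gets 0.0,
  # a string gets u'', bytes gets b'', a bool gets False, and an enum gets the
  # number of its first declared value.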
def _MakeEnumValueDescriptor(self, value_proto, index):
"""Creates a enum value descriptor object from a enum value proto.
Args:
value_proto: The proto describing the enum value.
index: The index of the enum value.
Returns:
An initialized EnumValueDescriptor object.
"""
return descriptor.EnumValueDescriptor(
name=value_proto.name,
index=index,
number=value_proto.number,
options=_OptionsOrNone(value_proto),
type=None)
def _MakeServiceDescriptor(self, service_proto, service_index, scope,
package, file_desc):
"""Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.
Args:
service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.
service_index: The index of the service in the File.
scope: Dict mapping short and full symbols to message and enum types.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the service descriptor.
Returns:
The added descriptor.
"""
if package:
service_name = '.'.join((package, service_proto.name))
else:
service_name = service_proto.name
methods = [self._MakeMethodDescriptor(method_proto, service_name, package,
scope, index)
for index, method_proto in enumerate(service_proto.method)]
desc = descriptor.ServiceDescriptor(name=service_proto.name,
full_name=service_name,
index=service_index,
methods=methods,
options=_OptionsOrNone(service_proto),
file=file_desc)
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._service_descriptors[service_name] = desc
return desc
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope,
index):
"""Creates a method descriptor from a MethodDescriptorProto.
Args:
method_proto: The proto describing the method.
service_name: The name of the containing service.
package: Optional package name to look up for types.
scope: Scope containing available types.
index: Index of the method in the service.
Returns:
An initialized MethodDescriptor object.
"""
full_name = '.'.join((service_name, method_proto.name))
input_type = self._GetTypeFromScope(
package, method_proto.input_type, scope)
output_type = self._GetTypeFromScope(
package, method_proto.output_type, scope)
return descriptor.MethodDescriptor(name=method_proto.name,
full_name=full_name,
index=index,
containing_service=None,
input_type=input_type,
output_type=output_type,
options=_OptionsOrNone(method_proto))
def _ExtractSymbols(self, descriptors):
"""Pulls out all the symbols from descriptor protos.
Args:
descriptors: The messages to extract descriptors from.
Yields:
A two element tuple of the type name and descriptor object.
"""
for desc in descriptors:
yield (_PrefixWithDot(desc.full_name), desc)
for symbol in self._ExtractSymbols(desc.nested_types):
yield symbol
for enum in desc.enum_types:
yield (_PrefixWithDot(enum.full_name), enum)
def _GetDeps(self, dependencies):
"""Recursively finds dependencies for file protos.
Args:
dependencies: The names of the files being depended on.
Yields:
Each direct and indirect dependency.
"""
for dependency in dependencies:
dep_desc = self.FindFileByName(dependency)
yield dep_desc
for parent_dep in dep_desc.dependencies:
yield parent_dep
def _GetTypeFromScope(self, package, type_name, scope):
"""Finds a given type name in the current scope.
Args:
package: The package the proto should be located in.
type_name: The name of the type to be found in the scope.
scope: Dict mapping short and full symbols to message and enum types.
Returns:
The descriptor for the requested type.
"""
if type_name not in scope:
components = _PrefixWithDot(package).split('.')
while components:
possible_match = '.'.join(components + [type_name])
if possible_match in scope:
type_name = possible_match
break
else:
components.pop(-1)
return scope[type_name]
def _PrefixWithDot(name):
return name if name.startswith('.') else '.%s' % name
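# A minimal, self-contained sketch (not used by the pool itself; the dict
# below is a hypothetical stand-in for a real scope of descriptors) of the
# relative-name resolution performed by _GetTypeFromScope: package components
# are dropped from the right until the joined name is found in the scope.
def _example_scope_lookup():
  scope = {'.pkg.Other': 'descriptor for pkg.Other',
           '.pkg.sub.Msg': 'descriptor for pkg.sub.Msg'}
  package, type_name = '.pkg.sub', 'Other'
  components = package.split('.')
  while components:
    candidate = '.'.join(components + [type_name])
    if candidate in scope:
      # '.pkg.sub.Other' misses first, then '.pkg.Other' matches.
      return scope[candidate]
    components.pop(-1)
  raise KeyError(type_name)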
if _USE_C_DESCRIPTORS:
# TODO(amauryfa): This pool could be constructed from Python code, when we
# support a flag like 'use_cpp_generated_pool=True'.
# pylint: disable=protected-access
_DEFAULT = descriptor._message.default_pool
else:
_DEFAULT = DescriptorPool()
def Default():
return _DEFAULT
| 37.486307 | 87 | 0.677625 |
2bd55bab11baf95abbb8379c7b0ffa7c65ce302f | 7,601 | py | Python | server/glusterfsd.py | MatteoManzoni/kadalu | b875972b9b3309930b1cdd805d0b9f966b6daf16 | ["Apache-2.0"] | 452 | 2019-03-12T06:40:12.000Z | 2022-03-27T06:36:09.000Z | server/glusterfsd.py | MatteoManzoni/kadalu | b875972b9b3309930b1cdd805d0b9f966b6daf16 | ["Apache-2.0"] | 569 | 2019-03-10T06:02:15.000Z | 2022-03-31T20:43:36.000Z | server/glusterfsd.py | MatteoManzoni/kadalu | b875972b9b3309930b1cdd805d0b9f966b6daf16 | ["Apache-2.0"] | 102 | 2019-03-11T05:05:28.000Z | 2022-02-27T23:00:14.000Z |
"""
Starts Gluster Brick(fsd) process
"""
import logging
import os
import sys
import uuid
import xattr
from jinja2 import Template
from kadalulib import (CommandException, Proc, execute, logf,
send_analytics_tracker)
# noqa # pylint: disable=I1101
VOLUME_ID_XATTR_NAME = "trusted.glusterfs.volume-id"
VOLFILES_DIR = "/kadalu/volfiles"
TEMPLATES_DIR = "/kadalu/templates"
VOLINFO_DIR = "/var/lib/gluster"
MKFS_XFS_CMD = "/sbin/mkfs.xfs"
def create_brickdir(brick_path):
"""Create Brick directory and other directories required"""
os.makedirs(os.path.join(brick_path, ".glusterfs"),
mode=0o755,
exist_ok=True)
def verify_brickdir_xattr_support(brick_path):
"""Verify Brick dir supports xattrs"""
test_xattr_name = "user.testattr"
test_xattr_value = b"testvalue"
try:
xattr.set(brick_path, test_xattr_name, test_xattr_value)
val = xattr.get(brick_path, test_xattr_name)
if val != test_xattr_value:
logging.error(logf("Xattr value mismatch.",
actual=val,
expected=test_xattr_value))
sys.exit(1)
except OSError as err:
logging.error(logf("Extended attributes are not "
"supported",
error=err))
sys.exit(1)
def set_volume_id_xattr(brick_path, volume_id):
"""Set Volume ID xattr"""
volume_id_bytes = uuid.UUID(volume_id).bytes
try:
xattr.set(brick_path, VOLUME_ID_XATTR_NAME,
volume_id_bytes, xattr.XATTR_CREATE)
except FileExistsError:
pass
except OSError as err:
logging.error(logf("Unable to set volume-id on "
"brick root",
error=err))
sys.exit(1)
def generate_brick_volfile(volfile_path, volname, volume_id, brick_path):
"""
    Generate the volfile based on the volinfo stored in the config map.
    For now, the generated volfile is used in the configmap.
"""
content = ""
template_file = os.path.join(TEMPLATES_DIR, "brick.vol.j2")
with open(template_file) as tmpl_file:
content = tmpl_file.read()
data = {}
# Brick volfile needs only these 3 parameters
data["volname"] = volname
data["volume_id"] = volume_id
data["brick_path"] = brick_path
tmpl = Template(content)
tmpl.stream(**data).dump(volfile_path)
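# A minimal sketch (illustrative only, not used by the server) of the kind of
# substitution generate_brick_volfile performs: Jinja2 renders volname,
# volume_id and brick_path into text. The inline template string and the
# values below are hypothetical; the real template is brick.vol.j2 under
# TEMPLATES_DIR.
def _example_render_volfile_text():
    from jinja2 import Template
    tmpl = Template("volume {{ volname }}-posix\n"
                    "    option volume-id {{ volume_id }}\n"
                    "    option directory {{ brick_path }}\n"
                    "end-volume\n")
    return tmpl.render(volname="storage-pool-1",
                       volume_id="5a301d4b-0000-0000-0000-000000000000",
                       brick_path="/bricks/storage-pool-1/data/brick")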
def create_and_mount_brick(brick_device, brick_path, brickfs):
"""
Create brick filesystem and mount the brick. Currently
only xfs is supported
"""
    # If the brick device path does not start with /dev, use the
    # /brickdev prefix. The brick device directory passed by the user
    # is mounted as /brickdev to avoid mixing with any other
    # dirs inside the container.
if not brick_device.startswith("/dev/"):
brick_device = "/brickdev/" + os.path.basename(brick_device)
mountdir = os.path.dirname(brick_path)
os.makedirs(mountdir,
mode=0o755,
exist_ok=True)
try:
execute("mount", brick_device, mountdir)
logging.info(logf(
"Successfully mounted device on path",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
except CommandException as err:
logging.info(logf(
"Failed to mount device, continuing with mkfs",
err=err,
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
if 'wrong fs type' in err.err:
# This error pops up when we do mount on an empty device or wrong fs
# Try doing a mkfs and try mount
try:
execute(MKFS_XFS_CMD, brick_device)
logging.info(logf(
"Successfully created xfs file system on device",
fstype=brickfs,
device=brick_device,
))
except CommandException as err:
if "appears to contain an existing filesystem" not in err.err:
logging.error(logf(
"Failed to create file system",
fstype=brickfs,
device=brick_device,
error=err,
))
sys.exit(1)
else:
logging.info(logf(
"Failed to perform mkfs on device. continuing with mount",
err=err,
device=brick_device,
mountdir=mountdir,
))
try:
execute("mount", brick_device, mountdir)
logging.info(logf(
"Successfully mounted device on path",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
))
except CommandException as err:
logging.error(logf(
"Failed to mount export brick (after mkfs)",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
error=err,
))
sys.exit(1)
elif 'already mounted' not in err.err:
logging.error(logf(
"Failed to mount export brick",
fstype=brickfs,
device=brick_device,
mountdir=mountdir,
error=err,
))
sys.exit(1)
else:
pass
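# To summarize the error handling above: mount is attempted first; a
# "wrong fs type" error triggers mkfs.xfs (unless an existing filesystem is
# reported) followed by a second mount attempt; an "already mounted" error is
# ignored; any other mount failure is fatal.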
def start_args():
"""
    Prepare everything required to start the brick and return the Proc
    object used to start the brick process.
"""
brick_device = os.environ.get("BRICK_DEVICE", None)
brick_path = os.environ["BRICK_PATH"]
if brick_device is not None and brick_device != "":
brickfs = os.environ.get("BRICK_FS", "xfs")
create_and_mount_brick(brick_device, brick_path, brickfs)
volume_id = os.environ["VOLUME_ID"]
brick_path_name = brick_path.strip("/").replace("/", "-")
volname = os.environ["VOLUME"]
nodename = os.environ["HOSTNAME"]
create_brickdir(brick_path)
verify_brickdir_xattr_support(brick_path)
set_volume_id_xattr(brick_path, volume_id)
volfile_id = "%s.%s.%s" % (volname, nodename, brick_path_name)
volfile_path = os.path.join(VOLFILES_DIR, "%s.vol" % volfile_id)
generate_brick_volfile(volfile_path, volname, volume_id, brick_path)
# UID is stored at the time of installation in configmap.
uid = None
with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
uid = uid_file.read()
# Send Analytics Tracker
# The information from this analytics is available for
# developers to understand and build project in a better way
send_analytics_tracker("server", uid)
return Proc(
"glusterfsd",
"/opt/sbin/glusterfsd",
[
"-N",
"--volfile-id", volfile_id,
"-p", "/var/run/gluster/glusterfsd-%s.pid" % brick_path_name,
"-S", "/var/run/gluster/brick.socket",
"--brick-name", brick_path,
"-l", "-", # Log to stderr
"--xlator-option",
"*-posix.glusterd-uuid=%s" % os.environ["NODEID"],
"--process-name", "brick",
"--brick-port", "24007",
"--xlator-option",
"%s-server.listen-port=24007" % volname,
"-f", volfile_path
]
)
| 32.904762 | 82 | 0.566636 |
23052ffbae80e2203c3b6659490059ca5d6cd184 | 1,218 | py | Python | examples/injectable_mocking_for_tests/injectable_mocking_example.py | mt3o/injectable | 0ffc5c758b63d9391134cd822158e1846999b404 | ["MIT"] | null | null | null | examples/injectable_mocking_for_tests/injectable_mocking_example.py | mt3o/injectable | 0ffc5c758b63d9391134cd822158e1846999b404 | ["MIT"] | null | null | null | examples/injectable_mocking_for_tests/injectable_mocking_example.py | mt3o/injectable | 0ffc5c758b63d9391134cd822158e1846999b404 | ["MIT"] | null | null | null |
"""
This is an example of how one can use the testing utility functions
:meth:`clear_injectables <injectable.testing.clear_injectables>` and
:meth:`register_injectables <injectable.testing.register_injectables>` for mocking
a dependency for tests.
"""
# sphinx-start
from unittest.mock import Mock
from examples import Example
from injectable import (
injectable,
autowired,
Autowired,
Injectable,
load_injection_container,
)
from injectable.testing import clear_injectables, register_injectables
@injectable
class RealDep:
@staticmethod
def print():
print("RealDep")
class InjectableMocking(Example):
def __init__(self):
clear_injectables(RealDep)
mocked_dep = Mock(wraps=RealDep)
mocked_dep.print = Mock(side_effect=lambda: print("MockedDep"))
mocked_injectable = Injectable(lambda: mocked_dep)
register_injectables({mocked_injectable}, RealDep)
@autowired
def run(self, dep: Autowired(RealDep)):
dep.print()
# MockedDep
dep.print.assert_called()
def run_example():
load_injection_container()
example = InjectableMocking()
example.run()
if __name__ == "__main__":
run_example()
| 23.882353 | 82 | 0.716749 |
2c5ee4c680422e39bc1b4f05645d8e4538d7cf7f | 108 | py | Python | Lib/site-packages/MySQLdb/release.py | pavanmaganti9/djangoapp | d6210386af89af9dae6397176a26a8fcd588d3b4 | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/MySQLdb/release.py | pavanmaganti9/djangoapp | d6210386af89af9dae6397176a26a8fcd588d3b4 | ["bzip2-1.0.6"] | null | null | null | Lib/site-packages/MySQLdb/release.py | pavanmaganti9/djangoapp | d6210386af89af9dae6397176a26a8fcd588d3b4 | ["bzip2-1.0.6"] | null | null | null |
__author__ = "Inada Naoki <songofacandy@gmail.com>"
version_info = (1,4,2,'final',0)
__version__ = "1.4.2"
| 21.6 | 51 | 0.694444 |
b89a65f26215240c5c060be9a1a31bad6d26d23c | 421 | py | Python | aiochrome/exceptions.py | fate0/aiochrome | b3cf41e95184c99fa64973527edd9f2c502b845f | ["Apache-2.0"] | 20 | 2017-10-20T10:33:23.000Z | 2021-04-04T22:23:51.000Z | aiochrome/exceptions.py | hnyaoqingping/aiochrome | b3cf41e95184c99fa64973527edd9f2c502b845f | ["Apache-2.0"] | 1 | 2017-09-19T12:29:08.000Z | 2017-09-19T12:29:08.000Z | aiochrome/exceptions.py | hnyaoqingping/aiochrome | b3cf41e95184c99fa64973527edd9f2c502b845f | ["Apache-2.0"] | 5 | 2018-04-12T21:04:29.000Z | 2021-01-29T12:35:02.000Z |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class AioChromeException(Exception):
pass
class UserAbortException(AioChromeException):
pass
class TabConnectionException(AioChromeException):
pass
class CallMethodException(AioChromeException):
pass
class TimeoutException(AioChromeException):
pass
class RuntimeException(AioChromeException):
    pass
| 15.035714 | 49 | 0.767221 |
f5cb9d7bc36873b7066f22db382764796ef90d54 | 6,989 | py | Python | run.py | GindaChen/LEBench | 502f5c880513ef67e6315a5444f7180ffbbcf291 | ["Apache-2.0"] | null | null | null | run.py | GindaChen/LEBench | 502f5c880513ef67e6315a5444f7180ffbbcf291 | ["Apache-2.0"] | null | null | null | run.py | GindaChen/LEBench | 502f5c880513ef67e6315a5444f7180ffbbcf291 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import signal
import platform
import sys
from subprocess import check_call, check_output, call
from os.path import join
from datetime import datetime
DEBUG = True
GRUB_CFG_FILE = '/boot/grub/grub.cfg'
GRUB_FILE = '/etc/default/grub'
WORKING_DIR = ''
KERN_INDEX_FILE = '/iteration'
LOCAL_GRUB_FILE = '/grub'
KERN_LIST_FILE = '/kern_list'
RESULT_DIR = '/RESULT_DIR/'
TEST_DIR = '/TEST_DIR/'
TEST_NAME = 'OS_Eval'
""" Grabs the ith kernel from KERN_LIST_FILE.
"""
def get_kern_list(idx):
with open(KERN_LIST_FILE, 'r') as fp:
lines = fp.readlines()
if -1 < idx < len(lines):
return lines[idx].strip()
elif idx >= len(lines):
print '[INFO] LEBench run concluded, finished testing on ' + str(len(lines)) + ' kernels.'
os.remove(KERN_INDEX_FILE)
sys.exit(0)
else:
raise ValueError('Kernel index out of range, '
'expect index to be between 0 and ' + str(len(lines)))
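# kern_list is expected to hold one installed kernel version per line,
# matching the vmlinuz-<version> images under /boot. For example (versions
# here are only illustrative):
#   4.15.0-112-generic
#   5.4.0-42-generic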
""" Modifies the grub file to boot into the target kernel the next time.
"""
def generate_grub_file(f, target_kern):
if DEBUG: print '[DEBUG] Preparing grub for kernel: ' + target_kern
if DEBUG: print '[DEBUG]--------------------------------------------------'
if not os.path.exists(f):
raise ValueError("File %s does not exist." % f)
kern_image_name = 'vmlinuz-%s' % target_kern
if not os.path.exists(os.path.join('/', 'boot', kern_image_name)):
raise ValueError('Kernel image %s does not exist' % kern_image_name)
print '[INFO] Setting boot version to ' + target_kern + '.'
with open(f, 'r') as fp:
lines = fp.readlines()
for idx, line in enumerate(lines):
if line.startswith('GRUB_DEFAULT'):
line = 'GRUB_DEFAULT="Advanced options for Ubuntu>Ubuntu, with Linux %s"\n' % target_kern
lines[idx] = line
with open(LOCAL_GRUB_FILE, 'w+') as fp:
fp.writelines(lines)
return True
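# For a hypothetical kernel version, the rewritten entry produced above looks
# like:
#   GRUB_DEFAULT="Advanced options for Ubuntu>Ubuntu, with Linux 4.15.0-112-generic"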
"""Sets up grub using configtured grub file and shell cmds
"""
def install_grub_file():
if DEBUG: print "[DEBUG] Copying GRUB config to %s" % GRUB_FILE
call(['sudo', 'cp', LOCAL_GRUB_FILE, GRUB_FILE])
if DEBUG: print "[DEBUG] Configuring boot"
call(['sudo', 'grub-install', '--force', '--target=i386-pc', '/dev/sda1'])
if DEBUG: print "[DEBUG] Making grub config"
call(['sudo', 'grub-mkconfig', '-o', GRUB_CFG_FILE])
def restart():
print '[INFO] Restarting the machine now.'
call(['sudo', 'reboot'])
""" Running the LEBench tests for the current kernel version.
"""
def run_bench():
print '[INFO] --------------------------------------------------'
print '[INFO] Starting LEBench tests'
print '[INFO] Current time: ' + str(datetime.now().time())
kern_version = platform.uname()[2]
print '[INFO] current kernel version: ' + kern_version + '.'
test_file = join(TEST_DIR, TEST_NAME)
print '[INFO] Preparing to run test ' + TEST_NAME + '.'
print '[INFO] Compiling test ' + TEST_NAME + ".c."
call(('make -C ' + TEST_DIR).split())
result_path = join(RESULT_DIR, kern_version)
if not os.path.exists(result_path):
os.makedirs(result_path)
result_filename = join(RESULT_DIR, kern_version, TEST_NAME)
result_error_filename = join(RESULT_DIR, kern_version, TEST_NAME + '_err')
result_fp = open(result_filename, 'w+')
result_error_fp = open(result_error_filename, 'w+')
test_cmd = [TEST_DIR + TEST_NAME, '0', kern_version]
print '[INFO] Running test with command: ' + ' '.join(test_cmd)
ret = call(test_cmd, stdout=result_fp, stderr=result_error_fp)
print '[INFO] Finished running test ' + TEST_NAME + \
', test returned ' + str(ret) + ', log written to: ' + result_path + "."
print '[INFO] Current time: ' + str(datetime.now().time())
with open(result_error_filename, 'r') as fp:
lines = fp.readlines()
if len(lines) > 0:
for line in lines:
print line
raise Exception('[FATAL] test run encountered error.')
if __name__ == '__main__':
# Setting up working directory and sanity checks.
if not os.geteuid() == 0:
raise Exception('This script should be run with "sudo".')
try:
WORKING_DIR = os.environ['LEBENCH_DIR']
except:
raise ValueError('$LEBENCH_DIR is not set. Example: "/home/username/LEBench/".')
if 'LEBench' not in WORKING_DIR:
raise ValueError('$LEBENCH_DIR should point to the directory containing LEBench. Example: "/home/username/LEBench/".')
KERN_INDEX_FILE = WORKING_DIR + KERN_INDEX_FILE
LOCAL_GRUB_FILE = WORKING_DIR + LOCAL_GRUB_FILE
KERN_LIST_FILE = WORKING_DIR + KERN_LIST_FILE
RESULT_DIR = WORKING_DIR + RESULT_DIR
TEST_DIR = WORKING_DIR + TEST_DIR
if not os.path.exists(KERN_LIST_FILE):
raise IOError('Cannot open "kern_list" file. If it\'s not present, '
'run "get_kern.py" to generate this file by grepping all install kernels.')
with open(KERN_LIST_FILE, 'r') as fp:
lines = fp.readlines()
if len(lines) == 0:
raise ValueError('"kern_list" file is empty, '
'run "get_kern.py" to generate this file by grepping all install kernels.')
# For running LEBench on one specified kernel version.
if len(sys.argv) > 1:
kern_version = sys.argv[1]
print "[INFO] Configuring to boot into " + kern_version + "."
generate_grub_file(WORKING_DIR + 'template/grub', kern_version)
install_grub_file()
sys.exit(0)
# For running LEBench on a list of specified kernel versions.
if not os.path.exists(KERN_INDEX_FILE):
with open(KERN_INDEX_FILE, 'w') as f:
f.write("-1\n")
with open(KERN_INDEX_FILE, 'r') as f:
kern_idx = int(f.read())
next_kern_idx = kern_idx + 1
if DEBUG: print '[DEBUG] Running at kernel index: ' + str(kern_idx)
with open(KERN_INDEX_FILE, 'w') as fp:
fp.write(str(next_kern_idx).strip())
if DEBUG: print '[DEBUG] Done writing kernel index %d for the next iteration' % next_kern_idx + '.'
if next_kern_idx == 0:
# Need to boot into the right kernel version first.
print '[INFO] LEBench tests will start after booting into the first kernel.'
else:
# We are at the right kernel version, actually run LEBench.
run_bench()
print '[INFO] * End of execution *'
print '[INFO] * Not intend to restart the machine *'
# if DEBUG: print '[DEBUG] Preparing to modify grub.'
# if generate_grub_file(WORKING_DIR + 'template/grub', get_kern_list(next_kern_idx)):
# install_grub_file()
# if DEBUG: print '[DEBUG] Done configuring grub for the next kernel.'
# restart()
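# Typical invocation (paths and kernel versions below are examples only),
# assuming LEBENCH_DIR points at the LEBench checkout and the script runs as
# root with the environment preserved:
#   $ export LEBENCH_DIR=/home/username/LEBench/
#   $ sudo -E python run.py                      # iterate over kern_list
#   $ sudo -E python run.py 4.15.0-112-generic   # configure a single kernel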
| 35.841026 | 126 | 0.619974 |
1b3c128995a785caa5aedb286419ff711992b63f | 104,645 | py | Python | tests/python/unittest/test_sparse_operator.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | ["Apache-2.0"] | null | null | null | tests/python/unittest/test_sparse_operator.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | ["Apache-2.0"] | null | null | null | tests/python/unittest/test_sparse_operator.py | feevos/incubator-mxnet | 275378a49a6035fd5bdead4a74ac36b6070295a7 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from mxnet.test_utils import *
from common import setup_module, with_seed
import random
import warnings
def is_scalar(var):
return False if hasattr(var, "__len__") else True
def get_result_type(call, dflt_stype):
"""Try to infer result storage type for a sparse matrix and a given unary operation"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = do_normalize(call(zero))
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
expected_result_type = dflt_stype;
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_with_scalar(call, dflt_stype):
"""Try to infer result storage type when operating a sparse matrices and a scalar"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
result = call(zero, 5)
if not almost_equal(result, zero, equal_nan=True):
expected_result_type = 'default'
else:
if dflt_stype is not None:
expected_result_type = dflt_stype;
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_2(call, dflt_stype):
"""Try to infer result storage type when operating on two sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for outer in [zero, np.ones(zero.shape)]:
for inner in [zero, np.ones(zero.shape)]:
result = do_normalize(call(outer, inner))
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_result_type_3(call, dflt_stype):
"""Try to infer result storage type when operating on three sparse matrices"""
if call is not None and dflt_stype != 'default':
zero = np.zeros(([1]))
need_default = False
for moon in [zero]:
for outer in [zero]:
for inner in [zero]:
res_1, res_2 = call(moon, outer, inner)
result = do_normalize(res_1)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
result = do_normalize(res_2)
if not almost_equal(result, zero, equal_nan=True):
need_default = True
break
if need_default is True:
break
if need_default is True:
break
if not need_default and dflt_stype is not None:
expected_result_type = dflt_stype
else:
expected_result_type = 'default'
else:
expected_result_type = 'default'
return expected_result_type
def get_fw_bw_result_types(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_2(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type(forward_numpy_call, fwd_res_dflt),
get_result_type_2(backward_numpy_call, bwd_res_dflt))
def get_fw_bw_result_types_with_scalar(forward_numpy_call, fwd_res_dflt,
backward_numpy_call, bwd_res_dflt):
return (get_result_type_with_scalar(forward_numpy_call, fwd_res_dflt),
get_result_type_with_scalar(backward_numpy_call, bwd_res_dflt))
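# A quick, self-contained illustration (numpy only, not used by the tests) of
# the rule the helpers above encode: operators that map 0 to 0 may keep the
# sparse storage type, anything else falls back to 'default' (dense).
def _example_zero_preserving_rule():
    import numpy as np
    zero = np.zeros((1,))
    assert np.allclose(np.sqrt(zero), zero)      # sqrt(0) == 0 -> keeps stype
    assert not np.allclose(np.cos(zero), zero)   # cos(0) == 1 -> 'default'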
def gen_rsp_random_indices(shape, density=.5, force_indices=None):
assert density >= 0 and density <= 1
indices = set()
if force_indices is not None:
for val in force_indices:
indices.add(int(val))
if not np.isclose(density, .0, rtol=1.e-3, atol=1.e-3, equal_nan=True) and len(shape) > 0:
row_count = shape[0]
for i in range(row_count):
r = random.uniform(0, 1)
if r <= density and len(indices) < shape[0]:
indices.add(i)
assert len(indices) <= shape[0]
return list(indices)
def all_zero(var):
return 0
@with_seed()
def test_elemwise_binary_ops():
def test_elemwise_binary_op(name, lhs_stype, rhs_stype, shape,
forward_mxnet_call, forward_numpy_call, backward_numpy_call,
lhs_grad_stype,
rhs_grad_stype,
expected_result_storage_type=None,
modifier_func=None,
lhs_density=.5,
rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0,
skip_gradient_check=False,
shuffle_csr_indices=True,
verbose=False):
if lhs_grad_stype is None:
lhs_grad_stype = lhs_stype
if rhs_grad_stype is None:
rhs_grad_stype = rhs_stype
lhs_grad_stype = get_result_type_3(backward_numpy_call, lhs_grad_stype)
rhs_grad_stype = get_result_type_3(backward_numpy_call, rhs_grad_stype)
if verbose is True:
print("testing: {} lhs={}, rhs={}, lhs_grad_stype={}, rhs_grad_stype={}"
.format(name, lhs_stype, rhs_stype, lhs_grad_stype, rhs_grad_stype))
# Output type should be same as lvalue type, unless otherwise specified
if expected_result_storage_type is None:
if lhs_stype == 'default' or rhs_stype == 'default':
expected_result_storage_type = 'default'
else:
expected_result_storage_type = lhs_stype
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
grad_stypes = dict()
grad_stypes['lhs'] = lhs_grad_stype
grad_stypes['rhs'] = rhs_grad_stype
if lhs_stype == 'default':
lhs_nd = rand_ndarray(shape, 'default')
if abs(lhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
lhs_nd = mx.nd.array(assign_each(lhs_nd.asnumpy(), func))
else:
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=lhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=lhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
if rhs_stype == 'default':
rhs_nd = rand_ndarray(shape, 'default')
if abs(rhs_density) < 1e-4:
func = all_zero
else:
func = modifier_func
rhs_nd = mx.nd.array(assign_each(rhs_nd.asnumpy(), func))
else:
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=rhs_density,
modifier_func=modifier_func,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=rhs_density,
force_indices=[(shape[0]/2)] if force_lr_overlap is True else None
))
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
if verbose is True:
print("lhs input: {}".format(lhs_np))
print("rhs input: {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("out_np: {}".format(out_np))
test = forward_mxnet_call(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
outputs = check_symbolic_forward(test, location, [out_np], equal_nan=True)
assert len(outputs) == 1
assert outputs[0].stype == expected_result_storage_type
if verbose is True:
print ("mx forward output: ", outputs[0].asnumpy())
print ("lhs_nd: ", lhs_nd.stype)
print ("rhs_nd: ", rhs_nd.stype)
print ("forward output: ", outputs[0].stype)
if outputs[0].stype != 'default':
out_grad = create_sparse_array_zd(
shape, outputs[0].stype, density=ograd_density,
data_init=1,
modifier_func=lambda x: 2,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_grad_overlap is True else None
))
else:
if abs(ograd_density) < 1e-4:
out_grad = mx.nd.array(np.zeros(shape))
else:
out_grad = mx.nd.array(np.ones(shape))
out_grad_np = out_grad.asnumpy()
if verbose is True:
print("out_grad_np", out_grad_np)
ingrad_lhs_np, ingrad_rhs_np = backward_numpy_call(out_grad_np, lhs_np, rhs_np)
if verbose is True:
print("out_grad", out_grad.asnumpy())
print("ingrad_lhs_np", ingrad_lhs_np)
print("ingrad_rhs_np", ingrad_rhs_np)
igrads_result = check_symbolic_backward(test, location, [out_grad],
[ingrad_lhs_np, ingrad_rhs_np],
grad_stypes=grad_stypes,
equal_nan=True)
if verbose is True:
print("ingrad_lhs", igrads_result['lhs'].asnumpy())
print("ingrad_rhs", igrads_result['rhs'].asnumpy())
assert len(igrads_result) == 2
if lhs_grad_stype is not None:
assert igrads_result['lhs'].stype == lhs_grad_stype
if rhs_grad_stype is not None:
assert igrads_result['rhs'].stype == rhs_grad_stype
if skip_gradient_check is not True:
check_numeric_gradient(test, location,
grad_stype_dict=grad_stypes)
def check_all(l, r, check_function):
assert l.shape == r.shape
return check_function(l, r)
def gt(l, r):
return check_all(l, r, lambda a, b: a > b)
def ge(l, r):
return check_all(l, r, lambda a, b: a >= b)
def lt(l, r):
return check_all(l, r, lambda a, b: a < b)
def le(l, r):
return check_all(l, r, lambda a, b: a <= b)
def elemwise_mul_stype(lstype, rstype):
if lstype == rstype:
return lstype
elif lstype == 'default' and rstype == 'row_sparse':
return 'row_sparse'
elif lstype == 'row_sparse' and rstype == 'default':
return 'row_sparse'
else:
return 'default'
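    # A few concrete cases of the rule above:
    #   elemwise_mul_stype('csr', 'csr')            -> 'csr'
    #   elemwise_mul_stype('row_sparse', 'default') -> 'row_sparse'
    #   elemwise_mul_stype('default', 'row_sparse') -> 'row_sparse'
    #   elemwise_mul_stype('csr', 'default')        -> 'default'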
def check_elemwise_binary_ops(lhs_stype, rhs_stype, shape,
lhs_grad_stype=None, rhs_grad_stype=None,
lhs_density=.5, rhs_density=.5,
force_lr_overlap=False,
force_grad_overlap=False,
ograd_density=0.0):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
        if ((lhs_stype == 'default' and rhs_stype == 'row_sparse') or
            (lhs_stype == 'default' and rhs_stype == 'csr')):
test_elemwise_binary_op("elemwise_add", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_add(l, r, out=l),
lambda l, r: l + r,
lambda outg, l, r: (outg, outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_sub", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_sub(l, r),
lambda l, r: l - r,
lambda outg, l, r: (outg, -outg),
lhs_grad_stype, rhs_grad_stype,
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density,
rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_mul", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_mul(l, r),
lambda l, r: l * r,
lambda outg, l, r: (outg * r, outg * l),
elemwise_mul_stype(lhs_stype, rhs_stype),
elemwise_mul_stype(lhs_stype, rhs_stype),
expected_result_storage_type=elemwise_mul_stype(lhs_stype, rhs_stype),
ograd_density=ograd_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
verbose=False)
test_elemwise_binary_op("elemwise_div", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym.sparse.elemwise_div(l, r),
lambda l, r: l / r,
lambda outg, l, r: (outg * (1/r), outg * (-l/(r*r))),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
expected_result_storage_type='default',
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("maximum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._maximum(l, r),
lambda l, r: np.maximum(l, r),
lambda outg, l, r: (outg * ge(l, r), outg * lt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
skip_gradient_check=True,
ograd_density=ograd_density,
verbose=False)
test_elemwise_binary_op("minimum", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._minimum(l, r),
lambda l, r: np.minimum(l, r),
lambda outg, l, r: (outg * le(l, r), outg * gt(l, r)),
lhs_grad_stype, rhs_grad_stype,
modifier_func=lambda a: a if abs(a) > 0.25 else abs(a) + 1,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
test_elemwise_binary_op("hypot", lhs_stype, rhs_stype, shape,
lambda l, r: mx.sym._internal._hypot(l, r),
lambda l, r: np.hypot(l, r),
lambda outg, l, r: (
outg * assign_each2(
l, r, lambda a, b: a/np.sqrt(a * a + b * b)),
outg * assign_each2(
l, r, lambda a, b: b/np.sqrt(a * a + b * b))
),
lhs_grad_stype, rhs_grad_stype,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
lhs_density=lhs_density, rhs_density=rhs_density,
ograd_density=ograd_density,
skip_gradient_check=True,
verbose=False)
# Run basic tests
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for ii in range(1):
# Run defaults
check_elemwise_binary_ops('default', 'default', rand_shape_2d())
# Try different densities
shape = rand_shape_2d()
for lhs_density in [0.0, random.uniform(0, 1), 1.0]:
for rhs_density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
print("lhs_density={}, rhs_density={}, ograd_density={}, shape: {}".format(
lhs_density, rhs_density, ograd_density, shape))
# Try row_sparse overlaps
for force_lr_overlap in [False, True]:
for force_grad_overlap in [False, True]:
print(" force_lr_overlap={}, force_grad_overlap={}, shape={}".
format(force_lr_overlap, force_grad_overlap, shape))
# Left and right always overlap when one is default storage
# (assuming the row_sparse one has some entries in it)
if force_lr_overlap is False:
check_elemwise_binary_ops('default', 'row_sparse', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
check_elemwise_binary_ops('row_sparse', 'default', shape,
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
                                # Back to left-right overlap possibilities
check_elemwise_binary_ops('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse',
rhs_grad_stype='row_sparse',
lhs_density=lhs_density,
rhs_density=rhs_density,
force_lr_overlap=force_lr_overlap,
force_grad_overlap=force_grad_overlap,
ograd_density=ograd_density)
# No overlap flags for CSR
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'csr', shape,
lhs_grad_stype='default',
rhs_grad_stype='default',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('default', 'csr', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
check_elemwise_binary_ops('csr', 'default', shape,
lhs_grad_stype='csr',
rhs_grad_stype='csr',
lhs_density=lhs_density,
rhs_density=rhs_density,
ograd_density=ograd_density)
@with_seed()
def test_elemwise_csr_same_zeros():
# Zeroes
a = mx.nd.sparse.zeros('csr', (1,1))
b = mx.nd.elemwise_add(a,a)
res = a.asnumpy() + a.asnumpy()
assert_almost_equal(b.asnumpy(), res)
def as_dense(arr):
if arr.stype != 'default':
return mx.nd.cast_storage(arr, stype='default')
else:
return arr;
# Make sure that 0's look like 0's when we do a comparison
def do_normalize(arr):
ret = arr.copy()
idx = np.isclose(arr, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True)
ret[idx] = 0
return ret
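# For example, do_normalize(np.array([-0.0, 1e-05, 2.0])) maps the first two
# entries (both within the 1e-3 tolerance around -0) to exactly 0 and leaves
# 2.0 unchanged.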
def check_sparse_mathematical_core(name, stype,
forward_mxnet_call, forward_numpy_call, backward_numpy_call=None,
rhs_arg=None, data_init=9., grad_init=2., output_grad_stype=None,
input_grad_stype=None, force_overlap=False, density=.5,
ograd_density=.5, verbose=False, shuffle_csr_indices=True):
if verbose is True:
print("TESTING: " + name)
data = mx.symbol.Variable('data', stype=stype)
temp_input_grad_stype = input_grad_stype
if temp_input_grad_stype is None:
temp_input_grad_stype = stype
if rhs_arg is not None:
if is_scalar(rhs_arg):
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_with_scalar(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
else:
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types(forward_numpy_call, stype,
backward_numpy_call, temp_input_grad_stype)
if input_grad_stype is not None and input_grad_stype != expected_grad_result_type:
print("{}: explicit override of deduced input grad type '{}' with '{}'".format(
name, expected_grad_result_type, input_grad_stype))
expected_grad_result_type = input_grad_stype
shape = rand_shape_2d()
if verbose is True:
print("Shape: ", shape, "density: ", density, "force_overlap", force_overlap)
if stype == 'default':
data_tmp = np.zeros(shape)
if abs(density) >= 1e-4:
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
else:
arr_data = create_sparse_array_zd(
shape, stype, density=density,
data_init=data_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
data_tmp = arr_data.asnumpy()
if verbose is True:
print("arr_data indices", arr_data.indices.asnumpy())
if verbose is True:
print("input", data_tmp)
if backward_numpy_call is None:
arr_grad = None
elif expected_grad_result_type == 'default':
if abs(density) < 1e-4:
arr_grad = mx.nd.zeros(shape)
else:
arr_grad = mx.nd.ones(shape)
else:
arr_grad = create_sparse_array_zd(
shape,
expected_grad_result_type,
density=density,
data_init=1,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
force_indices=[(shape[0]/2)] if force_overlap is True else None
)
)
if rhs_arg is not None:
test = forward_mxnet_call(data, rhs_arg)
else:
test = forward_mxnet_call(data)
args = list()
args.append(arr_data)
if arr_grad is not None:
exe_test = test.bind(default_context(), args=args, args_grad=[arr_grad])
else:
exe_test = test.bind(default_context(), args=args)
exe_test.forward(is_train=True)
assert exe_test.outputs[0].stype == expected_result_type
out = exe_test.outputs[0].asnumpy()
if rhs_arg is not None:
npout = forward_numpy_call(data_tmp, rhs_arg)
else:
npout = forward_numpy_call(data_tmp)
if verbose is True:
print("out", out)
print("npout", npout)
assert_almost_equal(out, npout, equal_nan=True)
if backward_numpy_call is not None:
if output_grad_stype == 'default' or output_grad_stype is None:
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
else:
out_grad = create_sparse_array_zd(
shape, output_grad_stype,
density=density,
data_init=grad_init,
shuffle_csr_indices=shuffle_csr_indices,
rsp_indices=gen_rsp_random_indices(
shape,
density=ograd_density,
force_indices=[(shape[0]/2)] if force_overlap is True else None))
npout_grad = out_grad.asnumpy()
if verbose is True:
print("npout_grad", npout_grad)
if rhs_arg is not None:
temp = backward_numpy_call(data_tmp, rhs_arg)
else:
temp = backward_numpy_call(data_tmp)
input_grad = npout_grad * temp
if verbose is True:
print(arr_grad.asnumpy())
exe_test.backward(out_grad)
if verbose is True:
print(arr_grad.asnumpy())
assert arr_grad.stype == expected_grad_result_type
arr_grad = arr_grad.asnumpy()
if verbose is True:
print(name)
print("arr_grad", arr_grad)
print("input_grad", input_grad)
assert_almost_equal(arr_grad, input_grad, equal_nan=True)
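# A representative call, mirroring how test_sparse_mathematical_core below
# drives this helper (forward symbol, numpy reference, and the analytic
# derivative used for the backward check):
#
#   check_sparse_mathematical_core("square", 'row_sparse',
#                                  lambda x: mx.sym.sparse.square(x),
#                                  lambda x: np.square(x),
#                                  lambda x: 2 * x,
#                                  density=0.5, ograd_density=0.5)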
@with_seed()
def test_sparse_mathematical_core():
def util_sign(a):
if np.isclose(a, -0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif np.isclose(a, 0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
return 0
elif a < 0.0:
return -1
else: # a > 0.0:
return 1
# Check scalar binary operators
def check_binary_op_with_scalar(stype,
output_grad_stype=None,
input_grad_stype=None,
density=.5, ograd_density=.5,
force_overlap=False,):
# mul_scalar
check_sparse_mathematical_core("mul_scalar", stype,
lambda x, y: x * y,
lambda x, y: x * y,
lambda input, rhs: rhs,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# plus_scalar
check_sparse_mathematical_core("plus_scalar", stype,
lambda x, y: x + y,
lambda x, y: x + y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# minus_scalar
check_sparse_mathematical_core("minus_scalar", stype,
lambda x, y: x - y,
lambda x, y: x - y,
lambda input, rhs: 1,
rhs_arg=5.0,
data_init=2, grad_init=3,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density,
force_overlap=force_overlap,
verbose=False)
# Check many basic unary operators
def check_mathematical_core(stype, output_grad_stype=None,
input_grad_stype=None, force_overlap=False,
density=.5, ograd_density=.5):
# negative
check_sparse_mathematical_core("negative", stype,
lambda x: mx.sym.sparse.negative(x),
lambda x: np.negative(x),
force_overlap=force_overlap,
density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# square
check_sparse_mathematical_core("square", stype,
lambda x: mx.sym.sparse.square(x),
lambda x: np.square(x),
lambda x: 2 * x,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
if stype != "csr":
# sqrt
check_sparse_mathematical_core("sqrt", stype,
lambda x: mx.sym.sparse.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 1.0/(2.0 * np.sqrt(x)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density,
verbose=False)
# rsqrt
check_sparse_mathematical_core("rsqrt", stype,
lambda x: mx.sym.sparse.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tan
check_sparse_mathematical_core("tan", stype,
lambda x: mx.sym.sparse.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
density=density,
ograd_density=ograd_density)
# abs
check_sparse_mathematical_core("abs", stype,
lambda x: mx.sym.sparse.abs(x),
lambda x: np.abs(x),
lambda x: assign_each(x, function=util_sign),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# floor
check_sparse_mathematical_core("floor", stype, lambda x: mx.sym.sparse.floor(x),
lambda x: np.floor(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# ceil
check_sparse_mathematical_core("ceil", stype,
lambda x: mx.sym.sparse.ceil(x),
lambda x: np.ceil(x),
force_overlap=force_overlap,
input_grad_stype=input_grad_stype,
density=density, ograd_density=ograd_density)
# sign
check_sparse_mathematical_core("sign", stype,
lambda x: mx.sym.sparse.sign(x),
lambda x: np.sign(x),
lambda x: np.zeros(x.shape),
output_grad_stype=output_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cos
check_sparse_mathematical_core("cos", stype,
lambda x: mx.sym.sparse.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sin
check_sparse_mathematical_core("sin", stype,
lambda x: mx.sym.sparse.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arcsin
check_sparse_mathematical_core("arcsin", stype,
lambda x: mx.sym.sparse.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# arccos
check_sparse_mathematical_core("arccos", stype,
lambda x: mx.sym.sparse.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctan
check_sparse_mathematical_core("arctan", stype,
lambda x: mx.sym.sparse.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# degrees
check_sparse_mathematical_core("degrees", stype,
lambda x: mx.sym.sparse.degrees(x),
lambda x: np.degrees(x),
lambda x: assign_each(x, lambda a: 180./np.pi),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# radians
check_sparse_mathematical_core("radians", stype,
lambda x: mx.sym.sparse.radians(x),
lambda x: np.radians(x),
lambda x: assign_each(x, lambda a: np.pi / 180.),
data_init=0.6, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# sinh
check_sparse_mathematical_core("sinh", stype,
lambda x: mx.sym.sparse.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# cosh
check_sparse_mathematical_core("cosh", stype,
lambda x: mx.sym.sparse.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
data_init=5, grad_init=5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# tanh
check_sparse_mathematical_core("tanh", stype,
lambda x: mx.sym.sparse.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
data_init=0.5, grad_init=1,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arcsinh
check_sparse_mathematical_core("arcsinh", stype,
lambda x: mx.sym.sparse.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arccosh
check_sparse_mathematical_core("arccosh", stype,
lambda x: mx.sym.sparse.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# arctanh
check_sparse_mathematical_core("arctanh", stype,
lambda x: mx.sym.sparse.arctanh(x),
lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.),
data_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log1p
check_sparse_mathematical_core("log1p", stype,
lambda x: mx.sym.sparse.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# expm1
check_sparse_mathematical_core("expm1", stype,
lambda x: mx.sym.sparse.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
data_init=0.5, grad_init=0.5,
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log10
check_sparse_mathematical_core("log10", stype,
lambda x: mx.sym.sparse.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# log2
check_sparse_mathematical_core("log2", stype,
lambda x: mx.sym.sparse.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap, density=density,
ograd_density=ograd_density)
# rint
check_sparse_mathematical_core("rint", stype,
lambda x: mx.sym.sparse.rint(x),
lambda x: np.rint(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
# fix
check_sparse_mathematical_core("fix", stype,
lambda x: mx.sym.sparse.fix(x),
lambda x: np.fix(x),
force_overlap=force_overlap, density=density,
input_grad_stype=input_grad_stype,
ograd_density=ograd_density)
        import_succeeded = False
        try:
from scipy import special as scipy_special
import_succeeded = True
# gamma
check_sparse_mathematical_core("gamma", stype,
lambda x: mx.sym.sparse.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# gammaln
check_sparse_mathematical_core("gammaln", stype,
lambda x: mx.sym.sparse.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
output_grad_stype=output_grad_stype,
input_grad_stype=input_grad_stype,
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
except:
            if not import_succeeded:
print("Could not import scipy. Skipping unit tests for special functions")
else:
raise
for i in range(1):
print("pass", i)
for density in [0.0, random.uniform(0, 1), 1.0]:
for ograd_density in [0.0, random.uniform(0, 1), 1.0]:
for force_overlap in [False, True]:
print("{}, {}, {}".format(density, ograd_density, force_overlap))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Check unary ops (unary fwd, binary bwd)
check_mathematical_core('default', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('row_sparse', output_grad_stype='row_sparse',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='default',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
check_mathematical_core('csr', output_grad_stype='csr',
force_overlap=force_overlap,
density=density, ograd_density=ograd_density)
# Check binary with scalar ops
check_binary_op_with_scalar('default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse', output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('row_sparse',
output_grad_stype='row_sparse',
density=density, ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='csr',
input_grad_stype='csr',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
check_binary_op_with_scalar('csr',
output_grad_stype='default',
density=density,
ograd_density=ograd_density,
force_overlap=force_overlap)
@with_seed()
def test_elemwise_add_ex():
def check_elemwise_add_ex(lhs_stype, rhs_stype, shape, lhs_grad_stype=None, rhs_grad_stype=None):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd.asnumpy()
out_np = lhs_np + rhs_np
test = mx.symbol.sparse.elemwise_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_np])
check_numeric_gradient(test, location)
grad_stypes = {}
if lhs_grad_stype is not None and lhs_grad_stype != 'default':
grad_stypes['lhs'] = lhs_grad_stype
if rhs_grad_stype is not None and rhs_grad_stype != 'default':
grad_stypes['rhs'] = rhs_grad_stype
check_symbolic_backward(test, location, [out_np], [out_np, out_np],
grad_stypes=grad_stypes)
shapes = [rand_shape_2d(), rand_shape_3d()]
for shape in shapes:
check_elemwise_add_ex('default', 'default', shape)
check_elemwise_add_ex('row_sparse', 'row_sparse', shape,
lhs_grad_stype='row_sparse', rhs_grad_stype='row_sparse')
@with_seed()
def test_cast_storage_ex():
def check_cast_storage(shape, density, from_stype, to_stype, check_numeric_grad=True):
x = mx.symbol.Variable('x', stype=from_stype)
x_nd = rand_ndarray(shape, from_stype, density=density)
x_np = x_nd.asnumpy()
out_np = x_np
test = mx.symbol.cast_storage(x, stype=to_stype)
location = {'x': x_nd}
check_symbolic_forward(test, location, [out_np])
# consider disable the numeric grad check for gpu block kernel since the input is large
if check_numeric_grad:
check_numeric_gradient(test, location)
grad_stypes = {'x': to_stype}
check_symbolic_backward(test, location, [out_np], [out_np], grad_stypes=grad_stypes)
density = [1.00, 0.50, 0.01]
for d in density:
shape_2d = rand_shape_2d()
shape_3d = rand_shape_3d()
check_cast_storage(shape_2d, d, 'csr', 'default')
check_cast_storage(shape_2d, d, 'default', 'csr')
check_cast_storage(shape_2d, d, 'csr', 'csr')
check_cast_storage(shape_2d, d, 'row_sparse', 'default')
check_cast_storage(shape_2d, d, 'default', 'row_sparse')
check_cast_storage(shape_2d, d, 'row_sparse', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'default')
check_cast_storage(shape_3d, d, 'default', 'row_sparse')
check_cast_storage(shape_3d, d, 'row_sparse', 'row_sparse')
for i in range(4, 6):
shape = rand_shape_nd(i, 5)
check_cast_storage(shape, d, 'default', 'row_sparse')
check_cast_storage(shape, d, 'row_sparse', 'default')
# Test specific gpu kernels
        if default_context().device_type == 'gpu':
dim0 = rnd.randint(1, 10)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'csr')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'csr')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'csr',
check_numeric_grad=False)
# check race condition in block kernel
check_cast_storage((200, 128 * 2 + 1), d, 'default', 'csr',
check_numeric_grad=False)
# test gpu thread kernel
check_cast_storage((dim0, rnd.randint( 1, 32)), d, 'default', 'row_sparse')
# test gpu warp kernel
check_cast_storage((dim0, rnd.randint( 32, 512)), d, 'default', 'row_sparse')
# test gpu block kernel
check_cast_storage((dim0, rnd.randint(512, 1024)), d, 'default', 'row_sparse',
check_numeric_grad=False)
@with_seed()
def test_sparse_dot():
def test_infer_forward_stype(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_a, trans_b):
all_stypes = ["default", "csr", "row_sparse"]
lhs_nd = rand_ndarray(lhs_shape, 'default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, 'default', density=rhs_density)
out_nd = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_a, transpose_b=trans_b)
out_np = out_nd.asnumpy()
for lhs_stype in all_stypes:
for rhs_stype in all_stypes:
for forward_stype in all_stypes:
lhs = lhs_nd.tostype(lhs_stype)
rhs = rhs_nd.tostype(rhs_stype)
out = mx.nd.dot(lhs, rhs, forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
assert_almost_equal(out.tostype('default').asnumpy(), out_np, rtol=1e-4, atol=1e-5)
lhs_var = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs_var = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs_var, rhs_var,
forward_stype=forward_stype,
transpose_a=trans_a, transpose_b=trans_b)
location = {'lhs': lhs, 'rhs': rhs}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
def test_dot_csr(lhs_shape, rhs_shape, rhs_stype, trans_lhs, lhs_density, rhs_density):
lhs_nd = rand_ndarray(lhs_shape, 'csr', density=lhs_density, shuffle_csr_indices=False)
lhs_dns = lhs_nd.tostype('default')
rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.tostype('default')
out = mx.nd.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs)
out_dns = mx.nd.dot(lhs_dns, rhs_dns, transpose_a=trans_lhs)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-4, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='csr')
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_dns, out_dns, transpose_a=backward_trans).asnumpy()
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
def test_dot_dns_csr(lhs_shape, rhs_shape, lhs_density, rhs_density, trans_lhs=False, trans_rhs=False):
lhs_nd = rand_ndarray(lhs_shape, stype='default', density=lhs_density)
rhs_nd = rand_ndarray(rhs_shape, stype='csr', density=rhs_density)
rhs_dns = rhs_nd.tostype('default')
if default_context() == mx.cpu():
forward_stype = 'csr'
else:
forward_stype = 'default'
out = mx.nd.sparse.dot(lhs_nd, rhs_nd, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_dns = mx.nd.dot(lhs_nd, rhs_dns, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
out_np = out_dns.asnumpy()
assert_almost_equal(out.asnumpy(), out_np, rtol=1e-4, atol=1e-5)
# test symbolic forward
lhs = mx.symbol.Variable('lhs', stype='default')
rhs = mx.symbol.Variable('rhs', stype='csr')
out = mx.symbol.sparse.dot(lhs, rhs, transpose_a=trans_lhs, transpose_b=trans_rhs, forward_stype=forward_stype)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(out, location, [out_np], rtol=1e-3, atol=1e-4)
if default_context() == mx.cpu():
# test symbolic backward
backward_trans = not trans_lhs
rhs_backward_grad = mx.nd.dot(lhs_nd, out_dns, transpose_a=backward_trans).asnumpy()
if trans_rhs is True:
rhs_backward_grad = rhs_backward_grad.T
expected = {'rhs': rhs_backward_grad}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'null', 'rhs': 'write'},
rtol=1e-3, atol=1e-4)
else:
transpose_b = not trans_rhs
lhs_backward_grad = mx.nd.dot(out_dns, rhs_dns, transpose_b=transpose_b)
expected = {'lhs': lhs_backward_grad.asnumpy()}
check_symbolic_backward(out, location, [out_np], expected,
grad_req={'lhs': 'write', 'rhs': 'null'},
rtol=1e-3, atol=1e-4)
def test_sparse_dot_zero_output(lhs_shape, trans_lhs, rhs_num_cols):
"""Test for nnr_out = 0. Before the fix, the test would fail."""
lhs = mx.nd.zeros(lhs_shape)
irow = np.random.randint(0, lhs_shape[0])
icol = np.random.randint(0, lhs_shape[1])
lhs[irow, icol] = 1.0
if trans_lhs:
rhs = rand_ndarray(shape=(lhs_shape[0], rhs_num_cols), stype='default')
rhs[irow, :] = 0
else:
rhs = rand_ndarray(shape=(lhs_shape[1], rhs_num_cols), stype='default')
rhs[icol, :] = 0
dns_out = mx.nd.dot(lhs, rhs, transpose_a=trans_lhs)
assert mx.nd.sum(mx.nd.abs(dns_out)).asscalar() == 0
sps_out = mx.nd.sparse.dot(lhs.tostype('csr'), rhs.tostype('row_sparse'), transpose_a=trans_lhs)
assert same(dns_out.asnumpy(), sps_out.asnumpy())
density = [1.00, 0.5, 0.01]
for lhs_d in density:
lhs_shape = rand_shape_2d(50, 200)
rhs_d = 1
test_dot_csr(lhs_shape, (lhs_shape[1], 1), 'default', False, lhs_d, rhs_d) # test gpu SpMV
test_dot_csr(lhs_shape, (lhs_shape[0], 1), 'default', True, lhs_d, rhs_d) # (vector kernel)
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(5, 10)), 'default', False, lhs_d, rhs_d) # test gpu SpMM
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(5, 10)), 'default', True, lhs_d, rhs_d) # (scalar kernel)
test_dot_dns_csr(lhs_shape, (lhs_shape[1], rnd.randint(50, 200)), lhs_d, lhs_d)
test_dot_dns_csr(lhs_shape, (rnd.randint(50, 200), lhs_shape[1]), lhs_d, lhs_d, trans_rhs=True)
for rhs_d in density:
test_dot_csr(lhs_shape, (lhs_shape[1], rnd.randint(1, 10)), 'row_sparse', False, lhs_d, rhs_d)
test_dot_csr(lhs_shape, (lhs_shape[0], rnd.randint(1, 10)), 'row_sparse', True, lhs_d, rhs_d)
test_infer_forward_stype(lhs_shape, (lhs_shape[1], rnd.randint(10, 20)),
lhs_d, rhs_d, False, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[1]),
lhs_d, rhs_d, False, True)
test_infer_forward_stype(lhs_shape, (lhs_shape[0], rnd.randint(10, 20)),
lhs_d, rhs_d, True, False)
test_infer_forward_stype(lhs_shape, (rnd.randint(10, 20), lhs_shape[0]),
lhs_d, rhs_d, True, True)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), False, 40)
test_sparse_dot_zero_output(rand_shape_2d(50, 200), True, 40)
@with_seed()
def test_sparse_dot_determinism():
def test_dot_determinism(lhs_stype, rhs_stype, lhs_density, rhs_density, transpose_a, transpose_b, forward_stype):
lhs_row = rnd.randint(50, 100)
lhs_col = rnd.randint(50, 100)
if transpose_a:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_row)
else:
rhs_shape = (lhs_row, rnd.randint(50, 100))
else:
if transpose_b:
rhs_shape = (rnd.randint(50, 100), lhs_col)
else:
rhs_shape = (lhs_col, rnd.randint(50, 100))
lhs_shape = (lhs_row, lhs_col)
lhs = rand_ndarray(lhs_shape, lhs_stype, density=lhs_density)
rhs = rand_ndarray(rhs_shape, rhs_stype, density=rhs_density)
res1 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
res2 = mx.nd.sparse.dot(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b, forward_stype=forward_stype)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.0, atol=0.0)
test_dot_determinism('csr', 'default', 0.1, 1.0, True, False, 'row_sparse')
forward_stype = 'csr' if default_context() == mx.cpu() else 'default'
test_dot_determinism('default', 'csr', 1.0, 0.1, False, False, forward_stype)
test_dot_determinism('default', 'csr', 1.0, 0.1, False, True, forward_stype)
@with_seed()
def test_sparse_slice():
def check_csr_slice(shape, slice_input):
storage_type = 'csr'
B, _ = rand_sparse_ndarray(shape, storage_type)
np = B.asnumpy()
begin = rnd.randint(0, B.shape[0] - 1)
end = rnd.randint(begin + 1, B.shape[0])
nd_slice = mx.nd.crop(B, begin=begin, end=end)
assert same(nd_slice.asnumpy(), np[begin:end]), (nd_slice.asnumpy(), np[begin:end])
shape = (rnd.randint(7, 15), rnd.randint(1, 10))
check_csr_slice(shape, True)
check_csr_slice(shape, False)
@with_seed()
def test_sparse_retain():
def check_sparse_retain(shape, density, index_type=np.int64):
num_rows = shape[0]
rsp, _ = rand_sparse_ndarray(shape=shape, stype='row_sparse', density=density)
length = np.random.randint(1, num_rows + 1)
idx = random_sample(list(range(0, num_rows)), length)
idx.sort()
dns = rsp.asnumpy()
tensor_retained_expected = np.zeros(shape)
for i in idx:
tensor_retained_expected[i][:] = dns[i]
indices = mx.nd.array(idx, dtype=index_type)
rsp_retained = mx.nd.sparse.retain(rsp, indices=indices)
assert same(tensor_retained_expected, rsp_retained.asnumpy())
# check numeric gradient
data = mx.symbol.Variable('data')
idx = mx.symbol.Variable('indices')
sym = mx.sym.sparse.retain(data=data, indices=idx)
check_numeric_gradient(sym, [rsp, indices], grad_nodes=['data'],
grad_stype_dict={'data': 'row_sparse'})
shape = rand_shape_2d()
shape_3d = rand_shape_3d()
densities = [0.01, 0.5, 1.0]
index_types = [np.float32, np.int32, np.int64]
for density in densities:
for itype in index_types:
check_sparse_retain(shape, density, itype)
check_sparse_retain(shape_3d, density, itype)
@with_seed()
def test_sparse_unary_with_numerics():
def check_sparse_simple(name, stype, mxnet_func, forward_numpy_call,
backward_numpy_call, output_grad_stype=None,
backward_is_use_output=False):
if output_grad_stype is None:
output_grad_stype = stype
expected_result_type, expected_grad_result_type = \
get_fw_bw_result_types_2(forward_numpy_call, stype, backward_numpy_call, output_grad_stype)
if backward_is_use_output is True:
expected_grad_result_type = expected_result_type
shape = (3, 4)
data = mx.symbol.Variable("data")
grad_stypes = {'data' : expected_grad_result_type}
y = mxnet_func(data)
if stype == 'default':
xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
xa_np = xa
else:
xa = create_sparse_array(shape, stype, data_init=None, rsp_indices=[1],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
xa_np = xa.asnumpy()
if output_grad_stype != 'default':
out_grad = create_sparse_array(shape, output_grad_stype, data_init=None,
rsp_indices=[1, 2],
modifier_func=lambda a: a - 0.5,
shuffle_csr_indices=True)
out_grad_np = out_grad.asnumpy()
else:
out_grad_np = np.ones(xa.shape)
out_grad = mx.nd.array(out_grad_np)
output_np = forward_numpy_call(xa_np)
input_grad_np = backward_numpy_call(output_np, out_grad_np)
outputs = check_symbolic_forward(y, [xa], [output_np])
output = outputs[0]
assert output.stype == expected_result_type
input_grad_dict = check_symbolic_backward(y, location=[xa], out_grads=[out_grad],
expected=[input_grad_np],
grad_stypes=grad_stypes)
inp_grad = input_grad_dict["data"]
assert inp_grad.stype == expected_grad_result_type
def check_sparse_function(name, mxnet_func, forward_numpy_call, backward_numpy_call,
backward_is_use_output=False):
check_sparse_simple(name, 'default', mxnet_func, forward_numpy_call, backward_numpy_call)
for output_grad_stype in [None, "row_sparse", "default"]:
check_sparse_simple(name, 'row_sparse', mxnet_func, forward_numpy_call, backward_numpy_call,
output_grad_stype=output_grad_stype,
backward_is_use_output=backward_is_use_output)
check_sparse_function('relu',
lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.0),
lambda output, outg: outg * assign_each(output, lambda x: x > 0.0), backward_is_use_output=True)
check_sparse_function('sigmoid',
lambda x: mx.sym.sigmoid(x),
lambda x: np.divide(1.0, (1.0 + np.exp(-x))),
lambda output, outg: outg * assign_each(output, lambda x: x * (1.0 - x)),
backward_is_use_output=True)
@with_seed()
def test_sparse_nd_zeros():
def check_sparse_nd_zeros(stype, shape):
zero = mx.nd.zeros(shape)
sparse_zero = mx.nd.zeros(shape=shape, stype=stype)
assert_almost_equal(sparse_zero.asnumpy(), zero.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros('row_sparse', shape)
check_sparse_nd_zeros('csr', shape)
check_sparse_nd_zeros('default', shape)
@with_seed()
def test_sparse_nd_zeros_like():
def check_sparse_nd_zeros_like(stype, shape):
zero = mx.nd.zeros(shape, stype=stype)
zero_like = mx.nd.sparse.zeros_like(zero)
assert_almost_equal(zero.asnumpy(), zero_like.asnumpy())
shape = rand_shape_2d()
check_sparse_nd_zeros_like('row_sparse', shape)
check_sparse_nd_zeros_like('csr', shape)
@with_seed()
def test_sparse_axis_operations():
def test_variations(func_name):
dim0 = 30
dim1 = 100
axes = [0, 1]
densities = [0, 0.5, 1]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=density)
dns = csr_array.tostype('default')
for axis in axes:
ret = func_name(csr_array, axis=axis)
assert ret.stype == 'default'
ret_expected = func_name(dns, axis=axis)
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
def test_fallback(func_name, axis=0, keepdims=True, exclude=True):
dim0 = 30
dim1 = 100
shape = rand_shape_2d(dim0, dim1)
csr_array = rand_ndarray(shape=shape, stype='csr', density=0.01)
ret= func_name(csr_array, axis=axis, keepdims=keepdims,
exclude=exclude)
test_variations(mx.nd.sum)
test_fallback(mx.nd.sum, axis=0, keepdims=True, exclude=True)
test_variations(mx.nd.mean)
test_fallback(mx.nd.mean, axis=0, keepdims=True, exclude=True)
@with_seed()
def test_sparse_square_sum():
dim0 = 30
dim1 = 30
axes = [0, 1]
keepdims = [False, True]
densities = [0, 0.01, 0.2, 0.5, 1.0]
for density in densities:
shape = rand_shape_2d(dim0, dim1)
rsp = rand_ndarray(shape, 'row_sparse', density)
dns = rsp.tostype('default')
for axis in axes:
for keepdim in keepdims:
ret = mx.nd._internal._square_sum(rsp, axis=axis, keepdims=keepdim)
if axis == 1 and keepdim:
assert ret.stype == 'row_sparse'
else:
assert ret.stype == 'default'
ret_expected = mx.nd.sum(dns*dns, axis=axis, keepdims=keepdim)
# check forward result
assert_almost_equal(ret.asnumpy(), ret_expected.asnumpy())
rsp_data = mx.sym.Variable('data', stype='row_sparse')
test = mx.symbol._internal._square_sum(rsp_data, axis=axis, keepdims=keepdim)
# check symbolic backward since ograd can be an rsp
# and cannot be checked through check_numeric_gradient
# because it will add a loss layer as the output layer
# which makes ograd of the square_sum dense
if axis == 1 and keepdim:
dns_data = mx.sym.Variable('data')
baseline = mx.sym.sum(mx.sym.square(dns_data), axis=axis, keepdims=keepdim)
igrad_expected = mx.nd.empty(dns.shape)
baseline_exec = baseline.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec.forward(is_train=True)
baseline_exec.backward([ret_expected])
# check backward when ograd is row sparse
check_symbolic_backward(test, [rsp], [ret_expected.tostype('row_sparse')],
[igrad_expected.asnumpy()], grad_stypes={'data': 'row_sparse'})
# check backward when ograd is dense
                    # the stype of output of the square_sum is determined in symbol binding stage.
# The ograd stype of the last layer is the same as the output stype of the last layer.
# Need to add one more layer after square_sum to trigger the kernel for ograd
# with default stype in square_sum op.
baseline1 = baseline + 1
baseline_exec1 = baseline1.bind(default_context(), args=[dns],
args_grad=[igrad_expected])
baseline_exec1.forward(is_train=True)
baseline_exec1.backward([ret_expected])
test1 = test + 1
check_symbolic_backward(test1, [rsp], [ret_expected], [igrad_expected.asnumpy()],
grad_stypes={'data': 'row_sparse'})
# check numeric gradient
check_numeric_gradient(test, [rsp], grad_stype_dict={'data': 'row_sparse'},
atol=1e-2, rtol=0.1)
@with_seed()
def test_sparse_storage_fallback():
""" test operators which don't implement FComputeEx or FStatefulComputeEx """
def check_broadcast_add(shape, lhs_stype, rhs_stype):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
lhs_nd = rand_ndarray(shape, lhs_stype)
rhs_nd = rand_ndarray(shape, rhs_stype)
lhs_dns = mx.nd.cast_storage(lhs_nd, stype='default')
rhs_dns = mx.nd.cast_storage(rhs_nd, stype='default')
out_dns = (lhs_dns + rhs_dns).asnumpy()
test = mx.symbol.broadcast_add(lhs, rhs)
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
check_symbolic_forward(test, location, [out_dns])
check_numeric_gradient(test, location)
check_symbolic_backward(test, location, [out_dns], [out_dns, out_dns])
def np_softmax(x, axis=-1):
# fix for old numpy on Travis not supporting keepdims
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_softmax_with_shape(lhs_stype, rhs_stype, shape, preserve_shape=False):
# bind with label
ctx = default_context()
X = mx.symbol.Variable('X', stype=lhs_stype)
L = mx.symbol.Variable('L', stype=rhs_stype)
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = rand_ndarray(shape, lhs_stype)
l = rand_ndarray(shape, rhs_stype)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx=ctx)
exec1 = Y.bind(ctx, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=1e-4)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(),
rtol=1e-3, atol=1e-4)
def check_concat(shape, lhs_stype, rhs_stype):
x = mx.symbol.Variable('x', stype=lhs_stype)
w = mx.symbol.Variable('w', stype=rhs_stype)
test = mx.sym.Concat(x, w)
x_nd = rand_ndarray(shape, lhs_stype)
w_nd = rand_ndarray(shape, rhs_stype)
location = {'x': x_nd, 'w': w_nd}
check_numeric_gradient(test, location)
def check_operator_with_temp_resource(shape, stype):
x = mx.symbol.Variable('x', stype=stype)
test = mx.sym.sum(x)
x_nd = rand_ndarray(shape, stype)
location = {'x': x_nd}
check_numeric_gradient(test, location)
shape = rand_shape_2d()
stypes = ['default', 'csr', 'row_sparse']
for lhs in stypes:
check_operator_with_temp_resource(shape, lhs)
for rhs in stypes:
check_broadcast_add(shape, lhs, rhs)
check_concat(shape, lhs, rhs)
check_softmax_with_shape(lhs, rhs, shape, preserve_shape=False)
check_softmax_with_shape(rhs, rhs, shape, preserve_shape=True)
@with_seed()
def test_sparse_elementwise_sum():
def check_sparse_elementwise_sum_with_shape(stype, shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.sparse.add_n(*inputs, name='esum')
arr = []
arr_grad = [mx.nd.empty(shape, stype=stype) for _ in range(n)]
densities = [0, 0.01, 0.5, 1.0]
for i in range(n):
arr.append(rand_ndarray(shape, stype, densities[np.random.randint(0, len(densities))]))
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy())
for dim in range(2, 4):
shape = tuple(np.random.randint(5, 10, size=dim))
check_sparse_elementwise_sum_with_shape('row_sparse', shape, np.random.randint(1, 9))
@with_seed()
def test_contrib_sparse_embedding():
''' test sparse embedding operator '''
def check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, weight_stype):
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight", stype=weight_stype)
embed = mx.sym.contrib.SparseEmbedding(data=data, weight=weight, input_dim=in_dim,
output_dim=out_dim, deterministic=deterministic,
name="embed")
grad_req = {'data': 'null', 'embed_weight': 'write'}
exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
np_onehot[np.arange(batch), np_data] = 1.0
arg_map["data"][:] = np_data
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
# weight
weight = arg_map["embed_weight"]
for density in densities:
# update weight based on density
weight[:] = rand_ndarray(weight.shape, weight_stype, density=density)
# check forward
exe_test.forward(is_train=True)
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
# check backward
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
# run twice to check if the result is deterministic when passing "deterministic=True" to SparseEmbedding
if deterministic:
grad_ref = grad_map["embed_weight"].asnumpy()
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), grad_ref, atol=0, rtol=0)
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
stypes = ['default', 'row_sparse']
deterministics = [True, False]
for stype in stypes:
for deterministic in deterministics:
check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype)
check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype)
@with_seed()
def test_sparse_embedding():
''' test sparse embedding operator '''
def check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype):
target_stype = 'row_sparse' if sparse_grad else 'default'
# init executor
data = mx.sym.Variable("data")
weight = mx.sym.Variable("embed_weight", stype=weight_stype)
embed = mx.sym.sparse.Embedding(data=data, weight=weight, input_dim=in_dim,
sparse_grad=sparse_grad, output_dim=out_dim, name='embed')
grad_req = {'data': 'null', 'embed_weight': 'write'}
exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
# init data
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
np_onehot[np.arange(batch), np_data] = 1.0
arg_map["data"][:] = np_data
# init grad
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
# weight
weight = arg_map["embed_weight"]
for density in densities:
# update weight based on density
weight[:] = rand_ndarray(weight.shape, weight_stype, density=density)
# check forward
exe_test.forward(is_train=True)
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
# check backward
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
# check grad stype
assert(grad_map["embed_weight"].stype == target_stype)
densities = [0, 0.5, 1]
in_dim = 50
out_dim = 3
batch = 8
weight_stypes = ['default', 'row_sparse']
sparse_grads = [True, False]
for weight_stype in weight_stypes:
for sparse_grad in sparse_grads:
check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype)
check_sparse_embedding(in_dim, out_dim, batch, densities, sparse_grad, weight_stype)
@with_seed()
def test_sparse_broadcast_add_sub():
def check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.add(mx_lhs, mx_rhs).asnumpy(), np.add(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.subtract(mx_lhs, mx_rhs).asnumpy(), np.subtract(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_add(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_sub(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_add(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
check_broadcast_sub(mx_rhs, mx_lhs, np_rhs, np_lhs, np.float32)
@with_seed()
def test_sparse_broadcast_mul_div():
def check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.multiply(mx_lhs, mx_rhs).asnumpy(), np.multiply(np_lhs, np_rhs), atol=1e-4)
def check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, dtype):
assert_almost_equal(mx.nd.sparse.divide(mx_lhs, mx_rhs).asnumpy(), np.divide(np_lhs, np_rhs), atol=1e-4)
stype = 'csr'
shape = rand_shape_2d()
num_rows = shape[0]
num_cols = shape[1]
for density in [0.1 * i for i in range(10)]:
mx_lhs = rand_ndarray(shape, stype, density)
np_lhs = mx_lhs.asnumpy()
mx_rhs_row_2D = rand_ndarray((1, num_cols), 'default')
mx_rhs_row_1D = mx_rhs_row_2D.reshape((num_cols))
mx_rhs_col = rand_ndarray((num_rows, 1), 'default')
mx_rhs_scalar_2D = rand_ndarray((1, 1), 'default')
mx_rhs_scalar_1D = mx_rhs_scalar_2D.reshape((1, ))
for mx_rhs in [mx_rhs_row_2D, mx_rhs_row_1D, mx_rhs_col, mx_rhs_scalar_2D, mx_rhs_scalar_1D]:
np_rhs = mx_rhs.asnumpy()
check_broadcast_mul(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
check_broadcast_div(mx_lhs, mx_rhs, np_lhs, np_rhs, np.float32)
@with_seed()
def test_scatter_ops():
def csr_get_seen_points(name, csr_array, verbose=False):
"""Get a unique list of points int he CSR array as well as a
corresponding parallel list of points and values"""
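        # CSR layout walkthrough (mirrors the loops below): indptr[row] and
        # indptr[row + 1] delimit the slice of `indices`/`data` that holds the
        # non-zero column ids and values of that row.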
seen_points = set()
seen_point_list = list()
values = list()
row_count = csr_array.shape[0]
row_pointers = csr_array.indptr.asnumpy()
col_indexes = csr_array.indices.asnumpy()
data = csr_array.data.asnumpy()
for row in range(row_count):
start_pos = row_pointers[row]
end_pos = row_pointers[row + 1]
for col_index in range(start_pos, end_pos):
col = col_indexes[col_index]
val = data[col_index]
if verbose is True:
print("{}: (row, col = ({}, {}) = {}".format(name, row, col, val))
seen_points.add((row, col))
seen_point_list.append((row, col))
values.append(val)
return seen_points, values, seen_point_list
def check_scatter_ops(name, shape, lhs_stype, rhs_stype, forward_mxnet_call, forward_numpy_call,
density=0.25, rhs_is_scalar=False, verbose=False):
lhs = mx.symbol.Variable('lhs', stype=lhs_stype)
if rhs_is_scalar is False:
rhs = mx.symbol.Variable('rhs', stype=rhs_stype)
if verbose is True:
print(name)
if lhs_stype != 'default':
lhs_nd = create_sparse_array_zd(
shape, lhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
                    force_indices=[(shape[0] // 2)]  # force at least one overlap
))
else:
lhs_nd = rand_ndarray(shape, 'default')
if rhs_is_scalar is False:
if rhs_stype != 'default':
rhs_nd = create_sparse_array_zd(
shape, rhs_stype, density=density,
rsp_indices=gen_rsp_random_indices(
shape,
density=density,
                        force_indices=[(shape[0] // 2)]  # force at least one overlap
))
else:
rhs_nd = rand_ndarray(shape, 'default')
else:
rhs_nd = 9
rhs = rhs_nd
lhs_np = lhs_nd.asnumpy()
rhs_np = rhs_nd if rhs_is_scalar is True else rhs_nd.asnumpy()
if verbose is True:
print("lhs = {}".format(lhs_np))
print("rhs = {}".format(rhs_np))
out_np = forward_numpy_call(lhs_np, rhs_np)
if verbose is True:
print("Numpy: out_np = {}".format(out_np))
location = {'lhs': lhs_nd, 'rhs': rhs_nd}
out = forward_mxnet_call(lhs, rhs)
exe_test = out.bind(default_context(), args=location)
exe_test.forward(is_train=False)
out_nd = exe_test.outputs[0]
if verbose is True:
print("Sym: out_nd = {}".format(out_nd.asnumpy()))
# For row_sparse, check that rows only exist for rows that are
        # either in lhs or rhs, and if they exist, they should equal
# the numpy values
if lhs_stype == 'default':
almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
elif lhs_stype == 'row_sparse':
seen_rows = set()
indices = lhs_nd.indices.asnumpy()
for i in range(len(indices)):
seen_rows.add(indices[i])
assert len(out_nd.indices.asnumpy()) == len(seen_rows)
out_nd_np = out_nd.asnumpy()
for row in seen_rows:
row_nd = out_nd_np[row]
row_np = out_np[row]
almost_equal(row_nd, row_np, equal_nan=True)
elif lhs_stype == 'csr' and rhs_is_scalar is False:
almost_equal(out_nd.asnumpy(), out_np, equal_nan=True)
else:
assert rhs_is_scalar
lhs_seen_points, _, _ = csr_get_seen_points("lhs", lhs_nd, verbose)
if rhs_is_scalar is False:
rhs_seen_points, _, _ = csr_get_seen_points("rhs", rhs_nd, verbose)
else:
rhs_seen_points = set()
input_seen_points = lhs_seen_points.union(rhs_seen_points)
        out_seen_points, out_values, seen_point_list = csr_get_seen_points("out_nd", out_nd, verbose)
        # Some may have been zero
        assert len(out_seen_points) <= len(input_seen_points)
out_nd_np = out_nd.asnumpy()
val_index = 0
for row_col in seen_point_list:
row = row_col[0]
col = row_col[1]
val = out_values[val_index]
val_np = out_nd_np[row, col]
almost_equal(val, val_np, equal_nan=True)
val_index += 1
shape = (10, 5)
for lhs_stype in ['row_sparse', 'default', 'csr']:
for rhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}, rhs_stype: {}".format('_scatter_elemwise_div',
lhs_stype, rhs_stype))
check_scatter_ops('_scatter_elemwise_div', shape, lhs_stype, rhs_stype,
lambda l, r: mx.sym._internal._scatter_elemwise_div(l, r),
lambda l, r: l / r,
verbose=False)
for lhs_stype in ['row_sparse', 'default', 'csr']:
print("op: {}, lhs_stype: {}".format('_scatter_plus', lhs_stype))
check_scatter_ops('_scatter_plus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_plus_scalar(l, r),
lambda l, r: l + r,
rhs_is_scalar=True, verbose=False)
print("op: {}, lhs_stype: {}".format('_scatter_minus', lhs_stype))
check_scatter_ops('_scatter_minus', shape, lhs_stype, 'scalar',
lambda l, r: mx.sym._internal._scatter_minus_scalar(l, r),
                          lambda l, r: l - r,
rhs_is_scalar=True, verbose=False, density=0.5)
@with_seed()
def test_mkldnn_sparse():
    # This test is trying to create a race condition described in
# https://github.com/apache/incubator-mxnet/issues/10189
arr = mx.nd.random.uniform(shape=(10, 10, 32, 32))
weight1 = mx.nd.random.uniform(shape=(10, 10, 3, 3))
arr = mx.nd.Convolution(data=arr, weight=weight1, no_bias=True, kernel=(3, 3), num_filter=10)
rs_arr = mx.nd.sparse.row_sparse_array((mx.nd.zeros_like(arr), np.arange(arr.shape[0])))
weight2 = mx.nd.random.uniform(shape=(10, np.prod(arr.shape[1:4])))
fc_res = mx.nd.FullyConnected(data=arr, weight=weight2, no_bias=True, num_hidden=10)
sum_res = mx.nd.elemwise_sub(arr, rs_arr)
res1 = np.dot(mx.nd.flatten(sum_res).asnumpy(), weight2.asnumpy().T)
print(res1 - fc_res.asnumpy())
almost_equal(res1, fc_res.asnumpy())
@with_seed()
def test_sparse_nd_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape):
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y \
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
cond_nd = mx.nd.array(condition_np).tostype('csr')
outputs = where_exe_write.forward(is_train=True, \
condition=cond_nd, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=cond_nd.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=cond_nd, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
def test_where_numeric_gradient(shape):
condition = mx.sym.Variable('condition', stype='csr')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
test_where_helper((5, 9))
test_where_numeric_gradient((5, 9))
@with_seed()
def test_sparse_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
def check_sparse_quadratic_function(a, b, c, expected_stype):
# check forward and compare the result with dense op
ndim = 2
shape = rand_shape_nd(ndim, 5)
data = rand_ndarray(shape=shape, stype='csr')
data_np = data.asnumpy()
expected = f(data_np, a, b, c)
output = mx.nd.contrib.quadratic(data, a=a, b=b, c=c)
assert(output.stype == expected_stype)
assert_almost_equal(output.asnumpy(), expected)
a = np.random.random_sample()
b = np.random.random_sample()
check_sparse_quadratic_function(a, b, 0.0, 'csr')
check_sparse_quadratic_function(a, b, 1.0, 'default')
if __name__ == '__main__':
import nose
nose.runmodule()
| 48.694742 | 122 | 0.523475 |
f22068c8846c8a3acd2b7b6fab6cf8e2d1534ae7 | 8,054 | py | Python | espnet2/bin/tokenize_text.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | ["Apache-2.0"] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | espnet2/bin/tokenize_text.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | ["Apache-2.0"] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | espnet2/bin/tokenize_text.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | ["Apache-2.0"] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z |
#!/usr/bin/env python3
import argparse
from collections import Counter
import logging
from pathlib import Path
import sys
from typing import List
from typing import Optional
from typeguard import check_argument_types
from espnet.utils.cli_utils import get_commandline_args
from espnet2.text.build_tokenizer import build_tokenizer
from espnet2.text.cleaner import TextCleaner
from espnet2.text.phoneme_tokenizer import g2p_choices
from espnet2.utils.types import str2bool
from espnet2.utils.types import str_or_none
def field2slice(field: Optional[str]) -> slice:
"""Convert field string to slice
Note that field string accepts 1-based integer.
Examples:
>>> field2slice("1-")
slice(0, None, None)
>>> field2slice("1-3")
slice(0, 3, None)
>>> field2slice("-3")
slice(None, 3, None)
"""
field = field.strip()
try:
if "-" in field:
# e.g. "2-" or "2-5" or "-7"
s1, s2 = field.split("-", maxsplit=1)
if s1.strip() == "":
s1 = None
else:
s1 = int(s1)
if s1 == 0:
raise ValueError("1-based string")
if s2.strip() == "":
s2 = None
else:
s2 = int(s2)
else:
# e.g. "2"
s1 = int(field)
s2 = s1 + 1
if s1 == 0:
raise ValueError("must be 1 or more value")
except ValueError:
raise RuntimeError(f"Format error: e.g. '2-', '2-5', or '-5': {field}")
if s1 is None:
slic = slice(None, s2)
else:
# -1 because of 1-based integer following "cut" command
# e.g "1-3" -> slice(0, 3)
slic = slice(s1 - 1, s2)
return slic
def tokenize(
input: str,
output: str,
field: Optional[str],
delimiter: Optional[str],
token_type: str,
space_symbol: str,
non_linguistic_symbols: Optional[str],
bpemodel: Optional[str],
log_level: str,
write_vocabulary: bool,
vocabulary_size: int,
remove_non_linguistic_symbols: bool,
cutoff: int,
add_symbol: List[str],
cleaner: Optional[str],
g2p: Optional[str],
):
assert check_argument_types()
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
if input == "-":
fin = sys.stdin
else:
fin = Path(input).open("r", encoding="utf-8")
if output == "-":
fout = sys.stdout
else:
p = Path(output)
p.parent.mkdir(parents=True, exist_ok=True)
fout = p.open("w", encoding="utf-8")
cleaner = TextCleaner(cleaner)
tokenizer = build_tokenizer(
token_type=token_type,
bpemodel=bpemodel,
delimiter=delimiter,
space_symbol=space_symbol,
non_linguistic_symbols=non_linguistic_symbols,
remove_non_linguistic_symbols=remove_non_linguistic_symbols,
g2p_type=g2p,
)
counter = Counter()
if field is not None:
field = field2slice(field)
for line in fin:
line = line.rstrip()
if field is not None:
# e.g. field="2-"
# uttidA hello world!! -> hello world!!
tokens = line.split(delimiter)
tokens = tokens[field]
if delimiter is None:
line = " ".join(tokens)
else:
line = delimiter.join(tokens)
line = cleaner(line)
tokens = tokenizer.text2tokens(line)
if not write_vocabulary:
fout.write(" ".join(tokens) + "\n")
else:
for t in tokens:
counter[t] += 1
if not write_vocabulary:
return
# ======= write_vocabulary mode from here =======
# Sort by the number of occurrences in descending order
# and filter lower frequency words than cutoff value
words_and_counts = list(
filter(lambda x: x[1] > cutoff, sorted(counter.items(), key=lambda x: -x[1]))
)
# Restrict the vocabulary size
if vocabulary_size > 0:
if vocabulary_size < len(add_symbol):
raise RuntimeError(f"vocabulary_size is too small: {vocabulary_size}")
words_and_counts = words_and_counts[: vocabulary_size - len(add_symbol)]
# Parse the values of --add_symbol
for symbol_and_id in add_symbol:
# e.g symbol="<blank>:0"
try:
symbol, idx = symbol_and_id.split(":")
idx = int(idx)
except ValueError:
raise RuntimeError(f"Format error: e.g. '<blank>:0': {symbol_and_id}")
symbol = symbol.strip()
# e.g. idx=0 -> append as the first symbol
# e.g. idx=-1 -> append as the last symbol
if idx < 0:
idx = len(words_and_counts) + 1 + idx
words_and_counts.insert(idx, (symbol, None))
# Write words
for w, c in words_and_counts:
fout.write(w + "\n")
# Logging
total_count = sum(counter.values())
invocab_count = sum(c for w, c in words_and_counts if c is not None)
logging.info(f"OOV rate = {(total_count - invocab_count) / total_count * 100} %")
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Tokenize texts",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument(
"--input", "-i", required=True, help="Input text. - indicates sys.stdin"
)
parser.add_argument(
"--output", "-o", required=True, help="Output text. - indicates sys.stdout"
)
parser.add_argument(
"--field",
"-f",
help="The target columns of the input text as 1-based integer. e.g 2-",
)
parser.add_argument(
"--token_type",
"-t",
default="char",
choices=["char", "bpe", "word", "phn"],
help="Token type",
)
parser.add_argument("--delimiter", "-d", default=None, help="The delimiter")
parser.add_argument("--space_symbol", default="<space>", help="The space symbol")
parser.add_argument("--bpemodel", default=None, help="The bpemodel file path")
parser.add_argument(
"--non_linguistic_symbols",
type=str_or_none,
help="non_linguistic_symbols file path",
)
parser.add_argument(
"--remove_non_linguistic_symbols",
type=str2bool,
default=False,
help="Remove non-language-symbols from tokens",
)
parser.add_argument(
"--cleaner",
type=str_or_none,
choices=[None, "tacotron", "jaconv", "vietnamese", "korean_cleaner"],
default=None,
help="Apply text cleaning",
)
parser.add_argument(
"--g2p",
type=str_or_none,
choices=g2p_choices,
default=None,
help="Specify g2p method if --token_type=phn",
)
group = parser.add_argument_group("write_vocabulary mode related")
group.add_argument(
"--write_vocabulary",
type=str2bool,
default=False,
help="Write tokens list instead of tokenized text per line",
)
group.add_argument("--vocabulary_size", type=int, default=0, help="Vocabulary size")
group.add_argument(
"--cutoff",
default=0,
type=int,
help="cut-off frequency used for write-vocabulary mode",
)
group.add_argument(
"--add_symbol",
type=str,
default=[],
action="append",
help="Append symbol e.g. --add_symbol '<blank>:0' --add_symbol '<unk>:1'",
)
return parser
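# Example invocation (paths are illustrative only), tokenizing the text column
# (fields 2 onwards) of a Kaldi-style "text" file into characters:
#   python -m espnet2.bin.tokenize_text -i data/train/text -o data/train/tokens.txt -f 2- -t char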
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
tokenize(**kwargs)
if __name__ == "__main__":
main()
| 29.610294 | 88 | 0.585672 |
f1794edf1c25a2a49c3a2b9fb8748914902b334e | 479 | py | Python | src/01/amabry_01B.py | AlexMabry/aoc21 | da492f53f93ba960e282b8c664041b76871631ea | ["Apache-2.0"] | null | null | null | src/01/amabry_01B.py | AlexMabry/aoc21 | da492f53f93ba960e282b8c664041b76871631ea | ["Apache-2.0"] | null | null | null | src/01/amabry_01B.py | AlexMabry/aoc21 | da492f53f93ba960e282b8c664041b76871631ea | ["Apache-2.0"] | null | null | null |
from aocd import models
from src.utils import parse_data
# create puzzle
puzzle = models.Puzzle(year=2021, day=1)
# format data
input_data = parse_data(puzzle.input_data, is_numbers=True)
data_length = len(input_data)
increased = 0
previous = sum(input_data[0:3])
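# Part B counts increases of a 3-measurement sliding window sum; since adjacent
# windows share two values, this is equivalent to checking
# input_data[i + 3] > input_data[i].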
for index in range(data_length-2):
current = sum(input_data[index:index+3])
if current > previous:
increased = increased + 1
previous = current
# submit answer
puzzle.answer_b = increased
| 19.958333 | 59 | 0.732777 |
58b953889a70bdb0fe67a87be783da489b0721de | 890 | py | Python | Python Fundamentals/Objects and Classes/classes_exam_the_lift.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | ["MIT"] | null | null | null | Python Fundamentals/Objects and Classes/classes_exam_the_lift.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | ["MIT"] | null | null | null | Python Fundamentals/Objects and Classes/classes_exam_the_lift.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | ["MIT"] | null | null | null |
tourists = int(input())
wagon_list = [int(el) for el in input().split()]
all_tourists = tourists
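# Each wagon seats at most 4 people (the capacity this solution assumes);
# wagons are filled left to right until the queue is exhausted.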
for wagon in range(0, len(wagon_list)):
if tourists >= 4:
added_tourist = 4 - wagon_list[wagon]
wagon_list[wagon] = 4
tourists -= added_tourist
    elif tourists > 0:
        added_tourist = min(4 - wagon_list[wagon], tourists)
        wagon_list[wagon] += added_tourist
        tourists -= added_tourist
max_seats = len(wagon_list) * 4
if sum(wagon_list) == max_seats and tourists == 0:
wagon_list = [str(el) for el in wagon_list]
print(" ".join(wagon_list))
elif sum(wagon_list) < max_seats:
wagon_list = [str(el) for el in wagon_list]
print(f"The lift has empty spots!")
print(" ".join(wagon_list))
elif all_tourists > max_seats:
wagon_list = [str(el) for el in wagon_list]
print(f"There isn't enough space! {tourists} people in a queue!")
print(" ".join(wagon_list))
| 26.969697 | 70 | 0.624719 |
b8ea7e6a77f2ac99150e3be52a55e96104bd8cfd | 1,876 | py | Python | wstest/handler/connection_handler_test.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | ["Apache-2.0"] | null | null | null | wstest/handler/connection_handler_test.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | ["Apache-2.0"] | 42 | 2016-07-04T11:17:54.000Z | 2018-03-18T18:36:09.000Z | wstest/handler/connection_handler_test.py | PedalController/PedalPiREST | aa9418d44f2f5dbec604753a03bf8a74057c627c | ["Apache-2.0"] | null | null | null |
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from wstest.handler.handler_test import Test
from pluginsmanager.model.connection import Connection
class ConnectionHandlerTest(Test):
def test_put(self):
bank = self.default_bank_mock
bank.index = self.rest.create_bank(bank).json()['index']
pedalboard = bank.pedalboards[0]
reverb, reverb2 = pedalboard.effects
connection = Connection(reverb2.outputs[0], reverb.inputs[0])
pedalboard.connections.append(connection)
response = self.rest.create_connection(connection)
self.assertEqual(Test.SUCCESS, response.status_code)
response = self.rest.get_pedalboard(pedalboard)
self.assertEqual(pedalboard.json, response.json())
self.rest.delete_bank(bank)
def test_post_is_delete(self):
bank = self.default_bank_mock
bank.index = self.rest.create_bank(bank).json()['index']
pedalboard = bank.pedalboards[0]
connection = pedalboard.connections[0]
response = self.rest.delete_connection(connection)
self.assertEqual(Test.DELETED, response.status_code)
response = self.rest.get_pedalboard(pedalboard)
pedalboard.connections.remove(connection)
self.assertEqual(pedalboard.json, response.json())
self.rest.delete_bank(bank)
| 34.740741 | 74 | 0.720682 |
883d95e226e3b9d64ba94d8bfe390014dcdd8e28 | 1,622 | py | Python | WEEKS/CD_Sata-Structures/general/MAIN_DATA_STRUCTURES/ternary-search-trees.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | ["MIT"] | null | null | null | WEEKS/CD_Sata-Structures/general/MAIN_DATA_STRUCTURES/ternary-search-trees.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | ["MIT"] | null | null | null | WEEKS/CD_Sata-Structures/general/MAIN_DATA_STRUCTURES/ternary-search-trees.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | ["MIT"] | null | null | null |
class Node(object):
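    """A single TST node: one character, left/middle/right child links, and a stored value."""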
def __init__(self, character):
self.character = character
self.leftNode = None
self.middleNode = None
self.rightNode = None
self.value = 0
class TST(object):
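    """Ternary search tree mapping string keys to integer values.

    get() returns -1 when the search runs off the tree; a never-inserted key
    that is a prefix of an inserted one yields the node's default value 0.
    """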
def __init__(self):
self.rootNode = None
def put(self, key, value):
self.rootNode = self.putItem(self.rootNode, key, value, 0)
def putItem(self, node, key, value, index):
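        # Recursive insert: branch left/right without consuming the current
        # character, follow the middle child to consume it, and store the value
        # on the node matching the key's final character.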
c = key[index]
if node is None:
node = Node(c)
if c < node.character:
node.leftNode = self.putItem(node.leftNode, key, value, index)
elif c > node.character:
node.rightNode = self.putItem(node.rightNode, key, value, index)
elif index < len(key) - 1:
node.middleNode = self.putItem(node.middleNode, key, value, index + 1)
else:
node.value = value
return node
def get(self, key):
node = self.getItem(self.rootNode, key, 0)
if node is None:
return -1
return node.value
def getItem(self, node, key, index):
if node is None:
return None
c = key[index]
if c < node.character:
return self.getItem(node.leftNode, key, index)
elif c > node.character:
return self.getItem(node.rightNode, key, index)
elif index < len(key) - 1:
return self.getItem(node.middleNode, key, index + 1)
else:
return node
if __name__ == "__main__":
tst = TST()
tst.put("apple", 100)
tst.put("orange", 200)
print(tst.get("orange"))
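    # A key that was never inserted falls off the tree, so get() reports -1:
    print(tst.get("grape"))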
| 23.852941 | 82 | 0.55672 |
f3a23d5d9a964594401dc3e581bd240b2bfb3bea | 11,490 | py | Python | examples/dmri_camino_dti.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | ["BSD-3-Clause"] | null | null | null | examples/dmri_camino_dti.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | ["BSD-3-Clause"] | null | null | null | examples/dmri_camino_dti.py | nicholsn/nipype | 6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
"""
=================
dMRI: Camino, DTI
=================
Introduction
============
This script, camino_dti_tutorial.py, demonstrates the ability to perform basic diffusion analysis
in a Nipype pipeline::
python dmri_camino_dti.py
We perform this analysis using the FSL course data, which can be acquired from here:
http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
Import necessary modules from nipype.
"""
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
import os # system functions
"""
We use the following functions to scrape the voxel and data dimensions of the input images. This allows the
pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial
(fmri_spm_face.py) also implements this inference of voxel size from the data.
"""
def get_vox_dims(volume):
import nibabel as nb
if isinstance(volume, list):
volume = volume[0]
nii = nb.load(volume)
hdr = nii.get_header()
voxdims = hdr.get_zooms()
return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
def get_data_dims(volume):
import nibabel as nb
if isinstance(volume, list):
volume = volume[0]
nii = nb.load(volume)
hdr = nii.get_header()
datadims = hdr.get_data_shape()
return [int(datadims[0]), int(datadims[1]), int(datadims[2])]
def get_affine(volume):
import nibabel as nb
nii = nb.load(volume)
return nii.get_affine()
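# Added illustration (hypothetical numbers): for a 2 mm isotropic DWI series of
# shape (96, 96, 50, 68), get_vox_dims() returns [2.0, 2.0, 2.0] and
# get_data_dims() returns [96, 96, 50]; get_affine() returns the 4x4 affine.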
subject_list = ['subj1']
fsl.FSLCommand.set_default_output_type('NIFTI')
"""
Map field names to individual subject runs
"""
info = dict(dwi=[['subject_id', 'data']],
bvecs=[['subject_id','bvecs']],
bvals=[['subject_id','bvals']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``datasource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` module wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=info.keys()),
name = 'datasource')
datasource.inputs.template = "%s/%s"
# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/')
datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
An inputnode is used to pass the data obtained by the data grabber to the actual processing functions
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), name="inputnode")
"""
Setup for Diffusion Tensor Computation
--------------------------------------
In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""
image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True
"""
Second, diffusion tensors are fit to the voxel-order data.
"""
dtifit = pe.Node(interface=camino.DTIFit(),name='dtifit')
"""
Next, a lookup table is generated from the schemefile and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""
dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1
"""
In this tutorial we implement probabilistic tractography using the PICo algorithm.
PICo tractography requires an estimate of the fibre direction and a model of its
uncertainty in each voxel; this is produced using the following node.
"""
picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'
"""
An FSL BET node creates a brain mask from the diffusion image for seeding the PICo tractography.
"""
bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True
"""
Finally, tractography is performed.
First DT streamline tractography.
"""
trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
"""
Now Camino's Probabilistic Index of Connectivity (PICo) algorithm.
In this tutorial, we will use only 1 iteration for time-saving purposes.
"""
trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1
"""
Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse.
"""
cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'
cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'
trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino")
"""
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
using the following two nodes. For VTK use VtkStreamlines.
"""
procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
procstreamlines.inputs.outputtracts = 'oogl'
"""
We can also produce a variety of scalar values from our fitted tensors. The following nodes generate the
fractional anisotropy and diffusivity trace maps and their associated headers.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(),name='fa')
trace = pe.Node(interface=camino.ComputeTensorTrace(),name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
analyzeheader_fa = pe.Node(interface= camino.AnalyzeHeader(), name = "analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
fa2nii = pe.Node(interface=misc.CreateNifti(),name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
Since we have now created all our nodes, we can now define our workflow and start making connections.
"""
tractography = pe.Workflow(name='tractography')
tractography.connect([(inputnode, bet,[("dwi","in_file")])])
"""
File format conversion
"""
tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]),
(inputnode, fsl2scheme, [("bvecs", "bvec_file"),
("bvals", "bval_file")])
])
"""
Tensor fitting
"""
tractography.connect([(image2voxel, dtifit,[['voxel_order','in_file']]),
(fsl2scheme, dtifit,[['scheme','scheme_file']])
])
"""
Workflow for applying DT streamline tractography
"""
tractography.connect([(bet, trackdt,[("mask_file","seed_file")])])
tractography.connect([(dtifit, trackdt,[("tensor_fitted","in_file")])])
"""
Workflow for applying PICo
"""
tractography.connect([(bet, trackpico,[("mask_file","seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen,[("scheme","scheme_file")])])
tractography.connect([(dtlutgen, picopdfs,[("dtLUT","luts")])])
tractography.connect([(dtifit, picopdfs,[("tensor_fitted","in_file")])])
tractography.connect([(picopdfs, trackpico,[("pdfs","in_file")])])
# ProcStreamlines might throw memory errors - comment this line out in such case
tractography.connect([(trackdt, procstreamlines,[("tracked","in_file")])])
"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
tensor fitting.
This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with
the original DWI image from the input node, to the header-generating nodes. This ensures that the files
will be correct and readable.
"""
tractography.connect([(dtifit, fa,[("tensor_fitted","in_file")])])
tractography.connect([(fa, analyzeheader_fa,[("fa","in_file")])])
tractography.connect([(inputnode, analyzeheader_fa,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii,[('fa','data_file')])])
tractography.connect([(inputnode, fa2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, trace,[("tensor_fitted","in_file")])])
tractography.connect([(trace, analyzeheader_trace,[("trace","in_file")])])
tractography.connect([(inputnode, analyzeheader_trace,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii,[('trace','data_file')])])
tractography.connect([(inputnode, trace2nii,[(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii,[('header', 'header_file')])])
tractography.connect([(dtifit, dteig,[("tensor_fitted","in_file")])])
tractography.connect([(trackpico, cam2trk_pico, [('tracked','in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked','in_file')])])
tractography.connect([(inputnode, cam2trk_pico,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode, cam2trk_dt,[(('dwi', get_vox_dims), 'voxel_dims'),
(('dwi', get_data_dims), 'data_dims')])])
"""
Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes
declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding
their names to the subject list and their data to the proper folders.
"""
workflow = pe.Workflow(name="workflow")
workflow.base_dir = os.path.abspath('camino_dti_tutorial')
workflow.connect([(infosource,datasource,[('subject_id', 'subject_id')]),
(datasource,tractography,[('dwi','inputnode.dwi'),
('bvals','inputnode.bvals'),
('bvecs','inputnode.bvecs')
])
])
"""
The following functions run the whole workflow and produce a .dot and .png graph of the processing pipeline.
"""
if __name__ == '__main__':
workflow.run()
workflow.write_graph()
"""
You can choose the format of the exported graph with the ``format`` option. For example ``workflow.write_graph(format='eps')``
"""
| 36.018809 | 126 | 0.704091 |
1bc43b6e7c8befa66da9042eb860413e84795a29 | 2,532 | py | Python | egs/iwslt18/st1/local/ctm2segments.py | Syzygianinfern0/espnet | 3ea59a0050e8a6a40138ac2365c258825b02f9cd | [
"Apache-2.0"
] | 5 | 2020-10-26T11:28:04.000Z | 2021-12-17T07:49:11.000Z | egs/iwslt18/st1/local/ctm2segments.py | Syzygianinfern0/espnet | 3ea59a0050e8a6a40138ac2365c258825b02f9cd | [
"Apache-2.0"
] | 1 | 2021-03-05T10:43:49.000Z | 2021-03-05T10:43:49.000Z | egs/iwslt18/st1/local/ctm2segments.py | Syzygianinfern0/espnet | 3ea59a0050e8a6a40138ac2365c258825b02f9cd | [
"Apache-2.0"
] | 2 | 2021-03-05T02:04:18.000Z | 2021-03-05T10:23:03.000Z | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import codecs
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument("text", type=str, help="input text")
parser.add_argument("ctm", type=str, help="input ctm file (ASR results)")
parser.add_argument("set", type=str, help="")
parser.add_argument("talk_id", type=str, help="")
args = parser.parse_args()
refs = []
with codecs.open(args.text, encoding="utf-8") as f:
for line in f:
line = line.strip().lower()
utt_id = line.split(" ")[0].split("_")[0]
ref = " ".join(line.split()[1:])
refs += [(utt_id, ref)]
ctms = []
with codecs.open(args.ctm, encoding="utf-8") as f:
for line in f:
ctms.append(re.sub(r"[\s]+", " ", line.strip()))
ctms = sorted(ctms, key=lambda x: float(x.split()[2]))
threshold = 0.2
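    # Added note: a new segment is started when the silence between two words
    # is at least `threshold` seconds and the current segment is longer than
    # 0.2 s (see the start_time_w / end_t check below).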
hyps = []
utt_id = 1
start_t = None
end_t = None
hyp = ""
num_lines = len(ctms)
for i, ctm in enumerate(ctms):
_, _, start_time_w, duration_w, w = ctm.split()[:5]
w = re.sub(r"([^\(\)]*)\([^\)]+\)", r"\1", w.replace("$", ""))
if start_t is not None and i < num_lines - 1:
if (float(start_time_w) - end_t >= threshold) and (end_t - start_t > 0.2):
                # different utterance
hyps += [(utt_id, start_t, end_t, hyp[1:])]
# reset
hyp = ""
start_t = None
end_t = None
utt_id += 1
# normalize
if start_t is None:
start_t = float(start_time_w)
end_t = float(start_time_w)
end_t = float(start_time_w) + float(duration_w)
if w != "":
hyp += " " + w.lower()
# last word in the session
if i == num_lines - 1:
hyps += [(utt_id, start_t, end_t, hyp[1:])]
for i, (utt_id, start_t, end_t, hyp) in enumerate(hyps):
assert end_t - start_t > 0
print(
"%s_%07d_%07d %s %.2f %.2f"
% (
args.set + "." + args.talk_id,
int(start_t * 1000 + 0.5),
int(end_t * 1000 + 0.5),
args.set + "." + args.talk_id,
start_t,
end_t,
)
)
if __name__ == "__main__":
main()
| 28.772727 | 86 | 0.505134 |
ca8676825fc90bd923486225d1847f10a4707e81 | 994 | py | Python | Die Kunst des Zaehlens/hypergeom_distr.py | d4tadriven2/info-website | ef0b47eee6b82da054769322c80c490b19abdfa3 | [
"MIT"
] | 2 | 2022-03-01T15:14:53.000Z | 2022-03-05T21:16:53.000Z | Die Kunst des Zaehlens/hypergeom_distr.py | d4tadriven2/info-website | ef0b47eee6b82da054769322c80c490b19abdfa3 | [
"MIT"
] | null | null | null | Die Kunst des Zaehlens/hypergeom_distr.py | d4tadriven2/info-website | ef0b47eee6b82da054769322c80c490b19abdfa3 | [
"MIT"
] | null | null | null | from scipy.stats import hypergeom
import matplotlib.pyplot as plt
# Discrete distributions
# Combination, permutation and hypergeometric distribution
def main():
zahl = 5
    # sample program runs
print(factorial(zahl))
print(comb(12, 30))
print(comb(30, 12))
print(perm(20, 2))
print(hypergeom(6, 1, 4, 60))
plt.show()
exit(0)
# Factorial via the product rule
def factorial(n):
x = 1
for i in range(1,n+1):
x *= i
return x
# Combination without repetition
def comb(n, k):
if n < k:
return "N < K"
else:
noverk = (factorial(n) / (factorial(k) * (factorial(n - k))))
return int(noverk)
# Permutation
def perm(n, k):
if n < k:
return "N < K"
else:
return int((factorial(n))/(factorial(n-k)))
# n - sample size; k - successes; M - items with the property; N - population size
def hypergeom(n, k, M, N):
nom = (comb(M, k) * (comb((N - M), (n - k))))
denom = comb(N, n)
return float(nom / denom)
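# Added worked example: hypergeom(6, 1, 4, 60) is the probability of drawing
# exactly k=1 of the M=4 marked items in a sample of n=6 from N=60, i.e.
# C(4,1) * C(56,5) / C(60,6) = 4 * 3819816 / 50063860 ≈ 0.305.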
main()
| 19.88 | 71 | 0.574447 |
de1f5d6ee298b319f6450613cd3a1a41935e97e2 | 3,617 | py | Python | app/auth/routes.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | null | null | null | app/auth/routes.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | 206 | 2020-05-23T18:44:20.000Z | 2022-03-31T19:11:25.000Z | app/auth/routes.py | Icoqu/SecretShare | 1b0c25c3cc64803157499d2c62870254d32b3022 | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, request, abort, current_app
from app.helpers import flash
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, SetPasswordForm
from app.models import User
from app.auth.email import send_password_reset_email, send_account_activation_token
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_app.config['LOGIN_DISABLED']:
abort(404)
if current_user.is_authenticated:
return redirect(url_for('secret.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash(_('Invalid username or password'), category='danger')
return redirect(url_for('auth.login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('secret.index')
return redirect(next_page)
return render_template('auth/login.html', title=_('Sign In'), form=form, not_register=current_app.config['REGISTRATION_DISABLED'])
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('secret.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_app.config['REGISTRATION_DISABLED'] or current_app.config['LOGIN_DISABLED']:
abort(404)
if current_user.is_authenticated:
return redirect(url_for('secret.index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
db.session.add(user)
db.session.commit()
send_account_activation_token(user)
flash(_('Congratulations, you are now a registered user! Check your e-mail to activate account.'))
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title=_('Register'),
form=form)
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_app.config['LOGIN_DISABLED']:
abort(404)
if current_user.is_authenticated:
return redirect(url_for('secret.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash(
_('Check your email for the instructions to reset your password'))
return redirect(url_for('auth.login'))
return render_template('auth/reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/passwd/<token>', methods=['GET', 'POST'])
def set_password(token):
if current_user.is_authenticated:
flash(_('Looks like you have already activated your account.'))
return redirect(url_for('secret.index'))
user = User.verify_set_password_token(token)
if not user:
abort(404)
form = SetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash(_('Your password has been setup.'))
return redirect(url_for('auth.login'))
return render_template('auth/set_password.html', form=form)
| 38.478723 | 134 | 0.687863 |
0304c921a5a4883f7de3761047a1d94e73af48fa | 3,029 | py | Python | veidt/rfxas/tests/test_core.py | yimingchen95/veidt | 90f201f856d2f71c578f74b7391c0c9ff284986b | [
"BSD-3-Clause"
] | 18 | 2018-01-18T08:32:26.000Z | 2022-02-06T16:56:30.000Z | veidt/rfxas/tests/test_core.py | yimingchen95/veidt | 90f201f856d2f71c578f74b7391c0c9ff284986b | [
"BSD-3-Clause"
] | 77 | 2017-08-16T18:09:07.000Z | 2020-04-03T13:17:17.000Z | veidt/rfxas/tests/test_core.py | yimingchen95/veidt | 90f201f856d2f71c578f74b7391c0c9ff284986b | [
"BSD-3-Clause"
] | 12 | 2017-09-11T17:23:26.000Z | 2020-01-30T02:19:21.000Z |
from veidt.rfxas.core import XANES
from veidt.rfxas.prediction import CenvPrediction
import pandas as pd
import os, unittest
import warnings
comp_test_df_path = os.path.join(os.path.dirname(__file__), 'comp_spectra_test.pkl')
comp_test_df = pd.read_pickle(comp_test_df_path)
Fe_tsv = os.path.join(os.path.dirname(__file__), 'xas.XANES.K.Fe.mp-13.tsv')
Fe2O3_xdi = os.path.join(os.path.dirname(__file__), 'fe2o3_rt.xdi')
binary_exist = False
for path in ['/usr/lib', '/usr/lib64', '/usr/local/lib', '/usr/local/lib64']:
if os.path.isfile(os.path.join(path, "libxdifile.so")):
binary_exist = True
class RfxasXANESTest(unittest.TestCase):
def setUp(self):
self.test_row = comp_test_df.iloc[0]
self.test_row_formula = self.test_row['formula']
self.test_row_ele_group = self.test_row['ele_tm_alka_metalloid']
self.test_row_xas_id = self.test_row['xas_id']
self.test_row_absorb_specie = self.test_row['absorbing_species']
self.test_row_energy_e0 = self.test_row['energy_e0']
self.test_row_structure = self.test_row['structure']
self.test_row_x = self.test_row['x_axis_energy_55eV']
self.test_row_spect = self.test_row['interp_spectrum_55eV']
self.test_row_add_paras = {
'composition': self.test_row_formula, 'elemental_group': self.test_row_ele_group,
'xas_id': self.test_row_xas_id
}
def test_raise_warning(self):
with warnings.catch_warnings(record=True) as w:
xanes_test = XANES(self.test_row_x, self.test_row_spect, self.test_row_absorb_specie, edge='K',
**self.test_row_add_paras)
self.assertTrue('maximum derivative' in str(w[-1].message))
self.assertEqual(xanes_test.composition, 'NaB(CO2)4')
self.assertEqual(len(xanes_test.x), 200)
self.assertEqual(xanes_test.xas_id, 'mp-559618-4-XANES-K')
self.assertEqual(xanes_test.elemental_group, 'Carbon')
with warnings.catch_warnings(record=True) as w:
xanes_test_2 = XANES(self.test_row_x, self.test_row_spect, self.test_row_absorb_specie, edge='K',
e0=self.test_row_energy_e0, **self.test_row_add_paras)
self.assertEqual(len(w), 0)
self.assertEqual(xanes_test_2.composition, 'NaB(CO2)4')
self.assertEqual(len(xanes_test_2.x), 200)
self.assertEqual(xanes_test_2.e0, 274.98)
self.assertEqual(xanes_test_2.xas_id, 'mp-559618-4-XANES-K')
self.assertEqual(xanes_test_2.elemental_group, 'Carbon')
def test_tsv_loading(self):
self.Fe_xanes = XANES.from_K_XANES_MP_tsv(Fe_tsv, sep='\t', header=3)
self.Fe_CenvPred = CenvPrediction(self.Fe_xanes, 'lowest', 45)
self.Fe_CenvPred.cenv_prediction()
self.assertEqual(self.Fe_CenvPred.pred_cnum_ranklist, 'CN_4')
self.assertEqual(self.Fe_CenvPred.pred_cenv[0], 'CN_4-tetrahedral-trigonal pyramidal-see-saw-like-square co-planar')
| 50.483333 | 124 | 0.688346 |
c21c2b9c69fc2f7bf240152f434289a17c3190a1 | 1,409 | py | Python | stor/full_node/hint_store.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 19 | 2021-06-29T20:06:09.000Z | 2022-02-09T04:33:00.000Z | stor/full_node/hint_store.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 8 | 2021-07-04T03:21:51.000Z | 2021-12-27T07:56:09.000Z | stor/full_node/hint_store.py | Stor-Network/stor-blockchain | 3c3cd1a3b99592e88160107ca5b81afc0937b992 | [
"Apache-2.0"
] | 6 | 2021-10-04T17:15:30.000Z | 2022-03-15T08:40:01.000Z | from typing import List, Tuple
import aiosqlite
from stor.types.blockchain_format.sized_bytes import bytes32
from stor.util.db_wrapper import DBWrapper
import logging
log = logging.getLogger(__name__)
class HintStore:
coin_record_db: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute(
"CREATE TABLE IF NOT EXISTS hints(id INTEGER PRIMARY KEY AUTOINCREMENT, coin_id blob, hint blob)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS hint_index on hints(hint)")
await self.coin_record_db.commit()
return self
async def get_coin_ids(self, hint: bytes) -> List[bytes32]:
cursor = await self.coin_record_db.execute("SELECT * from hints WHERE hint=?", (hint,))
rows = await cursor.fetchall()
await cursor.close()
coin_ids = []
for row in rows:
coin_ids.append(row[1])
return coin_ids
async def add_hints(self, coin_hint_list: List[Tuple[bytes32, bytes]]) -> None:
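        # Added note: rows are (id, coin_id, hint); the leading None lets
        # SQLite assign the AUTOINCREMENT primary key declared in create().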
cursor = await self.coin_record_db.executemany(
"INSERT INTO hints VALUES(?, ?, ?)",
[(None,) + record for record in coin_hint_list],
)
await cursor.close()
| 34.365854 | 110 | 0.664301 |
5f01f36e7d0aaf04a4d159f729ca24f6a2307aad | 6,932 | py | Python | tensorflow_probability/python/bijectors/chain.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/chain.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/chain.py | brianwa84/probability | 6f8e78d859ac41170be5147c8c7bde54cc5aa83e | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Chain bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import composition
from tensorflow_probability.python.bijectors import ldj_ratio
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
__all__ = [
'Chain',
]
class Chain(composition.Composition):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.log(tf.exp(tf.log(y)) - 1.)
= tf.log(y - 1.)
```
Keyword arguments can be passed to the inner bijectors by utilizing the inner
bijector names, e.g.:
```python
chain = Chain([Bijector1(name='b1'), Bijector2(name='b2')])
y = chain.forward(x, b1={'arg': 1}, b2={'arg': 2})
# Equivalent to:
z = Bijector2().forward(x, arg=1)
y = Bijector1().forward(z, arg=2)
```
"""
def __init__(self,
bijectors=None,
validate_args=False,
validate_event_size=True,
parameters=None,
name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python `list` of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
validate_event_size: Checks that bijectors are not applied to inputs with
incomplete support (that is, inputs where one or more elements are a
deterministic transformation of the others). For example, the following
LDJ would be incorrect:
`Chain([Scale(), SoftmaxCentered()]).forward_log_det_jacobian([1], [1])`
The jacobian contribution from `Scale` applies to a 2-dimensional input,
but the output from `SoftMaxCentered` is a 1-dimensional input embedded
in a 2-dimensional space. Setting `validate_event_size=True` (default)
prints warnings in these cases. When `validate_args` is also `True`, the
warning is promoted to an exception.
parameters: Locals dict captured by subclass constructor, to be used for
copy/slice re-instantiation operators.
name: Python `str`, name given to ops managed by this object. Default:
E.g., `Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
parameters = dict(locals()) if parameters is None else parameters
if name is None:
name = ('identity' if not bijectors else
'_of_'.join(['chain'] + [b.name for b in bijectors]))
name = name.replace('/', '')
# If there are no bijectors, treat this like a single-part Identity.
forward_min_event_ndims = 0
inverse_min_event_ndims = 0
if bijectors:
forward_min_event_ndims = None # Inferred by base class.
inverse_min_event_ndims = None # Inferred by base class.
with tf.name_scope(name) as name:
super(Chain, self).__init__(
bijectors=bijectors or (),
validate_args=validate_args,
validate_event_size=validate_event_size,
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
bijectors=parameter_properties.BatchedComponentProperties(
event_ndims=lambda self: [None for _ in self.bijectors]))
def _is_increasing(self, **kwargs):
# desc(desc)=>asc, asc(asc)=>asc, other cases=>desc.
is_increasing = True
for b in self._bijectors:
is_increasing = ps.equal(
is_increasing, b._internal_is_increasing(**kwargs.get(b.name, {}))) # pylint: disable=protected-access
return is_increasing
def _walk_forward(self, step_fn, x, **kwargs):
"""Applies `transform_fn` to `x` sequentially over nested bijectors."""
for bij in reversed(self._bijectors):
x = step_fn(bij, x, **kwargs.get(bij.name, {}))
return x # Now `y`
def _walk_inverse(self, step_fn, y, **kwargs):
"""Applies `transform_fn` to `y` sequentially over nested bijectors."""
for bij in self._bijectors:
y = step_fn(bij, y, **kwargs.get(bij.name, {}))
return y # Now `x`
@ldj_ratio.RegisterFLDJRatio(Chain)
def _fldj_ratio_chain(p, x, q, y):
"""Sum-of-diffs FLDJRatio for Chains."""
if len(p.bijectors) != len(q.bijectors):
raise ValueError('Mismatched lengths of bijectors: `p` has '
f'{len(p.bijectors)} but `q` has {len(q.bijectors)}.')
ratios = []
max_shp = []
for p, q in zip(reversed(p.bijectors), reversed(q.bijectors)):
ratios.append(ldj_ratio.forward_log_det_jacobian_ratio(
p, x, q, y, p.forward_min_event_ndims))
max_shp = ps.broadcast_shape(max_shp, ps.shape(ratios[-1]))
x, y = p.forward(x), q.forward(y)
ratios = [tf.broadcast_to(r, max_shp) for r in ratios]
return tf.add_n(ratios)
@ldj_ratio.RegisterILDJRatio(Chain)
def _ildj_ratio_chain(p, x, q, y):
"""Sum-of-diffs ILDJRatio for Chains."""
if len(p.bijectors) != len(q.bijectors):
raise ValueError('Mismatched lengths of bijectors: `p` has '
f'{len(p.bijectors)} but `q` has {len(q.bijectors)}.')
ratios = []
max_shp = []
for p, q in zip(p.bijectors, q.bijectors):
ratios.append(ldj_ratio.inverse_log_det_jacobian_ratio(
p, x, q, y, p.inverse_min_event_ndims))
max_shp = ps.broadcast_shape(max_shp, ps.shape(ratios[-1]))
x, y = p.inverse(x), q.inverse(y)
ratios = [tf.broadcast_to(r, max_shp) for r in ratios]
return tf.add_n(ratios)
| 35.548718 | 113 | 0.666042 |
5f4087ce7654aa47bbcf3fd281148f28905a1841 | 861 | py | Python | Crawlers/tempCodeRunnerFile.py | sailinglove/personal-general | b2e932dcd7989bdf856d4852e38f96cbbfc9c907 | [
"MIT"
] | null | null | null | Crawlers/tempCodeRunnerFile.py | sailinglove/personal-general | b2e932dcd7989bdf856d4852e38f96cbbfc9c907 | [
"MIT"
] | null | null | null | Crawlers/tempCodeRunnerFile.py | sailinglove/personal-general | b2e932dcd7989bdf856d4852e38f96cbbfc9c907 | [
"MIT"
] | null | null | null | verWait(driver, 10).until(lambda d: d.find_element_by_xpath('//*[@class="iv-login"]')).click()
# driver.find_element_by_xpath("//*[@class='ivu-cascader-menu']/li[2]").click()
# driver.find_element_by_xpath("//li[contains(text(), '志愿者')]").click()
# WebDriverWait(driver, 10).until(lambda d: d.find_element_by_xpath("//input[@class='user']")).send_keys(username)
# driver.find_element_by_xpath("//input[@class='password']").send_keys(password)
# driver.find_element_by_xpath("//a[@class='login-btn']").click()
# temp_input_box = driver.find_element_by_xpath("//input[@placeholder='请输入手机动态口令']")
# otp = input('OTP: ')
# temp_input_box.send_keys(otp)
# driver.find_element_by_xpath("/html/body/div[3]/div[2]/div[3]/form/a").click()
# WebDriverWait(driver, 30).until(lambda d: d.find_element_by_xpath('//*[contains(text(), "必修课程")]/following-sibling::div'))
| 47.833333 | 124 | 0.713124 |
6c5660ea9de2c5370fdf571abad8950cb27bc097 | 3,700 | py | Python | 3rdParty/V8/v7.1.302.28/test/intl/testcfg.py | cclauss/arangodb | 089f7a7e60483f0fb73171d159f922dd3de283e9 | [
"BSL-1.0",
"Apache-2.0"
] | 4 | 2019-04-20T15:56:13.000Z | 2019-12-23T07:14:01.000Z | 3rdParty/V8/v7.1.302.28/test/intl/testcfg.py | fceller/arangodb | 22eec2e35407d868ac36f06b9abdbee3fb3c3ef3 | [
"Apache-2.0"
] | 143 | 2015-05-21T14:33:52.000Z | 2017-10-20T17:46:37.000Z | 3rdParty/V8/v7.1.302.28/test/intl/testcfg.py | fceller/arangodb | 22eec2e35407d868ac36f06b9abdbee3fb3c3ef3 | [
"Apache-2.0"
] | 2 | 2015-04-26T22:08:16.000Z | 2018-09-28T09:49:42.000Z | # Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
class TestSuite(testsuite.TestSuite):
def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
filename != "utils.js" and filename != "regexp-assert.js" and
filename != "regexp-prepare.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
test = self._create_test(testname)
tests.append(test)
return tests
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
self._source_flags = self._parse_source_flags()
source = self.get_source()
self._env = self._parse_source_env(source)
def _parse_source_env(self, source):
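    # Added note: test files can request environment variables with a header
    # comment such as (hypothetical values)
    #   // Environment Variables: LC_ALL=de_DE TZ=Europe/Berlin
    # which is parsed into {'LC_ALL': 'de_DE', 'TZ': 'Europe/Berlin'}.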
env_match = ENV_PATTERN.search(source)
env = {}
if env_match:
for env_pair in env_match.group(1).strip().split():
var, value = env_pair.split('=')
env[var] = value
return env
def _get_cmd_env(self):
return self._env
def _get_files_params(self):
files = map(lambda f: os.path.join(self.suite.root, f), [
'assert.js',
'utils.js',
'regexp-prepare.js',
self.path + self._get_suffix(),
'regexp-assert.js',
])
if self._test_config.isolates:
files += ['--isolate'] + files
return files
def _get_source_flags(self):
return self._source_flags
def _get_suite_flags(self):
return ['--allow-natives-syntax']
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
| 35.576923 | 73 | 0.693514 |
299b3905ada02f1dc4d64d89f1f28f27e5ddbc36 | 8,261 | py | Python | eis_pointing/coregister/slits.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | 3 | 2019-04-01T09:35:01.000Z | 2021-12-14T15:39:40.000Z | eis_pointing/coregister/slits.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | null | null | null | eis_pointing/coregister/slits.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import datetime
import warnings
import functools
import itertools
import multiprocessing as mp
import numpy as np
from ..utils import num
from . import tools
def cc_step(a, x, y, t, ref_raster_builder,
x_shift, y_shift, ang_shift, norm=None):
''' Compute the explicit cross-correlation between two arrays for a given
integer shift and rotation.
Returns
=======
cc : float
        The cross-correlation of a with the reference raster for the given
        (x_shift, y_shift) translation and ang_shift rotation.
'''
a_x, a_y = num.affine_transform(
x, y,
tools.transform_matrix([y_shift, x_shift, ang_shift], 'rotation'),
center=tools.transform_center(x, y, 'raster'),
)
im = ref_raster_builder.get_raster(a_x, a_y, t, extrapolate_t=True)
# keep only the parts where non-nan values overlap
try:
mask1 = a.mask | np.isnan(a.data)
except AttributeError:
mask1 = np.isnan(a)
try:
mask2 = im.mask | np.isnan(im.data)
except AttributeError:
mask2 = np.isnan(im)
mask = mask1 | mask2
a = a[~mask]
im = im[~mask]
if norm is None:
a, im, norm = tools.prep_for_cc(a, im)
return np.sum(a * im) / norm
def compute_cc(arr, x, y, t, ref_raster_builder,
x_set=None, y_set=None, a_set=None,
cores=None):
''' Compute the cross-correlation with rotation of a 1D array and a 2D
image using explicit multiplication in the real space.
Parameters
==========
arr : 1D ndarray
    ref_raster_builder : SyntheticRasterBuilder
        used to build the reference image for each candidate offset
x, y, t : 1D ndarrays
coordinates for the points of arr
x_set, y_set, a_set : OffsetSet (default: None)
cores : int or None (default: None)
If not None, use multiprocessing to compute the steps using the
specified number processes.
'''
nx = x_set.number
ny = y_set.number
na = a_set.number
    n_iter = nx * ny * na
cc_worker = functools.partial(
cc_step, arr, x, y, t, ref_raster_builder)
cc_iter = itertools.product(x_set.world, y_set.world, a_set.world)
if cores is None:
cc = itertools.starmap(cc_worker, cc_iter)
cc = list(cc)
else:
p = mp.Pool(cores)
try:
chunksize, extra = divmod(n_iter, len(p._pool) * 2)
if extra:
chunksize += 1
print('start', datetime.datetime.now())
cc = p.starmap(cc_worker, cc_iter, chunksize=chunksize)
finally:
p.terminate()
cc = np.array(cc)
cc = cc.reshape(nx, ny, na)
cc = cc.swapaxes(0, 1) # from (x, y, a) to (y, x, a)
return cc
def track_slit(ref_raster_builder, arr, x, y, t, missing=np.nan,
x_set=None, y_set=None, a_set=None,
**kwargs):
''' Find the optimal position of a 1D array within a 2D image using
compute_cc().
Parameters
==========
arr : 1D ndarray
ref_raster_builder : SyntheticRasterBuilder
x, y, t : 1D ndarrays
the coordinates of arr.
missing : float or None (default: None)
The value of the pixels in the image that should be considered as
'missing', and thus discarded before computing the cross correlation.
If set to None, don't handle missing values.
If your missing values are 'None', you’re out of luck.
**kwargs : passed to compute_cc()
Returns
=======
offset : ndarray
An array containing the optimal (y, x, angle) offset between the input
array and image
cc : float or 3D array
The full cross-correlation array.
'''
    if missing is not None:
        # Only the input slit array is masked here; the reference image is
        # built and masked for each candidate offset inside cc_step().
        if np.isnan(missing):
            mask1 = np.isnan(arr)
        else:
            mask1 = (arr == missing)
        arr = np.ma.array(arr, mask=mask1)
if np.all(arr.mask):
offset = np.zeros(3) * np.nan
cc = np.zeros((y_set.number, x_set.number, a_set.number)) * np.nan
return offset, cc
cc = compute_cc(arr, x, y, t, ref_raster_builder,
x_set=x_set, y_set=y_set, a_set=a_set, **kwargs)
offset = num.get_max_location(cc)
offset = tools.convert_offsets(offset, [y_set, x_set, a_set])
return offset, cc
def track_raster(raster, x, y, t, ref_raster_builder,
x_set=None, y_set=None, a_set=None,
cores=None, mp_mode='track', **kwargs):
'''
mp_mode : 'track' or 'cc'
Wether to parallelize track_slit() calls (ie over each slit position),
or cc_step() calls (ie. over each point of the cross-correlation
cube).
'''
cc = []
offset = []
track_cores, cc_cores = None, None
if mp_mode == 'track':
track_cores = cores
elif mp_mode == 'cc':
cc_cores = cores
_, n_iter = raster.shape
track_iter = zip(raster.T, x.T, y.T, t.T)
track_worker = functools.partial(
track_slit, ref_raster_builder,
x_set=x_set, y_set=y_set, a_set=a_set,
missing=None, cores=cc_cores,
**kwargs)
if track_cores is None:
res = itertools.starmap(track_worker, track_iter)
res = list(res)
else:
p = mp.Pool(track_cores)
try:
chunksize, extra = divmod(n_iter, len(p._pool) * 2)
if extra:
chunksize += 1
res = p.starmap(track_worker, track_iter, chunksize=chunksize)
finally:
p.terminate()
offset = [r[0] for r in res]
cc = [r[1] for r in res]
cc = np.array(cc)
offset = np.array(offset)
return offset, cc
def align(raster, x, y, t, ref_raster_builder,
x_set=None, y_set=None, a_set=None,
cores=None, mp_mode='track'):
''' Align raster individual slit positions using a reference image '''
# explore raster for all slit positions, with rotation
raster = np.ma.array(raster, mask=np.isnan(raster))
offset, cc = track_raster(
raster, x, y, t, ref_raster_builder,
x_set=x_set, y_set=y_set, a_set=a_set,
cores=cores)
# The additionnal shift in world units wrt the ref image, as determined by
# track()
offset_xy = offset[:, 1::-1]
offset_a = offset[:, 2]
# fill nan offsets with zeros
offset_xy = np.ma.array(offset_xy, mask=np.isnan(offset_xy)).filled(0)
offset_a = np.ma.array(offset_a, mask=np.isnan(offset_a)).filled(0)
ny, nx = x.shape
# center of each slit (n_slit_pos, 2):
xy0 = np.stack((x, y))[:, ny//2].T
# new center of each slit (n_slit_pos, 2):
new_xy0 = xy0 + offset_xy
# Transformation matrices - shape (nx, 3, 3)
# - start with identity, repeat it nx times, and reshape to (3, 3, nx)
# - move last axis to the beginning to get shape (nx, 3, 3)
# - set values using slices
# Translation matrix of -x0, -y0, for each slit position
translation_xy0 = np.repeat(np.identity(3), nx).reshape(3, 3, nx)
translation_xy0 = np.moveaxis(translation_xy0, -1, 0)
translation_xy0[:, :2, 2] = - xy0
# Translation matrix of new_x0, new_y0, for each slit position
translation_new_xy0 = np.repeat(np.identity(3), nx).reshape(3, 3, nx)
translation_new_xy0 = np.moveaxis(translation_new_xy0, -1, 0)
translation_new_xy0[:, :2, 2] = new_xy0
# Rotation matrix of offset_a for each slit position
rotation_a = np.repeat(np.identity(3), nx).reshape(3, 3, nx)
rotation_a = np.moveaxis(rotation_a, -1, 0)
ca = np.cos(np.deg2rad(offset_a))
sa = np.sin(np.deg2rad(offset_a))
rotation_a[:, 0, 0] = ca
rotation_a[:, 0, 1] = -sa
rotation_a[:, 1, 1] = ca
rotation_a[:, 1, 0] = sa
# transform_matrix = translation_new_xy0 @ rotation_a @ translation_xy0
transform_matrix = np.matmul(rotation_a, translation_xy0)
transform_matrix = np.matmul(translation_new_xy0, transform_matrix)
# apply transformation to
xy = np.stack((x, y, np.ones_like(x))) # (3, ny, nx)
xy = np.moveaxis(xy, 0, -1) # (ny, nx, 3)
xy = xy.reshape(ny, nx, 3, 1) # (ny, nx, 3, 1)
new_xy = np.matmul(transform_matrix, xy) # (ny, nx, 3, 1)
new_xy = new_xy.reshape(ny, nx, 3) # (ny, nx, 3)
new_x = new_xy[:, :, 0] # (ny, nx)
new_y = new_xy[:, :, 1] # (ny, nx)
return new_x, new_y, [offset, cc]
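# --- Added illustration, not part of the original module ---
# align() composes T(new_xy0) @ R(offset_a) @ T(-xy0): rotate each slit about
# its own centre xy0, then move that centre onto new_xy0. A minimal numeric
# sanity check of that composition (illustrative values only):
def _demo_compose_transform():
    xy0 = np.array([2.0, 3.0])       # original slit centre
    new_xy0 = np.array([4.0, 1.0])   # centre after the fitted offset
    ang = np.deg2rad(90.0)           # fitted rotation angle
    t_old = np.array([[1., 0., -xy0[0]], [0., 1., -xy0[1]], [0., 0., 1.]])
    rot = np.array([[np.cos(ang), -np.sin(ang), 0.],
                    [np.sin(ang),  np.cos(ang), 0.],
                    [0., 0., 1.]])
    t_new = np.array([[1., 0., new_xy0[0]], [0., 1., new_xy0[1]], [0., 0., 1.]])
    m = t_new @ rot @ t_old
    # The pivot is invariant under the rotation, so it lands exactly on new_xy0.
    assert np.allclose((m @ np.array([xy0[0], xy0[1], 1.]))[:2], new_xy0)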
| 32.01938 | 78 | 0.611669 |
6386c97bb03804839c4bd2dea0358e082e8bce2c | 1,357 | py | Python | ZHOUJIANPING/main_dailyupdata.py | StudentZZZ-CQF/Algo-ETL | 67d59f85d85f18be6b79a7c8faa412a465975105 | [
"MIT"
] | null | null | null | ZHOUJIANPING/main_dailyupdata.py | StudentZZZ-CQF/Algo-ETL | 67d59f85d85f18be6b79a7c8faa412a465975105 | [
"MIT"
] | null | null | null | ZHOUJIANPING/main_dailyupdata.py | StudentZZZ-CQF/Algo-ETL | 67d59f85d85f18be6b79a7c8faa412a465975105 | [
"MIT"
] | null | null | null | #Daily update creat tables
import finnhub
import pandas as pd
import psycopg2
import time
import sqlalchemy
import numpy as np
import os
import os.path
from connect_db import connect_to_db
import Rdate
import DBtable
from datetime import datetime
from dotenv import load_dotenv
#download the SP500 stocks as target
table=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
SP500_NAME=table[0]['Symbol']
Table_ND=pd.read_csv('tablenameD.csv')
Table_NM=pd.read_csv('tablenameM.csv')
#connect to database
engine=connect_to_db()
cur=engine.cursor()
enddate=int(datetime.now().timestamp())
cur.execute("SELECT MAX(timestamp) FROM DailyD;")
lasttime=cur.fetchall()[0][0]
Starttime=lasttime+1
for i in range(100):
engine=connect_to_db()
cur=engine.cursor()
Table_ND[i]=DBtable.creat_dailysqltable(cur,engine,SP500_NAME[i])
df_D=Rdate.df_finhub(SP500_NAME[i],'D',Starttime,enddate)
time.sleep(1)
Tabledf=Table_ND[i:i+1]
Table_nameD=np.array(Tabledf)[0,1]
uploaddata_daily(cur,Table_nameD,df_D)
for i in range(100):
engine=connect_to_db()
cur=engine.cursor()
Table_NM[i]=DBtable.creat_dailysqltable(cur,engine,SP500_NAME[i])
df_D=Rdate.df_finhub(SP500_NAME[i],1,Starttime,enddate)
time.sleep(1)
Tabledf=Table_NM[i:i+1]
Table_nameM=np.array(Tabledf)[0,1]
uploaddata_daily(cur,Table_nameM,df_D)
| 27.693878 | 79 | 0.761238 |
94ea4ca5a3d2cfac71b1e78e3719ec873b38506f | 782 | py | Python | src/azure-cli/azure/cli/command_modules/synapse/_completers.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 7 | 2020-04-26T09:54:05.000Z | 2021-07-22T16:54:41.000Z | src/azure-cli/azure/cli/command_modules/synapse/_completers.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 120 | 2018-03-27T19:14:40.000Z | 2020-12-10T23:53:35.000Z | src/azure-cli/azure/cli/command_modules/synapse/_completers.py | xaliciayang/azure-cli | 38c80c875e8a79d08d06a2f42ec82fd54934343e | [
"MIT"
] | 13 | 2020-06-30T16:23:36.000Z | 2022-03-29T17:12:05.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from azure.cli.command_modules.synapse.operations.accesscontrol import list_role_definitions
@Completer
def get_role_definition_name_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
if namespace.workspace_name:
definitions = list_role_definitions(cmd, namespace.workspace_name)
return [x.name for x in definitions]
return []
| 48.875 | 114 | 0.598465 |
35f1781ee8057c533b5bf48b0f54ab43b40865c1 | 452 | py | Python | alipay/aop/api/response/AlipayUserBenefitStatusUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayUserBenefitStatusUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayUserBenefitStatusUpdateResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayUserBenefitStatusUpdateResponse(AlipayResponse):
def __init__(self):
super(AlipayUserBenefitStatusUpdateResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayUserBenefitStatusUpdateResponse, self).parse_response_content(response_content)
| 28.25 | 110 | 0.785398 |
05d3ce372282c2df25e1d41cbc7633bbf5d7779d | 13,098 | py | Python | pfruck_contabo/model/create_custom_image_request.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 2 | 2022-01-27T10:36:33.000Z | 2022-03-09T14:21:12.000Z | pfruck_contabo/model/create_custom_image_request.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 7 | 2022-01-13T10:44:19.000Z | 2022-02-15T23:44:44.000Z | pfruck_contabo/model/create_custom_image_request.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | null | null | null | """
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: support@contabo.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pfruck_contabo.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from pfruck_contabo.exceptions import ApiAttributeError
class CreateCustomImageRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('os_type',): {
'WINDOWS': "Windows",
'LINUX': "Linux",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'name': (str,), # noqa: E501
'url': (str,), # noqa: E501
'os_type': (str,), # noqa: E501
'version': (str,), # noqa: E501
'description': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'url': 'url', # noqa: E501
'os_type': 'osType', # noqa: E501
'version': 'version', # noqa: E501
'description': 'description', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, name, url, os_type, version, *args, **kwargs): # noqa: E501
"""CreateCustomImageRequest - a model defined in OpenAPI
Args:
name (str): Image Name
url (str): URL from where the image has been downloaded / provided.
os_type (str): Provided type of operating system (OS). Please specify `Windows` for MS Windows and `Linux` for other OS. Specifying wrong OS type may lead to disfunctional cloud instance.
version (str): Version number to distinguish the contents of an image. Could be the version of the operating system for example.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): Image Description. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.url = url
self.os_type = os_type
self.version = version
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, url, os_type, version, *args, **kwargs): # noqa: E501
"""CreateCustomImageRequest - a model defined in OpenAPI
Args:
name (str): Image Name
url (str): URL from where the image has been downloaded / provided.
os_type (str): Provided type of operating system (OS). Please specify `Windows` for MS Windows and `Linux` for other OS. Specifying wrong OS type may lead to disfunctional cloud instance.
version (str): Version number to distinguish the contents of an image. Could be the version of the operating system for example.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): Image Description. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.url = url
self.os_type = os_type
self.version = version
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.25 | 199 | 0.563368 |
1af42d3c909c0a0b6263dd4e513f9b916efc81c3 | 452 | py | Python | Day 07/problem 02.py | RealTigerCZ/AoC2021 | 22d8a05a15fec4abab6713e678f8c29400ee56a3 | [
"MIT"
] | null | null | null | Day 07/problem 02.py | RealTigerCZ/AoC2021 | 22d8a05a15fec4abab6713e678f8c29400ee56a3 | [
"MIT"
] | null | null | null | Day 07/problem 02.py | RealTigerCZ/AoC2021 | 22d8a05a15fec4abab6713e678f8c29400ee56a3 | [
"MIT"
] | null | null | null | path = "input.txt"
file = open(path)
input = file.readlines()
file.close()
# The input is a single line of comma-separated integer positions; sort them ascending.
coords = sorted([[int(n) for n in line.split(",")] for line in input][0])
# super slow: recomputes the triangular fuel cost from scratch for every position pair
#print(min([sum([sum([m + 1 for m in range(abs(n - i))]) for n in coords])] for i in range(coords[0], coords[-1]+1))[0])
# Precompute triangular numbers instead: add[d] = 1 + 2 + ... + d is the part-2 fuel
# cost of moving a distance d (the table covers distances up to 2000).
add = [0]
for idx in range(2000):
    add.append(add[idx] + idx + 1)
# Try every target position between the smallest and largest input position and
# print the minimum total fuel cost.
print(min([sum([add[abs(n - i)] for n in coords])] for i in range(coords[0], coords[-1]+1))[0])
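
# Alternative sketch (not in the original solution): the triangular cost d*(d+1)//2
# can also be computed directly per distance, which avoids the fixed-size `add`
# table and its assumption that no distance exceeds 2000:
#
#   cost = lambda target: sum(abs(n - target) * (abs(n - target) + 1) // 2 for n in coords)
#   print(min(cost(i) for i in range(coords[0], coords[-1] + 1)))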
| 26.588235 | 120 | 0.608407 |
8497d48578fb1a2f0d44758974eb8ab05eb44d54 | 234,349 | py | Python | src/python_pachyderm/proto/admin/v1_11/pps/pps_pb2.py | barretthinson/python-pachyderm | 82cea22d1105d70833a5522ccac750ca521694ff | [
"Apache-2.0"
] | null | null | null | src/python_pachyderm/proto/admin/v1_11/pps/pps_pb2.py | barretthinson/python-pachyderm | 82cea22d1105d70833a5522ccac750ca521694ff | [
"Apache-2.0"
] | null | null | null | src/python_pachyderm/proto/admin/v1_11/pps/pps_pb2.py | barretthinson/python-pachyderm | 82cea22d1105d70833a5522ccac750ca521694ff | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: client/admin/v1_11/pps/pps.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from python_pachyderm.proto.admin.v1_11.pfs import pfs_pb2 as client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='client/admin/v1_11/pps/pps.proto',
package='pps_1_11',
syntax='proto3',
serialized_options=b'Z9github.com/pachyderm/pachyderm/src/client/admin/v1_11/pps',
serialized_pb=b'\n client/admin/v1_11/pps/pps.proto\x12\x08pps_1_11\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/duration.proto\x1a client/admin/v1_11/pfs/pfs.proto\"M\n\x0bSecretMount\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x04 \x01(\t\x12\x12\n\nmount_path\x18\x02 \x01(\t\x12\x0f\n\x07\x65nv_var\x18\x03 \x01(\t\"\xfb\x02\n\tTransform\x12\r\n\x05image\x18\x01 \x01(\t\x12\x0b\n\x03\x63md\x18\x02 \x03(\t\x12\x0f\n\x07\x65rr_cmd\x18\r \x03(\t\x12)\n\x03\x65nv\x18\x03 \x03(\x0b\x32\x1c.pps_1_11.Transform.EnvEntry\x12&\n\x07secrets\x18\x04 \x03(\x0b\x32\x15.pps_1_11.SecretMount\x12\x1a\n\x12image_pull_secrets\x18\t \x03(\t\x12\r\n\x05stdin\x18\x05 \x03(\t\x12\x11\n\terr_stdin\x18\x0e \x03(\t\x12\x1a\n\x12\x61\x63\x63\x65pt_return_code\x18\x06 \x03(\x03\x12\r\n\x05\x64\x65\x62ug\x18\x07 \x01(\x08\x12\x0c\n\x04user\x18\n \x01(\t\x12\x13\n\x0bworking_dir\x18\x0b \x01(\t\x12\x12\n\ndockerfile\x18\x0c \x01(\t\x12\"\n\x05\x62uild\x18\x0f \x01(\x0b\x32\x13.pps_1_11.BuildSpec\x1a*\n\x08\x45nvEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\":\n\tBuildSpec\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x10\n\x08language\x18\x02 \x01(\t\x12\r\n\x05image\x18\x03 \x01(\t\"\x17\n\x05TFJob\x12\x0e\n\x06tf_job\x18\x01 \x01(\t\"\x15\n\x06\x45gress\x12\x0b\n\x03URL\x18\x01 \x01(\t\"\x11\n\x03Job\x12\n\n\x02id\x18\x01 \x01(\t\"\xd7\x01\n\x08Metadata\x12\x38\n\x0b\x61nnotations\x18\x01 \x03(\x0b\x32#.pps_1_11.Metadata.AnnotationsEntry\x12.\n\x06labels\x18\x02 \x03(\x0b\x32\x1e.pps_1_11.Metadata.LabelsEntry\x1a\x32\n\x10\x41nnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"Q\n\x07Service\x12\x15\n\rinternal_port\x18\x01 \x01(\x05\x12\x15\n\rexternal_port\x18\x02 \x01(\x05\x12\n\n\x02ip\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\"N\n\x05Spout\x12\x11\n\toverwrite\x18\x01 \x01(\x08\x12\"\n\x07service\x18\x02 \x01(\x0b\x32\x11.pps_1_11.Service\x12\x0e\n\x06marker\x18\x03 \x01(\t\"\x94\x01\n\x08PFSInput\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04repo\x18\x02 \x01(\t\x12\x0e\n\x06\x62ranch\x18\x03 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x04 \x01(\t\x12\x0c\n\x04glob\x18\x05 \x01(\t\x12\x0f\n\x07join_on\x18\x08 \x01(\t\x12\x0c\n\x04lazy\x18\x06 \x01(\x08\x12\x13\n\x0b\x65mpty_files\x18\x07 \x01(\x08\x12\n\n\x02s3\x18\t \x01(\x08\"\x83\x01\n\tCronInput\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04repo\x18\x02 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x03 \x01(\t\x12\x0c\n\x04spec\x18\x04 \x01(\t\x12\x11\n\toverwrite\x18\x06 \x01(\x08\x12)\n\x05start\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"E\n\x08GitInput\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0e\n\x06\x62ranch\x18\x03 \x01(\t\x12\x0e\n\x06\x63ommit\x18\x04 \x01(\t\"\xcb\x01\n\x05Input\x12\x1f\n\x03pfs\x18\x06 \x01(\x0b\x32\x12.pps_1_11.PFSInput\x12\x1d\n\x04join\x18\x07 \x03(\x0b\x32\x0f.pps_1_11.Input\x12\x1e\n\x05\x63ross\x18\x02 \x03(\x0b\x32\x0f.pps_1_11.Input\x12\x1e\n\x05union\x18\x03 \x03(\x0b\x32\x0f.pps_1_11.Input\x12!\n\x04\x63ron\x18\x04 \x01(\x0b\x32\x13.pps_1_11.CronInput\x12\x1f\n\x03git\x18\x05 \x01(\x0b\x32\x12.pps_1_11.GitInput\"V\n\x08JobInput\x12\x0c\n\x04name\x18\x04 \x01(\t\x12 \n\x06\x63ommit\x18\x01 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x0c\n\x04glob\x18\x02 \x01(\t\x12\x0c\n\x04lazy\x18\x03 \x01(\x08\">\n\x0fParallelismSpec\x12\x10\n\x08\x63onstant\x18\x02 
\x01(\x04\x12\x13\n\x0b\x63oefficient\x18\x03 \x01(\x01J\x04\x08\x01\x10\x02\" \n\x0cHashtreeSpec\x12\x10\n\x08\x63onstant\x18\x01 \x01(\x04\"\'\n\tInputFile\x12\x0c\n\x04path\x18\x04 \x01(\t\x12\x0c\n\x04hash\x18\x05 \x01(\x0c\"/\n\x05\x44\x61tum\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1a\n\x03job\x18\x02 \x01(\x0b\x32\r.pps_1_11.Job\"\xbc\x01\n\tDatumInfo\x12\x1e\n\x05\x64\x61tum\x18\x01 \x01(\x0b\x32\x0f.pps_1_11.Datum\x12#\n\x05state\x18\x02 \x01(\x0e\x32\x14.pps_1_11.DatumState\x12%\n\x05stats\x18\x03 \x01(\x0b\x32\x16.pps_1_11.ProcessStats\x12!\n\tpfs_state\x18\x04 \x01(\x0b\x32\x0e.pfs_1_11.File\x12 \n\x04\x64\x61ta\x18\x05 \x03(\x0b\x32\x12.pfs_1_11.FileInfo\"s\n\tAggregate\x12\r\n\x05\x63ount\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x01\x12\x0e\n\x06stddev\x18\x03 \x01(\x01\x12\x18\n\x10\x66ifth_percentile\x18\x04 \x01(\x01\x12\x1f\n\x17ninety_fifth_percentile\x18\x05 \x01(\x01\"\xcf\x01\n\x0cProcessStats\x12\x30\n\rdownload_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12/\n\x0cprocess_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bupload_time\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x16\n\x0e\x64ownload_bytes\x18\x04 \x01(\x04\x12\x14\n\x0cupload_bytes\x18\x05 \x01(\x04\"\xf0\x01\n\x15\x41ggregateProcessStats\x12*\n\rdownload_time\x18\x01 \x01(\x0b\x32\x13.pps_1_11.Aggregate\x12)\n\x0cprocess_time\x18\x02 \x01(\x0b\x32\x13.pps_1_11.Aggregate\x12(\n\x0bupload_time\x18\x03 \x01(\x0b\x32\x13.pps_1_11.Aggregate\x12+\n\x0e\x64ownload_bytes\x18\x04 \x01(\x0b\x32\x13.pps_1_11.Aggregate\x12)\n\x0cupload_bytes\x18\x05 \x01(\x0b\x32\x13.pps_1_11.Aggregate\"\xbc\x01\n\x0cWorkerStatus\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x02 \x01(\t\x12!\n\x04\x64\x61ta\x18\x03 \x03(\x0b\x32\x13.pps_1_11.InputFile\x12+\n\x07started\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12%\n\x05stats\x18\x05 \x01(\x0b\x32\x16.pps_1_11.ProcessStats\x12\x12\n\nqueue_size\x18\x06 \x01(\x03\"_\n\x0cResourceSpec\x12\x0b\n\x03\x63pu\x18\x01 \x01(\x02\x12\x0e\n\x06memory\x18\x02 \x01(\t\x12\x1e\n\x03gpu\x18\x05 \x01(\x0b\x32\x11.pps_1_11.GPUSpec\x12\x0c\n\x04\x64isk\x18\x04 \x01(\tJ\x04\x08\x03\x10\x04\"\'\n\x07GPUSpec\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x03\"\xd5\x03\n\x0b\x45tcdJobInfo\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12$\n\x08pipeline\x18\x02 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\'\n\routput_commit\x18\x03 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x0f\n\x07restart\x18\x04 \x01(\x04\x12\x16\n\x0e\x64\x61ta_processed\x18\x05 \x01(\x03\x12\x14\n\x0c\x64\x61ta_skipped\x18\x06 \x01(\x03\x12\x12\n\ndata_total\x18\x07 \x01(\x03\x12\x13\n\x0b\x64\x61ta_failed\x18\x08 \x01(\x03\x12\x16\n\x0e\x64\x61ta_recovered\x18\x0f \x01(\x03\x12%\n\x05stats\x18\t \x01(\x0b\x32\x16.pps_1_11.ProcessStats\x12&\n\x0cstats_commit\x18\n \x01(\x0b\x32\x10.pfs_1_11.Commit\x12!\n\x05state\x18\x0b \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x0e\n\x06reason\x18\x0c \x01(\t\x12+\n\x07started\x18\r \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x66inished\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xfe\n\n\x07JobInfo\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12&\n\ttransform\x18\x02 \x01(\x0b\x32\x13.pps_1_11.Transform\x12$\n\x08pipeline\x18\x03 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x18\n\x10pipeline_version\x18\r \x01(\x04\x12%\n\x0bspec_commit\x18/ \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x33\n\x10parallelism_spec\x18\x0c \x01(\x0b\x32\x19.pps_1_11.ParallelismSpec\x12 \n\x06\x65gress\x18\x0f 
\x01(\x0b\x32\x10.pps_1_11.Egress\x12!\n\nparent_job\x18\x06 \x01(\x0b\x32\r.pps_1_11.Job\x12+\n\x07started\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x66inished\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\'\n\routput_commit\x18\t \x01(\x0b\x32\x10.pfs_1_11.Commit\x12!\n\x05state\x18\n \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x0e\n\x06reason\x18# \x01(\t\x12\"\n\x07service\x18\x0e \x01(\x0b\x32\x11.pps_1_11.Service\x12\x1e\n\x05spout\x18- \x01(\x0b\x32\x0f.pps_1_11.Spout\x12#\n\x0boutput_repo\x18\x12 \x01(\x0b\x32\x0e.pfs_1_11.Repo\x12\x15\n\routput_branch\x18\x11 \x01(\t\x12\x0f\n\x07restart\x18\x14 \x01(\x04\x12\x16\n\x0e\x64\x61ta_processed\x18\x16 \x01(\x03\x12\x14\n\x0c\x64\x61ta_skipped\x18\x1e \x01(\x03\x12\x13\n\x0b\x64\x61ta_failed\x18( \x01(\x03\x12\x16\n\x0e\x64\x61ta_recovered\x18. \x01(\x03\x12\x12\n\ndata_total\x18\x17 \x01(\x03\x12%\n\x05stats\x18\x1f \x01(\x0b\x32\x16.pps_1_11.ProcessStats\x12-\n\rworker_status\x18\x18 \x03(\x0b\x32\x16.pps_1_11.WorkerStatus\x12\x31\n\x11resource_requests\x18\x19 \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12/\n\x0fresource_limits\x18$ \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x37\n\x17sidecar_resource_limits\x18\x30 \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x1e\n\x05input\x18\x1a \x01(\x0b\x32\x0f.pps_1_11.Input\x12(\n\nnew_branch\x18\x1b \x01(\x0b\x32\x14.pfs_1_11.BranchInfo\x12&\n\x0cstats_commit\x18\x1d \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x14\n\x0c\x65nable_stats\x18 \x01(\x08\x12\x0c\n\x04salt\x18! \x01(\t\x12\'\n\nchunk_spec\x18% \x01(\x0b\x32\x13.pps_1_11.ChunkSpec\x12\x30\n\rdatum_timeout\x18& \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bjob_timeout\x18\' \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x61tum_tries\x18) \x01(\x03\x12\x31\n\x0fscheduling_spec\x18* \x01(\x0b\x32\x18.pps_1_11.SchedulingSpec\x12\x10\n\x08pod_spec\x18+ \x01(\t\x12\x11\n\tpod_patch\x18, \x01(\tJ\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x1c\x10\x1dJ\x04\x08\"\x10#\"<\n\x06Worker\x12\x0c\n\x04name\x18\x01 \x01(\t\x12$\n\x05state\x18\x02 \x01(\x0e\x32\x15.pps_1_11.WorkerState\"/\n\x08JobInfos\x12#\n\x08job_info\x18\x01 \x03(\x0b\x32\x11.pps_1_11.JobInfo\"\x18\n\x08Pipeline\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xb7\x02\n\x10\x45tcdPipelineInfo\x12&\n\x05state\x18\x01 \x01(\x0e\x32\x17.pps_1_11.PipelineState\x12\x0e\n\x06reason\x18\x04 \x01(\t\x12%\n\x0bspec_commit\x18\x02 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12=\n\njob_counts\x18\x03 \x03(\x0b\x32).pps_1_11.EtcdPipelineInfo.JobCountsEntry\x12\x12\n\nauth_token\x18\x05 \x01(\t\x12*\n\x0elast_job_state\x18\x06 \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x13\n\x0bparallelism\x18\x07 \x01(\x04\x1a\x30\n\x0eJobCountsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x9a\x0b\n\x0cPipelineInfo\x12\n\n\x02id\x18\x11 \x01(\t\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x0f\n\x07version\x18\x0b \x01(\x04\x12&\n\ttransform\x18\x02 \x01(\x0b\x32\x13.pps_1_11.Transform\x12\x1f\n\x06tf_job\x18. 
\x01(\x0b\x32\x0f.pps_1_11.TFJob\x12\x33\n\x10parallelism_spec\x18\n \x01(\x0b\x32\x19.pps_1_11.ParallelismSpec\x12-\n\rhashtree_spec\x18* \x01(\x0b\x32\x16.pps_1_11.HashtreeSpec\x12 \n\x06\x65gress\x18\x0f \x01(\x0b\x32\x10.pps_1_11.Egress\x12.\n\ncreated_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12&\n\x05state\x18\x07 \x01(\x0e\x32\x17.pps_1_11.PipelineState\x12\x0f\n\x07stopped\x18& \x01(\x08\x12\x14\n\x0crecent_error\x18\x08 \x01(\t\x12\x19\n\x11workers_requested\x18\x31 \x01(\x03\x12\x19\n\x11workers_available\x18\x32 \x01(\x03\x12\x39\n\njob_counts\x18\t \x03(\x0b\x32%.pps_1_11.PipelineInfo.JobCountsEntry\x12*\n\x0elast_job_state\x18+ \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x15\n\routput_branch\x18\x10 \x01(\t\x12\x31\n\x11resource_requests\x18\x13 \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12/\n\x0fresource_limits\x18\x1f \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x37\n\x17sidecar_resource_limits\x18\x33 \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x1e\n\x05input\x18\x14 \x01(\x0b\x32\x0f.pps_1_11.Input\x12\x13\n\x0b\x64\x65scription\x18\x15 \x01(\t\x12\x12\n\ncache_size\x18\x17 \x01(\t\x12\x14\n\x0c\x65nable_stats\x18\x18 \x01(\x08\x12\x0c\n\x04salt\x18\x19 \x01(\t\x12\x0e\n\x06reason\x18\x1c \x01(\t\x12\x16\n\x0emax_queue_size\x18\x1d \x01(\x03\x12\"\n\x07service\x18\x1e \x01(\x0b\x32\x11.pps_1_11.Service\x12\x1e\n\x05spout\x18- \x01(\x0b\x32\x0f.pps_1_11.Spout\x12\'\n\nchunk_spec\x18 \x01(\x0b\x32\x13.pps_1_11.ChunkSpec\x12\x30\n\rdatum_timeout\x18! \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bjob_timeout\x18\" \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0bgithook_url\x18# \x01(\t\x12%\n\x0bspec_commit\x18$ \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x0f\n\x07standby\x18% \x01(\x08\x12\x13\n\x0b\x64\x61tum_tries\x18\' \x01(\x03\x12\x31\n\x0fscheduling_spec\x18( \x01(\x0b\x32\x18.pps_1_11.SchedulingSpec\x12\x10\n\x08pod_spec\x18) \x01(\t\x12\x11\n\tpod_patch\x18, \x01(\t\x12\x0e\n\x06s3_out\x18/ \x01(\x08\x12$\n\x08metadata\x18\x30 \x01(\x0b\x32\x12.pps_1_11.Metadata\x1a\x30\n\x0eJobCountsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x16\x10\x17J\x04\x08\x1a\x10\x1bJ\x04\x08\x1b\x10\x1cJ\x04\x08\x12\x10\x13\">\n\rPipelineInfos\x12-\n\rpipeline_info\x18\x01 \x03(\x0b\x32\x16.pps_1_11.PipelineInfo\"\xbc\x04\n\x10\x43reateJobRequest\x12$\n\x08pipeline\x18\x02 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\'\n\routput_commit\x18\x19 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x0f\n\x07restart\x18\x1a \x01(\x04\x12\x16\n\x0e\x64\x61ta_processed\x18\x1b \x01(\x03\x12\x14\n\x0c\x64\x61ta_skipped\x18\x1c \x01(\x03\x12\x12\n\ndata_total\x18\x1d \x01(\x03\x12\x13\n\x0b\x64\x61ta_failed\x18\x1e \x01(\x03\x12\x16\n\x0e\x64\x61ta_recovered\x18\x1f \x01(\x03\x12%\n\x05stats\x18 \x01(\x0b\x32\x16.pps_1_11.ProcessStats\x12&\n\x0cstats_commit\x18! 
\x01(\x0b\x32\x10.pfs_1_11.Commit\x12!\n\x05state\x18\" \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x0e\n\x06reason\x18# \x01(\t\x12+\n\x07started\x18$ \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x66inished\x18% \x01(\x0b\x32\x1a.google.protobuf.TimestampJ\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x01\x10\x02J\x04\x08\n\x10\x0bJ\x04\x08\x07\x10\x08J\x04\x08\t\x10\nJ\x04\x08\x08\x10\tJ\x04\x08\x0c\x10\rJ\x04\x08\x0b\x10\x0cJ\x04\x08\r\x10\x0eJ\x04\x08\x0e\x10\x0fJ\x04\x08\x15\x10\x16J\x04\x08\x0f\x10\x10J\x04\x08\x10\x10\x11J\x04\x08\x11\x10\x12J\x04\x08\x12\x10\x13J\x04\x08\x13\x10\x14J\x04\x08\x14\x10\x15J\x04\x08\x16\x10\x17J\x04\x08\x17\x10\x18J\x04\x08\x18\x10\x19\"{\n\x11InspectJobRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12\'\n\routput_commit\x18\x03 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x13\n\x0b\x62lock_state\x18\x02 \x01(\x08\x12\x0c\n\x04\x66ull\x18\x04 \x01(\x08\"\xa6\x01\n\x0eListJobRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12&\n\x0cinput_commit\x18\x02 \x03(\x0b\x32\x10.pfs_1_11.Commit\x12\'\n\routput_commit\x18\x03 \x01(\x0b\x32\x10.pfs_1_11.Commit\x12\x0f\n\x07history\x18\x04 \x01(\x03\x12\x0c\n\x04\x66ull\x18\x05 \x01(\x08\"^\n\x0f\x46lushJobRequest\x12!\n\x07\x63ommits\x18\x01 \x03(\x0b\x32\x10.pfs_1_11.Commit\x12(\n\x0cto_pipelines\x18\x02 \x03(\x0b\x32\x12.pps_1_11.Pipeline\".\n\x10\x44\x65leteJobRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\",\n\x0eStopJobRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\"\x8d\x02\n\x15UpdateJobStateRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12!\n\x05state\x18\x02 \x01(\x0e\x32\x12.pps_1_11.JobState\x12\x0e\n\x06reason\x18\x03 \x01(\t\x12\x0f\n\x07restart\x18\x04 \x01(\x04\x12\x16\n\x0e\x64\x61ta_processed\x18\x05 \x01(\x03\x12\x14\n\x0c\x64\x61ta_skipped\x18\x06 \x01(\x03\x12\x13\n\x0b\x64\x61ta_failed\x18\x07 \x01(\x03\x12\x16\n\x0e\x64\x61ta_recovered\x18\x08 \x01(\x03\x12\x12\n\ndata_total\x18\t \x01(\x03\x12%\n\x05stats\x18\n \x01(\x0b\x32\x16.pps_1_11.ProcessStats\"\xd6\x01\n\x0eGetLogsRequest\x12$\n\x08pipeline\x18\x02 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12\x14\n\x0c\x64\x61ta_filters\x18\x03 \x03(\t\x12\x1e\n\x05\x64\x61tum\x18\x06 \x01(\x0b\x32\x0f.pps_1_11.Datum\x12\x0e\n\x06master\x18\x05 \x01(\x08\x12\x0e\n\x06\x66ollow\x18\x07 \x01(\x08\x12\x0c\n\x04tail\x18\x08 \x01(\x03\x12\x18\n\x10use_loki_backend\x18\t \x01(\x08J\x04\x08\x04\x10\x05\"\xd2\x01\n\nLogMessage\x12\x15\n\rpipeline_name\x18\x01 \x01(\t\x12\x0e\n\x06job_id\x18\x03 \x01(\t\x12\x11\n\tworker_id\x18\x07 \x01(\t\x12\x10\n\x08\x64\x61tum_id\x18\t \x01(\t\x12\x0e\n\x06master\x18\n \x01(\x08\x12!\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\x13.pps_1_11.InputFile\x12\x0c\n\x04user\x18\x08 \x01(\x08\x12&\n\x02ts\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0f\n\x07message\x18\x06 \x01(\t\"G\n\x13RestartDatumRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12\x14\n\x0c\x64\x61ta_filters\x18\x02 \x03(\t\"5\n\x13InspectDatumRequest\x12\x1e\n\x05\x64\x61tum\x18\x01 \x01(\x0b\x32\x0f.pps_1_11.Datum\"O\n\x10ListDatumRequest\x12\x1a\n\x03job\x18\x01 \x01(\x0b\x32\r.pps_1_11.Job\x12\x11\n\tpage_size\x18\x02 \x01(\x03\x12\x0c\n\x04page\x18\x03 \x01(\x03\"`\n\x11ListDatumResponse\x12(\n\x0b\x64\x61tum_infos\x18\x01 \x03(\x0b\x32\x13.pps_1_11.DatumInfo\x12\x13\n\x0btotal_pages\x18\x02 \x01(\x03\x12\x0c\n\x04page\x18\x03 \x01(\x03\"e\n\x17ListDatumStreamResponse\x12\'\n\ndatum_info\x18\x01 
\x01(\x0b\x32\x13.pps_1_11.DatumInfo\x12\x13\n\x0btotal_pages\x18\x02 \x01(\x03\x12\x0c\n\x04page\x18\x03 \x01(\x03\"/\n\tChunkSpec\x12\x0e\n\x06number\x18\x01 \x01(\x03\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xa5\x01\n\x0eSchedulingSpec\x12\x41\n\rnode_selector\x18\x01 \x03(\x0b\x32*.pps_1_11.SchedulingSpec.NodeSelectorEntry\x12\x1b\n\x13priority_class_name\x18\x02 \x01(\t\x1a\x33\n\x11NodeSelectorEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xb0\x08\n\x15\x43reatePipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x1f\n\x06tf_job\x18# \x01(\x0b\x32\x0f.pps_1_11.TFJob\x12&\n\ttransform\x18\x02 \x01(\x0b\x32\x13.pps_1_11.Transform\x12\x33\n\x10parallelism_spec\x18\x07 \x01(\x0b\x32\x19.pps_1_11.ParallelismSpec\x12-\n\rhashtree_spec\x18\x1f \x01(\x0b\x32\x16.pps_1_11.HashtreeSpec\x12 \n\x06\x65gress\x18\t \x01(\x0b\x32\x10.pps_1_11.Egress\x12\x0e\n\x06update\x18\x05 \x01(\x08\x12\x15\n\routput_branch\x18\n \x01(\t\x12\x0e\n\x06s3_out\x18$ \x01(\x08\x12\x31\n\x11resource_requests\x18\x0c \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12/\n\x0fresource_limits\x18\x16 \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x37\n\x17sidecar_resource_limits\x18/ \x01(\x0b\x32\x16.pps_1_11.ResourceSpec\x12\x1e\n\x05input\x18\r \x01(\x0b\x32\x0f.pps_1_11.Input\x12\x13\n\x0b\x64\x65scription\x18\x0e \x01(\t\x12\x12\n\ncache_size\x18\x10 \x01(\t\x12\x14\n\x0c\x65nable_stats\x18\x11 \x01(\x08\x12\x11\n\treprocess\x18\x12 \x01(\x08\x12\x16\n\x0emax_queue_size\x18\x14 \x01(\x03\x12\"\n\x07service\x18\x15 \x01(\x0b\x32\x11.pps_1_11.Service\x12\x1e\n\x05spout\x18! \x01(\x0b\x32\x0f.pps_1_11.Spout\x12\'\n\nchunk_spec\x18\x17 \x01(\x0b\x32\x13.pps_1_11.ChunkSpec\x12\x30\n\rdatum_timeout\x18\x18 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bjob_timeout\x18\x19 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04salt\x18\x1a \x01(\t\x12\x0f\n\x07standby\x18\x1b \x01(\x08\x12\x13\n\x0b\x64\x61tum_tries\x18\x1c \x01(\x03\x12\x31\n\x0fscheduling_spec\x18\x1d \x01(\x0b\x32\x18.pps_1_11.SchedulingSpec\x12\x10\n\x08pod_spec\x18\x1e \x01(\t\x12\x11\n\tpod_patch\x18 \x01(\t\x12%\n\x0bspec_commit\x18\" \x01(\x0b\x32\x10.pfs_1_11.Commit\x12$\n\x08metadata\x18. 
\x01(\x0b\x32\x12.pps_1_11.MetadataJ\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x0b\x10\x0cJ\x04\x08\x0f\x10\x10J\x04\x08\x13\x10\x14\">\n\x16InspectPipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\"L\n\x13ListPipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x0f\n\x07history\x18\x02 \x01(\x03\"x\n\x15\x44\x65letePipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12\x0b\n\x03\x61ll\x18\x04 \x01(\x08\x12\r\n\x05\x66orce\x18\x05 \x01(\x08\x12\x11\n\tkeep_repo\x18\x06 \x01(\x08J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"<\n\x14StartPipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\";\n\x13StopPipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\"\x80\x01\n\x12RunPipelineRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\x12.\n\nprovenance\x18\x02 \x03(\x0b\x32\x1a.pfs_1_11.CommitProvenance\x12\x0e\n\x06job_id\x18\x04 \x01(\tJ\x04\x08\x03\x10\x04\"6\n\x0eRunCronRequest\x12$\n\x08pipeline\x18\x01 \x01(\x0b\x32\x12.pps_1_11.Pipeline\"#\n\x13\x43reateSecretRequest\x12\x0c\n\x04\x66ile\x18\x01 \x01(\x0c\"7\n\x13\x44\x65leteSecretRequest\x12 \n\x06secret\x18\x01 \x01(\x0b\x32\x10.pps_1_11.Secret\"8\n\x14InspectSecretRequest\x12 \n\x06secret\x18\x01 \x01(\x0b\x32\x10.pps_1_11.Secret\"\x16\n\x06Secret\x12\x0c\n\x04name\x18\x01 \x01(\t\"t\n\nSecretInfo\x12 \n\x06secret\x18\x01 \x01(\x0b\x32\x10.pps_1_11.Secret\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x36\n\x12\x63reation_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"8\n\x0bSecretInfos\x12)\n\x0bsecret_info\x18\x01 \x03(\x0b\x32\x14.pps_1_11.SecretInfo\"-\n\x15GarbageCollectRequest\x12\x14\n\x0cmemory_bytes\x18\x01 \x01(\x03\"\x18\n\x16GarbageCollectResponse\"\x15\n\x13\x41\x63tivateAuthRequest\"\x16\n\x14\x41\x63tivateAuthResponse*\x83\x01\n\x08JobState\x12\x10\n\x0cJOB_STARTING\x10\x00\x12\x0f\n\x0bJOB_RUNNING\x10\x01\x12\x0f\n\x0bJOB_FAILURE\x10\x02\x12\x0f\n\x0bJOB_SUCCESS\x10\x03\x12\x0e\n\nJOB_KILLED\x10\x04\x12\x0f\n\x0bJOB_MERGING\x10\x05\x12\x11\n\rJOB_EGRESSING\x10\x06*O\n\nDatumState\x12\n\n\x06\x46\x41ILED\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x0b\n\x07SKIPPED\x10\x02\x12\x0c\n\x08STARTING\x10\x03\x12\r\n\tRECOVERED\x10\x04*?\n\x0bWorkerState\x12\x0f\n\x0bPOD_RUNNING\x10\x00\x12\x0f\n\x0bPOD_SUCCESS\x10\x01\x12\x0e\n\nPOD_FAILED\x10\x02*\xad\x01\n\rPipelineState\x12\x15\n\x11PIPELINE_STARTING\x10\x00\x12\x14\n\x10PIPELINE_RUNNING\x10\x01\x12\x17\n\x13PIPELINE_RESTARTING\x10\x02\x12\x14\n\x10PIPELINE_FAILURE\x10\x03\x12\x13\n\x0fPIPELINE_PAUSED\x10\x04\x12\x14\n\x10PIPELINE_STANDBY\x10\x05\x12\x15\n\x11PIPELINE_CRASHING\x10\x06\x32\xc3\x0f\n\x03\x41PI\x12\x38\n\tCreateJob\x12\x1a.pps_1_11.CreateJobRequest\x1a\r.pps_1_11.Job\"\x00\x12>\n\nInspectJob\x12\x1b.pps_1_11.InspectJobRequest\x1a\x11.pps_1_11.JobInfo\"\x00\x12\x39\n\x07ListJob\x12\x18.pps_1_11.ListJobRequest\x1a\x12.pps_1_11.JobInfos\"\x00\x12@\n\rListJobStream\x12\x18.pps_1_11.ListJobRequest\x1a\x11.pps_1_11.JobInfo\"\x00\x30\x01\x12<\n\x08\x46lushJob\x12\x19.pps_1_11.FlushJobRequest\x1a\x11.pps_1_11.JobInfo\"\x00\x30\x01\x12\x41\n\tDeleteJob\x12\x1a.pps_1_11.DeleteJobRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x07StopJob\x12\x18.pps_1_11.StopJobRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x0cInspectDatum\x12\x1d.pps_1_11.InspectDatumRequest\x1a\x13.pps_1_11.DatumInfo\"\x00\x12\x46\n\tListDatum\x12\x1a.pps_1_11.ListDatumRequest\x1a\x1b.pps_1_11.ListDatumResponse\"\x00\x12T\n\x0fLis
tDatumStream\x12\x1a.pps_1_11.ListDatumRequest\x1a!.pps_1_11.ListDatumStreamResponse\"\x00\x30\x01\x12G\n\x0cRestartDatum\x12\x1d.pps_1_11.RestartDatumRequest\x1a\x16.google.protobuf.Empty\"\x00\x12K\n\x0e\x43reatePipeline\x12\x1f.pps_1_11.CreatePipelineRequest\x1a\x16.google.protobuf.Empty\"\x00\x12M\n\x0fInspectPipeline\x12 .pps_1_11.InspectPipelineRequest\x1a\x16.pps_1_11.PipelineInfo\"\x00\x12H\n\x0cListPipeline\x12\x1d.pps_1_11.ListPipelineRequest\x1a\x17.pps_1_11.PipelineInfos\"\x00\x12K\n\x0e\x44\x65letePipeline\x12\x1f.pps_1_11.DeletePipelineRequest\x1a\x16.google.protobuf.Empty\"\x00\x12I\n\rStartPipeline\x12\x1e.pps_1_11.StartPipelineRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x0cStopPipeline\x12\x1d.pps_1_11.StopPipelineRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\x0bRunPipeline\x12\x1c.pps_1_11.RunPipelineRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x07RunCron\x12\x18.pps_1_11.RunCronRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x0c\x43reateSecret\x12\x1d.pps_1_11.CreateSecretRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x0c\x44\x65leteSecret\x12\x1d.pps_1_11.DeleteSecretRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\nListSecret\x12\x16.google.protobuf.Empty\x1a\x15.pps_1_11.SecretInfos\"\x00\x12G\n\rInspectSecret\x12\x1e.pps_1_11.InspectSecretRequest\x1a\x14.pps_1_11.SecretInfo\"\x00\x12=\n\tDeleteAll\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x07GetLogs\x12\x18.pps_1_11.GetLogsRequest\x1a\x14.pps_1_11.LogMessage\"\x00\x30\x01\x12U\n\x0eGarbageCollect\x12\x1f.pps_1_11.GarbageCollectRequest\x1a .pps_1_11.GarbageCollectResponse\"\x00\x12O\n\x0c\x41\x63tivateAuth\x12\x1d.pps_1_11.ActivateAuthRequest\x1a\x1e.pps_1_11.ActivateAuthResponse\"\x00\x12K\n\x0eUpdateJobState\x12\x1f.pps_1_11.UpdateJobStateRequest\x1a\x16.google.protobuf.Empty\"\x00\x42;Z9github.com/pachyderm/pachyderm/src/client/admin/v1_11/ppsb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2.DESCRIPTOR,])
_JOBSTATE = _descriptor.EnumDescriptor(
name='JobState',
full_name='pps_1_11.JobState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='JOB_STARTING', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_RUNNING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_FAILURE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_SUCCESS', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_KILLED', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_MERGING', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JOB_EGRESSING', index=6, number=6,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=11414,
serialized_end=11545,
)
_sym_db.RegisterEnumDescriptor(_JOBSTATE)
JobState = enum_type_wrapper.EnumTypeWrapper(_JOBSTATE)
_DATUMSTATE = _descriptor.EnumDescriptor(
name='DatumState',
full_name='pps_1_11.DatumState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FAILED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SKIPPED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STARTING', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECOVERED', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=11547,
serialized_end=11626,
)
_sym_db.RegisterEnumDescriptor(_DATUMSTATE)
DatumState = enum_type_wrapper.EnumTypeWrapper(_DATUMSTATE)
_WORKERSTATE = _descriptor.EnumDescriptor(
name='WorkerState',
full_name='pps_1_11.WorkerState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='POD_RUNNING', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POD_SUCCESS', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POD_FAILED', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=11628,
serialized_end=11691,
)
_sym_db.RegisterEnumDescriptor(_WORKERSTATE)
WorkerState = enum_type_wrapper.EnumTypeWrapper(_WORKERSTATE)
_PIPELINESTATE = _descriptor.EnumDescriptor(
name='PipelineState',
full_name='pps_1_11.PipelineState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PIPELINE_STARTING', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_RUNNING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_RESTARTING', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_FAILURE', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_PAUSED', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_STANDBY', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_CRASHING', index=6, number=6,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=11694,
serialized_end=11867,
)
_sym_db.RegisterEnumDescriptor(_PIPELINESTATE)
PipelineState = enum_type_wrapper.EnumTypeWrapper(_PIPELINESTATE)
JOB_STARTING = 0
JOB_RUNNING = 1
JOB_FAILURE = 2
JOB_SUCCESS = 3
JOB_KILLED = 4
JOB_MERGING = 5
JOB_EGRESSING = 6
FAILED = 0
SUCCESS = 1
SKIPPED = 2
STARTING = 3
RECOVERED = 4
POD_RUNNING = 0
POD_SUCCESS = 1
POD_FAILED = 2
PIPELINE_STARTING = 0
PIPELINE_RUNNING = 1
PIPELINE_RESTARTING = 2
PIPELINE_FAILURE = 3
PIPELINE_PAUSED = 4
PIPELINE_STANDBY = 5
PIPELINE_CRASHING = 6
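
# Usage sketch (illustrative, not part of the generated code): the EnumTypeWrapper
# objects defined above expose the standard protobuf helpers for converting between
# enum names and numbers, e.g.:
#
#   JobState.Name(JOB_SUCCESS)      # -> 'JOB_SUCCESS'
#   JobState.Value('JOB_RUNNING')   # -> 1
#   PipelineState.keys()            # -> ['PIPELINE_STARTING', 'PIPELINE_RUNNING', ...]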
_SECRETMOUNT = _descriptor.Descriptor(
name='SecretMount',
full_name='pps_1_11.SecretMount',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.SecretMount.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.SecretMount.key', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mount_path', full_name='pps_1_11.SecretMount.mount_path', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env_var', full_name='pps_1_11.SecretMount.env_var', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=174,
serialized_end=251,
)
_TRANSFORM_ENVENTRY = _descriptor.Descriptor(
name='EnvEntry',
full_name='pps_1_11.Transform.EnvEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.Transform.EnvEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.Transform.EnvEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=591,
serialized_end=633,
)
_TRANSFORM = _descriptor.Descriptor(
name='Transform',
full_name='pps_1_11.Transform',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='pps_1_11.Transform.image', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cmd', full_name='pps_1_11.Transform.cmd', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='err_cmd', full_name='pps_1_11.Transform.err_cmd', index=2,
number=13, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env', full_name='pps_1_11.Transform.env', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='secrets', full_name='pps_1_11.Transform.secrets', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_pull_secrets', full_name='pps_1_11.Transform.image_pull_secrets', index=5,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stdin', full_name='pps_1_11.Transform.stdin', index=6,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='err_stdin', full_name='pps_1_11.Transform.err_stdin', index=7,
number=14, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='accept_return_code', full_name='pps_1_11.Transform.accept_return_code', index=8,
number=6, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug', full_name='pps_1_11.Transform.debug', index=9,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='pps_1_11.Transform.user', index=10,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='working_dir', full_name='pps_1_11.Transform.working_dir', index=11,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dockerfile', full_name='pps_1_11.Transform.dockerfile', index=12,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='build', full_name='pps_1_11.Transform.build', index=13,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRANSFORM_ENVENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=633,
)
_BUILDSPEC = _descriptor.Descriptor(
name='BuildSpec',
full_name='pps_1_11.BuildSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='pps_1_11.BuildSpec.path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language', full_name='pps_1_11.BuildSpec.language', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='pps_1_11.BuildSpec.image', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=635,
serialized_end=693,
)
_TFJOB = _descriptor.Descriptor(
name='TFJob',
full_name='pps_1_11.TFJob',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tf_job', full_name='pps_1_11.TFJob.tf_job', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=695,
serialized_end=718,
)
_EGRESS = _descriptor.Descriptor(
name='Egress',
full_name='pps_1_11.Egress',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='URL', full_name='pps_1_11.Egress.URL', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=720,
serialized_end=741,
)
_JOB = _descriptor.Descriptor(
name='Job',
full_name='pps_1_11.Job',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pps_1_11.Job.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=743,
serialized_end=760,
)
_METADATA_ANNOTATIONSENTRY = _descriptor.Descriptor(
name='AnnotationsEntry',
full_name='pps_1_11.Metadata.AnnotationsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.Metadata.AnnotationsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.Metadata.AnnotationsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=881,
serialized_end=931,
)
_METADATA_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='pps_1_11.Metadata.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.Metadata.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.Metadata.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=933,
serialized_end=978,
)
_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='pps_1_11.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='annotations', full_name='pps_1_11.Metadata.annotations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='pps_1_11.Metadata.labels', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_METADATA_ANNOTATIONSENTRY, _METADATA_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=763,
serialized_end=978,
)
_SERVICE = _descriptor.Descriptor(
name='Service',
full_name='pps_1_11.Service',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='internal_port', full_name='pps_1_11.Service.internal_port', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='external_port', full_name='pps_1_11.Service.external_port', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ip', full_name='pps_1_11.Service.ip', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='pps_1_11.Service.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=980,
serialized_end=1061,
)
_SPOUT = _descriptor.Descriptor(
name='Spout',
full_name='pps_1_11.Spout',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='overwrite', full_name='pps_1_11.Spout.overwrite', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='pps_1_11.Spout.service', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='marker', full_name='pps_1_11.Spout.marker', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1063,
serialized_end=1141,
)
_PFSINPUT = _descriptor.Descriptor(
name='PFSInput',
full_name='pps_1_11.PFSInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.PFSInput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repo', full_name='pps_1_11.PFSInput.repo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='branch', full_name='pps_1_11.PFSInput.branch', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='commit', full_name='pps_1_11.PFSInput.commit', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='glob', full_name='pps_1_11.PFSInput.glob', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='join_on', full_name='pps_1_11.PFSInput.join_on', index=5,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lazy', full_name='pps_1_11.PFSInput.lazy', index=6,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='empty_files', full_name='pps_1_11.PFSInput.empty_files', index=7,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s3', full_name='pps_1_11.PFSInput.s3', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1144,
serialized_end=1292,
)
_CRONINPUT = _descriptor.Descriptor(
name='CronInput',
full_name='pps_1_11.CronInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.CronInput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='repo', full_name='pps_1_11.CronInput.repo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='commit', full_name='pps_1_11.CronInput.commit', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spec', full_name='pps_1_11.CronInput.spec', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='overwrite', full_name='pps_1_11.CronInput.overwrite', index=4,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start', full_name='pps_1_11.CronInput.start', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1295,
serialized_end=1426,
)
_GITINPUT = _descriptor.Descriptor(
name='GitInput',
full_name='pps_1_11.GitInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.GitInput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='pps_1_11.GitInput.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='branch', full_name='pps_1_11.GitInput.branch', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='commit', full_name='pps_1_11.GitInput.commit', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1428,
serialized_end=1497,
)
_INPUT = _descriptor.Descriptor(
name='Input',
full_name='pps_1_11.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pfs', full_name='pps_1_11.Input.pfs', index=0,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='join', full_name='pps_1_11.Input.join', index=1,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cross', full_name='pps_1_11.Input.cross', index=2,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='union', full_name='pps_1_11.Input.union', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cron', full_name='pps_1_11.Input.cron', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='git', full_name='pps_1_11.Input.git', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1500,
serialized_end=1703,
)
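# Descriptor for the pps_1_11.JobInput message (fields: name, commit, glob, lazy).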
_JOBINPUT = _descriptor.Descriptor(
name='JobInput',
full_name='pps_1_11.JobInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.JobInput.name', index=0,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='commit', full_name='pps_1_11.JobInput.commit', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='glob', full_name='pps_1_11.JobInput.glob', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lazy', full_name='pps_1_11.JobInput.lazy', index=3,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1705,
serialized_end=1791,
)
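# Descriptor for the pps_1_11.ParallelismSpec message (fields: constant, coefficient).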
_PARALLELISMSPEC = _descriptor.Descriptor(
name='ParallelismSpec',
full_name='pps_1_11.ParallelismSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='constant', full_name='pps_1_11.ParallelismSpec.constant', index=0,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coefficient', full_name='pps_1_11.ParallelismSpec.coefficient', index=1,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1793,
serialized_end=1855,
)
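# Descriptor for the pps_1_11.HashtreeSpec message (single field: constant).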
_HASHTREESPEC = _descriptor.Descriptor(
name='HashtreeSpec',
full_name='pps_1_11.HashtreeSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='constant', full_name='pps_1_11.HashtreeSpec.constant', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1857,
serialized_end=1889,
)
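# Descriptor for the pps_1_11.InputFile message (path string and hash bytes).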
_INPUTFILE = _descriptor.Descriptor(
name='InputFile',
full_name='pps_1_11.InputFile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='pps_1_11.InputFile.path', index=0,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hash', full_name='pps_1_11.InputFile.hash', index=1,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1891,
serialized_end=1930,
)
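# Descriptor for the pps_1_11.Datum message (datum id and a job reference).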
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='pps_1_11.Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pps_1_11.Datum.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.Datum.job', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1932,
serialized_end=1979,
)
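# Descriptor for the pps_1_11.DatumInfo message (datum, state, stats, pfs_state, repeated data).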
_DATUMINFO = _descriptor.Descriptor(
name='DatumInfo',
full_name='pps_1_11.DatumInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datum', full_name='pps_1_11.DatumInfo.datum', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.DatumInfo.state', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.DatumInfo.stats', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pfs_state', full_name='pps_1_11.DatumInfo.pfs_state', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pps_1_11.DatumInfo.data', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1982,
serialized_end=2170,
)
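# Descriptor for the pps_1_11.Aggregate message (count, mean, stddev, 5th/95th percentiles).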
_AGGREGATE = _descriptor.Descriptor(
name='Aggregate',
full_name='pps_1_11.Aggregate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='pps_1_11.Aggregate.count', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean', full_name='pps_1_11.Aggregate.mean', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stddev', full_name='pps_1_11.Aggregate.stddev', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fifth_percentile', full_name='pps_1_11.Aggregate.fifth_percentile', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ninety_fifth_percentile', full_name='pps_1_11.Aggregate.ninety_fifth_percentile', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2172,
serialized_end=2287,
)
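# Descriptor for the pps_1_11.ProcessStats message (download/process/upload times and byte counts).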
_PROCESSSTATS = _descriptor.Descriptor(
name='ProcessStats',
full_name='pps_1_11.ProcessStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='download_time', full_name='pps_1_11.ProcessStats.download_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='process_time', full_name='pps_1_11.ProcessStats.process_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upload_time', full_name='pps_1_11.ProcessStats.upload_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='download_bytes', full_name='pps_1_11.ProcessStats.download_bytes', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upload_bytes', full_name='pps_1_11.ProcessStats.upload_bytes', index=4,
number=5, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2290,
serialized_end=2497,
)
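# Descriptor for the pps_1_11.AggregateProcessStats message (aggregated counterparts of the ProcessStats fields).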
_AGGREGATEPROCESSSTATS = _descriptor.Descriptor(
name='AggregateProcessStats',
full_name='pps_1_11.AggregateProcessStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='download_time', full_name='pps_1_11.AggregateProcessStats.download_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='process_time', full_name='pps_1_11.AggregateProcessStats.process_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upload_time', full_name='pps_1_11.AggregateProcessStats.upload_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='download_bytes', full_name='pps_1_11.AggregateProcessStats.download_bytes', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='upload_bytes', full_name='pps_1_11.AggregateProcessStats.upload_bytes', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2500,
serialized_end=2740,
)
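# Descriptor for the pps_1_11.WorkerStatus message (worker_id, job_id, data, started, stats, queue_size).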
_WORKERSTATUS = _descriptor.Descriptor(
name='WorkerStatus',
full_name='pps_1_11.WorkerStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='worker_id', full_name='pps_1_11.WorkerStatus.worker_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='pps_1_11.WorkerStatus.job_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pps_1_11.WorkerStatus.data', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='pps_1_11.WorkerStatus.started', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.WorkerStatus.stats', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='queue_size', full_name='pps_1_11.WorkerStatus.queue_size', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2743,
serialized_end=2931,
)
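# Descriptor for the pps_1_11.ResourceSpec message (cpu, memory, gpu, disk).
#
# Illustrative sketch only, kept as a comment: the gpu field is a message type, presumably
# the GPUSpec defined just below, and all concrete values here are invented examples:
#
#   spec = ResourceSpec(cpu=1.0, memory="2G", disk="10G", gpu=GPUSpec(type="nvidia.com/gpu", number=1))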
_RESOURCESPEC = _descriptor.Descriptor(
name='ResourceSpec',
full_name='pps_1_11.ResourceSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cpu', full_name='pps_1_11.ResourceSpec.cpu', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='pps_1_11.ResourceSpec.memory', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gpu', full_name='pps_1_11.ResourceSpec.gpu', index=2,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disk', full_name='pps_1_11.ResourceSpec.disk', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2933,
serialized_end=3028,
)
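# Descriptor for the pps_1_11.GPUSpec message (fields: type, number).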
_GPUSPEC = _descriptor.Descriptor(
name='GPUSpec',
full_name='pps_1_11.GPUSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='pps_1_11.GPUSpec.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number', full_name='pps_1_11.GPUSpec.number', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3030,
serialized_end=3069,
)
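# Descriptor for the pps_1_11.EtcdJobInfo message: the compact job bookkeeping record
# (job, pipeline, output_commit, restart and datum counters, stats, stats_commit,
# state, reason, started/finished timestamps).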
_ETCDJOBINFO = _descriptor.Descriptor(
name='EtcdJobInfo',
full_name='pps_1_11.EtcdJobInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.EtcdJobInfo.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.EtcdJobInfo.pipeline', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_commit', full_name='pps_1_11.EtcdJobInfo.output_commit', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restart', full_name='pps_1_11.EtcdJobInfo.restart', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_processed', full_name='pps_1_11.EtcdJobInfo.data_processed', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_skipped', full_name='pps_1_11.EtcdJobInfo.data_skipped', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_total', full_name='pps_1_11.EtcdJobInfo.data_total', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_failed', full_name='pps_1_11.EtcdJobInfo.data_failed', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_recovered', full_name='pps_1_11.EtcdJobInfo.data_recovered', index=8,
number=15, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.EtcdJobInfo.stats', index=9,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats_commit', full_name='pps_1_11.EtcdJobInfo.stats_commit', index=10,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.EtcdJobInfo.state', index=11,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.EtcdJobInfo.reason', index=12,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='pps_1_11.EtcdJobInfo.started', index=13,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='finished', full_name='pps_1_11.EtcdJobInfo.finished', index=14,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3072,
serialized_end=3541,
)
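# Descriptor for the pps_1_11.JobInfo message: the full job report, combining the job
# identity with its transform, pipeline, parallelism/resource specs, input, datum
# counters, stats, worker status, and timeout/scheduling/pod settings.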
_JOBINFO = _descriptor.Descriptor(
name='JobInfo',
full_name='pps_1_11.JobInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.JobInfo.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transform', full_name='pps_1_11.JobInfo.transform', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.JobInfo.pipeline', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline_version', full_name='pps_1_11.JobInfo.pipeline_version', index=3,
number=13, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spec_commit', full_name='pps_1_11.JobInfo.spec_commit', index=4,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parallelism_spec', full_name='pps_1_11.JobInfo.parallelism_spec', index=5,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='egress', full_name='pps_1_11.JobInfo.egress', index=6,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parent_job', full_name='pps_1_11.JobInfo.parent_job', index=7,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='pps_1_11.JobInfo.started', index=8,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='finished', full_name='pps_1_11.JobInfo.finished', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_commit', full_name='pps_1_11.JobInfo.output_commit', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.JobInfo.state', index=11,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.JobInfo.reason', index=12,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='pps_1_11.JobInfo.service', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spout', full_name='pps_1_11.JobInfo.spout', index=14,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_repo', full_name='pps_1_11.JobInfo.output_repo', index=15,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_branch', full_name='pps_1_11.JobInfo.output_branch', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restart', full_name='pps_1_11.JobInfo.restart', index=17,
number=20, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_processed', full_name='pps_1_11.JobInfo.data_processed', index=18,
number=22, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_skipped', full_name='pps_1_11.JobInfo.data_skipped', index=19,
number=30, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_failed', full_name='pps_1_11.JobInfo.data_failed', index=20,
number=40, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_recovered', full_name='pps_1_11.JobInfo.data_recovered', index=21,
number=46, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_total', full_name='pps_1_11.JobInfo.data_total', index=22,
number=23, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.JobInfo.stats', index=23,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_status', full_name='pps_1_11.JobInfo.worker_status', index=24,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_requests', full_name='pps_1_11.JobInfo.resource_requests', index=25,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_limits', full_name='pps_1_11.JobInfo.resource_limits', index=26,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sidecar_resource_limits', full_name='pps_1_11.JobInfo.sidecar_resource_limits', index=27,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input', full_name='pps_1_11.JobInfo.input', index=28,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='new_branch', full_name='pps_1_11.JobInfo.new_branch', index=29,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats_commit', full_name='pps_1_11.JobInfo.stats_commit', index=30,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_stats', full_name='pps_1_11.JobInfo.enable_stats', index=31,
number=32, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='salt', full_name='pps_1_11.JobInfo.salt', index=32,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunk_spec', full_name='pps_1_11.JobInfo.chunk_spec', index=33,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_timeout', full_name='pps_1_11.JobInfo.datum_timeout', index=34,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_timeout', full_name='pps_1_11.JobInfo.job_timeout', index=35,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_tries', full_name='pps_1_11.JobInfo.datum_tries', index=36,
number=41, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scheduling_spec', full_name='pps_1_11.JobInfo.scheduling_spec', index=37,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_spec', full_name='pps_1_11.JobInfo.pod_spec', index=38,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_patch', full_name='pps_1_11.JobInfo.pod_patch', index=39,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3544,
serialized_end=4950,
)
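# Descriptor for the pps_1_11.Worker message (name and state enum).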
_WORKER = _descriptor.Descriptor(
name='Worker',
full_name='pps_1_11.Worker',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.Worker.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.Worker.state', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4952,
serialized_end=5012,
)
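# Descriptor for the pps_1_11.JobInfos message (wraps a repeated job_info field).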
_JOBINFOS = _descriptor.Descriptor(
name='JobInfos',
full_name='pps_1_11.JobInfos',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job_info', full_name='pps_1_11.JobInfos.job_info', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5014,
serialized_end=5061,
)
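# Descriptor for the pps_1_11.Pipeline message (a single name field identifying a pipeline).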
_PIPELINE = _descriptor.Descriptor(
name='Pipeline',
full_name='pps_1_11.Pipeline',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.Pipeline.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5063,
serialized_end=5087,
)
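# Map-entry descriptor backing the EtcdPipelineInfo.job_counts map (int32 key/value).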
_ETCDPIPELINEINFO_JOBCOUNTSENTRY = _descriptor.Descriptor(
name='JobCountsEntry',
full_name='pps_1_11.EtcdPipelineInfo.JobCountsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.EtcdPipelineInfo.JobCountsEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.EtcdPipelineInfo.JobCountsEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5353,
serialized_end=5401,
)
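# Descriptor for the pps_1_11.EtcdPipelineInfo message (state, reason, spec_commit,
# job_counts, auth_token, last_job_state, parallelism).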
_ETCDPIPELINEINFO = _descriptor.Descriptor(
name='EtcdPipelineInfo',
full_name='pps_1_11.EtcdPipelineInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.EtcdPipelineInfo.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.EtcdPipelineInfo.reason', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spec_commit', full_name='pps_1_11.EtcdPipelineInfo.spec_commit', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_counts', full_name='pps_1_11.EtcdPipelineInfo.job_counts', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auth_token', full_name='pps_1_11.EtcdPipelineInfo.auth_token', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_job_state', full_name='pps_1_11.EtcdPipelineInfo.last_job_state', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parallelism', full_name='pps_1_11.EtcdPipelineInfo.parallelism', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ETCDPIPELINEINFO_JOBCOUNTSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5090,
serialized_end=5401,
)
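# Map-entry descriptor backing the PipelineInfo.job_counts map (int32 key/value).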
_PIPELINEINFO_JOBCOUNTSENTRY = _descriptor.Descriptor(
name='JobCountsEntry',
full_name='pps_1_11.PipelineInfo.JobCountsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.PipelineInfo.JobCountsEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.PipelineInfo.JobCountsEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5353,
serialized_end=5401,
)
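# Descriptor for the pps_1_11.PipelineInfo message: the full pipeline report
# (identity, transform, parallelism/hashtree specs, state, worker counts, job_counts,
# and per-pipeline settings such as resource limits, timeouts, standby, scheduling,
# pod_spec/pod_patch, s3_out, and metadata).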
_PIPELINEINFO = _descriptor.Descriptor(
name='PipelineInfo',
full_name='pps_1_11.PipelineInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='pps_1_11.PipelineInfo.id', index=0,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.PipelineInfo.pipeline', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='pps_1_11.PipelineInfo.version', index=2,
number=11, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transform', full_name='pps_1_11.PipelineInfo.transform', index=3,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_job', full_name='pps_1_11.PipelineInfo.tf_job', index=4,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parallelism_spec', full_name='pps_1_11.PipelineInfo.parallelism_spec', index=5,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hashtree_spec', full_name='pps_1_11.PipelineInfo.hashtree_spec', index=6,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='egress', full_name='pps_1_11.PipelineInfo.egress', index=7,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created_at', full_name='pps_1_11.PipelineInfo.created_at', index=8,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.PipelineInfo.state', index=9,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stopped', full_name='pps_1_11.PipelineInfo.stopped', index=10,
number=38, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recent_error', full_name='pps_1_11.PipelineInfo.recent_error', index=11,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workers_requested', full_name='pps_1_11.PipelineInfo.workers_requested', index=12,
number=49, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workers_available', full_name='pps_1_11.PipelineInfo.workers_available', index=13,
number=50, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_counts', full_name='pps_1_11.PipelineInfo.job_counts', index=14,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_job_state', full_name='pps_1_11.PipelineInfo.last_job_state', index=15,
number=43, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_branch', full_name='pps_1_11.PipelineInfo.output_branch', index=16,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_requests', full_name='pps_1_11.PipelineInfo.resource_requests', index=17,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_limits', full_name='pps_1_11.PipelineInfo.resource_limits', index=18,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sidecar_resource_limits', full_name='pps_1_11.PipelineInfo.sidecar_resource_limits', index=19,
number=51, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input', full_name='pps_1_11.PipelineInfo.input', index=20,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='pps_1_11.PipelineInfo.description', index=21,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cache_size', full_name='pps_1_11.PipelineInfo.cache_size', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_stats', full_name='pps_1_11.PipelineInfo.enable_stats', index=23,
number=24, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='salt', full_name='pps_1_11.PipelineInfo.salt', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.PipelineInfo.reason', index=25,
number=28, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_queue_size', full_name='pps_1_11.PipelineInfo.max_queue_size', index=26,
number=29, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='pps_1_11.PipelineInfo.service', index=27,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spout', full_name='pps_1_11.PipelineInfo.spout', index=28,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunk_spec', full_name='pps_1_11.PipelineInfo.chunk_spec', index=29,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_timeout', full_name='pps_1_11.PipelineInfo.datum_timeout', index=30,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_timeout', full_name='pps_1_11.PipelineInfo.job_timeout', index=31,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='githook_url', full_name='pps_1_11.PipelineInfo.githook_url', index=32,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spec_commit', full_name='pps_1_11.PipelineInfo.spec_commit', index=33,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='standby', full_name='pps_1_11.PipelineInfo.standby', index=34,
number=37, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_tries', full_name='pps_1_11.PipelineInfo.datum_tries', index=35,
number=39, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scheduling_spec', full_name='pps_1_11.PipelineInfo.scheduling_spec', index=36,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_spec', full_name='pps_1_11.PipelineInfo.pod_spec', index=37,
number=41, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_patch', full_name='pps_1_11.PipelineInfo.pod_patch', index=38,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s3_out', full_name='pps_1_11.PipelineInfo.s3_out', index=39,
number=47, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='pps_1_11.PipelineInfo.metadata', index=40,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PIPELINEINFO_JOBCOUNTSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5404,
serialized_end=6838,
)
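# Descriptor for pps_1_11.PipelineInfos: a thin wrapper message whose single
# repeated field 'pipeline_info' holds a list of PipelineInfo entries.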
_PIPELINEINFOS = _descriptor.Descriptor(
name='PipelineInfos',
full_name='pps_1_11.PipelineInfos',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline_info', full_name='pps_1_11.PipelineInfos.pipeline_info', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6840,
serialized_end=6902,
)
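# Job-related request descriptors follow: CreateJobRequest, InspectJobRequest,
# ListJobRequest, FlushJobRequest, DeleteJobRequest, StopJobRequest and
# UpdateJobStateRequest.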
_CREATEJOBREQUEST = _descriptor.Descriptor(
name='CreateJobRequest',
full_name='pps_1_11.CreateJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.CreateJobRequest.pipeline', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_commit', full_name='pps_1_11.CreateJobRequest.output_commit', index=1,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restart', full_name='pps_1_11.CreateJobRequest.restart', index=2,
number=26, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_processed', full_name='pps_1_11.CreateJobRequest.data_processed', index=3,
number=27, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_skipped', full_name='pps_1_11.CreateJobRequest.data_skipped', index=4,
number=28, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_total', full_name='pps_1_11.CreateJobRequest.data_total', index=5,
number=29, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_failed', full_name='pps_1_11.CreateJobRequest.data_failed', index=6,
number=30, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_recovered', full_name='pps_1_11.CreateJobRequest.data_recovered', index=7,
number=31, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.CreateJobRequest.stats', index=8,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats_commit', full_name='pps_1_11.CreateJobRequest.stats_commit', index=9,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.CreateJobRequest.state', index=10,
number=34, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.CreateJobRequest.reason', index=11,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='started', full_name='pps_1_11.CreateJobRequest.started', index=12,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='finished', full_name='pps_1_11.CreateJobRequest.finished', index=13,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6905,
serialized_end=7477,
)
_INSPECTJOBREQUEST = _descriptor.Descriptor(
name='InspectJobRequest',
full_name='pps_1_11.InspectJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.InspectJobRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_commit', full_name='pps_1_11.InspectJobRequest.output_commit', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='block_state', full_name='pps_1_11.InspectJobRequest.block_state', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='full', full_name='pps_1_11.InspectJobRequest.full', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7479,
serialized_end=7602,
)
_LISTJOBREQUEST = _descriptor.Descriptor(
name='ListJobRequest',
full_name='pps_1_11.ListJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.ListJobRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_commit', full_name='pps_1_11.ListJobRequest.input_commit', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_commit', full_name='pps_1_11.ListJobRequest.output_commit', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='history', full_name='pps_1_11.ListJobRequest.history', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='full', full_name='pps_1_11.ListJobRequest.full', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7605,
serialized_end=7771,
)
_FLUSHJOBREQUEST = _descriptor.Descriptor(
name='FlushJobRequest',
full_name='pps_1_11.FlushJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='commits', full_name='pps_1_11.FlushJobRequest.commits', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='to_pipelines', full_name='pps_1_11.FlushJobRequest.to_pipelines', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7773,
serialized_end=7867,
)
_DELETEJOBREQUEST = _descriptor.Descriptor(
name='DeleteJobRequest',
full_name='pps_1_11.DeleteJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.DeleteJobRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7869,
serialized_end=7915,
)
_STOPJOBREQUEST = _descriptor.Descriptor(
name='StopJobRequest',
full_name='pps_1_11.StopJobRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.StopJobRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7917,
serialized_end=7961,
)
_UPDATEJOBSTATEREQUEST = _descriptor.Descriptor(
name='UpdateJobStateRequest',
full_name='pps_1_11.UpdateJobStateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.UpdateJobStateRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='pps_1_11.UpdateJobStateRequest.state', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reason', full_name='pps_1_11.UpdateJobStateRequest.reason', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restart', full_name='pps_1_11.UpdateJobStateRequest.restart', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_processed', full_name='pps_1_11.UpdateJobStateRequest.data_processed', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_skipped', full_name='pps_1_11.UpdateJobStateRequest.data_skipped', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_failed', full_name='pps_1_11.UpdateJobStateRequest.data_failed', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_recovered', full_name='pps_1_11.UpdateJobStateRequest.data_recovered', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_total', full_name='pps_1_11.UpdateJobStateRequest.data_total', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stats', full_name='pps_1_11.UpdateJobStateRequest.stats', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7964,
serialized_end=8233,
)
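# Logging descriptors: GetLogsRequest (pipeline/job/datum filters plus
# master, follow, tail and use_loki_backend options) and LogMessage.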
_GETLOGSREQUEST = _descriptor.Descriptor(
name='GetLogsRequest',
full_name='pps_1_11.GetLogsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.GetLogsRequest.pipeline', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.GetLogsRequest.job', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_filters', full_name='pps_1_11.GetLogsRequest.data_filters', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum', full_name='pps_1_11.GetLogsRequest.datum', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master', full_name='pps_1_11.GetLogsRequest.master', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='follow', full_name='pps_1_11.GetLogsRequest.follow', index=5,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tail', full_name='pps_1_11.GetLogsRequest.tail', index=6,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_loki_backend', full_name='pps_1_11.GetLogsRequest.use_loki_backend', index=7,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8236,
serialized_end=8450,
)
_LOGMESSAGE = _descriptor.Descriptor(
name='LogMessage',
full_name='pps_1_11.LogMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline_name', full_name='pps_1_11.LogMessage.pipeline_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='pps_1_11.LogMessage.job_id', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='worker_id', full_name='pps_1_11.LogMessage.worker_id', index=2,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_id', full_name='pps_1_11.LogMessage.datum_id', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='master', full_name='pps_1_11.LogMessage.master', index=4,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='pps_1_11.LogMessage.data', index=5,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='pps_1_11.LogMessage.user', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ts', full_name='pps_1_11.LogMessage.ts', index=7,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='pps_1_11.LogMessage.message', index=8,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8453,
serialized_end=8663,
)
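# Datum-related request/response descriptors: RestartDatumRequest,
# InspectDatumRequest, ListDatumRequest, ListDatumResponse and
# ListDatumStreamResponse.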
_RESTARTDATUMREQUEST = _descriptor.Descriptor(
name='RestartDatumRequest',
full_name='pps_1_11.RestartDatumRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.RestartDatumRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_filters', full_name='pps_1_11.RestartDatumRequest.data_filters', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8665,
serialized_end=8736,
)
_INSPECTDATUMREQUEST = _descriptor.Descriptor(
name='InspectDatumRequest',
full_name='pps_1_11.InspectDatumRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datum', full_name='pps_1_11.InspectDatumRequest.datum', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8738,
serialized_end=8791,
)
_LISTDATUMREQUEST = _descriptor.Descriptor(
name='ListDatumRequest',
full_name='pps_1_11.ListDatumRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='job', full_name='pps_1_11.ListDatumRequest.job', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='pps_1_11.ListDatumRequest.page_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page', full_name='pps_1_11.ListDatumRequest.page', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8793,
serialized_end=8872,
)
_LISTDATUMRESPONSE = _descriptor.Descriptor(
name='ListDatumResponse',
full_name='pps_1_11.ListDatumResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datum_infos', full_name='pps_1_11.ListDatumResponse.datum_infos', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_pages', full_name='pps_1_11.ListDatumResponse.total_pages', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page', full_name='pps_1_11.ListDatumResponse.page', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8874,
serialized_end=8970,
)
_LISTDATUMSTREAMRESPONSE = _descriptor.Descriptor(
name='ListDatumStreamResponse',
full_name='pps_1_11.ListDatumStreamResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='datum_info', full_name='pps_1_11.ListDatumStreamResponse.datum_info', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_pages', full_name='pps_1_11.ListDatumStreamResponse.total_pages', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page', full_name='pps_1_11.ListDatumStreamResponse.page', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8972,
serialized_end=9073,
)
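# Pipeline tuning spec descriptors: ChunkSpec (datum chunking by number or
# size_bytes) and SchedulingSpec (node_selector map entries plus a
# priority_class_name).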
_CHUNKSPEC = _descriptor.Descriptor(
name='ChunkSpec',
full_name='pps_1_11.ChunkSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number', full_name='pps_1_11.ChunkSpec.number', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_bytes', full_name='pps_1_11.ChunkSpec.size_bytes', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9075,
serialized_end=9122,
)
_SCHEDULINGSPEC_NODESELECTORENTRY = _descriptor.Descriptor(
name='NodeSelectorEntry',
full_name='pps_1_11.SchedulingSpec.NodeSelectorEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='pps_1_11.SchedulingSpec.NodeSelectorEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='pps_1_11.SchedulingSpec.NodeSelectorEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9239,
serialized_end=9290,
)
_SCHEDULINGSPEC = _descriptor.Descriptor(
name='SchedulingSpec',
full_name='pps_1_11.SchedulingSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_selector', full_name='pps_1_11.SchedulingSpec.node_selector', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority_class_name', full_name='pps_1_11.SchedulingSpec.priority_class_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SCHEDULINGSPEC_NODESELECTORENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9125,
serialized_end=9290,
)
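# Pipeline management descriptors: CreatePipelineRequest (whose fields largely
# parallel PipelineInfo above), then Inspect/List/Delete/Start/Stop/Run
# pipeline requests and RunCronRequest.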
_CREATEPIPELINEREQUEST = _descriptor.Descriptor(
name='CreatePipelineRequest',
full_name='pps_1_11.CreatePipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.CreatePipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_job', full_name='pps_1_11.CreatePipelineRequest.tf_job', index=1,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transform', full_name='pps_1_11.CreatePipelineRequest.transform', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parallelism_spec', full_name='pps_1_11.CreatePipelineRequest.parallelism_spec', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hashtree_spec', full_name='pps_1_11.CreatePipelineRequest.hashtree_spec', index=4,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='egress', full_name='pps_1_11.CreatePipelineRequest.egress', index=5,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='pps_1_11.CreatePipelineRequest.update', index=6,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_branch', full_name='pps_1_11.CreatePipelineRequest.output_branch', index=7,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s3_out', full_name='pps_1_11.CreatePipelineRequest.s3_out', index=8,
number=36, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_requests', full_name='pps_1_11.CreatePipelineRequest.resource_requests', index=9,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_limits', full_name='pps_1_11.CreatePipelineRequest.resource_limits', index=10,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sidecar_resource_limits', full_name='pps_1_11.CreatePipelineRequest.sidecar_resource_limits', index=11,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input', full_name='pps_1_11.CreatePipelineRequest.input', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='pps_1_11.CreatePipelineRequest.description', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cache_size', full_name='pps_1_11.CreatePipelineRequest.cache_size', index=14,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_stats', full_name='pps_1_11.CreatePipelineRequest.enable_stats', index=15,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reprocess', full_name='pps_1_11.CreatePipelineRequest.reprocess', index=16,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_queue_size', full_name='pps_1_11.CreatePipelineRequest.max_queue_size', index=17,
number=20, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='pps_1_11.CreatePipelineRequest.service', index=18,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spout', full_name='pps_1_11.CreatePipelineRequest.spout', index=19,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='chunk_spec', full_name='pps_1_11.CreatePipelineRequest.chunk_spec', index=20,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_timeout', full_name='pps_1_11.CreatePipelineRequest.datum_timeout', index=21,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_timeout', full_name='pps_1_11.CreatePipelineRequest.job_timeout', index=22,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='salt', full_name='pps_1_11.CreatePipelineRequest.salt', index=23,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='standby', full_name='pps_1_11.CreatePipelineRequest.standby', index=24,
number=27, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='datum_tries', full_name='pps_1_11.CreatePipelineRequest.datum_tries', index=25,
number=28, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scheduling_spec', full_name='pps_1_11.CreatePipelineRequest.scheduling_spec', index=26,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_spec', full_name='pps_1_11.CreatePipelineRequest.pod_spec', index=27,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pod_patch', full_name='pps_1_11.CreatePipelineRequest.pod_patch', index=28,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spec_commit', full_name='pps_1_11.CreatePipelineRequest.spec_commit', index=29,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='pps_1_11.CreatePipelineRequest.metadata', index=30,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=9293,
serialized_end=10365,
)
_INSPECTPIPELINEREQUEST = _descriptor.Descriptor(
name='InspectPipelineRequest',
full_name='pps_1_11.InspectPipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.InspectPipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10367,
serialized_end=10429,
)
_LISTPIPELINEREQUEST = _descriptor.Descriptor(
name='ListPipelineRequest',
full_name='pps_1_11.ListPipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.ListPipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='history', full_name='pps_1_11.ListPipelineRequest.history', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10431,
serialized_end=10507,
)
_DELETEPIPELINEREQUEST = _descriptor.Descriptor(
name='DeletePipelineRequest',
full_name='pps_1_11.DeletePipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.DeletePipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all', full_name='pps_1_11.DeletePipelineRequest.all', index=1,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='force', full_name='pps_1_11.DeletePipelineRequest.force', index=2,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keep_repo', full_name='pps_1_11.DeletePipelineRequest.keep_repo', index=3,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10509,
serialized_end=10629,
)
_STARTPIPELINEREQUEST = _descriptor.Descriptor(
name='StartPipelineRequest',
full_name='pps_1_11.StartPipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.StartPipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10631,
serialized_end=10691,
)
_STOPPIPELINEREQUEST = _descriptor.Descriptor(
name='StopPipelineRequest',
full_name='pps_1_11.StopPipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.StopPipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10693,
serialized_end=10752,
)
_RUNPIPELINEREQUEST = _descriptor.Descriptor(
name='RunPipelineRequest',
full_name='pps_1_11.RunPipelineRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.RunPipelineRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='provenance', full_name='pps_1_11.RunPipelineRequest.provenance', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='job_id', full_name='pps_1_11.RunPipelineRequest.job_id', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10755,
serialized_end=10883,
)
_RUNCRONREQUEST = _descriptor.Descriptor(
name='RunCronRequest',
full_name='pps_1_11.RunCronRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pipeline', full_name='pps_1_11.RunCronRequest.pipeline', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10885,
serialized_end=10939,
)
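# Secret management descriptors: CreateSecretRequest, DeleteSecretRequest,
# InspectSecretRequest, Secret, SecretInfo and SecretInfos.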
_CREATESECRETREQUEST = _descriptor.Descriptor(
name='CreateSecretRequest',
full_name='pps_1_11.CreateSecretRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file', full_name='pps_1_11.CreateSecretRequest.file', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10941,
serialized_end=10976,
)
_DELETESECRETREQUEST = _descriptor.Descriptor(
name='DeleteSecretRequest',
full_name='pps_1_11.DeleteSecretRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret', full_name='pps_1_11.DeleteSecretRequest.secret', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10978,
serialized_end=11033,
)
_INSPECTSECRETREQUEST = _descriptor.Descriptor(
name='InspectSecretRequest',
full_name='pps_1_11.InspectSecretRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret', full_name='pps_1_11.InspectSecretRequest.secret', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11035,
serialized_end=11091,
)
_SECRET = _descriptor.Descriptor(
name='Secret',
full_name='pps_1_11.Secret',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='pps_1_11.Secret.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11093,
serialized_end=11115,
)
_SECRETINFO = _descriptor.Descriptor(
name='SecretInfo',
full_name='pps_1_11.SecretInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret', full_name='pps_1_11.SecretInfo.secret', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='pps_1_11.SecretInfo.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creation_timestamp', full_name='pps_1_11.SecretInfo.creation_timestamp', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11117,
serialized_end=11233,
)
_SECRETINFOS = _descriptor.Descriptor(
name='SecretInfos',
full_name='pps_1_11.SecretInfos',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret_info', full_name='pps_1_11.SecretInfos.secret_info', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11235,
serialized_end=11291,
)
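# Garbage collection and auth activation descriptors; apart from
# GarbageCollectRequest.memory_bytes, these request/response messages carry
# no fields.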
_GARBAGECOLLECTREQUEST = _descriptor.Descriptor(
name='GarbageCollectRequest',
full_name='pps_1_11.GarbageCollectRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='memory_bytes', full_name='pps_1_11.GarbageCollectRequest.memory_bytes', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11293,
serialized_end=11338,
)
_GARBAGECOLLECTRESPONSE = _descriptor.Descriptor(
name='GarbageCollectResponse',
full_name='pps_1_11.GarbageCollectResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11340,
serialized_end=11364,
)
_ACTIVATEAUTHREQUEST = _descriptor.Descriptor(
name='ActivateAuthRequest',
full_name='pps_1_11.ActivateAuthRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11366,
serialized_end=11387,
)
_ACTIVATEAUTHRESPONSE = _descriptor.Descriptor(
name='ActivateAuthResponse',
full_name='pps_1_11.ActivateAuthResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11389,
serialized_end=11411,
)
_TRANSFORM_ENVENTRY.containing_type = _TRANSFORM
_TRANSFORM.fields_by_name['env'].message_type = _TRANSFORM_ENVENTRY
_TRANSFORM.fields_by_name['secrets'].message_type = _SECRETMOUNT
_TRANSFORM.fields_by_name['build'].message_type = _BUILDSPEC
_METADATA_ANNOTATIONSENTRY.containing_type = _METADATA
_METADATA_LABELSENTRY.containing_type = _METADATA
_METADATA.fields_by_name['annotations'].message_type = _METADATA_ANNOTATIONSENTRY
_METADATA.fields_by_name['labels'].message_type = _METADATA_LABELSENTRY
_SPOUT.fields_by_name['service'].message_type = _SERVICE
_CRONINPUT.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INPUT.fields_by_name['pfs'].message_type = _PFSINPUT
_INPUT.fields_by_name['join'].message_type = _INPUT
_INPUT.fields_by_name['cross'].message_type = _INPUT
_INPUT.fields_by_name['union'].message_type = _INPUT
_INPUT.fields_by_name['cron'].message_type = _CRONINPUT
_INPUT.fields_by_name['git'].message_type = _GITINPUT
_JOBINPUT.fields_by_name['commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_DATUM.fields_by_name['job'].message_type = _JOB
_DATUMINFO.fields_by_name['datum'].message_type = _DATUM
_DATUMINFO.fields_by_name['state'].enum_type = _DATUMSTATE
_DATUMINFO.fields_by_name['stats'].message_type = _PROCESSSTATS
_DATUMINFO.fields_by_name['pfs_state'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._FILE
_DATUMINFO.fields_by_name['data'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._FILEINFO
_PROCESSSTATS.fields_by_name['download_time'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PROCESSSTATS.fields_by_name['process_time'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PROCESSSTATS.fields_by_name['upload_time'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_AGGREGATEPROCESSSTATS.fields_by_name['download_time'].message_type = _AGGREGATE
_AGGREGATEPROCESSSTATS.fields_by_name['process_time'].message_type = _AGGREGATE
_AGGREGATEPROCESSSTATS.fields_by_name['upload_time'].message_type = _AGGREGATE
_AGGREGATEPROCESSSTATS.fields_by_name['download_bytes'].message_type = _AGGREGATE
_AGGREGATEPROCESSSTATS.fields_by_name['upload_bytes'].message_type = _AGGREGATE
_WORKERSTATUS.fields_by_name['data'].message_type = _INPUTFILE
_WORKERSTATUS.fields_by_name['started'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_WORKERSTATUS.fields_by_name['stats'].message_type = _PROCESSSTATS
_RESOURCESPEC.fields_by_name['gpu'].message_type = _GPUSPEC
_ETCDJOBINFO.fields_by_name['job'].message_type = _JOB
_ETCDJOBINFO.fields_by_name['pipeline'].message_type = _PIPELINE
_ETCDJOBINFO.fields_by_name['output_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_ETCDJOBINFO.fields_by_name['stats'].message_type = _PROCESSSTATS
_ETCDJOBINFO.fields_by_name['stats_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_ETCDJOBINFO.fields_by_name['state'].enum_type = _JOBSTATE
_ETCDJOBINFO.fields_by_name['started'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ETCDJOBINFO.fields_by_name['finished'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_JOBINFO.fields_by_name['job'].message_type = _JOB
_JOBINFO.fields_by_name['transform'].message_type = _TRANSFORM
_JOBINFO.fields_by_name['pipeline'].message_type = _PIPELINE
_JOBINFO.fields_by_name['spec_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_JOBINFO.fields_by_name['parallelism_spec'].message_type = _PARALLELISMSPEC
_JOBINFO.fields_by_name['egress'].message_type = _EGRESS
_JOBINFO.fields_by_name['parent_job'].message_type = _JOB
_JOBINFO.fields_by_name['started'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_JOBINFO.fields_by_name['finished'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_JOBINFO.fields_by_name['output_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_JOBINFO.fields_by_name['state'].enum_type = _JOBSTATE
_JOBINFO.fields_by_name['service'].message_type = _SERVICE
_JOBINFO.fields_by_name['spout'].message_type = _SPOUT
_JOBINFO.fields_by_name['output_repo'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._REPO
_JOBINFO.fields_by_name['stats'].message_type = _PROCESSSTATS
_JOBINFO.fields_by_name['worker_status'].message_type = _WORKERSTATUS
_JOBINFO.fields_by_name['resource_requests'].message_type = _RESOURCESPEC
_JOBINFO.fields_by_name['resource_limits'].message_type = _RESOURCESPEC
_JOBINFO.fields_by_name['sidecar_resource_limits'].message_type = _RESOURCESPEC
_JOBINFO.fields_by_name['input'].message_type = _INPUT
_JOBINFO.fields_by_name['new_branch'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._BRANCHINFO
_JOBINFO.fields_by_name['stats_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_JOBINFO.fields_by_name['chunk_spec'].message_type = _CHUNKSPEC
_JOBINFO.fields_by_name['datum_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_JOBINFO.fields_by_name['job_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_JOBINFO.fields_by_name['scheduling_spec'].message_type = _SCHEDULINGSPEC
_WORKER.fields_by_name['state'].enum_type = _WORKERSTATE
_JOBINFOS.fields_by_name['job_info'].message_type = _JOBINFO
_ETCDPIPELINEINFO_JOBCOUNTSENTRY.containing_type = _ETCDPIPELINEINFO
_ETCDPIPELINEINFO.fields_by_name['state'].enum_type = _PIPELINESTATE
_ETCDPIPELINEINFO.fields_by_name['spec_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_ETCDPIPELINEINFO.fields_by_name['job_counts'].message_type = _ETCDPIPELINEINFO_JOBCOUNTSENTRY
_ETCDPIPELINEINFO.fields_by_name['last_job_state'].enum_type = _JOBSTATE
_PIPELINEINFO_JOBCOUNTSENTRY.containing_type = _PIPELINEINFO
_PIPELINEINFO.fields_by_name['pipeline'].message_type = _PIPELINE
_PIPELINEINFO.fields_by_name['transform'].message_type = _TRANSFORM
_PIPELINEINFO.fields_by_name['tf_job'].message_type = _TFJOB
_PIPELINEINFO.fields_by_name['parallelism_spec'].message_type = _PARALLELISMSPEC
_PIPELINEINFO.fields_by_name['hashtree_spec'].message_type = _HASHTREESPEC
_PIPELINEINFO.fields_by_name['egress'].message_type = _EGRESS
_PIPELINEINFO.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_PIPELINEINFO.fields_by_name['state'].enum_type = _PIPELINESTATE
_PIPELINEINFO.fields_by_name['job_counts'].message_type = _PIPELINEINFO_JOBCOUNTSENTRY
_PIPELINEINFO.fields_by_name['last_job_state'].enum_type = _JOBSTATE
_PIPELINEINFO.fields_by_name['resource_requests'].message_type = _RESOURCESPEC
_PIPELINEINFO.fields_by_name['resource_limits'].message_type = _RESOURCESPEC
_PIPELINEINFO.fields_by_name['sidecar_resource_limits'].message_type = _RESOURCESPEC
_PIPELINEINFO.fields_by_name['input'].message_type = _INPUT
_PIPELINEINFO.fields_by_name['service'].message_type = _SERVICE
_PIPELINEINFO.fields_by_name['spout'].message_type = _SPOUT
_PIPELINEINFO.fields_by_name['chunk_spec'].message_type = _CHUNKSPEC
_PIPELINEINFO.fields_by_name['datum_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PIPELINEINFO.fields_by_name['job_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_PIPELINEINFO.fields_by_name['spec_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_PIPELINEINFO.fields_by_name['scheduling_spec'].message_type = _SCHEDULINGSPEC
_PIPELINEINFO.fields_by_name['metadata'].message_type = _METADATA
_PIPELINEINFOS.fields_by_name['pipeline_info'].message_type = _PIPELINEINFO
_CREATEJOBREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_CREATEJOBREQUEST.fields_by_name['output_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_CREATEJOBREQUEST.fields_by_name['stats'].message_type = _PROCESSSTATS
_CREATEJOBREQUEST.fields_by_name['stats_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_CREATEJOBREQUEST.fields_by_name['state'].enum_type = _JOBSTATE
_CREATEJOBREQUEST.fields_by_name['started'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CREATEJOBREQUEST.fields_by_name['finished'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INSPECTJOBREQUEST.fields_by_name['job'].message_type = _JOB
_INSPECTJOBREQUEST.fields_by_name['output_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_LISTJOBREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_LISTJOBREQUEST.fields_by_name['input_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_LISTJOBREQUEST.fields_by_name['output_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_FLUSHJOBREQUEST.fields_by_name['commits'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_FLUSHJOBREQUEST.fields_by_name['to_pipelines'].message_type = _PIPELINE
_DELETEJOBREQUEST.fields_by_name['job'].message_type = _JOB
_STOPJOBREQUEST.fields_by_name['job'].message_type = _JOB
_UPDATEJOBSTATEREQUEST.fields_by_name['job'].message_type = _JOB
_UPDATEJOBSTATEREQUEST.fields_by_name['state'].enum_type = _JOBSTATE
_UPDATEJOBSTATEREQUEST.fields_by_name['stats'].message_type = _PROCESSSTATS
_GETLOGSREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_GETLOGSREQUEST.fields_by_name['job'].message_type = _JOB
_GETLOGSREQUEST.fields_by_name['datum'].message_type = _DATUM
_LOGMESSAGE.fields_by_name['data'].message_type = _INPUTFILE
_LOGMESSAGE.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_RESTARTDATUMREQUEST.fields_by_name['job'].message_type = _JOB
_INSPECTDATUMREQUEST.fields_by_name['datum'].message_type = _DATUM
_LISTDATUMREQUEST.fields_by_name['job'].message_type = _JOB
_LISTDATUMRESPONSE.fields_by_name['datum_infos'].message_type = _DATUMINFO
_LISTDATUMSTREAMRESPONSE.fields_by_name['datum_info'].message_type = _DATUMINFO
_SCHEDULINGSPEC_NODESELECTORENTRY.containing_type = _SCHEDULINGSPEC
_SCHEDULINGSPEC.fields_by_name['node_selector'].message_type = _SCHEDULINGSPEC_NODESELECTORENTRY
_CREATEPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_CREATEPIPELINEREQUEST.fields_by_name['tf_job'].message_type = _TFJOB
_CREATEPIPELINEREQUEST.fields_by_name['transform'].message_type = _TRANSFORM
_CREATEPIPELINEREQUEST.fields_by_name['parallelism_spec'].message_type = _PARALLELISMSPEC
_CREATEPIPELINEREQUEST.fields_by_name['hashtree_spec'].message_type = _HASHTREESPEC
_CREATEPIPELINEREQUEST.fields_by_name['egress'].message_type = _EGRESS
_CREATEPIPELINEREQUEST.fields_by_name['resource_requests'].message_type = _RESOURCESPEC
_CREATEPIPELINEREQUEST.fields_by_name['resource_limits'].message_type = _RESOURCESPEC
_CREATEPIPELINEREQUEST.fields_by_name['sidecar_resource_limits'].message_type = _RESOURCESPEC
_CREATEPIPELINEREQUEST.fields_by_name['input'].message_type = _INPUT
_CREATEPIPELINEREQUEST.fields_by_name['service'].message_type = _SERVICE
_CREATEPIPELINEREQUEST.fields_by_name['spout'].message_type = _SPOUT
_CREATEPIPELINEREQUEST.fields_by_name['chunk_spec'].message_type = _CHUNKSPEC
_CREATEPIPELINEREQUEST.fields_by_name['datum_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CREATEPIPELINEREQUEST.fields_by_name['job_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_CREATEPIPELINEREQUEST.fields_by_name['scheduling_spec'].message_type = _SCHEDULINGSPEC
_CREATEPIPELINEREQUEST.fields_by_name['spec_commit'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMIT
_CREATEPIPELINEREQUEST.fields_by_name['metadata'].message_type = _METADATA
_INSPECTPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_LISTPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_DELETEPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_STARTPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_STOPPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_RUNPIPELINEREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_RUNPIPELINEREQUEST.fields_by_name['provenance'].message_type = client_dot_admin_dot_v1__11_dot_pfs_dot_pfs__pb2._COMMITPROVENANCE
_RUNCRONREQUEST.fields_by_name['pipeline'].message_type = _PIPELINE
_DELETESECRETREQUEST.fields_by_name['secret'].message_type = _SECRET
_INSPECTSECRETREQUEST.fields_by_name['secret'].message_type = _SECRET
_SECRETINFO.fields_by_name['secret'].message_type = _SECRET
_SECRETINFO.fields_by_name['creation_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SECRETINFOS.fields_by_name['secret_info'].message_type = _SECRETINFO
DESCRIPTOR.message_types_by_name['SecretMount'] = _SECRETMOUNT
DESCRIPTOR.message_types_by_name['Transform'] = _TRANSFORM
DESCRIPTOR.message_types_by_name['BuildSpec'] = _BUILDSPEC
DESCRIPTOR.message_types_by_name['TFJob'] = _TFJOB
DESCRIPTOR.message_types_by_name['Egress'] = _EGRESS
DESCRIPTOR.message_types_by_name['Job'] = _JOB
DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA
DESCRIPTOR.message_types_by_name['Service'] = _SERVICE
DESCRIPTOR.message_types_by_name['Spout'] = _SPOUT
DESCRIPTOR.message_types_by_name['PFSInput'] = _PFSINPUT
DESCRIPTOR.message_types_by_name['CronInput'] = _CRONINPUT
DESCRIPTOR.message_types_by_name['GitInput'] = _GITINPUT
DESCRIPTOR.message_types_by_name['Input'] = _INPUT
DESCRIPTOR.message_types_by_name['JobInput'] = _JOBINPUT
DESCRIPTOR.message_types_by_name['ParallelismSpec'] = _PARALLELISMSPEC
DESCRIPTOR.message_types_by_name['HashtreeSpec'] = _HASHTREESPEC
DESCRIPTOR.message_types_by_name['InputFile'] = _INPUTFILE
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
DESCRIPTOR.message_types_by_name['DatumInfo'] = _DATUMINFO
DESCRIPTOR.message_types_by_name['Aggregate'] = _AGGREGATE
DESCRIPTOR.message_types_by_name['ProcessStats'] = _PROCESSSTATS
DESCRIPTOR.message_types_by_name['AggregateProcessStats'] = _AGGREGATEPROCESSSTATS
DESCRIPTOR.message_types_by_name['WorkerStatus'] = _WORKERSTATUS
DESCRIPTOR.message_types_by_name['ResourceSpec'] = _RESOURCESPEC
DESCRIPTOR.message_types_by_name['GPUSpec'] = _GPUSPEC
DESCRIPTOR.message_types_by_name['EtcdJobInfo'] = _ETCDJOBINFO
DESCRIPTOR.message_types_by_name['JobInfo'] = _JOBINFO
DESCRIPTOR.message_types_by_name['Worker'] = _WORKER
DESCRIPTOR.message_types_by_name['JobInfos'] = _JOBINFOS
DESCRIPTOR.message_types_by_name['Pipeline'] = _PIPELINE
DESCRIPTOR.message_types_by_name['EtcdPipelineInfo'] = _ETCDPIPELINEINFO
DESCRIPTOR.message_types_by_name['PipelineInfo'] = _PIPELINEINFO
DESCRIPTOR.message_types_by_name['PipelineInfos'] = _PIPELINEINFOS
DESCRIPTOR.message_types_by_name['CreateJobRequest'] = _CREATEJOBREQUEST
DESCRIPTOR.message_types_by_name['InspectJobRequest'] = _INSPECTJOBREQUEST
DESCRIPTOR.message_types_by_name['ListJobRequest'] = _LISTJOBREQUEST
DESCRIPTOR.message_types_by_name['FlushJobRequest'] = _FLUSHJOBREQUEST
DESCRIPTOR.message_types_by_name['DeleteJobRequest'] = _DELETEJOBREQUEST
DESCRIPTOR.message_types_by_name['StopJobRequest'] = _STOPJOBREQUEST
DESCRIPTOR.message_types_by_name['UpdateJobStateRequest'] = _UPDATEJOBSTATEREQUEST
DESCRIPTOR.message_types_by_name['GetLogsRequest'] = _GETLOGSREQUEST
DESCRIPTOR.message_types_by_name['LogMessage'] = _LOGMESSAGE
DESCRIPTOR.message_types_by_name['RestartDatumRequest'] = _RESTARTDATUMREQUEST
DESCRIPTOR.message_types_by_name['InspectDatumRequest'] = _INSPECTDATUMREQUEST
DESCRIPTOR.message_types_by_name['ListDatumRequest'] = _LISTDATUMREQUEST
DESCRIPTOR.message_types_by_name['ListDatumResponse'] = _LISTDATUMRESPONSE
DESCRIPTOR.message_types_by_name['ListDatumStreamResponse'] = _LISTDATUMSTREAMRESPONSE
DESCRIPTOR.message_types_by_name['ChunkSpec'] = _CHUNKSPEC
DESCRIPTOR.message_types_by_name['SchedulingSpec'] = _SCHEDULINGSPEC
DESCRIPTOR.message_types_by_name['CreatePipelineRequest'] = _CREATEPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['InspectPipelineRequest'] = _INSPECTPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['ListPipelineRequest'] = _LISTPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['DeletePipelineRequest'] = _DELETEPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['StartPipelineRequest'] = _STARTPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['StopPipelineRequest'] = _STOPPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['RunPipelineRequest'] = _RUNPIPELINEREQUEST
DESCRIPTOR.message_types_by_name['RunCronRequest'] = _RUNCRONREQUEST
DESCRIPTOR.message_types_by_name['CreateSecretRequest'] = _CREATESECRETREQUEST
DESCRIPTOR.message_types_by_name['DeleteSecretRequest'] = _DELETESECRETREQUEST
DESCRIPTOR.message_types_by_name['InspectSecretRequest'] = _INSPECTSECRETREQUEST
DESCRIPTOR.message_types_by_name['Secret'] = _SECRET
DESCRIPTOR.message_types_by_name['SecretInfo'] = _SECRETINFO
DESCRIPTOR.message_types_by_name['SecretInfos'] = _SECRETINFOS
DESCRIPTOR.message_types_by_name['GarbageCollectRequest'] = _GARBAGECOLLECTREQUEST
DESCRIPTOR.message_types_by_name['GarbageCollectResponse'] = _GARBAGECOLLECTRESPONSE
DESCRIPTOR.message_types_by_name['ActivateAuthRequest'] = _ACTIVATEAUTHREQUEST
DESCRIPTOR.message_types_by_name['ActivateAuthResponse'] = _ACTIVATEAUTHRESPONSE
DESCRIPTOR.enum_types_by_name['JobState'] = _JOBSTATE
DESCRIPTOR.enum_types_by_name['DatumState'] = _DATUMSTATE
DESCRIPTOR.enum_types_by_name['WorkerState'] = _WORKERSTATE
DESCRIPTOR.enum_types_by_name['PipelineState'] = _PIPELINESTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SecretMount = _reflection.GeneratedProtocolMessageType('SecretMount', (_message.Message,), {
'DESCRIPTOR' : _SECRETMOUNT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.SecretMount)
})
_sym_db.RegisterMessage(SecretMount)
Transform = _reflection.GeneratedProtocolMessageType('Transform', (_message.Message,), {
'EnvEntry' : _reflection.GeneratedProtocolMessageType('EnvEntry', (_message.Message,), {
'DESCRIPTOR' : _TRANSFORM_ENVENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Transform.EnvEntry)
})
,
'DESCRIPTOR' : _TRANSFORM,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Transform)
})
_sym_db.RegisterMessage(Transform)
_sym_db.RegisterMessage(Transform.EnvEntry)
BuildSpec = _reflection.GeneratedProtocolMessageType('BuildSpec', (_message.Message,), {
'DESCRIPTOR' : _BUILDSPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.BuildSpec)
})
_sym_db.RegisterMessage(BuildSpec)
TFJob = _reflection.GeneratedProtocolMessageType('TFJob', (_message.Message,), {
'DESCRIPTOR' : _TFJOB,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.TFJob)
})
_sym_db.RegisterMessage(TFJob)
Egress = _reflection.GeneratedProtocolMessageType('Egress', (_message.Message,), {
'DESCRIPTOR' : _EGRESS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Egress)
})
_sym_db.RegisterMessage(Egress)
Job = _reflection.GeneratedProtocolMessageType('Job', (_message.Message,), {
'DESCRIPTOR' : _JOB,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Job)
})
_sym_db.RegisterMessage(Job)
Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'AnnotationsEntry' : _reflection.GeneratedProtocolMessageType('AnnotationsEntry', (_message.Message,), {
'DESCRIPTOR' : _METADATA_ANNOTATIONSENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Metadata.AnnotationsEntry)
})
,
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _METADATA_LABELSENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Metadata.LabelsEntry)
})
,
'DESCRIPTOR' : _METADATA,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Metadata)
})
_sym_db.RegisterMessage(Metadata)
_sym_db.RegisterMessage(Metadata.AnnotationsEntry)
_sym_db.RegisterMessage(Metadata.LabelsEntry)
Service = _reflection.GeneratedProtocolMessageType('Service', (_message.Message,), {
'DESCRIPTOR' : _SERVICE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Service)
})
_sym_db.RegisterMessage(Service)
Spout = _reflection.GeneratedProtocolMessageType('Spout', (_message.Message,), {
'DESCRIPTOR' : _SPOUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Spout)
})
_sym_db.RegisterMessage(Spout)
PFSInput = _reflection.GeneratedProtocolMessageType('PFSInput', (_message.Message,), {
'DESCRIPTOR' : _PFSINPUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.PFSInput)
})
_sym_db.RegisterMessage(PFSInput)
CronInput = _reflection.GeneratedProtocolMessageType('CronInput', (_message.Message,), {
'DESCRIPTOR' : _CRONINPUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.CronInput)
})
_sym_db.RegisterMessage(CronInput)
GitInput = _reflection.GeneratedProtocolMessageType('GitInput', (_message.Message,), {
'DESCRIPTOR' : _GITINPUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.GitInput)
})
_sym_db.RegisterMessage(GitInput)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), {
'DESCRIPTOR' : _INPUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Input)
})
_sym_db.RegisterMessage(Input)
JobInput = _reflection.GeneratedProtocolMessageType('JobInput', (_message.Message,), {
'DESCRIPTOR' : _JOBINPUT,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.JobInput)
})
_sym_db.RegisterMessage(JobInput)
ParallelismSpec = _reflection.GeneratedProtocolMessageType('ParallelismSpec', (_message.Message,), {
'DESCRIPTOR' : _PARALLELISMSPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ParallelismSpec)
})
_sym_db.RegisterMessage(ParallelismSpec)
HashtreeSpec = _reflection.GeneratedProtocolMessageType('HashtreeSpec', (_message.Message,), {
'DESCRIPTOR' : _HASHTREESPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.HashtreeSpec)
})
_sym_db.RegisterMessage(HashtreeSpec)
InputFile = _reflection.GeneratedProtocolMessageType('InputFile', (_message.Message,), {
'DESCRIPTOR' : _INPUTFILE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.InputFile)
})
_sym_db.RegisterMessage(InputFile)
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), {
'DESCRIPTOR' : _DATUM,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Datum)
})
_sym_db.RegisterMessage(Datum)
DatumInfo = _reflection.GeneratedProtocolMessageType('DatumInfo', (_message.Message,), {
'DESCRIPTOR' : _DATUMINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.DatumInfo)
})
_sym_db.RegisterMessage(DatumInfo)
Aggregate = _reflection.GeneratedProtocolMessageType('Aggregate', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Aggregate)
})
_sym_db.RegisterMessage(Aggregate)
ProcessStats = _reflection.GeneratedProtocolMessageType('ProcessStats', (_message.Message,), {
'DESCRIPTOR' : _PROCESSSTATS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ProcessStats)
})
_sym_db.RegisterMessage(ProcessStats)
AggregateProcessStats = _reflection.GeneratedProtocolMessageType('AggregateProcessStats', (_message.Message,), {
'DESCRIPTOR' : _AGGREGATEPROCESSSTATS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.AggregateProcessStats)
})
_sym_db.RegisterMessage(AggregateProcessStats)
WorkerStatus = _reflection.GeneratedProtocolMessageType('WorkerStatus', (_message.Message,), {
'DESCRIPTOR' : _WORKERSTATUS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.WorkerStatus)
})
_sym_db.RegisterMessage(WorkerStatus)
ResourceSpec = _reflection.GeneratedProtocolMessageType('ResourceSpec', (_message.Message,), {
'DESCRIPTOR' : _RESOURCESPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ResourceSpec)
})
_sym_db.RegisterMessage(ResourceSpec)
GPUSpec = _reflection.GeneratedProtocolMessageType('GPUSpec', (_message.Message,), {
'DESCRIPTOR' : _GPUSPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.GPUSpec)
})
_sym_db.RegisterMessage(GPUSpec)
EtcdJobInfo = _reflection.GeneratedProtocolMessageType('EtcdJobInfo', (_message.Message,), {
'DESCRIPTOR' : _ETCDJOBINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.EtcdJobInfo)
})
_sym_db.RegisterMessage(EtcdJobInfo)
JobInfo = _reflection.GeneratedProtocolMessageType('JobInfo', (_message.Message,), {
'DESCRIPTOR' : _JOBINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.JobInfo)
})
_sym_db.RegisterMessage(JobInfo)
Worker = _reflection.GeneratedProtocolMessageType('Worker', (_message.Message,), {
'DESCRIPTOR' : _WORKER,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Worker)
})
_sym_db.RegisterMessage(Worker)
JobInfos = _reflection.GeneratedProtocolMessageType('JobInfos', (_message.Message,), {
'DESCRIPTOR' : _JOBINFOS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.JobInfos)
})
_sym_db.RegisterMessage(JobInfos)
Pipeline = _reflection.GeneratedProtocolMessageType('Pipeline', (_message.Message,), {
'DESCRIPTOR' : _PIPELINE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Pipeline)
})
_sym_db.RegisterMessage(Pipeline)
EtcdPipelineInfo = _reflection.GeneratedProtocolMessageType('EtcdPipelineInfo', (_message.Message,), {
'JobCountsEntry' : _reflection.GeneratedProtocolMessageType('JobCountsEntry', (_message.Message,), {
'DESCRIPTOR' : _ETCDPIPELINEINFO_JOBCOUNTSENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.EtcdPipelineInfo.JobCountsEntry)
})
,
'DESCRIPTOR' : _ETCDPIPELINEINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.EtcdPipelineInfo)
})
_sym_db.RegisterMessage(EtcdPipelineInfo)
_sym_db.RegisterMessage(EtcdPipelineInfo.JobCountsEntry)
PipelineInfo = _reflection.GeneratedProtocolMessageType('PipelineInfo', (_message.Message,), {
'JobCountsEntry' : _reflection.GeneratedProtocolMessageType('JobCountsEntry', (_message.Message,), {
'DESCRIPTOR' : _PIPELINEINFO_JOBCOUNTSENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.PipelineInfo.JobCountsEntry)
})
,
'DESCRIPTOR' : _PIPELINEINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.PipelineInfo)
})
_sym_db.RegisterMessage(PipelineInfo)
_sym_db.RegisterMessage(PipelineInfo.JobCountsEntry)
PipelineInfos = _reflection.GeneratedProtocolMessageType('PipelineInfos', (_message.Message,), {
'DESCRIPTOR' : _PIPELINEINFOS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.PipelineInfos)
})
_sym_db.RegisterMessage(PipelineInfos)
CreateJobRequest = _reflection.GeneratedProtocolMessageType('CreateJobRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.CreateJobRequest)
})
_sym_db.RegisterMessage(CreateJobRequest)
InspectJobRequest = _reflection.GeneratedProtocolMessageType('InspectJobRequest', (_message.Message,), {
'DESCRIPTOR' : _INSPECTJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.InspectJobRequest)
})
_sym_db.RegisterMessage(InspectJobRequest)
ListJobRequest = _reflection.GeneratedProtocolMessageType('ListJobRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ListJobRequest)
})
_sym_db.RegisterMessage(ListJobRequest)
FlushJobRequest = _reflection.GeneratedProtocolMessageType('FlushJobRequest', (_message.Message,), {
'DESCRIPTOR' : _FLUSHJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.FlushJobRequest)
})
_sym_db.RegisterMessage(FlushJobRequest)
DeleteJobRequest = _reflection.GeneratedProtocolMessageType('DeleteJobRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.DeleteJobRequest)
})
_sym_db.RegisterMessage(DeleteJobRequest)
StopJobRequest = _reflection.GeneratedProtocolMessageType('StopJobRequest', (_message.Message,), {
'DESCRIPTOR' : _STOPJOBREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.StopJobRequest)
})
_sym_db.RegisterMessage(StopJobRequest)
UpdateJobStateRequest = _reflection.GeneratedProtocolMessageType('UpdateJobStateRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEJOBSTATEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.UpdateJobStateRequest)
})
_sym_db.RegisterMessage(UpdateJobStateRequest)
GetLogsRequest = _reflection.GeneratedProtocolMessageType('GetLogsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETLOGSREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.GetLogsRequest)
})
_sym_db.RegisterMessage(GetLogsRequest)
LogMessage = _reflection.GeneratedProtocolMessageType('LogMessage', (_message.Message,), {
'DESCRIPTOR' : _LOGMESSAGE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.LogMessage)
})
_sym_db.RegisterMessage(LogMessage)
RestartDatumRequest = _reflection.GeneratedProtocolMessageType('RestartDatumRequest', (_message.Message,), {
'DESCRIPTOR' : _RESTARTDATUMREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.RestartDatumRequest)
})
_sym_db.RegisterMessage(RestartDatumRequest)
InspectDatumRequest = _reflection.GeneratedProtocolMessageType('InspectDatumRequest', (_message.Message,), {
'DESCRIPTOR' : _INSPECTDATUMREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.InspectDatumRequest)
})
_sym_db.RegisterMessage(InspectDatumRequest)
ListDatumRequest = _reflection.GeneratedProtocolMessageType('ListDatumRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTDATUMREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ListDatumRequest)
})
_sym_db.RegisterMessage(ListDatumRequest)
ListDatumResponse = _reflection.GeneratedProtocolMessageType('ListDatumResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTDATUMRESPONSE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ListDatumResponse)
})
_sym_db.RegisterMessage(ListDatumResponse)
ListDatumStreamResponse = _reflection.GeneratedProtocolMessageType('ListDatumStreamResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTDATUMSTREAMRESPONSE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ListDatumStreamResponse)
})
_sym_db.RegisterMessage(ListDatumStreamResponse)
ChunkSpec = _reflection.GeneratedProtocolMessageType('ChunkSpec', (_message.Message,), {
'DESCRIPTOR' : _CHUNKSPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ChunkSpec)
})
_sym_db.RegisterMessage(ChunkSpec)
SchedulingSpec = _reflection.GeneratedProtocolMessageType('SchedulingSpec', (_message.Message,), {
'NodeSelectorEntry' : _reflection.GeneratedProtocolMessageType('NodeSelectorEntry', (_message.Message,), {
'DESCRIPTOR' : _SCHEDULINGSPEC_NODESELECTORENTRY,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.SchedulingSpec.NodeSelectorEntry)
})
,
'DESCRIPTOR' : _SCHEDULINGSPEC,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.SchedulingSpec)
})
_sym_db.RegisterMessage(SchedulingSpec)
_sym_db.RegisterMessage(SchedulingSpec.NodeSelectorEntry)
CreatePipelineRequest = _reflection.GeneratedProtocolMessageType('CreatePipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.CreatePipelineRequest)
})
_sym_db.RegisterMessage(CreatePipelineRequest)
InspectPipelineRequest = _reflection.GeneratedProtocolMessageType('InspectPipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _INSPECTPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.InspectPipelineRequest)
})
_sym_db.RegisterMessage(InspectPipelineRequest)
ListPipelineRequest = _reflection.GeneratedProtocolMessageType('ListPipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ListPipelineRequest)
})
_sym_db.RegisterMessage(ListPipelineRequest)
DeletePipelineRequest = _reflection.GeneratedProtocolMessageType('DeletePipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.DeletePipelineRequest)
})
_sym_db.RegisterMessage(DeletePipelineRequest)
StartPipelineRequest = _reflection.GeneratedProtocolMessageType('StartPipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _STARTPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.StartPipelineRequest)
})
_sym_db.RegisterMessage(StartPipelineRequest)
StopPipelineRequest = _reflection.GeneratedProtocolMessageType('StopPipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _STOPPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.StopPipelineRequest)
})
_sym_db.RegisterMessage(StopPipelineRequest)
RunPipelineRequest = _reflection.GeneratedProtocolMessageType('RunPipelineRequest', (_message.Message,), {
'DESCRIPTOR' : _RUNPIPELINEREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.RunPipelineRequest)
})
_sym_db.RegisterMessage(RunPipelineRequest)
RunCronRequest = _reflection.GeneratedProtocolMessageType('RunCronRequest', (_message.Message,), {
'DESCRIPTOR' : _RUNCRONREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.RunCronRequest)
})
_sym_db.RegisterMessage(RunCronRequest)
CreateSecretRequest = _reflection.GeneratedProtocolMessageType('CreateSecretRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATESECRETREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.CreateSecretRequest)
})
_sym_db.RegisterMessage(CreateSecretRequest)
DeleteSecretRequest = _reflection.GeneratedProtocolMessageType('DeleteSecretRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETESECRETREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.DeleteSecretRequest)
})
_sym_db.RegisterMessage(DeleteSecretRequest)
InspectSecretRequest = _reflection.GeneratedProtocolMessageType('InspectSecretRequest', (_message.Message,), {
'DESCRIPTOR' : _INSPECTSECRETREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.InspectSecretRequest)
})
_sym_db.RegisterMessage(InspectSecretRequest)
Secret = _reflection.GeneratedProtocolMessageType('Secret', (_message.Message,), {
'DESCRIPTOR' : _SECRET,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.Secret)
})
_sym_db.RegisterMessage(Secret)
SecretInfo = _reflection.GeneratedProtocolMessageType('SecretInfo', (_message.Message,), {
'DESCRIPTOR' : _SECRETINFO,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.SecretInfo)
})
_sym_db.RegisterMessage(SecretInfo)
SecretInfos = _reflection.GeneratedProtocolMessageType('SecretInfos', (_message.Message,), {
'DESCRIPTOR' : _SECRETINFOS,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.SecretInfos)
})
_sym_db.RegisterMessage(SecretInfos)
GarbageCollectRequest = _reflection.GeneratedProtocolMessageType('GarbageCollectRequest', (_message.Message,), {
'DESCRIPTOR' : _GARBAGECOLLECTREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.GarbageCollectRequest)
})
_sym_db.RegisterMessage(GarbageCollectRequest)
GarbageCollectResponse = _reflection.GeneratedProtocolMessageType('GarbageCollectResponse', (_message.Message,), {
'DESCRIPTOR' : _GARBAGECOLLECTRESPONSE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.GarbageCollectResponse)
})
_sym_db.RegisterMessage(GarbageCollectResponse)
ActivateAuthRequest = _reflection.GeneratedProtocolMessageType('ActivateAuthRequest', (_message.Message,), {
'DESCRIPTOR' : _ACTIVATEAUTHREQUEST,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ActivateAuthRequest)
})
_sym_db.RegisterMessage(ActivateAuthRequest)
ActivateAuthResponse = _reflection.GeneratedProtocolMessageType('ActivateAuthResponse', (_message.Message,), {
'DESCRIPTOR' : _ACTIVATEAUTHRESPONSE,
'__module__' : 'client.admin.v1_11.pps.pps_pb2'
# @@protoc_insertion_point(class_scope:pps_1_11.ActivateAuthResponse)
})
_sym_db.RegisterMessage(ActivateAuthResponse)
DESCRIPTOR._options = None
_TRANSFORM_ENVENTRY._options = None
_METADATA_ANNOTATIONSENTRY._options = None
_METADATA_LABELSENTRY._options = None
_ETCDPIPELINEINFO_JOBCOUNTSENTRY._options = None
_PIPELINEINFO_JOBCOUNTSENTRY._options = None
_SCHEDULINGSPEC_NODESELECTORENTRY._options = None
_API = _descriptor.ServiceDescriptor(
name='API',
full_name='pps_1_11.API',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=11870,
serialized_end=13857,
methods=[
_descriptor.MethodDescriptor(
name='CreateJob',
full_name='pps_1_11.API.CreateJob',
index=0,
containing_service=None,
input_type=_CREATEJOBREQUEST,
output_type=_JOB,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='InspectJob',
full_name='pps_1_11.API.InspectJob',
index=1,
containing_service=None,
input_type=_INSPECTJOBREQUEST,
output_type=_JOBINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListJob',
full_name='pps_1_11.API.ListJob',
index=2,
containing_service=None,
input_type=_LISTJOBREQUEST,
output_type=_JOBINFOS,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListJobStream',
full_name='pps_1_11.API.ListJobStream',
index=3,
containing_service=None,
input_type=_LISTJOBREQUEST,
output_type=_JOBINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='FlushJob',
full_name='pps_1_11.API.FlushJob',
index=4,
containing_service=None,
input_type=_FLUSHJOBREQUEST,
output_type=_JOBINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='DeleteJob',
full_name='pps_1_11.API.DeleteJob',
index=5,
containing_service=None,
input_type=_DELETEJOBREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='StopJob',
full_name='pps_1_11.API.StopJob',
index=6,
containing_service=None,
input_type=_STOPJOBREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='InspectDatum',
full_name='pps_1_11.API.InspectDatum',
index=7,
containing_service=None,
input_type=_INSPECTDATUMREQUEST,
output_type=_DATUMINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListDatum',
full_name='pps_1_11.API.ListDatum',
index=8,
containing_service=None,
input_type=_LISTDATUMREQUEST,
output_type=_LISTDATUMRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListDatumStream',
full_name='pps_1_11.API.ListDatumStream',
index=9,
containing_service=None,
input_type=_LISTDATUMREQUEST,
output_type=_LISTDATUMSTREAMRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='RestartDatum',
full_name='pps_1_11.API.RestartDatum',
index=10,
containing_service=None,
input_type=_RESTARTDATUMREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='CreatePipeline',
full_name='pps_1_11.API.CreatePipeline',
index=11,
containing_service=None,
input_type=_CREATEPIPELINEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='InspectPipeline',
full_name='pps_1_11.API.InspectPipeline',
index=12,
containing_service=None,
input_type=_INSPECTPIPELINEREQUEST,
output_type=_PIPELINEINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListPipeline',
full_name='pps_1_11.API.ListPipeline',
index=13,
containing_service=None,
input_type=_LISTPIPELINEREQUEST,
output_type=_PIPELINEINFOS,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='DeletePipeline',
full_name='pps_1_11.API.DeletePipeline',
index=14,
containing_service=None,
input_type=_DELETEPIPELINEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='StartPipeline',
full_name='pps_1_11.API.StartPipeline',
index=15,
containing_service=None,
input_type=_STARTPIPELINEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='StopPipeline',
full_name='pps_1_11.API.StopPipeline',
index=16,
containing_service=None,
input_type=_STOPPIPELINEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='RunPipeline',
full_name='pps_1_11.API.RunPipeline',
index=17,
containing_service=None,
input_type=_RUNPIPELINEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='RunCron',
full_name='pps_1_11.API.RunCron',
index=18,
containing_service=None,
input_type=_RUNCRONREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='CreateSecret',
full_name='pps_1_11.API.CreateSecret',
index=19,
containing_service=None,
input_type=_CREATESECRETREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='DeleteSecret',
full_name='pps_1_11.API.DeleteSecret',
index=20,
containing_service=None,
input_type=_DELETESECRETREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ListSecret',
full_name='pps_1_11.API.ListSecret',
index=21,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=_SECRETINFOS,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='InspectSecret',
full_name='pps_1_11.API.InspectSecret',
index=22,
containing_service=None,
input_type=_INSPECTSECRETREQUEST,
output_type=_SECRETINFO,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='DeleteAll',
full_name='pps_1_11.API.DeleteAll',
index=23,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetLogs',
full_name='pps_1_11.API.GetLogs',
index=24,
containing_service=None,
input_type=_GETLOGSREQUEST,
output_type=_LOGMESSAGE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GarbageCollect',
full_name='pps_1_11.API.GarbageCollect',
index=25,
containing_service=None,
input_type=_GARBAGECOLLECTREQUEST,
output_type=_GARBAGECOLLECTRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='ActivateAuth',
full_name='pps_1_11.API.ActivateAuth',
index=26,
containing_service=None,
input_type=_ACTIVATEAUTHREQUEST,
output_type=_ACTIVATEAUTHRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='UpdateJobState',
full_name='pps_1_11.API.UpdateJobState',
index=27,
containing_service=None,
input_type=_UPDATEJOBSTATEREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_API)
DESCRIPTOR.services_by_name['API'] = _API
# @@protoc_insertion_point(module_scope)
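# Example (not part of the protoc-generated output): the classes registered above
# are ordinary protobuf messages, so they can be built with keyword arguments and
# round-tripped through the standard wire format.
def _example_garbage_collect_roundtrip():
    # GarbageCollectRequest carries a single int64 field, memory_bytes (see its
    # descriptor above), which makes it a convenient minimal example.
    request = GarbageCollectRequest(memory_bytes=64 * 1024 * 1024)
    wire_bytes = request.SerializeToString()
    parsed = GarbageCollectRequest()
    parsed.ParseFromString(wire_bytes)
    return parsed.memory_bytes  # 67108864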
| 43.526932 | 24,292 | 0.747658 |
db65997cc151952b993c25b2453f0752eaabf751 | 4,515 | py | Python | test/ci/test_jenkins_testplan_dispatch_for_pr.py | bogner/apple-llvm-infrastructure-tools | 6ff7ef700df19370e48b3ddd6929d280f011f5a3 | [
"Apache-2.0"
] | null | null | null | test/ci/test_jenkins_testplan_dispatch_for_pr.py | bogner/apple-llvm-infrastructure-tools | 6ff7ef700df19370e48b3ddd6929d280f011f5a3 | [
"Apache-2.0"
] | null | null | null | test/ci/test_jenkins_testplan_dispatch_for_pr.py | bogner/apple-llvm-infrastructure-tools | 6ff7ef700df19370e48b3ddd6929d280f011f5a3 | [
"Apache-2.0"
] | null | null | null | """
Tests for the config files.
"""
import os
import pytest
from git_apple_llvm.git_tools import git
from git_apple_llvm.config import write_config
import git_apple_llvm.ci.jenkins_ci as jenkins
import git_apple_llvm.ci.test_plans as test_plans
import json
import httpretty
TEST_API_URL = 'https://test.foo/bar'
@pytest.fixture(scope='session')
def ci_tool_git_repo(tmp_path_factory) -> str:
path = str(tmp_path_factory.mktemp('simple-ci-tool-dir'))
test_plans = {
"test-plans": {
"check-llvm": {
"description": "Runs lit and unit tests for LLVM",
"infer-from-changes": [
"llvm"
],
"ci-jobs": "pull-request-RA",
"params": {
"monorepo_projects": "",
"test_targets": "check-llvm"
}
}
}
}
ci_jobs = {
"type": "jenkins",
"url": TEST_API_URL,
"jobs": [
{
"name": "a-RA",
"url": TEST_API_URL + "/view/monorepo/job/pr-build-test",
"params": {
"build_variant": "a"
}
},
{
"name": "b-RA",
"url": TEST_API_URL + "/view/monorepo/job/pr-build-test",
"params": {
"build_variant": "b"
}
}
]
}
# Create the repo with the CI and test plan configs.
git('init', git_dir=path)
os.mkdir(os.path.join(path, 'apple-llvm-config'))
with open(os.path.join(path, 'apple-llvm-config', 'ci-test-plans.json'), 'w') as f:
f.write(json.dumps(test_plans))
os.mkdir(os.path.join(path, 'apple-llvm-config/ci-jobs'))
with open(os.path.join(path, 'apple-llvm-config/ci-jobs', 'pull-request-RA.json'), 'w') as f:
f.write(json.dumps(ci_jobs))
git('add', 'apple-llvm-config', git_dir=path)
git('commit', '-m', 'ci config', git_dir=path)
return path
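# The fixture above produces the on-disk layout that git_apple_llvm.ci reads its
# configuration from:
#   apple-llvm-config/ci-test-plans.json           - the "check-llvm" test plan
#   apple-llvm-config/ci-jobs/pull-request-RA.json - the Jenkins jobs ("a-RA", "b-RA")
#                                                    that the plan fans out to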
@pytest.fixture(scope='function')
def cd_to_pr_tool_repo(ci_tool_git_repo: str):
prev = os.getcwd()
os.chdir(ci_tool_git_repo)
yield
os.chdir(prev)
@pytest.fixture(scope='function')
def config_dir(tmp_path):
dir = str(tmp_path / 'git-apple-llvm')
os.environ['GIT_APPLE_LLVM_CONFIG_DIR'] = dir
yield dir
del os.environ['GIT_APPLE_LLVM_CONFIG_DIR']
@httpretty.activate()
def test_pr_tool_list(config_dir: str, cd_to_pr_tool_repo, capfd):
write_config('jenkins-test.foo-bar', '{"username": "user", "token": "123"}')
def request_callback(request, uri, response_headers):
return [201, response_headers, '']
url1 = f'{TEST_API_URL}/view/monorepo/job/pr-build-test/buildWithParameters?token=GIT_APPLE_LLVM'
url1 += '&cause=started%20by%20user%20using%20git%20apple-llvm&pullRequestID=9&monorepo_projects='
url1 += '&test_targets=check-llvm&build_variant=a'
httpretty.register_uri(httpretty.POST, url1,
body=request_callback,
match_querystring=True)
url2 = f'{TEST_API_URL}/view/monorepo/job/pr-build-test/buildWithParameters?token=GIT_APPLE_LLVM'
url2 += '&cause=started%20by%20user%20using%20git%20apple-llvm&pullRequestID=9&monorepo_projects='
url2 += '&test_targets=check-llvm&build_variant=b'
httpretty.register_uri(httpretty.POST, url2,
body=request_callback,
match_querystring=True)
tp = test_plans.TestPlanDispatcher()
tp.dispatch_test_plan_for_pull_request('check-llvm', 9)
out, err = capfd.readouterr()
assert out == """✅ requested check-llvm [a-RA] ci job for PR #9
✅ requested check-llvm [b-RA] ci job for PR #9
"""
def request_callback_err(request, uri, response_headers):
return [402, response_headers, 'problem on server']
url1 = f'{TEST_API_URL}/view/monorepo/job/pr-build-test/buildWithParameters?token=GIT_APPLE_LLVM'
url1 += '&cause=started%20by%20user%20using%20git%20apple-llvm&pullRequestID=1&monorepo_projects='
url1 += '&test_targets=check-llvm&build_variant=a'
httpretty.register_uri(httpretty.POST, url1,
body=request_callback_err,
match_querystring=True)
with pytest.raises(jenkins.CIDispatchError) as err:
tp.dispatch_test_plan_for_pull_request('check-llvm', 1)
assert err.value.status_code == 402
assert err.value.error == 'problem on server'
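# Both scenarios run entirely against httpretty's intercepted endpoints, so no live
# Jenkins instance is needed; the module can be exercised on its own with e.g.
#   pytest test/ci/test_jenkins_testplan_dispatch_for_pr.py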
| 35.833333 | 102 | 0.614839 |
5841d95a4ae58d7ebf19e7d8e32b1740f88edc2e | 1,559 | py | Python | configs/urban3d_optimal_d.py | asyrovprog/cs230project | cc9aebe256393b39140f69a2f31d1e9688c1e3d4 | [
"MIT"
] | 1 | 2021-04-28T08:29:28.000Z | 2021-04-28T08:29:28.000Z | configs/urban3d_optimal_d.py | asyrovprog/cs230project | cc9aebe256393b39140f69a2f31d1e9688c1e3d4 | [
"MIT"
] | null | null | null | configs/urban3d_optimal_d.py | asyrovprog/cs230project | cc9aebe256393b39140f69a2f31d1e9688c1e3d4 | [
"MIT"
] | 1 | 2021-01-27T21:52:07.000Z | 2021-01-27T21:52:07.000Z | from mrcnn.config import Config
from .constants import *
class Urban3dOptimalD(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "urban3d_optimal_d"
BACKBONE = 'resnet50'
    # Images per GPU is taken from constants (GLOB_IMAGES_PER_GPU); adjust down for GPUs with less memory.
IMAGES_PER_GPU = GLOB_IMAGES_PER_GPU
    # Number of GPUs to train on (default is 1)
GPU_COUNT = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # background + building footprints
BATCH_SIZE = GLOB_BATCH_SIZE
# Number of training steps per epoch
STEPS_PER_EPOCH = 128
    # Skip detections below the confidence threshold defined in constants (GLOB_DETECTION_MIN_CONFIDENCE)
DETECTION_MIN_CONFIDENCE = GLOB_DETECTION_MIN_CONFIDENCE
IMAGE_DIM = GLOB_IMAGE_SIZE
IMAGE_MIN_DIM = IMAGE_DIM
IMAGE_MAX_DIM = IMAGE_DIM
TRAIN_ROIS_PER_IMAGE = 100
MAX_GT_INSTANCES = 50
LEARNING_RATE = 0.001
    # First we train only the heads while the rest of the network is frozen, then we fine-tune progressively deeper layers at lower learning rates (see training.py)
LEARNING_RATES = [LEARNING_RATE, LEARNING_RATE / 5, LEARNING_RATE / 10]
LEARNING_LAYERS = ["heads", "3+", "all"]
LEARNING_EPOCHS = [5, 20, 50]
IMAGE_TYPE = "D"
IMAGE_CHANNEL_COUNT = 1
MEAN_PIXEL = 1
TRAIN_CONV1 = True
EXT_USE_AUGMENTATION = GLOB_USE_AUGMENTATION
class Urban3dOptimalDInference(Urban3dOptimalD):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
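# A minimal sketch (assuming the Matterport Mask R-CNN `model.train()` API that
# `mrcnn.config.Config` belongs to) of how the staged schedule above might be
# consumed; the project's actual training.py may differ.
def train_in_stages(model, dataset_train, dataset_val, config, augmentation=None):
    # Walk the (learning rate, layer selector, target epoch) triples in order:
    # heads only first, then ResNet stage 3 and up, then the whole network,
    # each stage at a lower learning rate. Epoch values are cumulative targets.
    for lr, layers, epochs in zip(config.LEARNING_RATES,
                                  config.LEARNING_LAYERS,
                                  config.LEARNING_EPOCHS):
        model.train(dataset_train, dataset_val,
                    learning_rate=lr,
                    epochs=epochs,
                    layers=layers,
                    augmentation=augmentation if config.EXT_USE_AUGMENTATION else None)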
| 26.87931 | 115 | 0.709429 |
b8080861cdb7156e5315a0757ce35452933f00fd | 1,762 | py | Python | setup.py | Saitama-chan/django-qsessions | 2494a3b459f0611be86faea5322261483627c0e2 | [
"MIT"
] | null | null | null | setup.py | Saitama-chan/django-qsessions | 2494a3b459f0611be86faea5322261483627c0e2 | [
"MIT"
] | null | null | null | setup.py | Saitama-chan/django-qsessions | 2494a3b459f0611be86faea5322261483627c0e2 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
dev_requirements = [
'geoip2==3.0.0', # for testing GeoIP2
'pytest',
'pytest-cov',
'pytest-django',
]
setup(
name='django-qsessions',
version='0.4.1',
description='Extended session backends for Django',
long_description=README,
author='Mohammad Javad Naderi',
url='https://github.com/QueraTeam/django-qsessions',
download_url='https://pypi.python.org/pypi/django-qsessions',
license='MIT',
packages=find_packages('.', include=('qsessions', 'qsessions.*')),
include_package_data=True,
install_requires=[
'Django>=1.10',
'user-agents>=1.1.0',
'django-ipware>=1.1.5',
],
extras_require={'dev': dev_requirements},
tests_require=dev_requirements,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Security',
],
)
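# The test dependencies declared above are exposed as an extra, so a development
# install can pull them in with e.g. `pip install -e ".[dev]"` (pytest,
# pytest-django, pytest-cov and geoip2).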
| 32.036364 | 78 | 0.6084 |
50aa059b284e8c1f29bb56d43c68583040723a24 | 7,349 | py | Python | script/train.py | PatxiofromAlphensign/web_DataUtils | ac4fa1ea352f109f6630d1bf642db29c0cf76d2d | [
"MIT"
] | null | null | null | script/train.py | PatxiofromAlphensign/web_DataUtils | ac4fa1ea352f109f6630d1bf642db29c0cf76d2d | [
"MIT"
] | null | null | null | script/train.py | PatxiofromAlphensign/web_DataUtils | ac4fa1ea352f109f6630d1bf642db29c0cf76d2d | [
"MIT"
] | null | null | null | import argparse
import torch
import time
import json
import numpy as np
import math
import random
np.random.seed(1337)
random.seed(1337)
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
def batch_generator(X, y, batch_size=128, scaled=False, return_idx=False, crf=False):
    # Yields mini-batches as (X, y, lengths, mask) tuples (plus the sort index when
    # return_idx is set). Sequences in each batch are sorted by decreasing true
    # length, as pack_padded_sequence requires. `scaled` is accepted but unused.
    for offset in range(0, X.shape[0], batch_size):
        batch_X_len = np.sum(X[offset:offset+batch_size] != 0, axis=1)
        batch_idx = batch_X_len.argsort()[::-1]  # longest sequences first
        batch_X_len = batch_X_len[batch_idx]
        batch_X_mask = (X[offset:offset+batch_size] != 0)[batch_idx].astype(np.uint8)
        batch_X = X[offset:offset+batch_size][batch_idx]
        batch_y = y[offset:offset+batch_size][batch_idx]
        batch_X = torch.autograd.Variable(torch.from_numpy(batch_X).long().cuda())
        batch_X_mask = torch.autograd.Variable(torch.from_numpy(batch_X_mask).long().cuda())
        batch_y = torch.autograd.Variable(torch.from_numpy(batch_y).long().cuda())
        if len(batch_y.size()) == 2 and not crf:
            # For the softmax path, pack the padded tag matrix so that nll_loss
            # only sees real (non-padding) time steps.
            batch_y = torch.nn.utils.rnn.pack_padded_sequence(batch_y, batch_X_len, batch_first=True)
        if return_idx:  # in testing, the caller needs batch_idx to sort predictions back
            yield (batch_X, batch_y, batch_X_len, batch_X_mask, batch_idx)
        else:
            yield (batch_X, batch_y, batch_X_len, batch_X_mask)
class Model(torch.nn.Module):
def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False):
super(Model, self).__init__()
self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
self.gen_embedding.weight=torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
self.domain_embedding.weight=torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
self.conv1=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 5, padding=2 )
self.conv2=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 3, padding=1 )
self.dropout=torch.nn.Dropout(dropout)
self.conv3=torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv4=torch.nn.Conv1d(256, 256, 5, padding=2)
self.conv5=torch.nn.Conv1d(256, 256, 5, padding=2)
self.linear_ae=torch.nn.Linear(256, num_classes)
self.crf_flag=crf
if self.crf_flag:
from allennlp.modules import ConditionalRandomField
self.crf=ConditionalRandomField(num_classes)
def forward(self, x, x_len, x_mask, x_tag=None, testing=False):
x_emb=torch.cat((self.gen_embedding(x), self.domain_embedding(x) ), dim=2)
x_emb=self.dropout(x_emb).transpose(1, 2)
x_conv=torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1) )
x_conv=self.dropout(x_conv)
x_conv=torch.nn.functional.relu(self.conv3(x_conv) )
x_conv=self.dropout(x_conv)
x_conv=torch.nn.functional.relu(self.conv4(x_conv) )
x_conv=self.dropout(x_conv)
x_conv=torch.nn.functional.relu(self.conv5(x_conv) )
x_conv=x_conv.transpose(1, 2)
x_logit=self.linear_ae(x_conv)
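        # Training returns a scalar loss; testing returns per-token label scores (or CRF Viterbi tags).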
if testing:
if self.crf_flag:
score=self.crf.viterbi_tags(x_logit, x_mask)
else:
x_logit=x_logit.transpose(2, 0)
score=torch.nn.functional.log_softmax(x_logit).transpose(2, 0)
else:
if self.crf_flag:
score=-self.crf(x_logit, x_tag, x_mask)
else:
x_logit=torch.nn.utils.rnn.pack_padded_sequence(x_logit, x_len, batch_first=True)
score=torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(x_logit.data), x_tag.data)
return score
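# Mean loss over the given set, computed with the model in eval mode (dropout disabled).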
def valid_loss(model, valid_X, valid_y, crf=False):
model.eval()
losses=[]
for batch in batch_generator(valid_X, valid_y, crf=crf):
batch_valid_X, batch_valid_y, batch_valid_X_len, batch_valid_X_mask=batch
loss=model(batch_valid_X, batch_valid_X_len, batch_valid_X_mask, batch_valid_y)
losses.append(loss.data[0])
model.train()
return sum(losses)/len(losses)
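# Training loop: gradient clipping at norm 1, checkpointing on best validation loss,
# and a reshuffle of the training data after every epoch.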
def train(train_X, train_y, valid_X, valid_y, model, model_fn, optimizer, parameters, epochs=200, batch_size=128, crf=False):
best_loss=float("inf")
valid_history=[]
train_history=[]
for epoch in range(epochs):
for batch in batch_generator(train_X, train_y, batch_size, crf=crf):
batch_train_X, batch_train_y, batch_train_X_len, batch_train_X_mask=batch
loss=model(batch_train_X, batch_train_X_len, batch_train_X_mask, batch_train_y)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm(parameters, 1.)
optimizer.step()
loss=valid_loss(model, train_X, train_y, crf=crf)
train_history.append(loss)
loss=valid_loss(model, valid_X, valid_y, crf=crf)
valid_history.append(loss)
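        # Keep only the checkpoint with the lowest validation loss seen so far.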
if loss<best_loss:
best_loss=loss
torch.save(model, model_fn)
shuffle_idx=np.random.permutation(len(train_X) )
train_X=train_X[shuffle_idx]
train_y=train_y[shuffle_idx]
model=torch.load(model_fn)
return train_history, valid_history
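# Load pre-trained general and domain embeddings plus the preprocessed dataset, hold out the
# last `valid_split` examples for validation, and train `runs` independent models.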
def run(domain, data_dir, model_dir, valid_split, runs, epochs, lr, dropout, batch_size=128):
gen_emb=np.load(data_dir+"gen.vec.npy")
domain_emb=np.load(data_dir+domain+"_emb.vec.npy")
ae_data=np.load(data_dir+domain+".npz")
valid_X=ae_data['train_X'][-valid_split:]
valid_y=ae_data['train_y'][-valid_split:]
train_X=ae_data['train_X'][:-valid_split]
train_y=ae_data['train_y'][:-valid_split]
for r in range(runs):
print(r)
model=Model(gen_emb, domain_emb, 3, dropout=dropout, crf=False)
model.cuda()
parameters = [p for p in model.parameters() if p.requires_grad]
optimizer=torch.optim.Adam(parameters, lr=lr)
train_history, valid_history=train(train_X, train_y, valid_X, valid_y, model, model_dir+domain+str(r), optimizer, parameters, epochs, crf=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', type=str, default="model/")
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--runs', type=int, default=5)
parser.add_argument('--domain', type=str, default="laptop")
parser.add_argument('--data_dir', type=str, default="data/prep_data/")
parser.add_argument('--valid', type=int, default=150) #number of validation data.
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--dropout', type=float, default=0.55)
args = parser.parse_args()
run(args.domain, args.data_dir, args.model_dir, args.valid, args.runs, args.epochs, args.lr, args.dropout, args.batch_size)
| 45.08589 | 152 | 0.661314 |
fa9dd7503f7bb11f43f146b27a1c27efe4722bb0 | 5,703 | py | Python | tests/selectedtests/work_items/test_process_test_mapping_work_items.py | isabella232/selected-tests | 890cd5f39f5571d50f0406b4c25a1a2eef1006a3 | [
"Apache-2.0"
] | 2 | 2020-04-13T11:26:57.000Z | 2022-01-21T00:03:52.000Z | tests/selectedtests/work_items/test_process_test_mapping_work_items.py | mongodb/selected-tests | 467f71f1d45b06ac3cc5db252f18658f8cd93083 | [
"Apache-2.0"
] | 54 | 2019-09-26T18:56:34.000Z | 2022-03-12T01:07:00.000Z | tests/selectedtests/work_items/test_process_test_mapping_work_items.py | isabella232/selected-tests | 890cd5f39f5571d50f0406b4c25a1a2eef1006a3 | [
"Apache-2.0"
] | 6 | 2019-10-01T14:24:27.000Z | 2020-02-13T15:53:47.000Z | from unittest.mock import MagicMock, patch
import selectedtests.work_items.process_test_mapping_work_items as under_test
from selectedtests.test_mappings.create_test_mappings import TestMappingsResult
NS = "selectedtests.work_items.process_test_mapping_work_items"
def ns(relative_name): # pylint: disable=invalid-name
"""Return a full name from a name relative to the test module"s name space."""
return NS + "." + relative_name
class TestProcessQueuedTestMappingWorkItems:
@patch(ns("_process_one_test_mapping_work_item"))
@patch(ns("_generate_test_mapping_work_items"))
def test_analyze_runs_while_work_available(
self, mock_gen_test_map_work_items, mock_process_one_test_mapping_work_item
):
n_work_items = 3
mock_gen_test_map_work_items.return_value = [MagicMock() for _ in range(n_work_items)]
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
under_test.process_queued_test_mapping_work_items(evg_api_mock, mongo_mock, after_date=None)
assert n_work_items == mock_process_one_test_mapping_work_item.call_count
@patch(ns("_process_one_test_mapping_work_item"))
def test_analyze_does_not_throw_exceptions(self, mock_process_one_test_mapping_work_item):
mock_process_one_test_mapping_work_item.side_effect = ValueError("Unexpected Exception")
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
under_test.process_queued_test_mapping_work_items(evg_api_mock, mongo_mock, after_date=None)
class TestProcessOneTestMappingWorkItem:
@patch(ns("_seed_test_mappings_for_project"))
def test_work_items_completed_successfully_are_marked_complete(
self, run_create_test_mappings_mock
):
work_item_mock = MagicMock()
run_create_test_mappings_mock.return_value = True
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
under_test._process_one_test_mapping_work_item(
work_item_mock, evg_api_mock, mongo_mock, after_date=None
)
work_item_mock.complete.assert_called_once()
@patch(ns("_seed_test_mappings_for_project"))
def test_work_items_completed_unsuccessfully_are_marked_not_complete(
self, run_create_test_mappings_mock
):
work_item_mock = MagicMock()
run_create_test_mappings_mock.return_value = False
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
under_test._process_one_test_mapping_work_item(
work_item_mock, evg_api_mock, mongo_mock, after_date=None
)
work_item_mock.complete.assert_not_called()
class TestSeedTestMappingsForProject:
@patch(ns("update_test_mappings"))
@patch(ns("generate_test_mappings"))
@patch(ns("ProjectConfig.get"))
def test_mappings_are_created(
self, project_config_mock, generate_test_mappings_mock, update_test_mappings_mock
):
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
logger_mock = MagicMock()
test_mappings_list = ["mock-mapping"]
generate_test_mappings_mock.return_value = TestMappingsResult(
test_mappings_list=test_mappings_list,
most_recent_project_commit_analyzed="last-project-sha-analyzed",
most_recent_module_commit_analyzed="last-module-sha-analyzed",
)
work_item_mock = MagicMock(source_file_regex="src", test_file_regex="test", module=None)
under_test._seed_test_mappings_for_project(
evg_api_mock, mongo_mock, work_item_mock, after_date=None, log=logger_mock
)
project_config_mock.return_value.test_config.update.assert_called_once_with(
"last-project-sha-analyzed",
work_item_mock.source_file_regex,
work_item_mock.test_file_regex,
work_item_mock.module,
"last-module-sha-analyzed",
work_item_mock.module_source_file_regex,
work_item_mock.module_test_file_regex,
)
project_config_mock.return_value.save.assert_called_once_with(mongo_mock.project_config())
update_test_mappings_mock.assert_called_once_with(test_mappings_list, mongo_mock)
@patch(ns("generate_test_mappings"))
@patch(ns("ProjectConfig.get"))
def test_no_test_mappings_are_created(self, project_config_mock, generate_test_mappings_mock):
evg_api_mock = MagicMock()
mongo_mock = MagicMock()
logger_mock = MagicMock()
generate_test_mappings_mock.return_value = TestMappingsResult(
test_mappings_list=[],
most_recent_project_commit_analyzed="last-project-sha-analyzed",
most_recent_module_commit_analyzed="last-module-sha-analyzed",
)
mongo_mock.test_mappings.return_value.insert_many.side_effect = TypeError(
"documents must be a non-empty list"
)
work_item_mock = MagicMock(source_file_regex="src", test_file_regex="test", module=None)
under_test._seed_test_mappings_for_project(
evg_api_mock, mongo_mock, work_item_mock, after_date=None, log=logger_mock
)
project_config_mock.return_value.test_config.update.assert_called_once_with(
"last-project-sha-analyzed",
work_item_mock.source_file_regex,
work_item_mock.test_file_regex,
work_item_mock.module,
"last-module-sha-analyzed",
work_item_mock.module_source_file_regex,
work_item_mock.module_test_file_regex,
)
project_config_mock.return_value.save.assert_called_once_with(mongo_mock.project_config())
mongo_mock.test_mappings.return_value.insert_many.assert_not_called()
| 41.933824 | 100 | 0.732246 |