| hexsha (string, 40) | size (int64, 3 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 972) | max_stars_repo_name (string, 6 to 130) | max_stars_repo_head_hexsha (string, 40 to 78) | max_stars_repo_licenses (sequence, 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3 to 972) | max_issues_repo_name (string, 6 to 130) | max_issues_repo_head_hexsha (string, 40 to 78) | max_issues_repo_licenses (sequence, 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3 to 972) | max_forks_repo_name (string, 6 to 130) | max_forks_repo_head_hexsha (string, 40 to 78) | max_forks_repo_licenses (sequence, 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b23570c217e3063a89eec461f7ca7f7b3d5f7a30 | 651 | py | Python | venv/bin/rst2html.py | waynshang/python_debug_module | c8e0d24b675e849757709ae41ecee6772af40dea | [
"MIT"
] | null | null | null | venv/bin/rst2html.py | waynshang/python_debug_module | c8e0d24b675e849757709ae41ecee6772af40dea | [
"MIT"
] | null | null | null | venv/bin/rst2html.py | waynshang/python_debug_module | c8e0d24b675e849757709ae41ecee6772af40dea | [
"MIT"
] | null | null | null | #!/Users/wayneshang/Documents/project/python_debug_package/venv/bin/python3
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| 27.125 | 78 | 0.74808 |
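The rst2html.py front end above is a thin wrapper around `publish_cmdline`; the same conversion can also be done programmatically. A minimal sketch, assuming Docutils is installed and using placeholder input/output paths:

from docutils.core import publish_file

# Convert a reStructuredText file to HTML (paths are illustrative placeholders).
publish_file(source_path='input.rst',
             destination_path='output.html',
             writer_name='html')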
411bd4a1843d85b30c25685310378c045469adc3 | 4,463 | py | Python | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleList.py | Carlangueitor/pulumi-kubernetes | 859ccaaeb8291de49128dbc202fbac1358b2a25a | [
"Apache-2.0"
] | null | null | null | # *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRoleList(pulumi.CustomResource):
"""
ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of
rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.
"""
apiVersion: pulumi.Output[str]
"""
APIVersion defines the versioned schema of this representation of an object. Servers should
convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
"""
kind: pulumi.Output[str]
"""
Kind is a string value representing the REST resource this object represents. Servers may infer
this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
"""
items: pulumi.Output[list]
"""
Items is a list of ClusterRoles
"""
metadata: pulumi.Output[dict]
"""
Standard object's metadata.
"""
def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
"""
Create a ClusterRoleList resource with the given unique name, arguments, and options.
:param str resource_name: The _unique_ name of the resource.
:param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
:param pulumi.Input[list] items: Items is a list of ClusterRoles
:param pulumi.Input[dict] metadata: Standard object's metadata.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
__props__['kind'] = 'ClusterRoleList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
__props__['status'] = None
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
))
super(ClusterRoleList, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleList",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
"""
Get the state of an existing `ClusterRoleList` resource, as identified by `id`.
The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
then (per Kubernetes convention) the ID becomes `default/[name]`.
Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.
:param str resource_name: _Unique_ name used to register this resource with Pulumi.
:param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
Takes the form `[namespace]/[name]` or `[name]`.
:param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
resource's behavior.
"""
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return ClusterRoleList(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| 40.572727 | 107 | 0.678467 |
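The generated resource class above is used like any other Pulumi resource from within a Pulumi program. A hedged sketch (the import path is inferred from the file location shown above; `items` is the only required argument):

import pulumi
from pulumi_kubernetes.rbac.v1beta1 import ClusterRoleList

# Registers an (empty) ClusterRoleList; this must run inside a Pulumi program
# (e.g. via `pulumi up`), not as a standalone script.
example = ClusterRoleList("example-cluster-role-list", items=[])
pulumi.export("kind", example.kind)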
ee372692d5de0fc8fee12e13ff17ff697f5b3a41 | 5,063 | py | Python | docs/source/conf.py | keleustes/oslc-operator | 3a61657da5718eb9804528ffb2a1c72b5f6333a0 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | keleustes/oslc-operator | 3a61657da5718eb9804528ffb2a1c72b5f6333a0 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | keleustes/oslc-operator | 3a61657da5718eb9804528ffb2a1c72b5f6333a0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# shipyard documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 16 03:40:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
source_parsers = {
'.md': CommonMarkParser,
}
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenstackLCM Operator'
copyright = u'2019 AT&T Intellectual Property.'
author = u'OpenstackLCM Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ucpintdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'airshipint.tex', u'OpenstackLCM Operator Documentation',
u'OpenstackLCM Authors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'OpenstackLCM', u'OpenstackLCM Operator Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OpenstackLCM Operator', u'OpenstackLCM Operator Documentation',
author, 'OpenstackLCM Operator',
'OpenstackLCM documentation',
'Miscellaneous'),
]
| 30.317365 | 81 | 0.691092 |
533af34802bddbc6df5469dd53a4599d446a83c5 | 38,002 | py | Python | model.py | JaywongWang/DenseVideoCaptioning | db31ca2b597840494eee31547d3bf3215a995846 | [
"MIT"
] | 150 | 2018-10-06T15:51:30.000Z | 2022-03-22T08:23:24.000Z | model.py | xiaoxinlong/DenseVideoCaptioning | 27f315da7c90f6bb6d7a3fc8038159f7a54ec5bb | [
"MIT"
] | 38 | 2018-10-08T07:19:59.000Z | 2021-05-06T21:13:43.000Z | model.py | xiaoxinlong/DenseVideoCaptioning | 27f315da7c90f6bb6d7a3fc8038159f7a54ec5bb | [
"MIT"
] | 54 | 2018-10-22T07:33:37.000Z | 2022-03-23T04:56:25.000Z | """
Model definition
Implementation of the dense captioning model from the paper "Bidirectional Attentive Fusion with Context Gating for Dense Video Captioning" by Jingwen Wang et al., CVPR 2018.
The code looks complicated because we need to handle some "dynamic" parts of the graph.
"""
import tensorflow as tf
class CaptionModel(object):
def __init__(self, options):
self.options = options
self.initializer = tf.random_uniform_initializer(
minval = - self.options['init_scale'],
maxval = self.options['init_scale'])
tf.set_random_seed(options['random_seed'])
"""
build video feature embedding
"""
def build_video_feat_embedding(self, video_feat, reuse=False):
with tf.variable_scope('video_feat_embed', reuse=reuse) as scope:
video_feat_embed = tf.contrib.layers.fully_connected(
inputs=video_feat,
num_outputs=self.options['word_embed_size'],
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer()
)
return video_feat_embed
"""
build word embedding for each word in a caption
"""
def build_caption_embedding(self, caption, reuse=False):
with tf.variable_scope('word_embed', reuse=reuse):
embed_map = tf.get_variable(
name='map',
shape=(self.options['vocab_size'], self.options['word_embed_size']),
initializer=self.initializer
)
caption_embed = tf.nn.embedding_lookup(embed_map, caption)
return caption_embed
"""
Build graph for proposal generation (inference)
"""
def build_proposal_inference(self, reuse=False):
inputs = {}
outputs = {}
# this line of code is just a message to inform that batch size should be set to 1 only
batch_size = 1
#******************** Define Proposal Module ******************#
## dim1: batch, dim2: video sequence length, dim3: video feature dimension
## video feature sequence
# forward
video_feat_fw = tf.placeholder(tf.float32, [None, None, self.options['video_feat_dim']], name='video_feat_fw')
inputs['video_feat_fw'] = video_feat_fw
# backward
video_feat_bw = tf.placeholder(tf.float32, [None, None, self.options['video_feat_dim']], name='video_feat_bw')
inputs['video_feat_bw'] = video_feat_bw
rnn_cell_video_fw = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
rnn_cell_video_bw = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
with tf.variable_scope('proposal_module', reuse=reuse) as proposal_scope:
'''video feature sequence encoding: forward pass
'''
with tf.variable_scope('video_encoder_fw', reuse=reuse) as scope:
sequence_length = tf.expand_dims(tf.shape(video_feat_fw)[1], axis=0)
initial_state = rnn_cell_video_fw.zero_state(batch_size=batch_size, dtype=tf.float32)
rnn_outputs_fw, _ = tf.nn.dynamic_rnn(
cell=rnn_cell_video_fw,
inputs=video_feat_fw,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32
)
rnn_outputs_fw_reshape = tf.reshape(rnn_outputs_fw, [-1, self.options['rnn_size']], name='rnn_outputs_fw_reshape')
# predict proposal at each time step: use fully connected layer to output scores for every anchors
with tf.variable_scope('predict_proposal_fw', reuse=reuse) as scope:
logit_output_fw = tf.contrib.layers.fully_connected(
inputs = rnn_outputs_fw_reshape,
num_outputs = self.options['num_anchors'],
activation_fn = None
)
'''video feature sequence encoding: backward pass
'''
with tf.variable_scope('video_encoder_bw', reuse=reuse) as scope:
#sequence_length = tf.reduce_sum(video_feat_mask, axis=-1)
sequence_length = tf.expand_dims(tf.shape(video_feat_bw)[1], axis=0)
initial_state = rnn_cell_video_bw.zero_state(batch_size=batch_size, dtype=tf.float32)
rnn_outputs_bw, _ = tf.nn.dynamic_rnn(
cell=rnn_cell_video_bw,
inputs=video_feat_bw,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32
)
rnn_outputs_bw_reshape = tf.reshape(rnn_outputs_bw, [-1, self.options['rnn_size']], name='rnn_outputs_bw_reshape')
# predict proposal at each time step: use fully connected layer to output scores for every anchors
with tf.variable_scope('predict_proposal_bw', reuse=reuse) as scope:
logit_output_bw = tf.contrib.layers.fully_connected(
inputs = rnn_outputs_bw_reshape,
num_outputs = self.options['num_anchors'],
activation_fn = None
)
# score
proposal_score_fw = tf.sigmoid(logit_output_fw, name='proposal_score_fw')
proposal_score_bw = tf.sigmoid(logit_output_bw, name='proposal_score_bw')
# outputs from proposal module
outputs['proposal_score_fw'] = proposal_score_fw
outputs['proposal_score_bw'] = proposal_score_bw
outputs['rnn_outputs_fw'] = rnn_outputs_fw_reshape
outputs['rnn_outputs_bw'] = rnn_outputs_bw_reshape
return inputs, outputs
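# Note (added commentary, not part of the original code): each row of
# proposal_score_fw / proposal_score_bw holds 'num_anchors' confidences for one
# time step.  In the bidirectional-SST style design assumed here, the k-th
# anchor score at forward step t corresponds to a candidate segment ending at t
# with the k-th predefined length (symmetrically for the backward pass over the
# reversed sequence); the anchor-to-segment mapping itself lives in the data
# preparation / evaluation code rather than in this graph.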
"""
Build graph for caption generation (inference)
    Surprisingly, I found that beam search leads to a worse METEOR score on the ActivityNet Captions dataset; a similar observation has been reported in other dense captioning papers.
    Therefore I do not use beam search when generating captions.
"""
def build_caption_greedy_inference(self, reuse=False):
inputs = {}
outputs = {}
# proposal feature sequences (the localized proposals/events can be of different length, I set a 'max_proposal_len' to make it easy for GPU processing)
proposal_feats = tf.placeholder(tf.float32, [None, self.options['max_proposal_len'], self.options['video_feat_dim']])
# combination of forward and backward hidden state, which encode event context information
event_hidden_feats = tf.placeholder(tf.float32, [None, 2*self.options['rnn_size']])
inputs['event_hidden_feats'] = event_hidden_feats
inputs['proposal_feats'] = proposal_feats
# batch size for inference, depends on how many proposals are generated for a video
eval_batch_size = tf.shape(proposal_feats)[0]
# intialize the rnn cell for captioning
rnn_cell_caption = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
def get_rnn_cell():
return tf.contrib.rnn.LSTMCell(num_units=self.options['rnn_size'], state_is_tuple=True, initializer=tf.orthogonal_initializer())
# multi-layer LSTM
multi_rnn_cell_caption = tf.contrib.rnn.MultiRNNCell([get_rnn_cell() for _ in range(self.options['num_rnn_layers'])], state_is_tuple=True)
# start word
word_id = tf.fill([eval_batch_size], self.options['vocab']['<START>'])
word_id = tf.to_int64(word_id)
word_ids = tf.expand_dims(word_id, axis=-1)
# probability (confidence) for the predicted word
word_confidences = tf.expand_dims(tf.fill([eval_batch_size], 1.), axis=-1)
# initial state of caption generation
initial_state = multi_rnn_cell_caption.zero_state(batch_size=eval_batch_size, dtype=tf.float32)
state = initial_state
with tf.variable_scope('caption_module', reuse=reuse) as caption_scope:
# initialize memory cell and hidden output, note that the returned state is a tuple containing all states for each cell in MultiRNNCell
state = multi_rnn_cell_caption.zero_state(batch_size=eval_batch_size, dtype=tf.float32)
proposal_feats_reshape = tf.reshape(proposal_feats, [-1, self.options['video_feat_dim']], name='video_feat_reshape')
## the caption data should be prepared in equal length, namely, with length of 'caption_seq_len'
## use caption mask data to mask out loss from sequence after end of token (<END>)
# only the first loop create variable, the other loops reuse them, need to give variable scope name to each variable, otherwise tensorflow will create a new one
for i in range(self.options['caption_seq_len']-1):
if i > 0:
caption_scope.reuse_variables()
# word embedding
word_embed = self.build_caption_embedding(word_id)
# get attention, receive both hidden state information (previous generated words) and video feature
# state[:, 1] return all hidden states for all cells in MultiRNNCell
h_state = tf.concat([s[1] for s in state], axis=-1)
h_state_tile = tf.tile(h_state, [1, self.options['max_proposal_len']])
h_state_reshape = tf.reshape(h_state_tile, [-1, self.options['num_rnn_layers']*self.options['rnn_size']])
# repeat to match each feature vector in the localized proposal
event_hidden_feats_tile = tf.tile(event_hidden_feats, [1, self.options['max_proposal_len']])
event_hidden_feats_reshape = tf.reshape(event_hidden_feats_tile, [-1, 2*self.options['rnn_size']])
feat_state_concat = tf.concat([proposal_feats_reshape, h_state_reshape, event_hidden_feats_reshape], axis=-1, name='feat_state_concat')
#feat_state_concat = tf.concat([tf.reshape(tf.tile(word_embed, [1, self.options['max_proposal_len']]), [-1, self.options['word_embed_size']]), proposal_feats_reshape, h_state_reshape, event_hidden_feats_reshape], axis=-1, name='feat_state_concat')
# use a two-layer network to model temporal soft attention over proposal feature sequence when predicting next word (dynamic)
with tf.variable_scope('attention', reuse=reuse) as attention_scope:
attention_layer1 = tf.contrib.layers.fully_connected(
inputs = feat_state_concat,
num_outputs = self.options['attention_hidden_size'],
activation_fn = tf.nn.tanh,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
attention_layer2 = tf.contrib.layers.fully_connected(
inputs = attention_layer1,
num_outputs = 1,
activation_fn = None,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
# reshape to match
attention_reshape = tf.reshape(attention_layer2, [-1, self.options['max_proposal_len']], name='attention_reshape')
attention_score = tf.nn.softmax(attention_reshape, dim=-1, name='attention_score')
attention = tf.reshape(attention_score, [-1, 1, self.options['max_proposal_len']], name='attention')
# attended video feature
attended_proposal_feat = tf.matmul(attention, proposal_feats, name='attended_proposal_feat')
attended_proposal_feat_reshape = tf.reshape(attended_proposal_feat, [-1, self.options['video_feat_dim']], name='attended_proposal_feat_reshape')
# whether to use proposal contexts to help generate the corresponding caption
if self.options['no_context']:
proposal_feats_full = attended_proposal_feat_reshape
else:
# whether to use gating function to combine the proposal contexts
if self.options['context_gating']:
# model a gate to weight each element of context and feature
attended_proposal_feat_reshape = tf.nn.tanh(attended_proposal_feat_reshape)
with tf.variable_scope('context_gating', reuse=reuse):
'''
context_feats_transform = tf.contrib.layers.fully_connected(
inputs=event_hidden_feats,
num_outputs=self.options['video_feat_dim'],
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer()
)
'''
context_feats_transform = event_hidden_feats
proposal_feats_transform = tf.contrib.layers.fully_connected(
inputs = attended_proposal_feat_reshape,
num_outputs = 2*self.options['rnn_size'],
activation_fn = tf.nn.tanh,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
gate = tf.contrib.layers.fully_connected(
inputs=tf.concat([word_embed, h_state, context_feats_transform, proposal_feats_transform], axis=-1),
num_outputs=2*self.options['rnn_size'],
activation_fn=tf.nn.sigmoid,
weights_initializer=tf.contrib.layers.xavier_initializer()
)
gated_context_feats = tf.multiply(context_feats_transform, gate)
gated_proposal_feats = tf.multiply(proposal_feats_transform, 1.-gate)
proposal_feats_full = tf.concat([gated_context_feats, gated_proposal_feats], axis=-1)
else:
proposal_feats_full = tf.concat([event_hidden_feats, attended_proposal_feat_reshape], axis=-1)
# proposal feature embedded into word space
proposal_feat_embed = self.build_video_feat_embedding(proposal_feats_full)
# get next state
caption_output, state = multi_rnn_cell_caption(tf.concat([proposal_feat_embed, word_embed], axis=-1), state)
# predict next word
with tf.variable_scope('logits', reuse=reuse) as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=caption_output,
num_outputs=self.options['vocab_size'],
activation_fn=None
)
softmax = tf.nn.softmax(logits, name='softmax')
word_id = tf.argmax(softmax, axis=-1)
word_confidence = tf.reduce_max(softmax, axis=-1)
word_ids = tf.concat([word_ids, tf.expand_dims(word_id, axis=-1)], axis=-1)
word_confidences = tf.concat([word_confidences, tf.expand_dims(word_confidence, axis=-1)], axis=-1)
#sentence_confidences = tf.reduce_sum(tf.log(tf.clip_by_value(word_confidences, 1e-20, 1.)), axis=-1)
word_confidences = tf.log(tf.clip_by_value(word_confidences, 1e-20, 1.))
outputs['word_ids'] = word_ids
outputs['word_confidences'] = word_confidences
return inputs, outputs
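# Note (added commentary, not part of the original code): the per-step temporal
# attention above scores each proposal feature f_j with
# e_j = w2^T tanh(W1 [f_j; h; c]), where h is the concatenated LSTM hidden
# state and c the bidirectional event context, and attends with
# f_att = sum_j softmax(e)_j * f_j.  With context gating enabled, a sigmoid
# gate g = sigma(W [w; h; c; p]) over the word embedding w, h, c and a tanh
# projection p of f_att mixes the two sources as [g * c, (1 - g) * p] before
# the word-space embedding.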
"""
Build graph for training
"""
def build_train(self):
# this line of code is just a message to inform that batch size should be set to 1 only
batch_size = 1
inputs = {}
outputs = {}
#******************** Define Proposal Module ******************#
## dim1: batch, dim2: video sequence length, dim3: video feature dimension
## video feature sequence
# forward video feature sequence
video_feat_fw = tf.placeholder(tf.float32, [None, None, self.options['video_feat_dim']], name='video_feat_fw')
inputs['video_feat_fw'] = video_feat_fw
# backward video feature sequence
video_feat_bw = tf.placeholder(tf.float32, [None, None, self.options['video_feat_dim']], name='video_feat_bw')
inputs['video_feat_bw'] = video_feat_bw
## proposal data, densely annotated, in forward direction
proposal_fw = tf.placeholder(tf.int32, [None, None, self.options['num_anchors']], name='proposal_fw')
inputs['proposal_fw'] = proposal_fw
## proposal data, densely annotated, in backward direction
proposal_bw = tf.placeholder(tf.int32, [None, None, self.options['num_anchors']], name='proposal_bw')
inputs['proposal_bw'] = proposal_bw
## proposals fed into the captioning module; I choose high-tIoU proposals for training the captioning module, forward pass
proposal_caption_fw = tf.placeholder(tf.int32, [None, None], name='proposal_caption_fw')
inputs['proposal_caption_fw'] = proposal_caption_fw
## proposals fed into the captioning module; I choose high-tIoU proposals for training the captioning module, backward pass
proposal_caption_bw = tf.placeholder(tf.int32, [None, None], name='proposal_caption_bw')
inputs['proposal_caption_bw'] = proposal_caption_bw
## weighting for positive/negative labels (solve imbalance data problem)
proposal_weight = tf.placeholder(tf.float32, [self.options['num_anchors'], 2], name='proposal_weight')
inputs['proposal_weight'] = proposal_weight
rnn_cell_video_fw = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
rnn_cell_video_bw = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
if self.options['rnn_drop'] > 0:
print('using dropout in rnn!')
rnn_drop = tf.placeholder(tf.float32)
inputs['rnn_drop'] = rnn_drop
rnn_cell_video_fw = tf.contrib.rnn.DropoutWrapper(
rnn_cell_video_fw,
input_keep_prob=1.0 - rnn_drop,
output_keep_prob=1.0 - rnn_drop
)
rnn_cell_video_bw = tf.contrib.rnn.DropoutWrapper(
rnn_cell_video_bw,
input_keep_prob=1.0 - rnn_drop,
output_keep_prob=1.0 - rnn_drop
)
with tf.variable_scope('proposal_module') as proposal_scope:
'''video feature sequence encoding: forward pass
'''
with tf.variable_scope('video_encoder_fw') as scope:
#sequence_length = tf.reduce_sum(video_feat_mask, axis=-1)
sequence_length = tf.expand_dims(tf.shape(video_feat_fw)[1], axis=0)
initial_state = rnn_cell_video_fw.zero_state(batch_size=batch_size, dtype=tf.float32)
rnn_outputs_fw, _ = tf.nn.dynamic_rnn(
cell=rnn_cell_video_fw,
inputs=video_feat_fw,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32
)
rnn_outputs_fw_reshape = tf.reshape(rnn_outputs_fw, [-1, self.options['rnn_size']], name='rnn_outputs_fw_reshape')
# predict proposal at each time step: use fully connected layer to output scores for every anchors
with tf.variable_scope('predict_proposal_fw') as scope:
logit_output_fw = tf.contrib.layers.fully_connected(
inputs = rnn_outputs_fw_reshape,
num_outputs = self.options['num_anchors'],
activation_fn = None
)
'''video feature sequence encoding: backward pass
'''
with tf.variable_scope('video_encoder_bw') as scope:
#sequence_length = tf.reduce_sum(video_feat_mask, axis=-1)
sequence_length = tf.expand_dims(tf.shape(video_feat_bw)[1], axis=0)
initial_state = rnn_cell_video_bw.zero_state(batch_size=batch_size, dtype=tf.float32)
rnn_outputs_bw, _ = tf.nn.dynamic_rnn(
cell=rnn_cell_video_bw,
inputs=video_feat_bw,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32
)
rnn_outputs_bw_reshape = tf.reshape(rnn_outputs_bw, [-1, self.options['rnn_size']], name='rnn_outputs_bw_reshape')
# predict proposal at each time step: use fully connected layer to output scores for every anchors
with tf.variable_scope('predict_proposal_bw') as scope:
logit_output_bw = tf.contrib.layers.fully_connected(
inputs = rnn_outputs_bw_reshape,
num_outputs = self.options['num_anchors'],
activation_fn = None
)
# calculate multi-label loss: use weighted binary cross entropy objective
proposal_fw_reshape = tf.reshape(proposal_fw, [-1, self.options['num_anchors']], name='proposal_fw_reshape')
proposal_fw_float = tf.to_float(proposal_fw_reshape)
proposal_bw_reshape = tf.reshape(proposal_bw, [-1, self.options['num_anchors']], name='proposal_bw_reshape')
proposal_bw_float = tf.to_float(proposal_bw_reshape)
# weighting positive samples
weight0 = tf.reshape(proposal_weight[:, 0], [-1, self.options['num_anchors']])
# weighting negative samples
weight1 = tf.reshape(proposal_weight[:, 1], [-1, self.options['num_anchors']])
# tile weight batch_size times
weight0 = tf.tile(weight0, [tf.shape(logit_output_fw)[0], 1])
weight1 = tf.tile(weight1, [tf.shape(logit_output_fw)[0], 1])
# get weighted sigmoid xentropy loss
loss_term_fw = tf.nn.weighted_cross_entropy_with_logits(targets=proposal_fw_float, logits=logit_output_fw, pos_weight=weight0)
loss_term_bw = tf.nn.weighted_cross_entropy_with_logits(targets=proposal_bw_float, logits=logit_output_bw, pos_weight=weight0)
loss_term_fw_sum = tf.reduce_sum(loss_term_fw, axis=-1, name='loss_term_fw_sum')
loss_term_bw_sum = tf.reduce_sum(loss_term_bw, axis=-1, name='loss_term_bw_sum')
proposal_fw_loss = tf.reduce_sum(loss_term_fw_sum) / (float(self.options['num_anchors'])*tf.to_float(tf.shape(video_feat_fw)[1]))
proposal_bw_loss = tf.reduce_sum(loss_term_bw_sum) / (float(self.options['num_anchors'])*tf.to_float(tf.shape(video_feat_bw)[1]))
proposal_loss = (proposal_fw_loss + proposal_bw_loss) / 2.
# summary data, for visualization using Tensorboard
tf.summary.scalar('proposal_fw_loss', proposal_fw_loss)
tf.summary.scalar('proposal_bw_loss', proposal_bw_loss)
tf.summary.scalar('proposal_loss', proposal_loss)
# outputs from proposal module
outputs['proposal_fw_loss'] = proposal_fw_loss
outputs['proposal_bw_loss'] = proposal_bw_loss
outputs['proposal_loss'] = proposal_loss
#*************** Define Captioning Module *****************#
## caption data: densely annotate sentences for each time step of a video, use mask data to mask out time steps when no caption should be output
caption = tf.placeholder(tf.int32, [None, None, self.options['caption_seq_len']], name='caption')
caption_mask = tf.placeholder(tf.int32, [None, None, self.options['caption_seq_len']], name='caption_mask')
inputs['caption'] = caption
inputs['caption_mask'] = caption_mask
proposal_caption_fw_reshape = tf.reshape(proposal_caption_fw, [-1], name='proposal_caption_fw_reshape')
proposal_caption_bw_reshape = tf.reshape(proposal_caption_bw, [-1], name='proposal_caption_bw_reshape')
# use correct or 'nearly correct' proposal output as input to the captioning module
boolean_mask = tf.greater(proposal_caption_fw_reshape, 0, name='boolean_mask')
# guarantee that at least one pos has True value
boolean_mask = tf.cond(tf.equal(tf.reduce_sum(tf.to_int32(boolean_mask)), 0), lambda: tf.concat([boolean_mask[:-1], tf.constant([True])], axis=-1), lambda: boolean_mask)
# select input video state
feat_len = tf.shape(video_feat_fw)[1]
forward_indices = tf.boolean_mask(tf.range(feat_len), boolean_mask)
event_feats_fw = tf.boolean_mask(rnn_outputs_fw_reshape, boolean_mask)
backward_indices = tf.boolean_mask(proposal_caption_bw_reshape, boolean_mask)
event_feats_bw = tf.gather_nd(rnn_outputs_bw_reshape, tf.expand_dims(backward_indices, axis=-1))
start_ids = feat_len - 1 - backward_indices
end_ids = forward_indices
event_c3d_seq, _ = self.get_c3d_seq(video_feat_fw[0], start_ids, end_ids, self.options['max_proposal_len'])
context_feats_fw = tf.gather_nd(rnn_outputs_fw_reshape, tf.expand_dims(start_ids, axis=-1))
context_feats_bw = tf.gather_nd(rnn_outputs_bw_reshape, tf.expand_dims(feat_len-1-end_ids, axis=-1))
# proposal feature sequences
proposal_feats = event_c3d_seq
# corresponding caption ground truth (batch size = 1)
caption_proposed = tf.boolean_mask(caption[0], boolean_mask, name='caption_proposed')
caption_mask_proposed = tf.boolean_mask(caption_mask[0], boolean_mask, name='caption_mask_proposed')
# the number of proposal-caption pairs for training
n_proposals = tf.shape(caption_proposed)[0]
rnn_cell_caption = tf.contrib.rnn.LSTMCell(
num_units=self.options['rnn_size'],
state_is_tuple=True,
initializer=tf.orthogonal_initializer()
)
rnn_cell_caption = tf.contrib.rnn.DropoutWrapper(
rnn_cell_caption,
input_keep_prob=1.0 - rnn_drop,
output_keep_prob=1.0 - rnn_drop
)
def get_rnn_cell():
return tf.contrib.rnn.LSTMCell(num_units=self.options['rnn_size'], state_is_tuple=True, initializer=tf.orthogonal_initializer())
# multi-layer LSTM
multi_rnn_cell_caption = tf.contrib.rnn.MultiRNNCell([get_rnn_cell() for _ in range(self.options['num_rnn_layers'])], state_is_tuple=True)
caption_loss = 0
with tf.variable_scope('caption_module') as caption_scope:
batch_size = n_proposals
# initialize memory cell and hidden output, note that the returned state is a tuple containing all states for each cell in MultiRNNCell
state = multi_rnn_cell_caption.zero_state(batch_size=batch_size, dtype=tf.float32)
proposal_feats_reshape = tf.reshape(proposal_feats, [-1, self.options['video_feat_dim']], name='proposal_feats_reshape')
event_hidden_feats = tf.concat([event_feats_fw, event_feats_bw], axis=-1)
event_hidden_feats_tile = tf.tile(event_hidden_feats, [1, self.options['max_proposal_len']])
event_hidden_feats_reshape = tf.reshape(event_hidden_feats_tile, [-1, 2*self.options['rnn_size']])
'''
The caption data should be prepared in equal length, namely, with length of 'caption_seq_len'
## use caption mask data to mask out loss from sequence after end of token (<END>)
Only the first loop create variable, the other loops reuse them
'''
for i in range(self.options['caption_seq_len']-1):
if i > 0:
caption_scope.reuse_variables()
# word embedding
word_embed = self.build_caption_embedding(caption_proposed[:, i])
# calculate attention over proposal feature elements
# state[:, 1] return all hidden states for all cells in MultiRNNCell
h_state = tf.concat([s[1] for s in state], axis=-1)
h_state_tile = tf.tile(h_state, [1, self.options['max_proposal_len']])
h_state_reshape = tf.reshape(h_state_tile, [-1, self.options['num_rnn_layers']*self.options['rnn_size']])
feat_state_concat = tf.concat([proposal_feats_reshape, h_state_reshape, event_hidden_feats_reshape], axis=-1, name='feat_state_concat')
#feat_state_concat = tf.concat([tf.reshape(tf.tile(word_embed, [1, self.options['max_proposal_len']]), [-1, self.options['word_embed_size']]), proposal_feats_reshape, h_state_reshape, event_hidden_feats_reshape], axis=-1, name='feat_state_concat')
# use a two-layer network to model attention over video feature sequence when predicting next word (dynamic)
with tf.variable_scope('attention') as attention_scope:
attention_layer1 = tf.contrib.layers.fully_connected(
inputs = feat_state_concat,
num_outputs = self.options['attention_hidden_size'],
activation_fn = tf.nn.tanh,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
attention_layer2 = tf.contrib.layers.fully_connected(
inputs = attention_layer1,
num_outputs = 1,
activation_fn = None,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
# reshape to match
attention_reshape = tf.reshape(attention_layer2, [-1, self.options['max_proposal_len']], name='attention_reshape')
attention_score = tf.nn.softmax(attention_reshape, dim=-1, name='attention_score')
attention = tf.reshape(attention_score, [-1, 1, self.options['max_proposal_len']], name='attention')
# attended video feature
attended_proposal_feat = tf.matmul(attention, proposal_feats, name='attended_proposal_feat')
attended_proposal_feat_reshape = tf.reshape(attended_proposal_feat, [-1, self.options['video_feat_dim']], name='attended_proposal_feat_reshape')
if self.options['no_context']:
proposal_feats_full = attended_proposal_feat_reshape
else:
if self.options['context_gating']:
# model a gate to weight each element of context and feature
attended_proposal_feat_reshape = tf.nn.tanh(attended_proposal_feat_reshape)
with tf.variable_scope('context_gating'):
'''
context_feats_transform = tf.contrib.layers.fully_connected(
inputs=event_hidden_feats,
num_outputs=self.options['video_feat_dim'],
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer()
)
'''
context_feats_transform = event_hidden_feats
proposal_feats_transform = tf.contrib.layers.fully_connected(
inputs = attended_proposal_feat_reshape,
num_outputs = 2*self.options['rnn_size'],
activation_fn = tf.nn.tanh,
weights_initializer = tf.contrib.layers.xavier_initializer()
)
# context gating
gate = tf.contrib.layers.fully_connected(
inputs=tf.concat([word_embed, h_state, context_feats_transform, proposal_feats_transform], axis=-1),
num_outputs=2*self.options['rnn_size'],
activation_fn=tf.nn.sigmoid,
weights_initializer=tf.contrib.layers.xavier_initializer()
)
gated_context_feats = tf.multiply(context_feats_transform, gate)
gated_proposal_feats = tf.multiply(proposal_feats_transform, 1.-gate)
proposal_feats_full = tf.concat([gated_context_feats, gated_proposal_feats], axis=-1)
else:
proposal_feats_full = tf.concat([event_hidden_feats, attended_proposal_feat_reshape], axis=-1)
# proposal feature embedded into word space
proposal_feat_embed = self.build_video_feat_embedding(proposal_feats_full)
# get next state
caption_output, state = multi_rnn_cell_caption(tf.concat([proposal_feat_embed, word_embed], axis=-1), state)
# predict next word
with tf.variable_scope('logits') as logits_scope:
logits = tf.contrib.layers.fully_connected(
inputs=caption_output,
num_outputs=self.options['vocab_size'],
activation_fn=None
)
labels = caption_proposed[:, i+1] # predict next word
# loss term
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
output_mask = tf.to_float(caption_mask_proposed[:,i])
loss = tf.reduce_sum(tf.multiply(loss, output_mask))
caption_loss = caption_loss + loss
# mean loss for each word
caption_loss = caption_loss / (tf.to_float(batch_size)*tf.to_float(tf.reduce_sum(caption_mask_proposed)) + 1)
tf.summary.scalar('caption_loss', caption_loss)
reg_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if not v.name.startswith('caption_module/word_embed')])
total_loss = self.options['weight_proposal']*proposal_loss + self.options['weight_caption']*caption_loss
tf.summary.scalar('total_loss', total_loss)
outputs['caption_loss'] = caption_loss
outputs['loss'] = total_loss
outputs['reg_loss'] = reg_loss
outputs['n_proposals'] = n_proposals
return inputs, outputs
"""get c3d proposal representation (feature sequence), given start end feature ids
"""
def get_c3d_seq(self, video_feat_sequence, start_ids, end_ids, max_clip_len):
ind = tf.constant(0)
N = tf.shape(start_ids)[0]
event_c3d_sequence = tf.fill([0, max_clip_len, self.options['video_feat_dim']], 0.)
event_c3d_mask = tf.fill([0, max_clip_len], 0.)
event_c3d_mask = tf.to_int32(event_c3d_mask)
def condition(ind, event_c3d_sequence, event_c3d_mask):
return tf.less(ind, N)
def body(ind, event_c3d_sequence, event_c3d_mask):
start_id = start_ids[ind]
end_id = end_ids[ind]
c3d_feats = video_feat_sequence[start_id:end_id]
# padding if needed
clip_len = end_id - start_id
c3d_feats = tf.cond(tf.less(clip_len, max_clip_len), lambda: tf.concat([c3d_feats, tf.fill([max_clip_len-clip_len, self.options['video_feat_dim']], 0.)], axis=0), lambda: c3d_feats[:max_clip_len])
c3d_feats = tf.expand_dims(c3d_feats, axis=0)
event_c3d_sequence = tf.concat([event_c3d_sequence, c3d_feats], axis=0)
this_mask = tf.cond(tf.less(clip_len, max_clip_len), lambda: tf.concat([tf.fill([clip_len], 1.), tf.fill([max_clip_len-clip_len], 0.)], axis=0), lambda: tf.fill([max_clip_len], 1.))
this_mask = tf.expand_dims(this_mask, axis=0)
this_mask = tf.to_int32(this_mask)
event_c3d_mask = tf.concat([event_c3d_mask, this_mask], axis=0)
return tf.add(ind, 1), event_c3d_sequence, event_c3d_mask
_, event_c3d_sequence, event_c3d_mask = tf.while_loop(condition, body, loop_vars=[ind, event_c3d_sequence, event_c3d_mask], shape_invariants=[ind.get_shape(), tf.TensorShape([None, None, self.options['video_feat_dim']]), tf.TensorShape([None, None])])
return event_c3d_sequence, event_c3d_mask
| 52.780556 | 264 | 0.607179 |
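The CaptionModel above reads every hyper-parameter from a single `options` dictionary. Below is a hedged sketch of the fields the class actually references; the values are illustrative placeholders rather than the settings from the paper, and it assumes the file is importable as model.py:

from model import CaptionModel   # assumes this file is importable as model.py

# Illustrative placeholder settings; not the values used in the paper.
options = {
    'init_scale': 0.08,             # range of the uniform weight initializer
    'random_seed': 42,
    'word_embed_size': 512,         # word / proposal-feature embedding size
    'vocab_size': 10000,
    'vocab': {'<START>': 1},        # must contain at least the start-token id
    'rnn_size': 512,                # LSTM hidden size
    'num_rnn_layers': 2,            # layers of the captioning MultiRNNCell
    'rnn_drop': 0.3,                # dropout rate used during training
    'video_feat_dim': 500,          # e.g. C3D feature dimension
    'num_anchors': 120,             # temporal anchors scored per time step
    'max_proposal_len': 110,        # padded proposal length for captioning
    'caption_seq_len': 30,
    'attention_hidden_size': 512,
    'no_context': False,
    'context_gating': True,
    'weight_proposal': 1.0,         # loss weights combined in build_train()
    'weight_caption': 5.0,
}

model = CaptionModel(options)
train_inputs, train_outputs = model.build_train()   # batch size is fixed to 1
# build_proposal_inference() / build_caption_greedy_inference() build the test-time graphs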
36052f7bae909c400475eafd4049bfc9620955c6 | 4,546 | py | Python | scripts/stats/cluster/get_articles_bow_stats.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | scripts/stats/cluster/get_articles_bow_stats.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | scripts/stats/cluster/get_articles_bow_stats.py | foobar999/Wikipedia-Cluster-Analysis | 4dc8166fb01f9b3ab6d7557de331cfc95298ff0c | [
"MIT"
] | null | null | null | import argparse
from pprint import pformat
from gensim.corpora import Dictionary, MmCorpus
from gensim.parsing.preprocessing import STOPWORDS
from scripts.cluster.articles_to_bow import get_filtered_articles_data_from_path
from scripts.utils.utils import init_logger, read_lines
from scripts.utils.documents import is_mainspace_page, get_tokens
logger = init_logger()
def get_corpus_stats(corpus, id2word):
num_docs = 0
num_nnz = 0
num_terms = len(id2word)
sum_elements = 0
for doc in corpus:
num_docs += 1
bow = id2word.doc2bow(doc)
num_nnz += len(bow)
sum_elements += sum(cnt for id,cnt in bow)
logger.info("%ix%i matrix, density=%.3f%% (%i/%i), sum_elements %i", num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms), num_nnz, num_docs * num_terms, sum_elements)
logger.info('')
def get_stats(articles_path, article_min_tokens, token_min_len, stopwords, no_below, no_above, namespace_prefixes):
dok_tokens = get_filtered_articles_data_from_path(articles_path, article_min_tokens, token_min_len, stopwords, namespace_prefixes, False)
id2word = Dictionary(dok_tokens)
logger.info('no_below {} discarding'.format(no_below))
id2word.filter_extremes(no_below=no_below, no_above=1, keep_n=None, keep_tokens=None)
logger.info('no_above {} discarding'.format(no_above))
id2word.filter_extremes(no_below=0, no_above=no_above, keep_n=None, keep_tokens=None)
get_corpus_stats(get_filtered_articles_data_from_path(articles_path, article_min_tokens, token_min_len, stopwords, namespace_prefixes, False), id2word)
def main():
parser = argparse.ArgumentParser(description='calculates stats of various bag-of-words models, adding more preprocessing steps incrementally')
parser.add_argument("--articles-dump", type=argparse.FileType('r'), help='path to input .xml.bz2 articles dump', required=True)
parser.add_argument("--no-below", type=int, help='Keep only tokes which appear in at least NO_BELOW documents (default {})', required=True)
parser.add_argument("--no-above", type=float, help='Keep only tokes which appear in at most NO_ABOVE*CORPUSSIZE documents (default {})', required=True)
parser.add_argument("--token-min-len", type=int, help='Consider only tokens of >= TOKEN_MIN_LEN chars', required=True)
parser.add_argument("--article-min-tokens", type=int, help='Analyze only articles of >= ARTICLE_MIN_TOKENS tokens default {}). Should be >=1', required=True)
parser.add_argument("--namespace-prefixes", type=argparse.FileType('r'), help='file of namespace prefixes to ignore', required=True)
args = parser.parse_args()
input_articles_path = args.articles_dump.name
no_below,no_above = args.no_below,args.no_above
token_min_len = args.token_min_len
article_min_tokens = args.article_min_tokens
namespace_prefixes = read_lines(args.namespace_prefixes.name) if args.namespace_prefixes else ()
logger.info('running with:\n{}'.format(pformat({'input_articles_path':input_articles_path, 'no_below':no_below, 'no_above':no_above,'token_min_len':token_min_len, 'article_min_tokens':article_min_tokens, 'namespace_prefixes':namespace_prefixes})))
logger.info('analyzing vocabulary')
stopwords = STOPWORDS
logger.info('stats without filtering')
get_stats(input_articles_path, 0, 0, (), 0, 1, namespace_prefixes)
logger.info('stats with art_toks>={}'.format(article_min_tokens))
get_stats(input_articles_path, article_min_tokens, 0, (), 0, 1, namespace_prefixes)
logger.info('stats with art_toks>={}, tok_len>={}'.format(article_min_tokens,token_min_len))
get_stats(input_articles_path, article_min_tokens, token_min_len, (), 0, 1, namespace_prefixes)
logger.info('stats with art_toks>={}, tok_len>={}, stopwords'.format(article_min_tokens,token_min_len))
get_stats(input_articles_path, article_min_tokens, token_min_len, stopwords, 0, 1, namespace_prefixes)
logger.info('stats with art_toks>={}, tok_len>={}, stopwords, df>={}'.format(article_min_tokens,token_min_len,no_below))
get_stats(input_articles_path, article_min_tokens, token_min_len, stopwords, no_below, 1, namespace_prefixes)
logger.info('stats with art_toks>={}, tok_len>={}, stopwords, df>={}, df<={}'.format(article_min_tokens,token_min_len,no_below,no_above))
get_stats(input_articles_path, article_min_tokens, token_min_len, stopwords, no_below, no_above, namespace_prefixes)
if __name__ == '__main__':
main()
| 59.815789 | 251 | 0.75143 |
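The script above leans on gensim's Dictionary and filter_extremes to prune rare and overly frequent tokens before measuring bag-of-words matrix density. A small self-contained sketch of that pruning step on toy data (thresholds are illustrative):

from gensim.corpora import Dictionary

docs = [['apple', 'banana', 'banana'],
        ['apple', 'cherry'],
        ['apple', 'banana', 'durian']]

id2word = Dictionary(docs)
# keep tokens appearing in >= 2 documents and in <= 90% of documents
id2word.filter_extremes(no_below=2, no_above=0.9, keep_n=None)

bows = [id2word.doc2bow(doc) for doc in docs]
nnz = sum(len(bow) for bow in bows)
density = nnz / float(len(docs) * len(id2word))
print(id2word.token2id, density)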
e6077fe9f935bc04c3461860d9b8137d8b019bc5 | 1,522 | py | Python | simdeblur/model/backbone/edvr/dcn/setup.py | ljzycmd/SimDeblur | dd2f60c41176b75c4eaf80d740f547c206aa8227 | [
"MIT"
] | 190 | 2021-03-22T13:59:42.000Z | 2022-03-08T21:14:41.000Z | simdeblur/model/backbone/edvr/dcn/setup.py | Wang-jiahao/SimDeblur | 31d88e1fbec91d5cc9062f4a46538e4ba806ab29 | [
"MIT"
] | 9 | 2021-04-26T06:44:40.000Z | 2022-03-25T07:48:30.000Z | simdeblur/model/backbone/edvr/dcn/setup.py | Wang-jiahao/SimDeblur | 31d88e1fbec91d5cc9062f4a46538e4ba806ab29 | [
"MIT"
] | 27 | 2021-03-23T03:11:00.000Z | 2022-03-19T21:26:02.000Z | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def make_cuda_ext(name, sources, sources_cuda=None):
if sources_cuda is None:
sources_cuda = []
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{name}',
sources=[p for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
if __name__ == '__main__':
ext_modules = [
make_cuda_ext(
name='deform_conv_ext',
sources=['src/deform_conv_ext.cpp'],
sources_cuda=[
'src/deform_conv_cuda.cpp',
'src/deform_conv_cuda_kernel.cu'
]),
]
setup(
name='deform_conv',
version=0.1,
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 26.241379 | 72 | 0.600526 |
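As an alternative to building the extension ahead of time with the setup script above, the same sources can be JIT-compiled through PyTorch's cpp_extension loader. A hedged sketch, assuming the source paths listed above and an available CUDA toolchain:

import torch
from torch.utils.cpp_extension import load

# JIT-compile the deformable-convolution extension from the same sources.
deform_conv_ext = load(
    name='deform_conv_ext',
    sources=['src/deform_conv_ext.cpp',
             'src/deform_conv_cuda.cpp',
             'src/deform_conv_cuda_kernel.cu'],
    verbose=True)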
5b65e47822199e2562c1c3b764af577d65003994 | 15,649 | py | Python | tf2rl/algos/dqn.py | Chenaah/RAMCO | 746424c14b697e2944dde2ba7aae230f60016001 | [
"MIT"
] | 1 | 2020-10-12T23:44:04.000Z | 2020-10-12T23:44:04.000Z | tf2rl/algos/dqn.py | Utschie/tf2rl | cd8b25a8fdccaa581afebc659cee5d2f509cf1f5 | [
"MIT"
] | null | null | null | tf2rl/algos/dqn.py | Utschie/tf2rl | cd8b25a8fdccaa581afebc659cee5d2f509cf1f5 | [
"MIT"
] | 1 | 2022-02-25T08:02:23.000Z | 2022-02-25T08:02:23.000Z | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tf2rl.algos.policy_base import OffPolicyAgent
from tf2rl.networks.noisy_dense import NoisyDense
from tf2rl.envs.atari_wrapper import LazyFrames
from tf2rl.misc.target_update_ops import update_target_variables
from tf2rl.misc.huber_loss import huber_loss
class QFunc(tf.keras.Model):
def __init__(self, state_shape, action_dim, units=(32, 32),
name="QFunc", enable_dueling_dqn=False,
enable_noisy_dqn=False, enable_categorical_dqn=False,
n_atoms=51):
super().__init__(name=name)
self._enable_dueling_dqn = enable_dueling_dqn
self._enable_noisy_dqn = enable_noisy_dqn
self._enable_categorical_dqn = enable_categorical_dqn
if enable_categorical_dqn:
self._action_dim = action_dim
self._n_atoms = n_atoms
action_dim = (action_dim + int(enable_dueling_dqn)) * n_atoms
DenseLayer = NoisyDense if enable_noisy_dqn else Dense
self.l1 = DenseLayer(units[0], name="L1", activation="relu")
self.l2 = DenseLayer(units[1], name="L2", activation="relu")
self.l3 = DenseLayer(action_dim, name="L3", activation="linear")
if enable_dueling_dqn and not enable_categorical_dqn:
self.l4 = DenseLayer(1, name="L3", activation="linear")
with tf.device("/cpu:0"):
self(inputs=tf.constant(np.zeros(shape=(1,)+state_shape,
dtype=np.float32)))
def call(self, inputs):
features = self.l1(inputs)
features = self.l2(features)
if self._enable_categorical_dqn:
features = self.l3(features)
if self._enable_dueling_dqn:
features = tf.reshape(
features, (-1, self._action_dim+1, self._n_atoms)) # [batch_size, action_dim, n_atoms]
v_values = tf.reshape(
features[:, 0], (-1, 1, self._n_atoms))
advantages = tf.reshape(
features[:, 1:], [-1, self._action_dim, self._n_atoms])
features = v_values + (advantages - tf.expand_dims(
tf.reduce_mean(advantages, axis=1), axis=1))
else:
features = tf.reshape(
features, (-1, self._action_dim, self._n_atoms)) # [batch_size, action_dim, n_atoms]
# [batch_size, action_dim, n_atoms]
q_dist = tf.keras.activations.softmax(features, axis=2)
return tf.clip_by_value(q_dist, 1e-8, 1.0-1e-8)
else:
if self._enable_dueling_dqn:
advantages = self.l3(features)
v_values = self.l4(features)
q_values = (v_values
+ (advantages
- tf.reduce_mean(advantages, axis=1, keepdims=True)))
else:
q_values = self.l3(features)
return q_values
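# Note (added commentary, not part of the original code): with dueling enabled,
# the branches above implement Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a'),
# i.e. the advantage stream is centred before being added to the state-value
# stream; the categorical variant applies the same aggregation per atom of the
# value distribution before the softmax over atoms.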
class DQN(OffPolicyAgent):
def __init__(
self,
state_shape,
action_dim,
q_func=None,
name="DQN",
lr=0.001,
units=(32, 32),
epsilon=0.1,
epsilon_min=None,
epsilon_decay_step=int(1e6),
n_warmup=int(1e4),
target_replace_interval=int(5e3),
memory_capacity=int(1e6),
optimizer=None,
enable_double_dqn=False,
enable_dueling_dqn=False,
enable_noisy_dqn=False,
enable_categorical_dqn=False,
**kwargs):
super().__init__(name=name, memory_capacity=memory_capacity, n_warmup=n_warmup, **kwargs)
q_func = q_func if q_func is not None else QFunc
# Define and initialize Q-function network
kwargs_dqn = {
"state_shape": state_shape,
"action_dim": action_dim,
"units": units,
"enable_dueling_dqn": enable_dueling_dqn,
"enable_noisy_dqn": enable_noisy_dqn,
"enable_categorical_dqn": enable_categorical_dqn}
self.q_func = q_func(**kwargs_dqn)
self.q_func_target = q_func(**kwargs_dqn)
self.q_func_optimizer = (optimizer if optimizer is not None else
tf.keras.optimizers.Adam(learning_rate=lr))
update_target_variables(self.q_func_target.weights,
self.q_func.weights, tau=1.)
self._action_dim = action_dim
# This is used to check if input state to `get_action` is multiple (batch) or single
self._state_ndim = np.array(state_shape).shape[0]
# Distributional DQN
if enable_categorical_dqn:
self._v_max, self._v_min = 10., -10.
self._delta_z = (self._v_max - self._v_min) / (self.q_func._n_atoms - 1)
self._z_list = tf.constant(
[self._v_min + i *
self._delta_z for i in range(self.q_func._n_atoms)],
dtype=tf.float32)
self._z_list_broadcasted = tf.tile(
tf.reshape(self._z_list, [1, self.q_func._n_atoms]),
tf.constant([self._action_dim, 1]))
# Set hyper-parameters
if epsilon_min is not None and not enable_noisy_dqn:
assert epsilon > epsilon_min
self.epsilon_min = epsilon_min
self.epsilon_decay_rate = (
epsilon - epsilon_min) / epsilon_decay_step
self.epsilon = max(epsilon - self.epsilon_decay_rate * self.n_warmup,
self.epsilon_min)
else:
epsilon = epsilon if not enable_noisy_dqn else 0.
self.epsilon = epsilon
self.epsilon_min = epsilon
self.epsilon_decay_rate = 0.
self.target_replace_interval = target_replace_interval
self.n_update = 0
# DQN variants
self._enable_double_dqn = enable_double_dqn
self._enable_noisy_dqn = enable_noisy_dqn
self._enable_categorical_dqn = enable_categorical_dqn
def get_action(self, state, test=False, tensor=False):
if isinstance(state, LazyFrames):
state = np.array(state)
if not tensor:
assert isinstance(state, np.ndarray)
is_single_input = state.ndim == self._state_ndim
if not test and np.random.rand() < self.epsilon:
if is_single_input:
action = np.random.randint(self._action_dim)
else:
action = np.array([np.random.randint(self._action_dim)
for _ in range(state.shape[0])], dtype=np.int64)
if tensor:
return tf.convert_to_tensor(action)
else:
return action
state = np.expand_dims(state, axis=0).astype(
np.float32) if is_single_input else state
if self._enable_categorical_dqn:
action = self._get_action_body_distributional(tf.constant(state))
else:
action = self._get_action_body(tf.constant(state))
if tensor:
return action
else:
if is_single_input:
return action.numpy()[0]
else:
return action.numpy()
@tf.function
def _get_action_body(self, state):
q_values = self.q_func(state)
return tf.argmax(q_values, axis=1)
@tf.function
def _get_action_body_distributional(self, state):
action_probs = self.q_func(state)
return tf.argmax(
tf.reduce_sum(action_probs * self._z_list_broadcasted, axis=2),
axis=1)
def train(self, states, actions, next_states, rewards, done, weights=None):
if weights is None:
weights = np.ones_like(rewards)
td_errors, q_func_loss = self._train_body(
states, actions, next_states, rewards, done, weights)
tf.summary.scalar(name=self.policy_name +
"/q_func_Loss", data=q_func_loss)
# TODO: Remove following by using tf.global_step
self.n_update += 1
# Update target networks
if self.n_update % self.target_replace_interval == 0:
update_target_variables(
self.q_func_target.weights, self.q_func.weights, tau=1.)
# Update exploration rate
self.epsilon = max(self.epsilon - self.epsilon_decay_rate * self.update_interval,
self.epsilon_min)
tf.summary.scalar(name=self.policy_name+"/epsilon", data=self.epsilon)
return td_errors
@tf.function
def _train_body(self, states, actions, next_states, rewards, done, weights):
with tf.device(self.device):
with tf.GradientTape() as tape:
if self._enable_categorical_dqn:
td_errors = self._compute_td_error_body_distributional(
states, actions, next_states, rewards, done)
q_func_loss = tf.reduce_mean(
huber_loss(tf.negative(td_errors),
delta=self.max_grad) * weights)
else:
td_errors = self._compute_td_error_body(
states, actions, next_states, rewards, done)
q_func_loss = tf.reduce_mean(
huber_loss(td_errors,
delta=self.max_grad) * weights)
q_func_grad = tape.gradient(
q_func_loss, self.q_func.trainable_variables)
self.q_func_optimizer.apply_gradients(
zip(q_func_grad, self.q_func.trainable_variables))
return td_errors, q_func_loss
def compute_td_error(self, states, actions, next_states, rewards, dones):
# TODO: fix this ugly conversion
if isinstance(actions, tf.Tensor):
actions = tf.expand_dims(actions, axis=1)
rewards = tf.expand_dims(rewards, axis=1)
dones = tf.expand_dims(dones, 1)
if self._enable_categorical_dqn:
return self._compute_td_error_body_distributional(
states, actions, next_states, rewards, dones)
else:
return self._compute_td_error_body(
states, actions, next_states, rewards, dones)
@tf.function
def _compute_td_error_body(self, states, actions, next_states, rewards, dones):
# TODO: Clean code
batch_size = states.shape[0]
not_dones = 1. - tf.cast(dones, dtype=tf.float32)
actions = tf.cast(actions, dtype=tf.int32)
with tf.device(self.device):
indices = tf.concat(
values=[tf.expand_dims(tf.range(batch_size), axis=1),
actions], axis=1)
current_Q = tf.expand_dims(
tf.gather_nd(self.q_func(states), indices), axis=1)
if self._enable_double_dqn:
max_q_indexes = tf.argmax(self.q_func(next_states),
axis=1, output_type=tf.int32)
# TODO: Reuse predefined `indices`
indices = tf.concat(
values=[tf.expand_dims(tf.range(batch_size), axis=1),
tf.expand_dims(max_q_indexes, axis=1)], axis=1)
target_Q = tf.expand_dims(
tf.gather_nd(self.q_func_target(next_states), indices), axis=1)
target_Q = rewards + not_dones * self.discount * target_Q
else:
target_Q = rewards + not_dones * self.discount * tf.reduce_max(
self.q_func_target(next_states), keepdims=True, axis=1)
target_Q = tf.stop_gradient(target_Q)
td_errors = current_Q - target_Q
return td_errors
@tf.function
def _compute_td_error_body_distributional(self, states, actions, next_states, rewards, done):
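        # Categorical (C51-style) distributional TD error. The fixed support z is
        # shifted by the Bellman backup (r + gamma * z for non-terminal steps),
        # clipped to [v_min, v_max], and each shifted atom's probability mass is
        # split between its two neighbouring support indices (floor/ceil of b).
        # The returned value is the log-likelihood of the current distribution
        # under that projected target; _train_body negates it to form the loss.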
actions = tf.cast(actions, dtype=tf.int32)
with tf.device(self.device):
rewards = tf.tile(
tf.reshape(rewards, [-1, 1]),
tf.constant([1, self.q_func._n_atoms])) # [batch_size, n_atoms]
not_done = 1.0 - tf.tile(
tf.reshape(done, [-1, 1]),
tf.constant([1, self.q_func._n_atoms])) # [batch_size, n_atoms]
discounts = tf.cast(
tf.reshape(self.discount, [-1, 1]), tf.float32)
z = tf.reshape(
self._z_list, [1, self.q_func._n_atoms]) # [1, n_atoms]
z = rewards + not_done * discounts * z # [batch_size, n_atoms]
# [batch_size, n_atoms]
z = tf.clip_by_value(z, self._v_min, self._v_max)
b = (z - self._v_min) / self._delta_z # [batch_size, n_atoms]
index_help = tf.expand_dims(
tf.tile(
tf.reshape(tf.range(self.batch_size), [-1, 1]),
tf.constant([1, self.q_func._n_atoms])),
-1) # [batch_size, n_atoms, 1]
u, l = tf.math.ceil(b), tf.math.floor(b) # [batch_size, n_atoms]
u_id = tf.concat(
[index_help, tf.expand_dims(tf.cast(u, tf.int32), -1)],
axis=2) # [batch_size, n_atoms, 2]
l_id = tf.concat(
[index_help, tf.expand_dims(tf.cast(l, tf.int32), -1)],
axis=2) # [batch_size, n_atoms, 2]
target_Q_next_dist = self.q_func_target(
next_states) # [batch_size, n_action, n_atoms]
if self._enable_double_dqn:
# TODO: Check this implementation is correct
target_Q_next_dist = tf.gather_nd(
target_Q_next_dist,
tf.concat(
[tf.reshape(tf.range(self.batch_size), [-1, 1]),
tf.reshape(actions, [-1, 1])],
axis=1))
else:
target_Q_next_sum = tf.reduce_sum(
target_Q_next_dist * self._z_list_broadcasted, axis=2) # [batch_size, n_action]
actions_by_target_Q = tf.cast(
tf.argmax(target_Q_next_sum, axis=1),
tf.int32) # [batch_size,]
target_Q_next_dist = tf.gather_nd(
target_Q_next_dist,
tf.concat(
[tf.reshape(tf.range(self.batch_size), [-1, 1]),
tf.reshape(actions_by_target_Q, [-1, 1])],
axis=1)) # [batch_size, n_atoms]
action_indices = tf.concat(
values=[tf.expand_dims(tf.range(self.batch_size), axis=1),
actions], axis=1)
current_Q_dist = tf.gather_nd(
self.q_func(states), action_indices) # [batch_size, n_atoms]
td_errors = tf.reduce_sum(
target_Q_next_dist * (u - b) * tf.math.log(
tf.gather_nd(current_Q_dist, l_id)) +
target_Q_next_dist * (b - l) * tf.math.log(
tf.gather_nd(current_Q_dist, u_id)),
axis=1)
return td_errors
@staticmethod
def get_argument(parser=None):
parser = OffPolicyAgent.get_argument(parser)
parser.add_argument('--enable-double-dqn', action='store_true')
parser.add_argument('--enable-dueling-dqn', action='store_true')
parser.add_argument('--enable-categorical-dqn', action='store_true')
parser.add_argument('--enable-noisy-dqn', action='store_true')
return parser
| 43.590529 | 107 | 0.57026 |
18ec92d318b53fba68ee3216e0f5fc73ea3e89f5 | 1,694 | py | Python | cls_pdb_atom_pair_distance.py | naotohori/cafysis | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | 2 | 2022-02-25T17:32:41.000Z | 2022-03-31T14:38:55.000Z | cls_pdb_atom_pair_distance.py | naotohori/cafysis | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | 2 | 2020-05-03T08:36:10.000Z | 2021-01-27T12:40:50.000Z | cls_pdb_atom_pair_distance.py | naotohori/life-of-py | 9d8534121c01ea75ae965cf39a1e307052ff8523 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
from file_pdb import PdbFile
if len(sys.argv) != 5 :
print ('\n Usage: SCRIPT [list file] [input PDB1] [input PDB2] [output]\n')
sys.exit(2)
f_list = open(sys.argv[1],'r')
f_pdb = PdbFile(sys.argv[2])
f_pdb.open_to_read()
chains1 = f_pdb.read_all()
f_pdb.close()
f_pdb = PdbFile(sys.argv[3])
f_pdb.open_to_read()
chains2 = f_pdb.read_all()
f_pdb.close()
f_out = open(sys.argv[4], 'w')
if len(chains1) != len(chains2) :
print(("Error: len(chains1)(=%i) != len(chains2)(=%i)" %(len(chains1), len(chains2))))
sys.exit(2)
# !!! current version is for only single chain !!!
if len(chains1) != 1 or len(chains2) != 1:
print ("Error: len(chains1) != 1 or len(chains2) != 1" )
sys.exit(2)
res1_to_res2 = {}
for line in f_list :
if line.find('#') != -1 :
continue
linesp = line.split()
ires1 = int(linesp[0])
ires2 = int(linesp[1])
res1_to_res2[ires1] = ires2
c1 = chains1[0]
c2 = chains2[0]
data = ()
for ir,r1 in enumerate(c1.residues):
ires1 = ir + 1
if not ires1 in res1_to_res2 :
continue
ires2 = res1_to_res2[ires1]
r2 = c2.residues[ires2-1]
for a1 in r1.atoms:
for a2 in r2.atoms :
if a1.name in (' S ', ' P ') or a2.name in (' S ', ' P ') :
if a1.name == a2.name :
distance = a1.xyz.distance(a2.xyz)
f_out.write('%5i %6i %5i %6i %6.2f\n' % (ires1, a1.serial, ires2, a2.serial, distance))
else :
distance = a1.xyz.distance(a2.xyz)
f_out.write('%5i %6i %5i %6i %6.2f\n' % (ires1, a1.serial, ires2, a2.serial, distance))
f_out.close() | 26.888889 | 107 | 0.569067 |
a3308e66598a352844d9f4c82794cfa6c353574a | 5,456 | py | Python | setup.py | cmeyer/nionui-tool | 78d699308abda509d19da4b4be8365f6d55513b0 | [
"Apache-2.0"
] | null | null | null | setup.py | cmeyer/nionui-tool | 78d699308abda509d19da4b4be8365f6d55513b0 | [
"Apache-2.0"
] | null | null | null | setup.py | cmeyer/nionui-tool | 78d699308abda509d19da4b4be8365f6d55513b0 | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import setuptools
import sys
tool_id = "nionui"
version = "0.4.7"
launcher = "NionUILauncher"
def package_files(directory, prefix, prefix_drop):
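    # Walk `directory` and group every regular (non-symlink) file by its
    # destination directory, remapping the first `prefix_drop` path components
    # onto `prefix`. The result has the shape setuptools expects for data_files:
    # a list of (destination_dir, [source files]) pairs.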
# note: Windows setup does not work with Path
prefixes = dict()
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
full_path = pathlib.Path(path) / filename
if not os.path.islink(str(full_path)):
dest_path = pathlib.Path(prefix) / pathlib.Path(*pathlib.Path(path).parts[prefix_drop:])
prefixes.setdefault(str(dest_path), list()).append(str(pathlib.Path(path) / filename))
return list(prefixes.items())
class BinaryDistribution(setuptools.Distribution):
# force abi+platform in whl
def has_data_files(self):
return True
def has_ext_modules(self):
return True
from distutils.util import get_platform
from wheel.bdist_wheel import bdist_wheel as bdist_wheel_
from packaging import tags
from wheel.bdist_wheel import get_abi_tag, get_platform
# the bdist_wheel tools are awful and undocumented
# much of the techniques in this file were from other libraries and reading the source
# the wheel code is a separate project from setuptools
# see https://github.com/nion-software/nionui-launcher/releases
# see https://fredrikaverpil.github.io/2018/03/09/official-pyside2-wheels/
# see https://pypi.org/project/PySide2/#files
# see https://github.com/pypa/wheel
# see https://github.com/pypa/setuptools
# see https://github.com/pypa/wheel/issues/161
# see http://code.qt.io/cgit/pyside/pyside-setup.git/tree/build_scripts/wheel_override.py?id=824b7733c0bd8b162b198c67014d7f008fb71b8c
# this class overrides some methods of bdist_wheel to avoid its stricter tag checks.
class bdist_wheel(bdist_wheel_):
def run(self):
bdist_wheel_.run(self)
def finalize_options(self):
bdist_wheel_.finalize_options(self)
self.universal = True
self.plat_name_supplied = True
global platform, python_version, abi
self.plat_name = platform
self.py_limited_api = python_version
self.abi_tag = abi
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
# macosx contains system version in platform name so need special handle
if self.plat_name and not self.plat_name.startswith("macosx"):
plat_name = self.plat_name
else:
plat_name = get_platform(self.bdist_dir)
if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
plat_name = 'linux_i686'
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = tags.interpreter_name()
impl_ver = tags.interpreter_version()
impl = impl_name + impl_ver
# We don't work on CPython 3.1, 3.0.
if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
impl = self.py_limited_api
abi_tag = 'abi3'
else:
abi_tag = str(get_abi_tag()).lower()
abi_tag = self.abi_tag
tag = (impl, abi_tag, plat_name)
supported_tags = [(t.interpreter, t.abi, t.platform) for t in tags.sys_tags()]
# XXX switch to this alternate implementation for non-pure:
if not self.py_limited_api:
assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
# assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
return tag
platform = None
python_version = None
abi = None
dest = None
dir_path = None
dest_drop = None
if sys.platform == "darwin":
platform = "macosx_10_11_intel"
python_version = "cp37.cp38"
abi = "abi3"
dest = "bin"
dir_path = "launcher/build/Release"
dest_drop = 3
if sys.platform == "win32":
platform = "win_amd64"
python_version = "cp37.cp38"
abi = "none"
dest = f"Scripts/{launcher}"
dir_path = "launcher/x64/Release"
dest_drop = 3
if sys.platform == "linux":
platform = "manylinux1_x86_64"
python_version = "cp37.cp38"
abi = "abi3"
dest = f"bin/{launcher}"
dir_path = "launcher/linux/x64"
dest_drop = 3
data_files = package_files(dir_path, dest, dest_drop)
setuptools.setup(
name=f"{tool_id}-tool",
version=version,
packages=[f"nion.{tool_id}_tool"],
url=f"https://github.com/nion-software/{tool_id}-tool",
license='Apache-2.0',
author='Nion Software Team',
author_email='software@nion.com',
description='Python command line access to Nion UI Launcher',
entry_points={
'console_scripts': [
f"{tool_id}-tool=nion.{tool_id}_tool.command:main",
],
},
data_files=data_files,
distclass=BinaryDistribution,
cmdclass={'bdist_wheel': bdist_wheel},
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
verbose=True,
)
| 33.679012 | 133 | 0.648644 |
bbc483ced6ed16060a6581162c75420459d4e6f0 | 5,633 | py | Python | src/claimtrie/libclaimtrie_test.py | FihlaTV/lbrycrd | c54af21ce2758490ae3f8340c73cf0f2401801cc | [
"MIT"
] | 1 | 2020-03-30T14:27:31.000Z | 2020-03-30T14:27:31.000Z | src/claimtrie/libclaimtrie_test.py | FihlaTV/lbrycrd | c54af21ce2758490ae3f8340c73cf0f2401801cc | [
"MIT"
] | null | null | null | src/claimtrie/libclaimtrie_test.py | FihlaTV/lbrycrd | c54af21ce2758490ae3f8340c73cf0f2401801cc | [
"MIT"
] | null | null | null |
from libclaimtrie import *
import unittest
class CacheIterateCallback(CIterateCallback):
def __init__(self, names):
CIterateCallback.__init__(self)
self.names = names
def apply(self, name):
assert(name in self.names), "Incorrect trie names"
class TestClaimTrieTypes(unittest.TestCase):
def setUp(self):
self.uint256s = "1234567890987654321012345678909876543210123456789098765432101234"
self.uint160 = uint160S("1234567890987654321012345678909876543210")
self.uint = uint256S(self.uint256s)
self.txp = COutPoint(self.uint, 1)
def assertClaimEqual(self, claim, txo, cid, amount, effe, height, validHeight, msg):
self.assertEqual(claim.outPoint, txo, msg)
self.assertEqual(claim.claimId, cid, msg)
self.assertEqual(claim.nAmount, amount, msg)
self.assertEqual(claim.nEffectiveAmount, effe, msg)
self.assertEqual(claim.nHeight, height, msg)
self.assertEqual(claim.nValidAtHeight, validHeight, msg)
def assertSupportEqual(self, support, txo, cid, amount, height, validHeight, msg):
self.assertEqual(support.outPoint, txo, msg)
self.assertEqual(support.supportedClaimId, cid, msg)
self.assertEqual(support.nAmount, amount, msg)
self.assertEqual(support.nHeight, height, msg)
self.assertEqual(support.nValidAtHeight, validHeight, msg)
def test_uint256(self):
uint = self.uint
self.assertFalse(uint.IsNull(), "incorrect uint256S or CBaseBlob::IsNull")
self.assertEqual(uint.GetHex(), self.uint256s, "incorrect CBaseBlob::GetHex")
self.assertEqual(uint.GetHex(), uint.ToString(), "incorrect CBaseBlob::ToString")
self.assertEqual(uint.size(), 32, "incorrect CBaseBlob::size")
copy = uint256()
self.assertNotEqual(copy, uint, "incorrect CBaseBlob::operator!=")
self.assertTrue(copy.IsNull(), "incorrect CBaseBlob::IsNull")
copy = uint256(uint)
self.assertEqual(copy, uint, "incorrect CBaseBlob::operator==")
copy.SetNull()
self.assertTrue(copy.IsNull()), "incorrect CBaseBlob::SetNull"
def test_txoupoint(self):
txp = self.txp
uint = self.uint
self.assertEqual(txp.hash, uint, "incorrect COutPoint::COutPoint")
self.assertEqual(txp.n, 1, "incorrect COutPoint::COutPoint")
self.assertFalse(txp.IsNull(), "incorrect COutPoint::IsNull")
pcopy = COutPoint()
self.assertTrue(pcopy.IsNull(), "incorrect COutPoint::IsNull")
self.assertEqual(pcopy.hash, uint256(), "incorrect COutPoint::COutPoint")
self.assertNotEqual(pcopy, txp, "incorrect COutPoint::operator!=")
self.assertIn(uint.ToString()[:10], txp.ToString(), "incorrect COutPoint::ToString")
def test_claim(self):
txp = self.txp
uint160 = self.uint160
self.assertEqual(uint160.size(), 20, "incorrect CBaseBlob::size")
claim = CClaimValue(txp, uint160, 20, 1, 10)
self.assertClaimEqual(claim, txp, uint160, 20, 20, 1, 10, "incorrect CClaimValue::CClaimValue")
def test_support(self):
txp = self.txp
uint160 = self.uint160
claim = CClaimValue(txp, uint160, 20, 1, 10)
support = CSupportValue(txp, uint160, 20, 1, 10)
self.assertSupportEqual(support, claim.outPoint, claim.claimId, claim.nAmount, claim.nHeight, claim.nValidAtHeight, "incorrect CSupportValue::CSupportValue")
def test_claimtrie(self):
txp = self.txp
uint160 = self.uint160
claim = CClaimValue(txp, uint160, 20, 0, 0)
wipe = True; height = 1; data_dir = "."
trie = CClaimTrie(10*1024*1024, wipe, height, data_dir)
cache = CClaimTrieCache(trie)
self.assertTrue(trie.empty(), "incorrect CClaimtrieCache::empty")
self.assertTrue(cache.addClaim("test", txp, uint160, 20, 0, 0), "incorrect CClaimtrieCache::addClaim")
self.assertTrue(cache.haveClaim("test", txp), "incorrect CClaimtrieCache::haveClaim")
self.assertEqual(cache.getTotalNamesInTrie(), 1, "incorrect CClaimtrieCache::getTotalNamesInTrie")
self.assertEqual(cache.getTotalClaimsInTrie(), 1, "incorrect CClaimtrieCache::getTotalClaimsInTrie")
getNamesInTrie(cache, CacheIterateCallback(["test"]))
nValidAtHeight = -1
# add second claim
txp.n = 2
uint1601 = uint160S("1234567890987654321012345678909876543211")
self.assertTrue(cache.addClaim("test", txp, uint1601, 20, 1, 1), "incorrect CClaimtrieCache::addClaim")
result, nValidAtHeight = cache.haveClaimInQueue("test", txp)
self.assertTrue(result, "incorrect CClaimTrieCache::haveClaimInQueue")
self.assertEqual(nValidAtHeight, 1, "incorrect CClaimTrieCache::haveClaimInQueue, nValidAtHeight")
claim1 = CClaimValue()
self.assertTrue(cache.getInfoForName("test", claim1), "incorrect CClaimTrieCache::getInfoForName")
self.assertEqual(claim, claim1, "incorrect CClaimtrieCache::getInfoForName")
proof = CClaimTrieProof()
self.assertTrue(cache.getProofForName("test", uint160, proof), "incorrect CacheProofCallback")
self.assertTrue(proof.hasValue, "incorrect CClaimTrieCache::getProofForName")
claimsToName = cache.getClaimsForName("test")
claims = claimsToName.claimsNsupports
self.assertEqual(claims.size(), 2, "incorrect CClaimTrieCache::getClaimsForName")
self.assertFalse(claims[0].IsNull(), "incorrect CClaimNsupports::IsNull")
self.assertFalse(claims[1].IsNull(), "incorrect CClaimNsupports::IsNull")
unittest.main()
| 51.678899 | 165 | 0.690396 |
9c93eb9a7c7e794d6818a4dc8e3cda0709235706 | 16,094 | py | Python | tests/components/honeywell/test_climate.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 3 | 2020-01-21T18:09:09.000Z | 2022-01-17T08:06:03.000Z | tests/components/honeywell/test_climate.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 39 | 2016-12-16T12:40:34.000Z | 2017-02-13T17:53:42.000Z | tests/components/honeywell/test_climate.py | petewill/home-assistant | 5859dba4344f05fb8774aa1207e47ac28f627a67 | [
"Apache-2.0"
] | 6 | 2020-04-10T06:21:11.000Z | 2021-07-01T08:53:38.000Z | """Tests for the Honeywell thermostat module."""
import unittest
from unittest import mock
import voluptuous as vol
import requests.exceptions
import somecomfort
import pytest
from homeassistant.const import (
CONF_USERNAME,
CONF_PASSWORD,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_MODES,
)
import homeassistant.components.honeywell.climate as honeywell
pytestmark = pytest.mark.skip("Need to be fixed!")
class TestHoneywell(unittest.TestCase):
"""A test class for Honeywell themostats."""
@mock.patch("somecomfort.SomeComfort")
@mock.patch("homeassistant.components.honeywell." "climate.HoneywellUSThermostat")
def test_setup_us(self, mock_ht, mock_sc):
"""Test for the US setup."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
}
bad_pass_config = {CONF_USERNAME: "user", honeywell.CONF_REGION: "us"}
bad_region_config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "un",
}
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(None)
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA({})
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(bad_pass_config)
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(bad_region_config)
hass = mock.MagicMock()
add_entities = mock.MagicMock()
locations = [mock.MagicMock(), mock.MagicMock()]
devices_1 = [mock.MagicMock()]
devices_2 = [mock.MagicMock(), mock.MagicMock]
mock_sc.return_value.locations_by_id.values.return_value = locations
locations[0].devices_by_id.values.return_value = devices_1
locations[1].devices_by_id.values.return_value = devices_2
result = honeywell.setup_platform(hass, config, add_entities)
assert result
assert mock_sc.call_count == 1
assert mock_sc.call_args == mock.call("user", "pass")
mock_ht.assert_has_calls(
[
mock.call(mock_sc.return_value, devices_1[0], 18, 28, "user", "pass"),
mock.call(mock_sc.return_value, devices_2[0], 18, 28, "user", "pass"),
mock.call(mock_sc.return_value, devices_2[1], 18, 28, "user", "pass"),
]
)
@mock.patch("somecomfort.SomeComfort")
def test_setup_us_failures(self, mock_sc):
"""Test the US setup."""
hass = mock.MagicMock()
add_entities = mock.MagicMock()
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
}
mock_sc.side_effect = somecomfort.AuthError
result = honeywell.setup_platform(hass, config, add_entities)
assert not result
assert not add_entities.called
mock_sc.side_effect = somecomfort.SomeComfortError
result = honeywell.setup_platform(hass, config, add_entities)
assert not result
assert not add_entities.called
@mock.patch("somecomfort.SomeComfort")
@mock.patch("homeassistant.components.honeywell." "climate.HoneywellUSThermostat")
def _test_us_filtered_devices(self, mock_ht, mock_sc, loc=None, dev=None):
"""Test for US filtered thermostats."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "us",
"location": loc,
"thermostat": dev,
}
locations = {
1: mock.MagicMock(
locationid=mock.sentinel.loc1,
devices_by_id={
11: mock.MagicMock(deviceid=mock.sentinel.loc1dev1),
12: mock.MagicMock(deviceid=mock.sentinel.loc1dev2),
},
),
2: mock.MagicMock(
locationid=mock.sentinel.loc2,
devices_by_id={21: mock.MagicMock(deviceid=mock.sentinel.loc2dev1)},
),
3: mock.MagicMock(
locationid=mock.sentinel.loc3,
devices_by_id={31: mock.MagicMock(deviceid=mock.sentinel.loc3dev1)},
),
}
mock_sc.return_value = mock.MagicMock(locations_by_id=locations)
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities) is True
return mock_ht.call_args_list, mock_sc
def test_us_filtered_thermostat_1(self):
"""Test for US filtered thermostats."""
result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc1dev1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc1dev1] == devices
def test_us_filtered_thermostat_2(self):
"""Test for US filtered location."""
result, client = self._test_us_filtered_devices(dev=mock.sentinel.loc2dev1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc2dev1] == devices
def test_us_filtered_location_1(self):
"""Test for US filtered locations."""
result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc1)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc1dev1, mock.sentinel.loc1dev2] == devices
def test_us_filtered_location_2(self):
"""Test for US filtered locations."""
result, client = self._test_us_filtered_devices(loc=mock.sentinel.loc2)
devices = [x[0][1].deviceid for x in result]
assert [mock.sentinel.loc2dev1] == devices
@mock.patch("evohomeclient.EvohomeClient")
@mock.patch("homeassistant.components.honeywell.climate." "HoneywellUSThermostat")
def test_eu_setup_full_config(self, mock_round, mock_evo):
"""Test the EU setup with complete configuration."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.return_value = [{"id": "foo"}, {"id": "bar"}]
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities)
assert mock_evo.call_count == 1
assert mock_evo.call_args == mock.call("user", "pass")
assert mock_evo.return_value.temperatures.call_count == 1
assert mock_evo.return_value.temperatures.call_args == mock.call(
force_refresh=True
)
mock_round.assert_has_calls(
[
mock.call(mock_evo.return_value, "foo", True, 20.0),
mock.call(mock_evo.return_value, "bar", False, 20.0),
]
)
assert 2 == add_entities.call_count
@mock.patch("evohomeclient.EvohomeClient")
@mock.patch("homeassistant.components.honeywell.climate." "HoneywellUSThermostat")
def test_eu_setup_partial_config(self, mock_round, mock_evo):
"""Test the EU setup with partial configuration."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.return_value = [{"id": "foo"}, {"id": "bar"}]
hass = mock.MagicMock()
add_entities = mock.MagicMock()
assert honeywell.setup_platform(hass, config, add_entities)
mock_round.assert_has_calls(
[
mock.call(mock_evo.return_value, "foo", True, 16),
mock.call(mock_evo.return_value, "bar", False, 16),
]
)
@mock.patch("evohomeclient.EvohomeClient")
@mock.patch("homeassistant.components.honeywell.climate." "HoneywellUSThermostat")
def test_eu_setup_bad_temp(self, mock_round, mock_evo):
"""Test the EU setup with invalid temperature."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
with pytest.raises(vol.Invalid):
honeywell.PLATFORM_SCHEMA(config)
@mock.patch("evohomeclient.EvohomeClient")
@mock.patch("homeassistant.components.honeywell.climate." "HoneywellUSThermostat")
def test_eu_setup_error(self, mock_round, mock_evo):
"""Test the EU setup with errors."""
config = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pass",
honeywell.CONF_REGION: "eu",
}
mock_evo.return_value.temperatures.side_effect = (
requests.exceptions.RequestException
)
add_entities = mock.MagicMock()
hass = mock.MagicMock()
assert not honeywell.setup_platform(hass, config, add_entities)
class TestHoneywellRound(unittest.TestCase):
"""A test class for Honeywell Round thermostats."""
def setup_method(self, method):
"""Test the setup method."""
def fake_temperatures(force_refresh=None):
"""Create fake temperatures."""
temps = [
{
"id": "1",
"temp": 20,
"setpoint": 21,
"thermostat": "main",
"name": "House",
},
{
"id": "2",
"temp": 21,
"setpoint": 22,
"thermostat": "DOMESTIC_HOT_WATER",
},
]
return temps
self.device = mock.MagicMock()
self.device.temperatures.side_effect = fake_temperatures
self.round1 = honeywell.RoundThermostat(self.device, "1", True, 16)
self.round1.update()
self.round2 = honeywell.RoundThermostat(self.device, "2", False, 17)
self.round2.update()
def test_attributes(self):
"""Test the attributes."""
assert "House" == self.round1.name
assert TEMP_CELSIUS == self.round1.temperature_unit
assert 20 == self.round1.current_temperature
assert 21 == self.round1.target_temperature
assert not self.round1.is_away_mode_on
assert "Hot Water" == self.round2.name
assert TEMP_CELSIUS == self.round2.temperature_unit
assert 21 == self.round2.current_temperature
assert self.round2.target_temperature is None
assert not self.round2.is_away_mode_on
def test_away_mode(self):
"""Test setting the away mode."""
assert not self.round1.is_away_mode_on
self.round1.turn_away_mode_on()
assert self.round1.is_away_mode_on
assert self.device.set_temperature.call_count == 1
assert self.device.set_temperature.call_args == mock.call("House", 16)
self.device.set_temperature.reset_mock()
self.round1.turn_away_mode_off()
assert not self.round1.is_away_mode_on
assert self.device.cancel_temp_override.call_count == 1
assert self.device.cancel_temp_override.call_args == mock.call("House")
def test_set_temperature(self):
"""Test setting the temperature."""
self.round1.set_temperature(temperature=25)
assert self.device.set_temperature.call_count == 1
assert self.device.set_temperature.call_args == mock.call("House", 25)
def test_set_hvac_mode(self) -> None:
"""Test setting the system operation."""
self.round1.set_hvac_mode("cool")
assert "cool" == self.round1.current_operation
assert "cool" == self.device.system_mode
self.round1.set_hvac_mode("heat")
assert "heat" == self.round1.current_operation
assert "heat" == self.device.system_mode
class TestHoneywellUS(unittest.TestCase):
"""A test class for Honeywell US thermostats."""
def setup_method(self, method):
"""Test the setup method."""
self.client = mock.MagicMock()
self.device = mock.MagicMock()
self.cool_away_temp = 18
self.heat_away_temp = 28
self.honeywell = honeywell.HoneywellUSThermostat(
self.client,
self.device,
self.cool_away_temp,
self.heat_away_temp,
"user",
"password",
)
self.device.fan_running = True
self.device.name = "test"
self.device.temperature_unit = "F"
self.device.current_temperature = 72
self.device.setpoint_cool = 78
self.device.setpoint_heat = 65
self.device.system_mode = "heat"
self.device.fan_mode = "auto"
def test_properties(self):
"""Test the properties."""
assert self.honeywell.is_fan_on
assert "test" == self.honeywell.name
assert 72 == self.honeywell.current_temperature
def test_unit_of_measurement(self):
"""Test the unit of measurement."""
assert TEMP_FAHRENHEIT == self.honeywell.temperature_unit
self.device.temperature_unit = "C"
assert TEMP_CELSIUS == self.honeywell.temperature_unit
def test_target_temp(self):
"""Test the target temperature."""
assert 65 == self.honeywell.target_temperature
self.device.system_mode = "cool"
assert 78 == self.honeywell.target_temperature
def test_set_temp(self):
"""Test setting the temperature."""
self.honeywell.set_temperature(temperature=70)
assert 70 == self.device.setpoint_heat
assert 70 == self.honeywell.target_temperature
self.device.system_mode = "cool"
assert 78 == self.honeywell.target_temperature
self.honeywell.set_temperature(temperature=74)
assert 74 == self.device.setpoint_cool
assert 74 == self.honeywell.target_temperature
def test_set_hvac_mode(self) -> None:
"""Test setting the operation mode."""
self.honeywell.set_hvac_mode("cool")
assert "cool" == self.device.system_mode
self.honeywell.set_hvac_mode("heat")
assert "heat" == self.device.system_mode
def test_set_temp_fail(self):
"""Test if setting the temperature fails."""
self.device.setpoint_heat = mock.MagicMock(
side_effect=somecomfort.SomeComfortError
)
self.honeywell.set_temperature(temperature=123)
def test_attributes(self):
"""Test the attributes."""
expected = {
honeywell.ATTR_FAN: "running",
ATTR_FAN_MODE: "auto",
ATTR_FAN_MODES: somecomfort.FAN_MODES,
ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,
}
assert expected == self.honeywell.device_state_attributes
expected["fan"] = "idle"
self.device.fan_running = False
assert expected == self.honeywell.device_state_attributes
def test_with_no_fan(self):
"""Test if there is on fan."""
self.device.fan_running = False
self.device.fan_mode = None
expected = {
honeywell.ATTR_FAN: "idle",
ATTR_FAN_MODE: None,
ATTR_FAN_MODES: somecomfort.FAN_MODES,
ATTR_HVAC_MODES: somecomfort.SYSTEM_MODES,
}
assert expected == self.honeywell.device_state_attributes
def test_heat_away_mode(self):
"""Test setting the heat away mode."""
self.honeywell.set_hvac_mode("heat")
assert not self.honeywell.is_away_mode_on
self.honeywell.turn_away_mode_on()
assert self.honeywell.is_away_mode_on
assert self.device.setpoint_heat == self.heat_away_temp
assert self.device.hold_heat is True
self.honeywell.turn_away_mode_off()
assert not self.honeywell.is_away_mode_on
assert self.device.hold_heat is False
@mock.patch("somecomfort.SomeComfort")
def test_retry(self, test_somecomfort):
"""Test retry connection."""
old_device = self.honeywell._device
self.honeywell._retry()
assert self.honeywell._device == old_device
| 37.168591 | 88 | 0.627315 |
4fa6b14fb28e16d7a94d28fde77a13af788ca8f1 | 404 | py | Python | tests/test_config/test_prep.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 26 | 2019-03-04T20:08:57.000Z | 2022-01-22T13:40:00.000Z | tests/test_config/test_prep.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 379 | 2019-03-03T12:16:05.000Z | 2022-03-29T13:44:46.000Z | tests/test_config/test_prep.py | jspaaks/vak | 581ec4869d342e5d52bc057de54c10901f06d343 | [
"BSD-3-Clause"
] | 12 | 2019-11-22T21:19:19.000Z | 2022-03-14T17:44:59.000Z | """tests for vak.config.prep module"""
import vak.config.prep
def test_parse_prep_config_returns_PrepConfig_instance(
all_generated_configs_toml_path_pairs,
):
for config_toml, toml_path in all_generated_configs_toml_path_pairs:
prep_section = config_toml["PREP"]
config = vak.config.prep.PrepConfig(**prep_section)
assert isinstance(config, vak.config.prep.PrepConfig)
| 33.666667 | 72 | 0.769802 |
c9f5e2236d8b8d17954946a3912b5e07c6081bac | 5,891 | py | Python | contrib/seeds/makeseeds.py | minblock/Bindcoin | 6b36875dcddd81aa2d0641d33bda74902ec8f694 | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | minblock/Bindcoin | 6b36875dcddd81aa2d0641d33bda74902ec8f694 | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | minblock/Bindcoin | 6b36875dcddd81aa2d0641d33bda74902ec8f694 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/IfyouarereadingthisitstoolatecoinCore:0.14.(0|1|2|99)/|/IfyouarereadingthisitstoolatecoinCore:0.15.(0|1|2|99)/|/IfyouarereadingthisitstoolatecoinCore:0.16.(0|1|2|99)/|/IfyouarereadingthisitstoolatecoinCore:0.17.(0|1|2|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 34.052023 | 258 | 0.57817 |
7ac29181c2f59380e240903a896d5c9e82fd1857 | 593 | py | Python | frontend/debug.py | Hanabiraa/Boolean-function-interpreter | 6704cff478cafe8f7e659e18ce988b2cf4358ccb | [
"MIT"
] | 1 | 2021-10-23T00:31:09.000Z | 2021-10-23T00:31:09.000Z | frontend/debug.py | Hanabiraa/Boolean-function-interpreter | 6704cff478cafe8f7e659e18ce988b2cf4358ccb | [
"MIT"
] | null | null | null | frontend/debug.py | Hanabiraa/Boolean-function-interpreter | 6704cff478cafe8f7e659e18ce988b2cf4358ccb | [
"MIT"
] | null | null | null | def debug_func(raw_expr=None, word_tokens=None,
semantic_tokens_lst=None, object_tokens_lst=None):
"""
output all variables, which used for create AST
:param raw_expr: str
:param word_tokens: [[str]]
    :param semantic_tokens_lst: [[str]]
    :param object_tokens_lst: [[TokenType]]
:return None
"""
print('your expr: {}'.format(raw_expr),
'simple tokens: {}'.format(word_tokens),
'semantic tokens list: {}'.format(semantic_tokens_lst),
'object tokens list: {}'.format(object_tokens_lst),
sep='\n\n', end='\n')
| 34.882353 | 65 | 0.623946 |
85b9dcde44b201501e3d20be34b51984b86d13c5 | 22,777 | py | Python | api/tools/icd/idcardocr.py | kiddestiny/Darks | 4727004a0e42728ded9bed015b37990a2f34a782 | [
"MIT"
] | null | null | null | api/tools/icd/idcardocr.py | kiddestiny/Darks | 4727004a0e42728ded9bed015b37990a2f34a782 | [
"MIT"
] | null | null | null | api/tools/icd/idcardocr.py | kiddestiny/Darks | 4727004a0e42728ded9bed015b37990a2f34a782 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from PIL import Image
import pytesseract
import cv2
import numpy as np
import re
from multiprocessing import Pool, Queue, Lock, Process, freeze_support
import time
#pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'
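# Scale factor: the original *_mask.jpg label templates correspond to a
# 3840-pixel-wide scan; x rescales both the masks and the input image to a
# 1280-pixel working width so that template matching stays aligned.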
x = 1280.00 / 3840.00
pixel_x = int(x * 3840)
print(x, pixel_x)
# mode 0: recognize name, date of birth and ID number; mode 1: recognize all fields
def idcardocr(imgname, mode=1):
print(u'进入身份证光学识别流程...')
if mode==1:
# generate_mask(x)
img_data_gray, img_org = img_resize_gray(imgname)
result_dict = dict()
name_pic = find_name(img_data_gray, img_org)
# showimg(name_pic)
# print 'name'
result_dict['name'] = get_name(name_pic)
# print result_dict['name']
sex_pic = find_sex(img_data_gray, img_org)
# showimg(sex_pic)
# print 'sex'
result_dict['sex'] = get_sex(sex_pic)
# print result_dict['sex']
nation_pic = find_nation(img_data_gray, img_org)
# showimg(nation_pic)
# print 'nation'
result_dict['nation'] = get_nation(nation_pic)
# print result_dict['nation']
address_pic = find_address(img_data_gray, img_org)
# showimg(address_pic)
# print 'address'
result_dict['address'] = get_address(address_pic)
# print result_dict['address']
idnum_pic = find_idnum(img_data_gray, img_org)
# showimg(idnum_pic)
# print 'idnum'
result_dict['idnum'], result_dict['birth'] = get_idnum_and_birth(idnum_pic)
# print result_dict['idnum']
elif mode==0:
# generate_mask(x)
img_data_gray, img_org = img_resize_gray(imgname)
result_dict = dict()
name_pic = find_name(img_data_gray, img_org)
# showimg(name_pic)
# print 'name'
result_dict['name'] = get_name(name_pic)
# print result_dict['name']
idnum_pic = find_idnum(img_data_gray, img_org)
# showimg(idnum_pic)
# print 'idnum'
result_dict['idnum'], result_dict['birth'] = get_idnum_and_birth(idnum_pic)
result_dict['sex']=''
result_dict['nation']=''
result_dict['address']=''
else:
print(u"模式选择错误!")
#showimg(img_data_gray)
return result_dict
def generate_mask(x):
name_mask_pic = cv2.UMat(cv2.imread('name_mask.jpg'))
sex_mask_pic = cv2.UMat(cv2.imread('sex_mask.jpg'))
nation_mask_pic = cv2.UMat(cv2.imread('nation_mask.jpg'))
birth_mask_pic = cv2.UMat(cv2.imread('birth_mask.jpg'))
year_mask_pic = cv2.UMat(cv2.imread('year_mask.jpg'))
month_mask_pic = cv2.UMat(cv2.imread('month_mask.jpg'))
day_mask_pic = cv2.UMat(cv2.imread('day_mask.jpg'))
address_mask_pic = cv2.UMat(cv2.imread('address_mask.jpg'))
idnum_mask_pic = cv2.UMat(cv2.imread('idnum_mask.jpg'))
name_mask_pic = img_resize_x(name_mask_pic)
sex_mask_pic = img_resize_x(sex_mask_pic)
nation_mask_pic = img_resize_x(nation_mask_pic)
birth_mask_pic = img_resize_x(birth_mask_pic)
year_mask_pic = img_resize_x(year_mask_pic)
month_mask_pic = img_resize_x(month_mask_pic)
day_mask_pic = img_resize_x(day_mask_pic)
address_mask_pic = img_resize_x(address_mask_pic)
idnum_mask_pic = img_resize_x(idnum_mask_pic)
cv2.imwrite('name_mask_%s.jpg'%pixel_x, name_mask_pic)
cv2.imwrite('sex_mask_%s.jpg' %pixel_x, sex_mask_pic)
cv2.imwrite('nation_mask_%s.jpg' %pixel_x, nation_mask_pic)
cv2.imwrite('birth_mask_%s.jpg' %pixel_x, birth_mask_pic)
cv2.imwrite('year_mask_%s.jpg' % pixel_x, year_mask_pic)
cv2.imwrite('month_mask_%s.jpg' % pixel_x, month_mask_pic)
cv2.imwrite('day_mask_%s.jpg' % pixel_x, day_mask_pic)
cv2.imwrite('address_mask_%s.jpg' %pixel_x, address_mask_pic)
cv2.imwrite('idnum_mask_%s.jpg' %pixel_x, idnum_mask_pic)
# used to generate the scaled template masks
def img_resize_x(imggray):
# print 'dheight:%s' % dheight
crop = imggray
size = crop.get().shape
dheight = int(size[0]*x)
dwidth = int(size[1]*x)
crop = cv2.resize(src=crop, dsize=(dwidth, dheight), interpolation=cv2.INTER_CUBIC)
return crop
# resize based on target height inside idcardocr; used by the get_* functions
def img_resize(imggray, dheight):
# print 'dheight:%s' % dheight
crop = imggray
size = crop.get().shape
height = size[0]
width = size[1]
width = width * dheight / height
crop = cv2.resize(src=crop, dsize=(int(width), dheight), interpolation=cv2.INTER_CUBIC)
return crop
def img_resize_gray(imgorg):
#imgorg = cv2.imread(imgname)
crop = imgorg
size = cv2.UMat.get(crop).shape
# print size
height = size[0]
width = size[1]
    # parameters below are tuned for a 3840-pixel-wide image
height = int(height * 3840 * x / width)
# print height
crop = cv2.resize(src=crop, dsize=(int(3840 * x), height), interpolation=cv2.INTER_CUBIC)
return hist_equal(cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)), crop
def find_name(crop_gray, crop_org):
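    # Locate the printed "name" label by template matching, then crop a fixed-size
    # region to its right out of the original image. The other find_* helpers
    # below follow the same pattern with their own label masks and crop sizes.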
template = cv2.UMat(cv2.imread('name_mask_%s.jpg'%pixel_x, 0))
# showimg(crop_org)
w, h = cv2.UMat.get(template).shape[::-1]
res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# print(max_loc)
top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
bottom_right = (top_left[0] + int(700*x), top_left[1] + int(300*x))
result = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
# showimg(result)
return cv2.UMat(result)
def find_sex(crop_gray, crop_org):
template = cv2.UMat(cv2.imread('sex_mask_%s.jpg'%pixel_x, 0))
# showimg(template)
w, h = cv2.UMat.get(template).shape[::-1]
res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
bottom_right = (top_left[0] + int(300*x), top_left[1] + int(300*x))
result = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#showimg(crop_gray)
return cv2.UMat(result)
def find_nation(crop_gray, crop_org):
template = cv2.UMat(cv2.imread('nation_mask_%s.jpg'%pixel_x, 0))
#showimg(template)
w, h = cv2.UMat.get(template).shape[::-1]
res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = (max_loc[0] + w - int(20*x), max_loc[1] - int(20*x))
bottom_right = (top_left[0] + int(500*x), top_left[1] + int(300*x))
result = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#showimg(crop_gray)
return cv2.UMat(result)
# def find_birth(crop_gray, crop_org):
# template = cv2.UMat(cv2.imread('birth_mask_%s.jpg'%pixel_x, 0))
# # showimg(template)
# w, h = cv2.UMat.get(template).shape[::-1]
# res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
# #showimg(crop_gray)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
# bottom_right = (top_left[0] + int(1500*x), top_left[1] + int(300*x))
# # the result region must be extracted before drawing the rectangle
# date_org = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
# date = cv2.cvtColor(date_org, cv2.COLOR_BGR2GRAY)
# cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
# # cv2.imwrite('date.png',date)
#
# # extract the year
# template = cv2.UMat(cv2.imread('year_mask_%s.jpg'%pixel_x, 0))
# year_res = cv2.matchTemplate(date, template, cv2.TM_CCOEFF_NORMED)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(year_res)
# bottom_right = (max_loc[0]+int(20*x), int(300*x))
# top_left = (0, 0)
# year = date_org[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
# # cv2.imwrite('year.png',year)
# cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#
# # extract the month
# template = cv2.UMat(cv2.imread('month_mask_%s.jpg'%pixel_x, 0))
# month_res = cv2.matchTemplate(date, template, cv2.TM_CCOEFF_NORMED)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(month_res)
# bottom_right = (max_loc[0]+int(40*x), int(300*x))
# top_left = (max_loc[0] - int(220*x), 0)
# month = date_org[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
# # cv2.imwrite('month.png',month)
# cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#
# # extract the day
# template = cv2.UMat(cv2.imread('day_mask_%s.jpg'%pixel_x, 0))
# day_res = cv2.matchTemplate(date, template, cv2.TM_CCOEFF_NORMED)
# min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(day_res)
# bottom_right = (max_loc[0]+int(20*x), int(300*x))
# top_left = (max_loc[0] - int(220*x), 0)
# day = date_org[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
# # cv2.imwrite('day.png',day)
# cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
# showimg(crop_gray)
# return cv2.UMat(year), cv2.UMat(month), cv2.UMat(day)
def find_address(crop_gray, crop_org):
template = cv2.UMat(cv2.imread('address_mask_%s.jpg'%pixel_x, 0))
# showimg(template)
#showimg(crop_gray)
w, h = cv2.UMat.get(template).shape[::-1]
#t1 = round(time.time()*1000)
res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
#t2 = round(time.time()*1000)
#print 'time:%s'%(t2-t1)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
bottom_right = (top_left[0] + int(1700*x), top_left[1] + int(550*x))
result = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#showimg(crop_gray)
return cv2.UMat(result)
def find_idnum(crop_gray, crop_org):
template = cv2.UMat(cv2.imread('idnum_mask_%s.jpg'%pixel_x, 0))
# showimg(template)
#showimg(crop_gray)
w, h = cv2.UMat.get(template).shape[::-1]
res = cv2.matchTemplate(crop_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = (max_loc[0] + w, max_loc[1] - int(20*x))
bottom_right = (top_left[0] + int(2300*x), top_left[1] + int(300*x))
result = cv2.UMat.get(crop_org)[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
cv2.rectangle(crop_gray, top_left, bottom_right, 255, 2)
#showimg(crop_gray)
return cv2.UMat(result)
def showimg(img):
cv2.namedWindow("contours", 0);
cv2.resizeWindow("contours", 1280, 720);
cv2.imshow("contours", img)
cv2.waitKey()
#psm model:
# 0 Orientation and script detection (OSD) only.
# 1 Automatic page segmentation with OSD.
# 2 Automatic page segmentation, but no OSD, or OCR.
# 3 Fully automatic page segmentation, but no OSD. (Default)
# 4 Assume a single column of text of variable sizes.
# 5 Assume a single uniform block of vertically aligned text.
# 6 Assume a single uniform block of text.
# 7 Treat the image as a single text line.
# 8 Treat the image as a single word.
# 9 Treat the image as a single word in a circle.
# 10 Treat the image as a single character.
# 11 Sparse text. Find as much text as possible in no particular order.
# 12 Sparse text with OSD.
# 13 Raw line. Treat the image as a single text line,
# bypassing hacks that are Tesseract-specific
def get_name(img):
# cv2.imshow("method3", img)
# cv2.waitKey()
    _, _, red = cv2.split(img)  # cv2.split automatically converts the UMat back to a Mat
red = cv2.UMat(red)
red = hist_equal(red)
red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 151, 50)
# red = cv2.medianBlur(red, 3)
red = img_resize(red, 150)
img = img_resize(img, 150)
# showimg(red)
# cv2.imwrite('name.png', red)
# img2 = Image.open('address.png')
# img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
return get_result_vary_length(red, 'chi_sim', img, '-psm 7')
# return punc_filter(pytesseract.image_to_string(img, lang='chi_sim', config='-psm 13').replace(" ",""))
def get_sex(img):
_, _, red = cv2.split(img)
red = cv2.UMat(red)
red = hist_equal(red)
red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
# red = cv2.medianBlur(red, 3)
# cv2.imwrite('address.png', img)
# img2 = Image.open('address.png')
red = img_resize(red, 150)
# cv2.imwrite('sex.png', red)
# img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
return get_result_fix_length(red, 1, 'sex', '-psm 10')
# return pytesseract.image_to_string(img, lang='sex', config='-psm 10').replace(" ","")
def get_nation(img):
_, _, red = cv2.split(img)
red = cv2.UMat(red)
red = hist_equal(red)
red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
red = img_resize(red, 150)
# cv2.imwrite('nation.png', red)
# img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
return get_result_fix_length(red, 1, 'nation', '-psm 10')
# return pytesseract.image_to_string(img, lang='nation', config='-psm 13').replace(" ","")
# def get_birth(year, month, day):
# _, _, red = cv2.split(year)
# red = cv2.UMat(red)
# red = hist_equal(red)
# red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
# red = img_resize(red, 150)
# # cv2.imwrite('year_red.png', red)
# year_red = red
#
# _, _, red = cv2.split(month)
# red = cv2.UMat(red)
# red = hist_equal(red)
# red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
# #red = cv2.erode(red,kernel,iterations = 1)
# red = img_resize(red, 150)
# # cv2.imwrite('month_red.png', red)
# month_red = red
#
# _, _, red = cv2.split(day)
# red = cv2.UMat(red)
# red = hist_equal(red)
# red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
# red = img_resize(red, 150)
# # cv2.imwrite('day_red.png', red)
# day_red = red
# # return pytesseract.image_to_string(img, lang='birth', config='-psm 7')
# return get_result_fix_length(year_red, 4, 'eng', '-c tessedit_char_whitelist=0123456789 -psm 13'), \
# get_result_vary_length(month_red, 'eng', '-c tessedit_char_whitelist=0123456789 -psm 13'), \
# get_result_vary_length(day_red, 'eng', '-c tessedit_char_whitelist=0123456789 -psm 13')
def get_address(img):
#_, _, red = cv2.split(img)
#red = cv2.medianBlur(red, 3)
_, _, red = cv2.split(img)
red = cv2.UMat(red)
red = hist_equal(red)
red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
red = img_resize(red, 300)
img = img_resize(img, 300)
# cv2.imwrite('address_red.png', red)
#img = Image.fromarray(cv2.UMat.get(red).astype('uint8'))
return punc_filter(get_result_vary_length(red,'chi_sim', img, '-psm 6'))
#return punc_filter(pytesseract.image_to_string(img, lang='chi_sim', config='-psm 3').replace(" ",""))
def get_idnum_and_birth(img):
_, _, red = cv2.split(img)
red = cv2.UMat(red)
red = hist_equal(red)
red = cv2.adaptiveThreshold(red, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 151, 50)
red = img_resize(red, 150)
# cv2.imwrite('idnum_red.png', red)
idnum_str = get_result_fix_length(red, 18, 'idnum', '-psm 8')
return idnum_str, idnum_str[6:14]
def get_result_fix_length(red, fix_length, langset, custom_config=''):
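    # OCR for a field with a known number of characters: binarize, find character
    # contours, then adjust the height threshold until exactly `fix_length`
    # contours remain (giving up after 50 attempts), and OCR each bounding box
    # from left to right, concatenating the results.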
cv2.fastNlMeansDenoising(red, red, 4, 7, 35)
rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours))
    # drawing the contours once helps reduce noise
cv2.drawContours(red, contours, -1, (0, 255, 0), 1)
color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR)
# for x, y, w, h in contours:
# imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# showimg(imgrect)
h_threshold = 54
numset_contours = []
calcu_cnt = 1
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
if h > h_threshold:
numset_contours.append((x, y, w, h))
while len(numset_contours) != fix_length:
if calcu_cnt > 50:
print(u'计算次数过多!目前阈值为:', h_threshold)
break
numset_contours = []
calcu_cnt += 1
if len(numset_contours) > fix_length:
h_threshold += 1
contours_cnt = 0
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
if h > h_threshold:
contours_cnt += 1
numset_contours.append((x, y, w, h))
if len(numset_contours) < fix_length:
h_threshold -= 1
contours_cnt = 0
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
if h > h_threshold:
contours_cnt += 1
numset_contours.append((x, y, w, h))
result_string = ''
numset_contours.sort(key=lambda num: num[0])
for x, y, w, h in numset_contours:
result_string += pytesseract.image_to_string(cv2.UMat.get(color_img)[y:y + h, x:x + w], lang=langset, config=custom_config)
# print(new_r)
return result_string
def get_result_vary_length(red, langset, org_img, custom_config=''):
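    # OCR for a field of unknown length: keep contours that look like characters
    # (tall enough and not too wide), take the bounding box covering all of them,
    # and run Tesseract once on that region of the original image.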
# cv2.fastNlMeansDenoising(red, red, 4, 7, 35)
rec, red = cv2.threshold(red, 127, 255, cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# print(len(contours))
    # drawing the contours once helps reduce noise
cv2.drawContours(red, contours, -1, (255, 255, 255), 1)
color_img = cv2.cvtColor(red, cv2.COLOR_GRAY2BGR)
numset_contours = []
height_list=[]
width_list=[]
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
height_list.append(h)
# print(h,w)
width_list.append(w)
height_list.remove(max(height_list))
width_list.remove(max(width_list))
height_threshold = 0.70*max(height_list)
width_threshold = 1.4 * max(width_list)
# print('height_threshold:'+str(height_threshold)+'width_threshold:'+str(width_threshold))
big_rect=[]
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
if h > height_threshold and w < width_threshold:
# print(h,w)
numset_contours.append((x, y, w, h))
big_rect.append((x, y))
big_rect.append((x + w, y + h))
big_rect_nparray = np.array(big_rect, ndmin=3)
x, y, w, h = cv2.boundingRect(big_rect_nparray)
# imgrect = cv2.rectangle(color_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# showimg(imgrect)
# showimg(cv2.UMat.get(org_img)[y:y + h, x:x + w])
result_string = ''
result_string += pytesseract.image_to_string(cv2.UMat.get(org_img)[y:y + h, x:x + w], lang=langset,
config=custom_config)
# numset_contours.sort(key=lambda num: num[0])
# for x, y, w, h in numset_contours:
# result_string += pytesseract.image_to_string(cv2.UMat.get(color_img)[y:y + h, x:x + w], lang=langset, config=custom_config)
return punc_filter(result_string)
def punc_filter(str):
temp = str
xx = u"([\u4e00-\u9fff0-9]+)"
pattern = re.compile(xx)
results = pattern.findall(temp)
string = ""
for result in results:
string += result
return string
# histogram stretching is used here, not histogram equalization
def hist_equal(img):
# clahe_size = 8
# clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(clahe_size, clahe_size))
# result = clahe.apply(img)
#test
#result = cv2.equalizeHist(img)
image = img.get() #UMat to Mat
# result = cv2.equalizeHist(image)
    lut = np.zeros(256, dtype=image.dtype)  # create an empty lookup table
#lut = np.zeros(256)
    hist= cv2.calcHist([image], # compute the image histogram
                       [0], # channel to use
                       None, # no mask is applied
[256], #it is a 1D histogram
[0,256])
minBinNo, maxBinNo = 0, 255
    # find the first non-zero histogram bin from the left
for binNo, binValue in enumerate(hist):
if binValue != 0:
minBinNo = binNo
break
    # find the first non-zero histogram bin from the right
for binNo, binValue in enumerate(reversed(hist)):
if binValue != 0:
maxBinNo = 255-binNo
break
#print minBinNo, maxBinNo
    # build the lookup table
for i,v in enumerate(lut):
if i < minBinNo:
lut[i] = 0
elif i > maxBinNo:
lut[i] = 255
else:
lut[i] = int(255.0*(i-minBinNo)/(maxBinNo-minBinNo)+0.5)
    # apply the mapping with cv2.LUT (image: input image, lut: lookup table)
#print lut
result = cv2.LUT(image, lut)
#print type(result)
#showimg(result)
return cv2.UMat(result)
if __name__=="__main__":
idocr = idcardocr(cv2.UMat(cv2.imread('testimages/zrh.jpg')))
print(idocr)
# for i in range(15):
# idocr = idcardocr(cv2.UMat(cv2.imread('testimages/%s.jpg'%(i+1))))
# print(idocr['idnum'])
| 42.733583 | 133 | 0.606533 |
c71a90f803db185bae38173155ac33924f09c686 | 1,573 | py | Python | util/buildtoc.py | cezarlz/diveintohtml5 | 2ef63a18cfbc88aa50bc13151b0ee7d6c56268a0 | [
"CC-BY-2.0",
"CC-BY-3.0",
"Apache-2.0"
] | 80 | 2015-01-05T23:35:37.000Z | 2021-02-05T23:55:12.000Z | util/buildtoc.py | sigy/diveintohtml5 | e89af9137ffbd91ad897e99cb2a8f9e1c4dfbdda | [
"CC-BY-2.0",
"CC-BY-3.0",
"Apache-2.0"
] | 3 | 2015-01-05T23:38:07.000Z | 2018-02-23T17:21:07.000Z | util/buildtoc.py | sigy/diveintohtml5 | e89af9137ffbd91ad897e99cb2a8f9e1c4dfbdda | [
"CC-BY-2.0",
"CC-BY-3.0",
"Apache-2.0"
] | 44 | 2015-01-01T21:56:14.000Z | 2019-07-27T15:13:22.000Z | #!/usr/bin/python3
import re
import roman
# get list of chapters
chapters = []
for line in open('index.html'):
if not line.count('<li') or not line.count('<a href'): continue
chapters.append(line.split('<a href=', 1)[1].split('>', 1)[0])
sections = {}
for filename in chapters:
chapter_id = filename.split(".", 1)[0]
with open(filename, encoding="utf-8") as f: data = f.read()
sections[chapter_id] = re.findall("<h2 id=(.*?)>(.*?)</h2>", data)
with open('index.html', encoding="utf-8") as f: data = f.read()
short_toc = data.split('<!-- toc -->')[1].split('<!-- /toc -->')[0]
full_toc = ['<!-- toc -->']
chapter_id = ''
for line in short_toc.splitlines():
if line.count('<li') and line.count('<a href'):
chapter_id = line.split('<a href=', 1)[1].split('.', 1)[0]
line = line.replace('<li>', '<li id={0}>'.format(chapter_id))
full_toc.append(line)
if chapter_id:
full_toc.append('<ol>')
section_number = 0
for section_id, section_title in sections[chapter_id]:
section_number += 1
full_toc.append('<li><dl><dt><a href={0}.html#{1}>{2}</a><dd>{3}</dl>'.format(chapter_id, section_id, section_title, roman.to_roman(section_number).lower()))
full_toc.append('</ol>')
chapter_id = ''
full_toc.append('<!-- /toc -->')
with open('table-of-contents.html', encoding="utf-8") as f: data = f.read()
with open('table-of-contents.html', mode="w", encoding="utf-8") as f:
f.write(data.split('<!-- toc -->')[0] + "\n".join(full_toc) + data.split('<!-- /toc -->')[1])
| 39.325 | 169 | 0.590591 |
032f6865856fc5c22e277c9cc7dcdf3da71825d0 | 523 | py | Python | codesamples/managers.py | akondasif/Clone-test-repo | 8a6fc17dc904015db76d4fe81965765466adfc55 | [
"Apache-2.0"
] | null | null | null | codesamples/managers.py | akondasif/Clone-test-repo | 8a6fc17dc904015db76d4fe81965765466adfc55 | [
"Apache-2.0"
] | 1 | 2019-03-28T22:12:58.000Z | 2019-03-28T22:12:58.000Z | codesamples/managers.py | akondasif/Clone-test-repo | 8a6fc17dc904015db76d4fe81965765466adfc55 | [
"Apache-2.0"
] | 1 | 2019-04-03T20:26:54.000Z | 2019-04-03T20:26:54.000Z | from django.db.models import Manager
from django.db.models.query import QuerySet
class CodeSampleQuerySet(QuerySet):
def draft(self):
return self.filter(is_published=False)
def published(self):
return self.filter(is_published=True)
class CodeSampleManager(Manager):
def get_queryset(self):
return CodeSampleQuerySet(self.model, using=self._db)
def draft(self):
return self.get_queryset().draft()
def published(self):
return self.get_queryset().published()
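# Usage sketch (assumes a model, here called CodeSample, declares
# `objects = CodeSampleManager()`; that model lives elsewhere in the app):
#     CodeSample.objects.published()             # only published samples
#     CodeSample.objects.draft().order_by('id')  # chaining still works because
#                                                # the manager returns a QuerySet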
| 23.772727 | 61 | 0.709369 |
faccb25850836b9f05de2215d8b37bc66df4ee6c | 854 | py | Python | open-hackathon-client/src/client/views/route_template.py | chensong2000/open-hackathon | e2d162995fc4b55ea96b21e4c3cb0c8388cdc25f | [
"MIT"
] | null | null | null | open-hackathon-client/src/client/views/route_template.py | chensong2000/open-hackathon | e2d162995fc4b55ea96b21e4c3cb0c8388cdc25f | [
"MIT"
] | null | null | null | open-hackathon-client/src/client/views/route_template.py | chensong2000/open-hackathon | e2d162995fc4b55ea96b21e4c3cb0c8388cdc25f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import sys
sys.path.append("..")
from client import app
from . import render
from flask_login import login_required
@app.route("/template")
def template_index():
return render("/template/index.html")
@app.route("/template/create")
@login_required
def template_create():
return render("/template/create.html")
@app.route("/template/create/<template_id>")
@login_required
def template_create_from_existing(template_id):
return render("/template/create.html", template_id=template_id)
@app.route("/template/edit/<template_id>")
def template_edit(template_id):
return render("/template/create.html")
@app.route("/template/try/<template_id>")
@login_required
def try_template(template_id):
return render("/manage/testtemplate.html")
| 20.829268 | 71 | 0.735363 |
ba5e0416e6b4cfb878a405870045a54aad543390 | 3,306 | py | Python | Crawlers/news_sites/items.py | Lasith-Niro/fact-Bounty | aadfcb21d18a547ab23ea9b7fd750b9bdfcd6fcf | [
"Apache-2.0"
] | null | null | null | Crawlers/news_sites/items.py | Lasith-Niro/fact-Bounty | aadfcb21d18a547ab23ea9b7fd750b9bdfcd6fcf | [
"Apache-2.0"
] | null | null | null | Crawlers/news_sites/items.py | Lasith-Niro/fact-Bounty | aadfcb21d18a547ab23ea9b7fd750b9bdfcd6fcf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class NewsSitesItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class NFItem(scrapy.Item):
news_headline = scrapy.Field()
published_timestamp = scrapy.Field()
author =scrapy.Field()
link = scrapy.Field()
data = scrapy.Field()
imgURL = scrapy.Field()
class CTItem(scrapy.Item):
news_headline = scrapy.Field()
#published_timestamp = scrapy.Field()
#comments = scrapy.Field()
#views = scrapy.Field()
#moreDetails=scrapy.Field()
link = scrapy.Field()
data = scrapy.Field()
class chaptersItem(scrapy.Item):
news_headline = scrapy.Field()
link = scrapy.Field()
newsInDetails = scrapy.Field()
class DailyMirrorSportsItem(scrapy.Item):
news_headline = scrapy.Field()
link = scrapy.Field()
data = scrapy.Field()
newsInDetails = scrapy.Field()
class DailyMirrorItem(scrapy.Item):
news_headline = scrapy.Field()
published_timestamp = scrapy.Field()
comments = scrapy.Field()
views = scrapy.Field()
moreDetails=scrapy.Field()
link = scrapy.Field()
data = scrapy.Field()
class LDItem(scrapy.Item):
news_headline = scrapy.Field()
published_timestamp = scrapy.Field()
link = scrapy.Field()
data = scrapy.Field()
class EconomyNextItem(scrapy.Item):
news_headline = scrapy.Field()
link = scrapy.Field()
datetime = scrapy.Field()
newsInDetails = scrapy.Field()
class GeneralItem(scrapy.Item):
news_headline = scrapy.Field()
link = scrapy.Field()
datetime = scrapy.Field()
newsInDetails = scrapy.Field()
class AutoLankaItem(scrapy.Item):
news_headline = scrapy.Field()
link = scrapy.Field()
telephone = scrapy.Field()
newsInDetails = scrapy.Field()
class FtItem(scrapy.Item):
news_headline = scrapy.Field()
date = scrapy.Field()
data = scrapy.Field()
news_link = scrapy.Field()
class PulseItem(scrapy.Item):
news_headline = scrapy.Field()
date = scrapy.Field()
comment = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
class RoarItem(scrapy.Item):
news_headline = scrapy.Field()
imgURL = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
date = scrapy.Field()
class ReadMeItem(scrapy.Item):
sub_category = scrapy.Field()
news_headline = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
date = scrapy.Field()
writer = scrapy.Field()
class ThePapareItem(scrapy.Item):
news_headline = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
date = scrapy.Field()
writer = scrapy.Field()
img_src = scrapy.Field()
comments = scrapy.Field()
class NationlkItem(scrapy.Item):
news_headline = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
date = scrapy.Field()
writer = scrapy.Field()
class adaDeraneItem(scrapy.Item):
news_headline = scrapy.Field()
newsInDetails = scrapy.Field()
news_link = scrapy.Field()
date = scrapy.Field()
image_url = scrapy.Field()
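# Illustrative sketch (hypothetical values): inside a spider callback, an Item
# subclass from this module is filled like a dict and then yielded.
def _example_item():
    item = GeneralItem()
    item['news_headline'] = 'Sample headline'
    item['link'] = 'https://example.com/article'
    item['datetime'] = '2019-01-01T00:00:00'
    item['newsInDetails'] = 'Body text of the article...'
    return item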
| 25.828125 | 51 | 0.672716 |
3a67b67fa5eb2c6d96fe6278db375dbfd9f485ee | 4,884 | py | Python | atomate/utils/database.py | jordanpburns/atomate | 95bbdeb8fd5d1721688ab5947befc5d50ce1137f | [
"BSD-3-Clause-LBNL"
] | 1 | 2019-09-02T00:55:26.000Z | 2019-09-02T00:55:26.000Z | atomate/utils/database.py | jordanpburns/atomate | 95bbdeb8fd5d1721688ab5947befc5d50ce1137f | [
"BSD-3-Clause-LBNL"
] | null | null | null | atomate/utils/database.py | jordanpburns/atomate | 95bbdeb8fd5d1721688ab5947befc5d50ce1137f | [
"BSD-3-Clause-LBNL"
] | null | null | null | # coding: utf-8
from __future__ import division, print_function, unicode_literals, absolute_import
"""
This module defines a base class for derived database classes that store calculation data.
"""
import datetime
from abc import ABCMeta, abstractmethod
import six
from pymongo import MongoClient, ReturnDocument
from monty.json import jsanitize
from monty.serialization import loadfn
from atomate.utils.utils import get_logger
__author__ = 'Kiran Mathew'
__credits__ = 'Anubhav Jain'
__email__ = 'kmathew@lbl.gov'
logger = get_logger(__name__)
class CalcDb(six.with_metaclass(ABCMeta)):
def __init__(self, host, port, database, collection, user, password, **kwargs):
self.host = host
self.db_name = database
self.user = user
self.password = password
self.port = int(port)
try:
self.connection = MongoClient(host=self.host, port=self.port,
username=self.user,
password=self.password, **kwargs)
self.db = self.connection[self.db_name]
except:
logger.error("Mongodb connection failed")
raise Exception
try:
if self.user:
self.db.authenticate(self.user, self.password,
source=kwargs.get("authsource", None))
except:
logger.error("Mongodb authentication failed")
raise ValueError
self.collection = self.db[collection]
# set counter collection
if self.db.counter.find({"_id": "taskid"}).count() == 0:
self.db.counter.insert_one({"_id": "taskid", "c": 0})
self.build_indexes()
@abstractmethod
def build_indexes(self, indexes=None, background=True):
"""
Build the indexes.
Args:
indexes (list): list of single field indexes to be built.
background (bool): Run in the background or not.
"""
pass
def insert(self, d, update_duplicates=True):
"""
Insert the task document into the database collection.
Args:
d (dict): task document
update_duplicates (bool): whether to update the duplicates
"""
result = self.collection.find_one({"dir_name": d["dir_name"]}, ["dir_name", "task_id"])
if result is None or update_duplicates:
d["last_updated"] = datetime.datetime.utcnow()
if result is None:
if ("task_id" not in d) or (not d["task_id"]):
d["task_id"] = self.db.counter.find_one_and_update(
{"_id": "taskid"}, {"$inc": {"c": 1}},
return_document=ReturnDocument.AFTER)["c"]
logger.info("Inserting {} with taskid = {}".format(d["dir_name"], d["task_id"]))
elif update_duplicates:
d["task_id"] = result["task_id"]
logger.info("Updating {} with taskid = {}".format(d["dir_name"], d["task_id"]))
d = jsanitize(d, allow_bson=True)
self.collection.update_one({"dir_name": d["dir_name"]},
{"$set": d}, upsert=True)
return d["task_id"]
else:
logger.info("Skipping duplicate {}".format(d["dir_name"]))
return None
@abstractmethod
def reset(self):
pass
@classmethod
def from_db_file(cls, db_file, admin=True):
"""
Create MMDB from database file. File requires host, port, database,
collection, and optionally admin_user/readonly_user and
admin_password/readonly_password
Args:
db_file (str): path to the file containing the credentials
admin (bool): whether to use the admin user
Returns:
MMDb object
"""
creds = loadfn(db_file)
if admin and "admin_user" not in creds and "readonly_user" in creds:
raise ValueError("Trying to use admin credentials, "
"but no admin credentials are defined. "
"Use admin=False if only read_only "
"credentials are available.")
if admin:
user = creds.get("admin_user")
password = creds.get("admin_password")
else:
user = creds.get("readonly_user")
password = creds.get("readonly_password")
kwargs = creds.get("mongoclient_kwargs", {}) # any other MongoClient kwargs can go here ...
if "authsource" in creds:
kwargs["authsource"] = creds["authsource"]
else:
kwargs["authsource"] = creds["database"]
return cls(creds["host"], int(creds["port"]), creds["database"], creds["collection"],
user, password, **kwargs)
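# Illustrative sketch of a credentials file accepted by from_db_file
# (all values below are placeholders):
#
#     {"host": "localhost",
#      "port": 27017,
#      "database": "my_db",
#      "collection": "tasks",
#      "admin_user": "admin",
#      "admin_password": "secret",
#      "readonly_user": "reader",
#      "readonly_password": "secret"}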
| 35.391304 | 100 | 0.570844 |
bd42267473381a9224bd1922e471eec1d6afb0d8 | 13,241 | py | Python | src/m1.py | wangtianlinrosehulman/05a-Debugging | 7c90a7f924d600a9af152bc2a100e7241ec36db8 | [
"MIT"
] | null | null | null | src/m1.py | wangtianlinrosehulman/05a-Debugging | 7c90a7f924d600a9af152bc2a100e7241ec36db8 | [
"MIT"
] | null | null | null | src/m1.py | wangtianlinrosehulman/05a-Debugging | 7c90a7f924d600a9af152bc2a100e7241ec36db8 | [
"MIT"
] | null | null | null | """
This module lets you practice DEBUGGING when RUN-TIME EXCEPTIONS occur.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Tianlin Wang.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
########################################################################
#
# Done: 2. READ these instructions, ASKING QUESTIONS as needed.
#
# This module contains 7 "broken" functions.
#
# For each broken function, running it will generate what's called
# an EXCEPTION -- an error that occurs when the program runs and
# (left to itself) causes the program to end its execution.
#
# We have written tests for each broken function.
# *** DO NOT MODIFY THE TESTS. ***
# In fact, you do not even need to read the tests.
# Instead, look at the file m1_pictures.pdf to see what output
# the tests should generate.
#
# To do this exercise, do the following:
#
# Step 1:
# -- Read the doc-string (but NOT the code) of the broken_1 function.
# -- Look at the m1_pictures.pdf file
# to see what output the tests should produce.
# -- ASK QUESTIONS AS NEEDED until you understand the specification
# of the broken_1 function.
#
# Step 2: Run this module.
# You will see that the code "breaks" and prints on the Console:
# -- a STACK TRACEBACK of the code that led to the exception
# -- an error message that attempts to explain the exception
# Right-click in the Console and select "Word Wrap" to make
# the stack traceback and error message more readable.
#
# Step 3: READ the error message. Try to make sense of it.
# ASK QUESTIONS AS NEEDED!
#
# Step 4: Click on the BOTTOM blue link in the Console.
# It will take you to the line at which the code broke.
# (BUT if the line refers to a line in rosegraphics.py,
# work your way UP the blue links until you reach
# the lowermost one that refers to a line in THIS module.)
#
# Step 5: Looking at the line at which the code broke,
# figure out what the error message is telling you.
# ASK QUESTIONS AS NEEDED!
#
# Step 6: Thinking about the green specification of the broken_1
# function, and thinking about what the error message tells you,
# correct the mistake(s).
#
# Sometimes the mistake will be on the line at which the code broke,
# sometimes at a line that executed before that line executed.
#
# After correcting the mistake(s), run the program again.
# Continue until you believe that the broken_1 function produces
# the correct output (per the m1_pictures.pdf file)
# AND you believe that the code for the function is correct.
#
# ** IMPORTANT: **
# Resist the urge to "fiddle" with the code until you stumble
# upon something that works. This exercise will be helpful
# to you ONLY if you use it as an opportunity to learn
# what the error messages mean and how to react to them.
#
# *** ASK QUESTIONS AS NEEDED! ***
#
# Once you have corrected the broken_1 function, continue to
# the next function, again proceeding according to the above steps.
#
# When you believe you understand these instructions,
# change the above TO DO to DONE.
#
########################################################################
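# For reference, a run-time exception prints a stack traceback similar to the
# (made-up) one below; the bottommost entry that points into THIS module is
# usually the place to start reading:
#
#   Traceback (most recent call last):
#     File "m1.py", line NNN, in main
#       run_test_all()
#     File "m1.py", line NNN, in run_test_all
#       broken_1(circle1, window)
#   AttributeError: 'Circle' object has no attribute 'centre'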
def main():
""" Calls the TEST functions in this module. """
run_test_all()
########################################################################
# Students: Do NOT change the following tests.
# There are NO errors in the TESTS.
########################################################################
def run_test_all():
""" Tests ALL the functions in this module. """
# Test broken_1:
window = rg.RoseWindow(title='Testing BROKEN_1')
circle1 = rg.Circle(rg.Point(50, 50), 15)
circle1.fill_color = 'blue'
broken_1(circle1, window) # Test 1 of broken_1
circle2 = rg.Circle(rg.Point(70, 150), 30)
circle2.fill_color = 'red'
broken_1(circle2, window) # Test 2 of broken_1
window.close_on_mouse_click()
# Test broken_2:
window = rg.RoseWindow(title='Testing BROKEN_2')
broken_2(50, 75, window) # Test 1 of broken_2
broken_2(100, 150, window) # Test 2 of broken_2
window.close_on_mouse_click()
# Test broken_3:
window = rg.RoseWindow(title='Testing BROKEN_3')
broken_3(5, rg.Point(100, 50), 80, 20, window) # Test 1 of broken_3
broken_3(3, rg.Point(50, 150), 40, 50, window) # Test 2 of broken_3
window.close_on_mouse_click()
# Test broken_4:
window = rg.RoseWindow(title='Testing BROKEN_4')
broken_4(50, 75, 40, window) # Test 1 of broken_4
broken_4(100, 150, 75, window) # Test 2 of broken_4
window.close_on_mouse_click()
# Test broken_5:
window = rg.RoseWindow(title='Testing BROKEN_5')
circle = rg.Circle(rg.Point(100, 50), 30)
circle.fill_color = 'pink'
broken_5(circle, window) # Test 1 of broken_5
circle = rg.Circle(rg.Point(250, 100), 80)
circle.fill_color = 'red'
broken_5(circle, window) # Test 2 of broken_5
window.close_on_mouse_click()
# Test broken_6:
expected = 1.8333333
actual = broken_6(3) # Test 1 of broken_6
print("Testing BROKEN_6:\n")
print('Expected for BROKEN_6, Test 1:', expected, '(approximately)')
print(' Actual for BROKEN_6, Test 1:', actual)
expected = 5.1873775
actual = broken_6(100) # Test 2 of broken_6
print()
print('Expected for BROKEN_6, Test 2:', expected, '(approximately)')
print(' Actual for BROKEN_6, Test 2:', actual)
print()
# Test broken_7:
window = rg.RoseWindow(title='Testing BROKEN_7')
broken_7(5, rg.Point(100, 50), 80, 20, window) # Test 1 of broken_7
broken_7(3, rg.Point(50, 150), 40, 50, window) # Test 2 of broken_7
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# Done: 3. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_1(circle, window):
"""
What comes in: an rg.Circle and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle on the given rg.RoseWindow,
then draws another rg.Circle whose RADIUS
is TWICE that of the given rg.Circle
and whose center is the same as that of the given rg.Circle.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
circle.attach_to(window)
circle2 = rg.Circle(circle.center, 2 * circle.radius)
circle2.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 4. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_2(x, y, window):
"""
What comes in: Positive integers x and y, and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws a rg.Circle with radius 33, centered at (x, y),
on the given rg.RoseWindow.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type x: int
:type y: int
:type window: rg.RoseWindow
"""
center = rg.Point(x, y)
circle = rg.Circle(center, 33)
circle.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 5. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_3(n, point, length, distance_between_lines, window):
"""
What comes in: The four arguments are:
-- A positive integer n.
-- An rg.Point.
-- A positive integer length.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws n vertical rg.Lines on the given rg.RoseWindow,
where the leftmost rg.Line has the given point as its topmost
point and all the rg.Lines have the given length
and they are the given distance apart.
Each line is drawn with a 0.5 second pause after drawing it.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type point: rg.Point
:type length: int
:type distance_between_lines: int
:type window: rg.RoseWindow
"""
a = rg.Point(point.x, point.y)
b = rg.Point(point.x, point.y + length)
for k in range(n):
a = rg.Point(point.x + k * distance_between_lines, point.y)
b = rg.Point(point.x + k * distance_between_lines, point.y + length)
line = rg.Line(a, b)
line.attach_to(window)
window.render(0.5)
# ----------------------------------------------------------------------
# Done: 6. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_4(x, y, radius, window):
"""
What comes in: Positive integers x and y, and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws a green-filled rg.Circle with the given radius,
centered at (x, y), on the given rg.RoseWindow
Must ** render ** but ** NOT close ** the window.
Type hints:
:type x: int
:type y: int
:type radius: int
:type window: rg.RoseWindow
"""
circle = rg.Circle(rg.Point(x, y), radius)
circle.fill_color = 'green'
circle.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 7. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_5(circle, window):
"""
What comes in: an rg.Circle and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle and an rg.Square that circumscribes it,
both on the given rg.RoseWindow.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
circle.attach_to(window)
square = rg.Square(circle.center, 2 * circle.radius)
square.outline_color = circle.fill_color
square.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 8. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_6(n):
"""
What comes in: A positive integer n.
What goes out: Returns the sum:
1 + 1/2 + 1/3 + ... + 1/n.
Side effects: None.
"""
total = 0
for k in range(1, n + 1):
total = total + 1 / k
return total
# ----------------------------------------------------------------------
# Done: 9. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_7(n, point, length, distance_between_lines, window):
"""
What comes in: The four arguments are:
-- A positive integer n.
-- An rg.Point.
-- A positive integer length.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws n horizontal rg.Lines on the given rg.RoseWindow,
where the topmost rg.Line has the given point as its leftmost
point and all the rg.Lines have the given length
and they are the given distance apart.
Each line is drawn with a 0.5 second pause after drawing it.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type point: rg.Point
:type length: int
:type distance_between_lines: int
:type window: rg.RoseWindow
"""
left = rg.Point(point.x, point.y)
right = rg.Point(point.x + length, point.y)
for _ in range(n):
line = rg.Line(left, right)
line.attach_to(window)
window.render(0.5)
left = rg.Point(left.x, left.y + distance_between_lines)
right = rg.Point(right.x, right.y + distance_between_lines)
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 37.089636 | 76 | 0.56529 |
74027d8fc9a13570850450c2df5140d899fc5a79 | 114 | py | Python | news/admin.py | hussienalbared/NewsWebsite | c29f4081c15e93117603021f7ca0a536366bd663 | [
"MIT"
] | null | null | null | news/admin.py | hussienalbared/NewsWebsite | c29f4081c15e93117603021f7ca0a536366bd663 | [
"MIT"
] | null | null | null | news/admin.py | hussienalbared/NewsWebsite | c29f4081c15e93117603021f7ca0a536366bd663 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import news
admin.site.register(news)
# Register your models here.
| 19 | 32 | 0.798246 |
5dc03aa4a757d91e8c3c10e5797acbcf76d6f05c | 6,653 | py | Python | Python/klampt/model/create/primitives.py | smeng9/Klampt | 7ff91bead90ac04280eff310623338fd10aaba79 | [
"BSD-3-Clause"
] | null | null | null | Python/klampt/model/create/primitives.py | smeng9/Klampt | 7ff91bead90ac04280eff310623338fd10aaba79 | [
"BSD-3-Clause"
] | null | null | null | Python/klampt/model/create/primitives.py | smeng9/Klampt | 7ff91bead90ac04280eff310623338fd10aaba79 | [
"BSD-3-Clause"
] | null | null | null | """Utilities for creating geometric primitives (and world entities made out
of them).
"""
from klampt import Geometry3D,GeometricPrimitive
from klampt.math import vectorops
def box(width,depth,height,center=None,R=None,t=None,world=None,name=None,mass=float('inf'),type='TriangleMesh'):
"""Makes a box with dimensions width x depth x height. The box is centered
at (0,0,0) by default.
Args:
width,depth,height (float): x,y,z dimensions of the box
center (list of 3 floats, optional): if None (typical),
the *geometry* of the box is centered at 0. Otherwise,
the *geometry* of the box is shifted relative to the
box's local coordinate system.
R,t (se3 transform, optional): if given, the box's world coordinates
will be rotated and shifted by this transform.
world (WorldModel, optional): If given, then the box will be a
RigidObjectModel or TerrainModel will be created in this world
name (str, optional): If world is given, this is the name of the object.
Default is 'box'.
mass (float, optional): If world is given and this is inf, then a
TerrainModel will be created. Otherwise, a RigidObjectModel
will be created with automatically determined inertia.
type (str, optional): the geometry type. Defaults to 'TriangleMesh',
but also 'GeometricPrimitive' and 'VolumeGrid' are accepted.
Returns:
Geometry3D, RigidObjectModel, or TerrainModel: A representation
of the box. If a world is given, then either a RigidObjectModel
or TerrainModel is added to the world and returned.
"""
if center is None:
center = [0,0,0]
prim = GeometricPrimitive()
prim.setAABB([center[0]-width*0.5,center[1]-depth*0.5,center[2]-height*0.5],[center[0]+width*0.5,center[1]+depth*0.5,center[2]+height*0.5])
geom = Geometry3D(prim)
if type != 'GeometricPrimitive':
geom = geom.convert(type)
if world is None:
if R is not None and t is not None:
geom.setCurrentTransform(R,t)
return geom
#want a RigidObjectModel or TerrainModel
if name is None:
name = 'box'
if mass != float('inf'):
bmass = Mass()
bmass.setMass(mass)
bmass.setCom(center)
bmass.setInertia([mass*(depth**2+height**2)/12,mass*(width**2+height**2)/12,mass*(width**2+depth**2)/12])
robj = world.makeRigidObject(name)
robj.geometry().set(geom)
robj.setMass(bmass)
if R is not None and t is not None:
robj.setTransform(R,t)
return robj
else:
tobj = world.makeTerrain(name)
if R is not None and t is not None:
geom.transform(R,t)
tobj.geometry().set(geom)
return tobj
def sphere(radius,center=None,R=None,t=None,world=None,name=None,mass=float('inf'),type='TriangleMesh'):
"""Makes a sphere with the given radius
Args:
radius (float): radius of the sphere
center (list of 3 floats, optional): if None (typical), the *geometry*
of the sphere is centered at 0. Otherwise, the *geometry* of
the sphere is shifted relative to the sphere's local coordinate system.
R,t (se3 transform, optional): if given, the sphere's world coordinates
will be rotated and shifted by this transform.
world (WorldModel, optional): If given, then the sphere will be a
RigidObjectModel or TerrainModel will be created in this world
name (str, optional): If world is given, this is the name of the object.
Default is 'sphere'.
mass (float, optional): If world is given and this is inf, then a
TerrainModel will be created. Otherwise, a RigidObjectModel
will be created with automatically determined inertia.
type (str, optional): the geometry type. Defaults to 'TriangleMesh',
but also 'GeometricPrimitive' and 'VolumeGrid' are accepted.
Returns:
Geometry3D, RigidObjectModel, or TerrainModel: A representation
of the sphere. If a world is given, then either a RigidObjectModel
or TerrainModel is added to the world and returned.
"""
if center is None:
center = [0,0,0]
prim = GeometricPrimitive()
prim.setSphere(center,radius)
geom = Geometry3D(prim)
if type != 'GeometricPrimitive':
geom = geom.convert(type)
if world is None:
if R is not None and t is not None:
geom.setCurrentTransform(R,t)
return geom
#want a RigidObjectModel or TerrainModel
if name is None:
name = 'sphere'
if mass != float('inf'):
bmass = Mass()
bmass.setMass(mass)
bmass.setCom(center)
bmass.setInertia([0.4*mass*radius**2]*3)
robj = world.makeRigidObject(name)
robj.geometry().set(geom)
robj.setMass(bmass)
if R is not None and t is not None:
robj.setTransform(R,t)
return robj
else:
tobj = world.makeTerrain(name)
if R is not None and t is not None:
geom.transform(R,t)
tobj.geometry().set(geom)
return tobj
def bbox(bmin,bmax,R=None,t=None,world=None,name=None,mass=float('inf'),type='TriangleMesh'):
"""Makes a box from bounds [bmin,bmax].
Args:
bmin (list of 3 floats): the lower corner of the box
bmax (list of 3 floats): the upper corner of the box
R,t (se3 transform, optional): if given, the box's world coordinates
will be rotated and shifted by this transform.
world (WorldModel, optional): If given, then the box will be a
RigidObjectModel or TerrainModel will be created in this world
name (str, optional): If world is given, this is the name of the object.
Default is 'box'.
mass (float, optional): If world is given and this is inf, then a
TerrainModel will be created. Otherwise, a RigidObjectModel
will be created with automatically determined inertia.
type (str, optional): the geometry type. Defaults to 'TriangleMesh',
but also 'GeometricPrimitive' and 'VolumeGrid' are accepted.
Returns:
Geometry3D, RigidObjectModel, or TerrainModel: A representation
of the box. If a world is given, then either a RigidObjectModel
or TerrainModel is added to the world and returned.
"""
w,d,h = vectorops.sub(bmax,bmin)
center = vectorops.interpolate(bmin,bmax,0.5)
return box(w,d,h,center,R,t,world,name,mass,type)
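# Illustrative usage sketch (dimensions and bounds below are arbitrary):
def _example_primitives():
    g = box(0.1, 0.2, 0.3)                # box geometry centered at the origin
    s = sphere(0.05, center=[0, 0, 0.5])  # sphere geometry with a shifted center
    b = bbox([0, 0, 0], [0.1, 0.1, 0.1])  # box built from corner bounds
    return g, s, b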
| 43.769737 | 143 | 0.643319 |
640e78eaac01c5d769a106a46940848fe2309424 | 6,298 | py | Python | meeting_scheduler/meeting_scheduler/settings.py | HulewiczKamil/kpz-2021-meeting-scheduler | f17227ff8f3b8450cbbb6a8b285972054f577b94 | [
"MIT"
] | 3 | 2021-03-15T16:14:12.000Z | 2021-03-15T16:15:48.000Z | meeting_scheduler/meeting_scheduler/settings.py | HulewiczKamil/kpz-2021-meeting-scheduler | f17227ff8f3b8450cbbb6a8b285972054f577b94 | [
"MIT"
] | 8 | 2021-03-24T23:51:23.000Z | 2021-04-15T18:22:41.000Z | meeting_scheduler/meeting_scheduler/settings.py | HulewiczKamil/kpz-2021-meeting-scheduler | f17227ff8f3b8450cbbb6a8b285972054f577b94 | [
"MIT"
] | 1 | 2021-09-07T17:59:48.000Z | 2021-09-07T17:59:48.000Z | """
Django settings for meeting_scheduler project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Rest framework and CORS
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'dj_rest_auth.registration',
#Custom applications
'scheduler_api',
'scheduler',
#django-allauth for social login
'django.contrib.sites',
'allauth',
'allauth.account',
#'rest_auth.registration', #?????????????????
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
# Provider specific settings
SOCIALACCOUNT_PROVIDERS = {
'google': {
# For each OAuth based provider, either add a ``SocialApp``
# (``socialaccount`` app) containing the required client
# credentials, or list them here:
'Google':{
'Client ID': os.environ.get('CLIENT_ID'),
'Client secret': os.environ.get('SECRET_GOOGLE'),
},
'SCOPE':[
"https://www.googleapis.com/auth/calendar.calendarlist.readonly",
"https://www.googleapis.com/auth/calendar.app.created",
"https://www.googleapis.com/auth/calendar.events.freebusy",
"https://www.googleapis.com/auth/calendar.events.public.readonly",
"https://www.googleapis.com/auth/calendar.freebusy",
"https://www.googleapis.com/auth/calendar.settings.readonly",
"https://www.googleapis.com/auth/calendar",
"https://www.googleapis.com/auth/calendar.readonly",
"https://www.googleapis.com/auth/calendar.events",
"https://www.googleapis.com/auth/calendar.events.owned",
"https://www.googleapis.com/auth/calendar.events.owned.readonly",
"https://www.googleapis.com/auth/calendar.events.readonly",
"https://www.googleapis.com/auth/calendar.calendarlist",
"https://www.googleapis.com/auth/calendar.calendars",
"https://www.googleapis.com/auth/calendar.calendars.readonly",
"https://www.googleapis.com/auth/calendar.acls"
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
MIDDLEWARE = [
#'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'meeting_scheduler.urls'
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meeting_scheduler.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SITE_ID = 2
REST_USE_JWT = True
JWT_AUTH_COOKIE = 'aaaa'
JWT_AUTH_REFRESH_COOKIE = 'refresh_token'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_PARSER_CLASSES': [
'rest_framework.parsers.JSONParser',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'dj_rest_auth.jwt_auth.JWTCookieAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.TokenAuthentication',
),
}
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
]
| 28.757991 | 91 | 0.67704 |
032aac751897585d6411284050d5982be496a591 | 2,947 | py | Python | oss/GameElements/map.py | m4reQ/Oss-2.0 | 225011f8aa50c7219f00f85d03a1bc7964798c0d | [
"MIT"
] | 2 | 2019-08-13T13:54:20.000Z | 2021-07-11T05:52:54.000Z | oss/GameElements/map.py | m4reQ/Oss-2.0 | 225011f8aa50c7219f00f85d03a1bc7964798c0d | [
"MIT"
] | 14 | 2019-08-10T23:10:46.000Z | 2020-04-13T21:49:18.000Z | oss/GameElements/map.py | m4reQ/Oss-2.0 | 225011f8aa50c7219f00f85d03a1bc7964798c0d | [
"MIT"
] | null | null | null | if __name__ == '__main__':
quit()
from Utils.graphics import TranslateCoord, TranslationMode
from Utils.game import GetMaxPoints
from Utils.debug import Log, LogLevel
def ParseMap(filepath):
data = []
lineCount = 0
try:
with open(filepath, "r") as f:
for line in f.readlines():
lineCount += 1
if line[0] == '#' or line[0] == "[":
continue
lineData = line.split(",")
newLineData = []
for x in lineData:
x = x.replace("\n", "")
x = x.replace(" ", "")
try:
x = float(x)
except ValueError: #if even a single fragment of any line failed to parse stop loading rest of map
Log("Invalid map format at line {}".format(lineCount), LogLevel.Error, __name__)
return -1
newLineData.append(x)
data.append(newLineData)
except IOError:
print("Error cannot load map: File {} didn't found.".format(filepath))
return -1
return data
def MakeMap(filepath, targetRes):
"""
rtype: array, tuple
returns: array
"""
data = ParseMap(filepath)
if data == -1:
return
#import circles here to avoid circular import
from .circle import Circle
circles = []
for element in data:
try:
posX = float(element[0])
posY = float(element[1])
time = int(element[2])
tposX, tposY = TranslateCoord((posX, posY), targetRes, TranslationMode.Decode)
obj = Circle((tposX, tposY), time)
circles.append(obj)
except IndexError:
print('Cannot make object from {}.\n Maybe the map has an outdated or invalid format.'.format(element))
return
Log("Map '{}' loaded succesfully.".format(filepath), LogLevel.Info, __name__)
return circles
class Map:
resolution = (0, 0)
@staticmethod
def ReadHeader(filename):
try:
with open(filename) as f:
while True:
first = f.readline()
first = first.replace("\n", "")
if first[0] == "#":
continue
if first[0] == "[" and first[-1] == "]":
break
except IOError:
print("Cannot load map: File {} didn't found.".format(filename))
return (0, 0, 0, -1)
first = first[1:-1]
data = first.split(",")
data = [x.replace(" ", "") for x in data]
data[-1] = data[-1].replace("\n", "")
return (data[0], int(data[1]), int(data[2]), 1)
def __init__(self, filename):
self.filename = filename
self.name, self.id, self.length, self.loadSuccess = Map.ReadHeader(filename)
if self.loadSuccess == -1:
print("Cannot load map from '{}'.".format(filename))
self.objects = MakeMap(filename, Map.resolution)
self.objectsLeft = self.objects[:]
self.objCount = len(self.objects)
self.shouldPlay = True
self.maxCombo = self.objCount
self.maxPoints = GetMaxPoints(self.maxCombo)
def __str__(self):
return "Map - Name: {}, ID: {}, Length: {}, Objects: {}".format(self.name, self.id, self.length, self.objCount)
class EmptyMap(object):
def __init__(self):
self.objectsLeft = []
self.shouldPlay = True
self.loadSuccess = 1
self.length = float('inf')
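# Illustrative sketch of the map file layout these parsers expect
# (all values below are made up): '#' lines are comments, the bracketed line
# is the header read by Map.ReadHeader (name, id, length), and every other
# line is an "x, y, time" triple consumed by ParseMap.
#
#   # example map
#   [example_map, 1, 120]
#   0.25, 0.40, 1000
#   0.50, 0.55, 2250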
| 23.204724 | 113 | 0.642348 |
2076f44a6175269618c7ec6652ef5bcd1ca5eb30 | 666 | py | Python | src/utils/data_mgmt.py | sharad28/ANN_implementation | 93a9107408da1745dd73b2a11a7137ba1eae79ff | [
"MIT"
] | 1 | 2022-01-26T20:41:13.000Z | 2022-01-26T20:41:13.000Z | src/utils/data_mgmt.py | sharad28/ANN_implementation | 93a9107408da1745dd73b2a11a7137ba1eae79ff | [
"MIT"
] | null | null | null | src/utils/data_mgmt.py | sharad28/ANN_implementation | 93a9107408da1745dd73b2a11a7137ba1eae79ff | [
"MIT"
] | null | null | null | import tensorflow as tf
def get_data(validation_datasize):
mnist = tf.keras.datasets.mnist
(X_train_full, y_train_full), (X_test, y_test) = mnist.load_data()
# create a validation data set from the full training data
# Scale the data between 0 to 1 by dividing it by 255. as its an unsigned data between 0-255 range
X_valid, X_train = X_train_full[:validation_datasize] / 255., X_train_full[validation_datasize:] / 255.
y_valid, y_train = y_train_full[:validation_datasize], y_train_full[validation_datasize:]
# scale the test set as well
X_test = X_test / 255.
return (X_train, y_train), (X_valid, y_valid), (X_test, y_test) | 41.625 | 107 | 0.731231 |
2ede97076a16f8b7e5cb1199b61169b6122c3e3f | 2,653 | py | Python | test/test_pipeline/test_metrics.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | null | null | null | test/test_pipeline/test_metrics.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | 34 | 2020-10-06T08:06:46.000Z | 2021-01-21T13:23:34.000Z | test/test_pipeline/test_metrics.py | LMZimmer/Auto-PyTorch_refactor | ac7a9ce35e87a428caca2ac108b362a54d3b8f3a | [
"Apache-2.0"
] | 1 | 2020-10-14T12:25:47.000Z | 2020-10-14T12:25:47.000Z | import unittest
import unittest.mock
import numpy as np
from autoPyTorch.constants import STRING_TO_TASK_TYPES
from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
from autoPyTorch.pipeline.components.training.metrics.utils import calculate_score, get_metrics
class MetricsTest(unittest.TestCase):
def test_get_no_name(self):
dataset_properties = {'task_type': 'tabular_classification'}
metrics = get_metrics(dataset_properties)
for metric in metrics:
self.assertTrue(isinstance(metric, autoPyTorchMetric))
def test_get_name(self):
dataset_properties = {'task_type': 'tabular_classification'}
names = ['accuracy', 'average_precision']
metrics = get_metrics(dataset_properties, names)
for i in range(len(metrics)):
self.assertTrue(isinstance(metrics[i], autoPyTorchMetric))
self.assertEqual(metrics[i].name.lower(), names[i].lower())
def test_get_name_error(self):
dataset_properties = {'task_type': 'tabular_classification'}
names = ['root_mean_sqaured_error', 'average_precision']
try:
get_metrics(dataset_properties, names)
except ValueError as msg:
self.assertRegex(str(msg), r"Invalid name entered for task [a-z]+_[a-z]+, "
r"currently supported metrics for task include .*")
def test_metrics(self):
# test of all classification metrics
dataset_properties = {'task_type': 'tabular_classification'}
y_target = np.array([0, 1, 0, 1])
y_pred = np.array([0, 0, 0, 1])
metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
self.assertIsInstance(score_dict, dict)
for name, score in score_dict.items():
self.assertIsInstance(name, str)
self.assertIsInstance(score, float)
# test of all regression metrics
dataset_properties = {'task_type': 'tabular_regression'}
y_target = np.array([0.1, 0.6, 0.7, 0.4])
y_pred = np.array([0.6, 0.7, 0.4, 1])
metrics = get_metrics(dataset_properties=dataset_properties, all_supported_metrics=True)
score_dict = calculate_score(y_pred, y_target, STRING_TO_TASK_TYPES[dataset_properties['task_type']], metrics)
self.assertIsInstance(score_dict, dict)
for name, score in score_dict.items():
self.assertIsInstance(name, str)
self.assertIsInstance(score, float)
| 45.741379 | 118 | 0.684885 |
3e1682631c9ed0a76659b24770912b34d0b7701b | 3,285 | py | Python | tests/failure/failure_fixture.py | amittbose/peloton | 679ac0946f35f6ea17bd73b97e1a115530941f6e | [
"Apache-2.0"
] | null | null | null | tests/failure/failure_fixture.py | amittbose/peloton | 679ac0946f35f6ea17bd73b97e1a115530941f6e | [
"Apache-2.0"
] | null | null | null | tests/failure/failure_fixture.py | amittbose/peloton | 679ac0946f35f6ea17bd73b97e1a115530941f6e | [
"Apache-2.0"
] | null | null | null | import logging
from tests.failure.framework import components, framework
from tests.integration.common import wait_for_condition
from tests.integration import job as tjob
class FailureFixture(object):
"""
Fixture for failure tests. It is responsible for creating
and initializing the failure-testing framework. Each test
gets an instance of the fixture using which the test can
operate on the framework. In addition, the fixture provides
helper functions to make writing tests easier.
"""
# Number of attempts to make while waiting for a condition
MAX_RETRY_ATTEMPTS = 180
def __init__(self):
self.fw = framework.FailureFramework()
self.log = logging.getLogger(__name__)
self.log.level = logging.INFO
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.log.addHandler(sh)
self.mesos_master = components.MesosMaster()
self.mesos_agent = components.MesosAgent()
self.zookeeper = components.Zookeeper()
self.cassandra = components.Cassandra()
self.hostmgr = components.HostMgr()
self.jobmgr = components.JobMgr()
self.resmgr = components.ResMgr()
self.batch_pe = components.BatchPlacementEngine()
self.stateless_pe = components.StatelessPlacementEngine()
self.integ_config = tjob.IntegrationTestConfig(
max_retry_attempts=self.MAX_RETRY_ATTEMPTS)
def setup(self):
"""
Initializes the failure-test framework.
"""
self.fw.setup()
self.client = self.fw.client
def teardown(self):
"""
Cleans up state if any.
"""
pass
def reset_client(self):
"""
Re-initialize PelotonClient. Useful after a leader change has
happened.
"""
self.fw.reset_client()
self.client = self.fw.client
def job(self, **kwargs):
"""
Create the spec for a job with some defaults.
:param kwargs: Keyword arguments for job spec
"""
kwargs.setdefault('config', self.integ_config)
kwargs.setdefault('client', self.client)
return tjob.Job(**kwargs)
def wait_for_condition(self, condition):
"""
Wait for a condition to be true.
:param condition: Function that is evaluated
"""
wait_for_condition(message='', condition=condition,
config=self.integ_config)
def wait_for_leader_change(self, comp, old_leader):
"""
Wait for the leader of a component to change.
:param comp: Component to check
:param old_leader: Zookeeper data for old leader
"""
self.log.info("%s: waiting for leader change. Old leader %s",
comp.name, old_leader)
def leader_changed():
new_leader = self.fw.get_leader_info(comp)
self.log.debug("%s: leader info %s", comp.name, new_leader)
if new_leader != old_leader:
self.log.info("%s: leader changed to %s",
comp.name, new_leader)
return True
self.wait_for_condition(leader_changed)
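# Illustrative sketch of how a failure test might use this fixture; the job
# keyword arguments and runtime accessors below are hypothetical:
#
#     def test_hostmgr_restart(failure_tester):
#         job = failure_tester.job(job_file="test_job.yaml")
#         job.create()
#         failure_tester.fw.restart(failure_tester.hostmgr)
#         job.wait_for_state(goal_state="SUCCEEDED")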
| 33.865979 | 71 | 0.624353 |
baf22e195dd363d37ec1b54fef066b7e8f146734 | 649 | py | Python | steps/step58.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | [
"MIT"
] | null | null | null | steps/step58.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | [
"MIT"
] | null | null | null | steps/step58.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | [
"MIT"
] | null | null | null | import os, sys; sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from PIL import Image
import dezero
from dezero.models import VGG16
from dezero.dataset import preprocess_vgg
from dezero.datasets import get_imagenet_labels
url = 'https://github.com/oreilly-japan/deep-learning-from-scratch-3/raw/images/zebra.jpg'
img_path = dezero.utils.get_file(url)
img = Image.open(img_path)
x = preprocess_vgg(img)
x = x[np.newaxis]
model = VGG16(pretrained=True)
with dezero.test_mode():
y = model(x)
predict_id = np.argmax(y.data)
model.plot(x, to_file='vgg.pdf')
labels = get_imagenet_labels()
print(labels[predict_id]) | 30.904762 | 90 | 0.767334 |
7d43e9b3bde41ab79351f2cf004fdb4a9395c1bb | 2,149 | py | Python | setup.py | sjayellis/molssi_devops_old_example | 8645a5d931bd59598227d1ef062664d06bcf53ce | [
"BSD-3-Clause"
] | null | null | null | setup.py | sjayellis/molssi_devops_old_example | 8645a5d931bd59598227d1ef062664d06bcf53ce | [
"BSD-3-Clause"
] | 1 | 2020-04-13T18:24:58.000Z | 2020-04-13T18:25:08.000Z | setup.py | sjayellis/molssi_devops_old_example | 8645a5d931bd59598227d1ef062664d06bcf53ce | [
"BSD-3-Clause"
] | null | null | null | """
molssi_devops
A sample repository for the MolSSI devops workshop
"""
import sys
from setuptools import setup
import versioneer
short_description = __doc__.split("\n")
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = "\n".join(short_description[2:]),
setup(
# Self-descriptive entries which should always be present
name='molssi_devops',
author='Sam',
author_email='sjellis@vt.edu',
description=short_description[0],
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD-3-Clause',
# Which Python importable modules should be included when your package is installed
packages=['molssi_devops', "molssi_devops.tests"],
# Optional include package data to ship with your package
# Comment out this line to prevent the files from being packaged with your software
# Extend/modify the list to include/exclude other items as need be
package_data={'molssi_devops': ["data/*.dat"]
},
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
# Additional entries you may want simply uncomment the lines you want and fill in the data
# author_email='me@place.org', # Author email
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
| 35.816667 | 118 | 0.684039 |
4c7376061b8024108bd2d5c8ce46ebb1cfc018b6 | 377 | py | Python | cloverly/resources/estimate.py | cloverly/cloverly-python-module | 773736db6713919c80be288a7875f41c37feaeba | [
"MIT"
] | 1 | 2021-08-10T02:54:19.000Z | 2021-08-10T02:54:19.000Z | cloverly/resources/estimate.py | cloverly/cloverly-python-module | 773736db6713919c80be288a7875f41c37feaeba | [
"MIT"
] | null | null | null | cloverly/resources/estimate.py | cloverly/cloverly-python-module | 773736db6713919c80be288a7875f41c37feaeba | [
"MIT"
] | null | null | null | """
Author: Zain Lakhani
Date: 08/06/2021
Title: Cloverly Resource Estimate Model
Description: Relates to /estimates endpoint. For creating and
viewing estimates
"""
from ..base import CloverlyResource
class Estimate(CloverlyResource):
"""Relates to /estimates endpoint. For creating and
viewing estimates
"""
resource_url = 'estimates'
| 20.944444 | 65 | 0.70557 |
a3dcc41a7d7392d4b830df92e204b241672c55fa | 2,178 | py | Python | examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-one-vs-all.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 3,787 | 2019-08-30T04:55:10.000Z | 2022-03-31T23:30:07.000Z | examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-one-vs-all.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 1,439 | 2019-08-29T16:35:52.000Z | 2022-03-31T11:55:31.000Z | examples/pipeline/hetero_logistic_regression/pipeline-hetero-lr-one-vs-all.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 1,179 | 2019-08-29T16:18:32.000Z | 2022-03-31T12:55:38.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
cur_path = os.path.realpath(__file__)
for i in range(4):
cur_path = os.path.dirname(cur_path)
print(f'fate_path: {cur_path}')
sys.path.append(cur_path)
from examples.pipeline.hetero_logistic_regression import common_tools
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
backend = config.backend
work_mode = config.work_mode
lr_param = {
"name": "hetero_lr_0",
"penalty": "L2",
"optimizer": "nesterov_momentum_sgd",
"tol": 1e-05,
"alpha": 0.0001,
"max_iter": 10,
"early_stop": "diff",
"multi_class": "ovr",
"batch_size": -1,
"learning_rate": 0.15,
"init_param": {
"init_method": "zeros"
}
}
pipeline = common_tools.make_normal_dsl(config, namespace, lr_param, is_ovr=True)
# fit model
job_parameters = JobParameters(backend=backend, work_mode=work_mode)
pipeline.fit(job_parameters)
# query component summary
common_tools.prettify(pipeline.get_component("hetero_lr_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
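# Typical invocation sketch (the config path is an example):
#     python pipeline-hetero-lr-one-vs-all.py -config ../../config.yaml
# When -config is omitted, main() falls back to the default ../../config.yaml
# relative to this examples directory.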
| 29.835616 | 85 | 0.676309 |
ad5ac9c2187d5967505433ec3b8c619e1b3b8c7f | 13,805 | py | Python | env/Lib/site-packages/pylint/checkers/spelling.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 33 | 2020-10-05T01:04:55.000Z | 2021-06-24T01:52:31.000Z | env/Lib/site-packages/pylint/checkers/spelling.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 14 | 2020-10-07T03:15:12.000Z | 2021-01-15T11:53:29.000Z | env/Lib/site-packages/pylint/checkers/spelling.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 11 | 2020-07-31T08:20:43.000Z | 2020-08-21T04:08:29.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2014-2019 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2015 Pavel Roskin <proski@gnu.org>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016-2017 Pedro Algarvio <pedro@algarvio.me>
# Copyright (c) 2016 Alexander Todorov <atodorov@otb.bg>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Mikhail Fesenko <proggga@gmail.com>
# Copyright (c) 2018, 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2019 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2019 agutole <toldo_carp@hotmail.com>
# Copyright (c) 2019 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Checker for spelling errors in comments and docstrings.
"""
import os
import re
import tokenize
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker, ITokenChecker
try:
import enchant
from enchant.tokenize import ( # type: ignore
get_tokenizer,
Chunker,
Filter,
EmailFilter,
URLFilter,
WikiWordFilter,
)
except ImportError:
enchant = None
# pylint: disable=no-init
class Filter: # type: ignore
def _skip(self, word):
raise NotImplementedError
class Chunker: # type: ignore
pass
if enchant is not None:
br = enchant.Broker()
dicts = br.list_dicts()
dict_choices = [""] + [d[0] for d in dicts]
dicts = ["%s (%s)" % (d[0], d[1].name) for d in dicts]
dicts = ", ".join(dicts)
instr = ""
else:
dicts = "none"
dict_choices = [""]
instr = " To make it work, install the python-enchant package."
class WordsWithDigitsFilter(Filter):
"""Skips words with digits.
"""
def _skip(self, word):
for char in word:
if char.isdigit():
return True
return False
class WordsWithUnderscores(Filter):
"""Skips words with underscores.
They are probably function parameter names.
"""
def _skip(self, word):
return "_" in word
class CamelCasedWord(Filter):
r"""Filter skipping over camelCasedWords.
This filter skips any words matching the following regular expression:
^([a-z]\w+[A-Z]+\w+)
That is, any words that are camelCasedWords.
"""
_pattern = re.compile(r"^([a-z]+([\d]|[A-Z])(?:\w+)?)")
def _skip(self, word):
return bool(self._pattern.match(word))
class SphinxDirectives(Filter):
r"""Filter skipping over Sphinx Directives.
This filter skips any words matching the following regular expression:
^:([a-z]+):`([^`]+)(`)?
That is, for example, :class:`BaseQuery`
"""
# The final ` in the pattern is optional because enchant strips it out
_pattern = re.compile(r"^:([a-z]+):`([^`]+)(`)?")
def _skip(self, word):
return bool(self._pattern.match(word))
class ForwardSlashChunker(Chunker):
"""
This chunker allows splitting words like 'before/after' into 'before' and 'after'
"""
def next(self):
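        # Split 'before/after' into separate chunks when both sides of the
        # slash are alphabetic; otherwise the remaining text is returned whole.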
while True:
if not self._text:
raise StopIteration()
if "/" not in self._text:
text = self._text
self._offset = 0
self._text = ""
return (text, 0)
pre_text, post_text = self._text.split("/", 1)
self._text = post_text
self._offset = 0
if (
not pre_text
or not post_text
or not pre_text[-1].isalpha()
or not post_text[0].isalpha()
):
self._text = ""
self._offset = 0
return (pre_text + "/" + post_text, 0)
return (pre_text, 0)
def _next(self):
while True:
if "/" not in self._text:
return (self._text, 0)
pre_text, post_text = self._text.split("/", 1)
if not pre_text or not post_text:
break
if not pre_text[-1].isalpha() or not post_text[0].isalpha():
raise StopIteration()
self._text = pre_text + " " + post_text
raise StopIteration()
class SpellingChecker(BaseTokenChecker):
"""Check spelling in comments and docstrings"""
__implements__ = (ITokenChecker, IAstroidChecker)
name = "spelling"
msgs = {
"C0401": (
"Wrong spelling of a word '%s' in a comment:\n%s\n"
"%s\nDid you mean: '%s'?",
"wrong-spelling-in-comment",
"Used when a word in comment is not spelled correctly.",
),
"C0402": (
"Wrong spelling of a word '%s' in a docstring:\n%s\n"
"%s\nDid you mean: '%s'?",
"wrong-spelling-in-docstring",
"Used when a word in docstring is not spelled correctly.",
),
"C0403": (
"Invalid characters %r in a docstring",
"invalid-characters-in-docstring",
"Used when a word in docstring cannot be checked by enchant.",
),
}
options = (
(
"spelling-dict",
{
"default": "",
"type": "choice",
"metavar": "<dict name>",
"choices": dict_choices,
"help": "Spelling dictionary name. "
"Available dictionaries: %s.%s" % (dicts, instr),
},
),
(
"spelling-ignore-words",
{
"default": "",
"type": "string",
"metavar": "<comma separated words>",
"help": "List of comma separated words that " "should not be checked.",
},
),
(
"spelling-private-dict-file",
{
"default": "",
"type": "string",
"metavar": "<path to file>",
"help": "A path to a file that contains the private "
"dictionary; one word per line.",
},
),
(
"spelling-store-unknown-words",
{
"default": "n",
"type": "yn",
"metavar": "<y_or_n>",
"help": "Tells whether to store unknown words to the "
"private dictionary (see the "
"--spelling-private-dict-file option) instead of "
"raising a message.",
},
),
(
"max-spelling-suggestions",
{
"default": 4,
"type": "int",
"metavar": "N",
"help": "Limits count of emitted suggestions for " "spelling mistakes.",
},
),
)
def open(self):
self.initialized = False
self.private_dict_file = None
if enchant is None:
return
dict_name = self.config.spelling_dict
if not dict_name:
return
self.ignore_list = [
w.strip() for w in self.config.spelling_ignore_words.split(",")
]
# "param" appears in docstring in param description and
# "pylint" appears in comments in pylint pragmas.
self.ignore_list.extend(["param", "pylint"])
# Expand tilde to allow e.g. spelling-private-dict-file = ~/.pylintdict
if self.config.spelling_private_dict_file:
self.config.spelling_private_dict_file = os.path.expanduser(
self.config.spelling_private_dict_file
)
if self.config.spelling_private_dict_file:
self.spelling_dict = enchant.DictWithPWL(
dict_name, self.config.spelling_private_dict_file
)
self.private_dict_file = open(self.config.spelling_private_dict_file, "a")
else:
self.spelling_dict = enchant.Dict(dict_name)
if self.config.spelling_store_unknown_words:
self.unknown_words = set()
self.tokenizer = get_tokenizer(
dict_name,
            chunkers=[ForwardSlashChunker],
filters=[
EmailFilter,
URLFilter,
WikiWordFilter,
                WordsWithDigitsFilter,
WordsWithUnderscores,
CamelCasedWord,
SphinxDirectives,
],
)
self.initialized = True
def close(self):
if self.private_dict_file:
self.private_dict_file.close()
def _check_spelling(self, msgid, line, line_num):
original_line = line
try:
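            # Length of the leading whitespace, used later to map word offsets
            # back onto columns of the original (unstripped) line.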
initial_space = re.search(r"^[^\S]\s*", line).regs[0][1]
except (IndexError, AttributeError):
initial_space = 0
if line.strip().startswith("#"):
line = line.strip()[1:]
starts_with_comment = True
else:
starts_with_comment = False
for word, word_start_at in self.tokenizer(line.strip()):
word_start_at += initial_space
lower_cased_word = word.casefold()
# Skip words from ignore list.
if word in self.ignore_list or lower_cased_word in self.ignore_list:
continue
# Strip starting u' from unicode literals and r' from raw strings.
if word.startswith(("u'", 'u"', "r'", 'r"')) and len(word) > 2:
word = word[2:]
lower_cased_word = lower_cased_word[2:]
# If it is a known word, then continue.
try:
if self.spelling_dict.check(lower_cased_word):
# The lower cased version of word passed spell checking
continue
# If we reached this far, it means there was a spelling mistake.
# Let's retry with the original work because 'unicode' is a
# spelling mistake but 'Unicode' is not
if self.spelling_dict.check(word):
continue
except enchant.errors.Error:
self.add_message(
"invalid-characters-in-docstring", line=line_num, args=(word,)
)
continue
# Store word to private dict or raise a message.
if self.config.spelling_store_unknown_words:
if lower_cased_word not in self.unknown_words:
self.private_dict_file.write("%s\n" % lower_cased_word)
self.unknown_words.add(lower_cased_word)
else:
# Present up to N suggestions.
suggestions = self.spelling_dict.suggest(word)
del suggestions[self.config.max_spelling_suggestions :]
line_segment = line[word_start_at:]
match = re.search(r"(\W|^)(%s)(\W|$)" % word, line_segment)
if match:
# Start position of second group in regex.
col = match.regs[2][0]
else:
col = line_segment.index(word)
col += word_start_at
if starts_with_comment:
col += 1
indicator = (" " * col) + ("^" * len(word))
self.add_message(
msgid,
line=line_num,
args=(
word,
original_line,
indicator,
"'{}'".format("' or '".join(suggestions)),
),
)
def process_tokens(self, tokens):
if not self.initialized:
return
# Process tokens and look for comments.
for (tok_type, token, (start_row, _), _, _) in tokens:
if tok_type == tokenize.COMMENT:
if start_row == 1 and token.startswith("#!/"):
# Skip shebang lines
continue
if token.startswith("# pylint:"):
# Skip pylint enable/disable comments
continue
self._check_spelling("wrong-spelling-in-comment", token, start_row)
@check_messages("wrong-spelling-in-docstring")
def visit_module(self, node):
if not self.initialized:
return
self._check_docstring(node)
@check_messages("wrong-spelling-in-docstring")
def visit_classdef(self, node):
if not self.initialized:
return
self._check_docstring(node)
@check_messages("wrong-spelling-in-docstring")
def visit_functiondef(self, node):
if not self.initialized:
return
self._check_docstring(node)
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(self, node):
"""check the node has any spelling errors"""
docstring = node.doc
if not docstring:
return
start_line = node.lineno + 1
# Go through lines of docstring
for idx, line in enumerate(docstring.splitlines()):
self._check_spelling("wrong-spelling-in-docstring", line, start_line + idx)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(SpellingChecker(linter))
| 33.185096 | 88 | 0.547917 |
c5e9c6a6589ab2e4e7f7da4905d07ddb45ccedc8 | 8,232 | py | Python | qa/rpc-tests/test_framework/test_framework.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/test_framework.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | 1 | 2017-03-10T16:37:46.000Z | 2017-03-10T16:37:46.000Z | qa/rpc-tests/test_framework/test_framework.py | PETER-ITPE/bitcoin_pjt | 53c300fb525ab3e21206d47d8353f5246b4f24d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
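    """Base class for bitcoind functional tests.
    Concrete tests typically override __init__ (to set num_nodes and
    setup_clean_chain), optionally add_options and setup_network, implement
    run_test, and invoke main() on an instance from their __main__ block.
    """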
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
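        # Seed deterministic port assignment so concurrent test runs do not
        # collide on RPC/P2P ports.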
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| 37.589041 | 142 | 0.609815 |
a00ceb90211e371c3b2f2b32f2042d1556158595 | 20,975 | py | Python | tensorflow/python/training/session_manager.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 6 | 2017-08-07T14:05:41.000Z | 2020-02-28T03:57:56.000Z | tensorflow/python/training/session_manager.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 6 | 2020-04-21T20:38:18.000Z | 2020-06-16T01:00:15.000Z | tensorflow/python/training/session_manager.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 3 | 2018-04-11T03:08:18.000Z | 2021-05-09T21:51:42.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and creates session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.util.tf_export import tf_export
def _maybe_name(obj):
"""Returns object name if it has one, or a message otherwise.
  This is useful for names that appear in error messages.
Args:
obj: Object to get the name of.
Returns:
name, "None", or a "no name" message.
"""
if obj is None:
return "None"
elif hasattr(obj, "name"):
return obj.name
else:
return "<no name for %s>" % type(obj)
@tf_export("train.SessionManager")
class SessionManager(object):
"""Training helper that restores from checkpoint and creates session.
This class is a small wrapper that takes care of session creation and
  checkpoint recovery. It also provides functions that facilitate
coordination among multiple training threads or processes.
* Checkpointing trained variables as the training progresses.
* Initializing variables on startup, restoring them from the most recent
    checkpoint after a crash, or waiting for checkpoints to become available.
### Usage:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will checkpoint the model in '/tmp/mydir'.
sm = SessionManager()
sess = sm.prepare_session(master, init_op, saver, checkpoint_dir)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`prepare_session()` initializes or restores a model. It requires `init_op`
  and `saver` as arguments.
A second process could wait for the model to be ready by doing the following:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a SessionManager that will wait for the model to become ready.
sm = SessionManager()
sess = sm.wait_for_session(master)
# Use the session to train the graph.
while True:
sess.run(<my_train_op>)
```
`wait_for_session()` waits for a model to be initialized by other processes.
"""
def __init__(self,
local_init_op=None,
ready_op=None,
ready_for_local_init_op=None,
graph=None,
recovery_wait_secs=30):
"""Creates a SessionManager.
    The `local_init_op` is an `Operation` that is always run after a new session
    is created. If `None`, this step is skipped.
The `ready_op` is an `Operation` used to check if the model is ready. The
model is considered ready if that operation returns an empty 1D string
tensor. If the operation returns a non empty 1D string tensor, the elements
are concatenated and used to indicate to the user why the model is not
ready.
The `ready_for_local_init_op` is an `Operation` used to check if the model
is ready to run local_init_op. The model is considered ready if that
operation returns an empty 1D string tensor. If the operation returns a non
empty 1D string tensor, the elements are concatenated and used to indicate
to the user why the model is not ready.
If `ready_op` is `None`, the model is not checked for readiness.
`recovery_wait_secs` is the number of seconds between checks that
the model is ready. It is used by processes to wait for a model to
be initialized or restored. Defaults to 30 seconds.
Args:
local_init_op: An `Operation` run immediately after session creation.
Usually used to initialize tables and local variables.
ready_op: An `Operation` to check if the model is initialized.
ready_for_local_init_op: An `Operation` to check if the model is ready
to run local_init_op.
graph: The `Graph` that the model will use.
recovery_wait_secs: Seconds between checks for the model to be ready.
Raises:
ValueError: If ready_for_local_init_op is not None but local_init_op is
None
"""
# Sets default values of arguments.
if graph is None:
graph = ops.get_default_graph()
self._local_init_op = local_init_op
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._graph = graph
self._recovery_wait_secs = recovery_wait_secs
self._target = None
if ready_for_local_init_op is not None and local_init_op is None:
raise ValueError("If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "
", ready_for_local_init_op [%s]" %
ready_for_local_init_op)
def _restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if
the session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = saver_mod.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
def prepare_session(self,
master,
init_op=None,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None,
init_feed_dict=None,
init_fn=None):
"""Creates a `Session`. Makes sure the model is ready to be used.
Creates a `Session` on 'master'. If a `saver` object is passed in, and
`checkpoint_dir` points to a directory containing valid checkpoint
files, then it will try to recover the model from checkpoint. If
no checkpoint files are available, and `wait_for_checkpoint` is
`True`, then the process would check every `recovery_wait_secs`,
up to `max_wait_secs`, for recovery to succeed.
If the model cannot be recovered successfully then it is initialized by
running the `init_op` and calling `init_fn` if they are provided.
The `local_init_op` is also run after init_op and init_fn, regardless of
whether the model was recovered successfully, but only if
`ready_for_local_init_op` passes.
If the model is recovered from a checkpoint it is assumed that all
global variables have been initialized, in particular neither `init_op`
nor `init_fn` will be executed.
It is an error if the model cannot be recovered and no `init_op`
or `init_fn` or `local_init_op` are passed.
Args:
master: `String` representation of the TensorFlow master to use.
init_op: Optional `Operation` used to initialize the model.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
optional `init_op` is called. The callable must accept one argument,
the session being initialized.
Returns:
A `Session` object that can be used to drive the model.
    Raises:
      RuntimeError: If the model cannot be initialized or recovered.
      ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
        set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
if not is_loaded_from_checkpoint:
if init_op is None and not init_fn and self._local_init_op is None:
raise RuntimeError("Model is not initialized and no init_op or "
"init_fn or local_init_op was given")
if init_op is not None:
sess.run(init_op, feed_dict=init_feed_dict)
if init_fn:
init_fn(sess)
local_init_success, msg = self._try_run_local_init_op(sess)
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for local_init. "
"Init op: %s, init fn: %s, error: %s" % (_maybe_name(init_op),
init_fn,
msg))
is_ready, msg = self._model_ready(sess)
if not is_ready:
raise RuntimeError(
"Init operations did not make model ready. "
"Init op: %s, init fn: %s, local_init_op: %s, error: %s" %
(_maybe_name(init_op), init_fn, self._local_init_op, msg))
return sess
def recover_session(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, recovering if possible.
Creates a new session on 'master'. If the session is not initialized
and can be recovered from a checkpoint, recover it.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint in the
dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, initialized) where 'initialized' is `True` if
the session could be recovered and initialized, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path are
set.
"""
sess, is_loaded_from_checkpoint = self._restore_checkpoint(
master,
saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path,
wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs,
config=config)
# Always try to run local_init_op
local_init_success, msg = self._try_run_local_init_op(sess)
if not is_loaded_from_checkpoint:
# Do not need to run checks for readiness
return sess, False
restoring_file = checkpoint_dir or checkpoint_filename_with_path
if not local_init_success:
logging.info(
"Restoring model from %s did not make model ready for local init:"
" %s", restoring_file, msg)
return sess, False
is_ready, msg = self._model_ready(sess)
if not is_ready:
logging.info("Restoring model from %s did not make model ready: %s",
restoring_file, msg)
return sess, False
logging.info("Restored model from %s", restoring_file)
return sess, is_loaded_from_checkpoint
def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
"""Creates a new `Session` and waits for model to be ready.
Creates a new `Session` on 'master'. Waits for the model to be
initialized or recovered from a checkpoint. It's expected that
another thread or process will make the model ready, and that this
is intended to be used by threads/processes that participate in a
distributed training configuration where a different thread/process
is responsible for initializing or recovering the model being trained.
NB: The amount of time this method waits for the session is bounded
by max_wait_secs. By default, this function will wait indefinitely.
Args:
master: `String` representation of the TensorFlow master to use.
config: Optional ConfigProto proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
Returns:
A `Session`. May be None if the operation exceeds the timeout
specified by config.operation_timeout_in_ms.
Raises:
tf.DeadlineExceededError: if the session is not available after
max_wait_secs.
"""
self._target = master
if max_wait_secs is None:
max_wait_secs = float("Inf")
timer = _CountDownTimer(max_wait_secs)
while True:
sess = session.Session(self._target, graph=self._graph, config=config)
not_ready_msg = None
not_ready_local_msg = None
local_init_success, not_ready_local_msg = self._try_run_local_init_op(
sess)
if local_init_success:
# Successful if local_init_op is None, or ready_for_local_init_op passes
is_ready, not_ready_msg = self._model_ready(sess)
if is_ready:
return sess
self._safe_close(sess)
# Do we have enough time left to try again?
      remaining_secs_after_wait = (
          timer.secs_remaining() - self._recovery_wait_secs)
      if remaining_secs_after_wait < 0:
raise errors.DeadlineExceededError(
None, None,
"Session was not ready after waiting %d secs." % (max_wait_secs,))
logging.info("Waiting for model to be ready. "
"Ready_for_local_init_op: %s, ready: %s",
not_ready_local_msg, not_ready_msg)
time.sleep(self._recovery_wait_secs)
def _safe_close(self, sess):
"""Closes a session without raising an exception.
Just like sess.close() but ignores exceptions.
Args:
sess: A `Session`.
"""
# pylint: disable=broad-except
try:
sess.close()
except Exception:
# Intentionally not logging to avoid user complaints that
# they get cryptic errors. We really do not care that Close
# fails.
pass
# pylint: enable=broad-except
def _model_ready(self, sess):
"""Checks if the model is ready or not.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
return _ready(self._ready_op, sess, "Model not ready")
def _model_ready_for_local_init(self, sess):
"""Checks if the model is ready to run local_init_op.
Args:
sess: A `Session`.
Returns:
A tuple (is_ready, msg), where is_ready is True if ready to run
local_init_op and False otherwise, and msg is `None` if the model is
ready to run local_init_op, a `String` with the reason why it is not ready
otherwise.
"""
return _ready(self._ready_for_local_init_op, sess,
"Model not ready for local init")
def _try_run_local_init_op(self, sess):
"""Tries to run _local_init_op, if not None, and is ready for local init.
Args:
sess: A `Session`.
Returns:
A tuple (is_successful, msg), where is_successful is True if
_local_init_op is None, or we ran _local_init_op, and False otherwise;
and msg is a `String` with the reason why the model was not ready to run
local init.
"""
if self._local_init_op is not None:
is_ready_for_local_init, msg = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
logging.info("Running local_init_op.")
sess.run(self._local_init_op)
logging.info("Done running local_init_op.")
return True, None
else:
return False, msg
return True, None
def _ready(op, sess, msg):
"""Checks if the model is ready or not, as determined by op.
Args:
op: An op, either _ready_op or _ready_for_local_init_op, which defines the
readiness of the model.
sess: A `Session`.
msg: A message to log to warning if not ready
Returns:
A tuple (is_ready, msg), where is_ready is True if ready and False
otherwise, and msg is `None` if the model is ready, a `String` with the
reason why it is not ready otherwise.
"""
if op is None:
return True, None
else:
try:
ready_value = sess.run(op)
# The model is considered ready if ready_op returns an empty 1-D tensor.
# Also compare to `None` and dtype being int32 for backward
# compatibility.
if (ready_value is None or ready_value.dtype == np.int32 or
ready_value.size == 0):
return True, None
else:
# TODO(sherrym): If a custom ready_op returns other types of tensor,
# or strings other than variable names, this message could be
# confusing.
non_initialized_varnames = ", ".join(
[i.decode("utf-8") for i in ready_value])
return False, "Variables not initialized: " + non_initialized_varnames
except errors.FailedPreconditionError as e:
if "uninitialized" not in str(e):
logging.warning("%s : error [%s]", msg, str(e))
raise e
return False, str(e)
class _CountDownTimer(object):
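  """Tracks how many seconds remain out of an initial duration."""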
def __init__(self, duration_secs):
self._start_time_secs = time.time()
self._duration_secs = duration_secs
def secs_remaining(self):
diff = self._duration_secs - (time.time() - self._start_time_secs)
return max(0, diff)
| 38.415751 | 80 | 0.678236 |
63d34de50cb089056652dfd88a3c167e8ac6ee87 | 29,547 | py | Python | lib/rucio/core/permission/generic.py | sahiljajodia01/rucio | e8c957953b0864c0f4e1e27678efdf3aef2b2df1 | [
"Apache-2.0"
] | null | null | null | lib/rucio/core/permission/generic.py | sahiljajodia01/rucio | e8c957953b0864c0f4e1e27678efdf3aef2b2df1 | [
"Apache-2.0"
] | null | null | null | lib/rucio/core/permission/generic.py | sahiljajodia01/rucio | e8c957953b0864c0f4e1e27678efdf3aef2b2df1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016
# - Cedric Serfon <cedric.serfon@cern.ch>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2017
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018
#
# PY3K COMPATIBLE
import rucio.core.authentication
import rucio.core.scope
from rucio.core.account import list_account_attributes, has_account_attribute
from rucio.core.rse import list_rse_attributes
from rucio.db.sqla.constants import IdentityType
def has_permission(issuer, action, kwargs):
"""
Checks if an account has the specified permission to
execute an action with parameters.
    :param issuer: Account identifier which issues the command.
:param action: The action(API call) called by the account.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
perm = {'add_account': perm_add_account,
'del_account': perm_del_account,
'update_account': perm_update_account,
'add_rule': perm_add_rule,
'add_subscription': perm_add_subscription,
'add_scope': perm_add_scope,
'add_rse': perm_add_rse,
'update_rse': perm_update_rse,
'add_protocol': perm_add_protocol,
'del_protocol': perm_del_protocol,
'update_protocol': perm_update_protocol,
'declare_bad_file_replicas': perm_declare_bad_file_replicas,
'declare_suspicious_file_replicas': perm_declare_suspicious_file_replicas,
'add_replicas': perm_add_replicas,
'delete_replicas': perm_delete_replicas,
'skip_availability_check': perm_skip_availability_check,
'update_replicas_states': perm_update_replicas_states,
'add_rse_attribute': perm_add_rse_attribute,
'del_rse_attribute': perm_del_rse_attribute,
'del_rse': perm_del_rse,
'del_rule': perm_del_rule,
'update_rule': perm_update_rule,
'approve_rule': perm_approve_rule,
'update_subscription': perm_update_subscription,
'reduce_rule': perm_reduce_rule,
'move_rule': perm_move_rule,
'get_auth_token_user_pass': perm_get_auth_token_user_pass,
'get_auth_token_gss': perm_get_auth_token_gss,
'get_auth_token_x509': perm_get_auth_token_x509,
'add_account_identity': perm_add_account_identity,
'add_did': perm_add_did,
'add_dids': perm_add_dids,
'attach_dids': perm_attach_dids,
'detach_dids': perm_detach_dids,
'attach_dids_to_dids': perm_attach_dids_to_dids,
'create_did_sample': perm_create_did_sample,
'set_metadata': perm_set_metadata,
'set_status': perm_set_status,
'queue_requests': perm_queue_requests,
'set_rse_usage': perm_set_rse_usage,
'set_rse_limits': perm_set_rse_limits,
'query_request': perm_query_request,
'get_request_by_did': perm_get_request_by_did,
'cancel_request': perm_cancel_request,
'get_next': perm_get_next,
'set_account_limit': perm_set_account_limit,
'delete_account_limit': perm_delete_account_limit,
'config_sections': perm_config,
'config_add_section': perm_config,
'config_has_section': perm_config,
'config_options': perm_config,
'config_has_option': perm_config,
'config_get': perm_config,
'config_items': perm_config,
'config_set': perm_config,
'config_remove_section': perm_config,
'config_remove_option': perm_config,
'get_account_usage': perm_get_account_usage,
'add_attribute': perm_add_account_attribute,
'del_attribute': perm_del_account_attribute,
'list_heartbeats': perm_list_heartbeats,
'resurrect': perm_resurrect,
'update_lifetime_exceptions': perm_update_lifetime_exceptions,
'get_ssh_challenge_token': perm_get_ssh_challenge_token,
'get_signed_url': perm_get_signed_url,
'add_bad_pfns': perm_add_bad_pfns}
return perm.get(action, perm_default)(issuer=issuer, kwargs=kwargs)
def perm_default(issuer, kwargs):
"""
Default permission.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_add_rse(issuer, kwargs):
"""
Checks if an account can add a RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_update_rse(issuer, kwargs):
"""
Checks if an account can update a RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_add_rule(issuer, kwargs):
"""
Checks if an account can add a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if kwargs['account'] == issuer and not kwargs['locked']:
return True
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_add_subscription(issuer, kwargs):
"""
Checks if an account can add a subscription.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_add_rse_attribute(issuer, kwargs):
"""
Checks if an account can add a RSE attribute.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_del_rse_attribute(issuer, kwargs):
"""
Checks if an account can delete a RSE attribute.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_del_rse(issuer, kwargs):
"""
Checks if an account can delete a RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_add_account(issuer, kwargs):
"""
Checks if an account can add an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_del_account(issuer, kwargs):
"""
Checks if an account can del an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_update_account(issuer, kwargs):
"""
Checks if an account can update an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_add_scope(issuer, kwargs):
"""
    Checks if an account can add a scope to an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or issuer == kwargs.get('account')
def perm_get_auth_token_user_pass(issuer, kwargs):
"""
Checks if a user can request a token with user_pass for an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if rucio.core.authentication.exist_identity_account(identity=kwargs['username'], type=IdentityType.USERPASS, account=kwargs['account']):
return True
return False
def perm_get_auth_token_gss(issuer, kwargs):
"""
    Checks if a user can request a token with a GSS credential for an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if rucio.core.authentication.exist_identity_account(identity=kwargs['gsscred'], type=IdentityType.GSS, account=kwargs['account']):
return True
return False
def perm_get_auth_token_x509(issuer, kwargs):
"""
    Checks if a user can request a token with an x509 certificate for an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if rucio.core.authentication.exist_identity_account(identity=kwargs['dn'], type=IdentityType.X509, account=kwargs['account']):
return True
return False
def perm_add_account_identity(issuer, kwargs):
"""
Checks if an account can add an identity to an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or issuer == kwargs.get('account')
def perm_add_did(issuer, kwargs):
"""
    Checks if an account can add a data identifier to a scope.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
# Check the accounts of the issued rules
if issuer != 'root' and not has_account_attribute(account=issuer, key='admin'):
for rule in kwargs.get('rules', []):
if rule['account'] != issuer:
return False
return issuer == 'root'\
or has_account_attribute(account=issuer, key='admin')\
or rucio.core.scope.is_scope_owner(scope=kwargs['scope'], account=issuer)\
or kwargs['scope'] == u'mock'
def perm_add_dids(issuer, kwargs):
"""
Checks if an account can bulk add data identifiers.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
# Check the accounts of the issued rules
if issuer != 'root' and not has_account_attribute(account=issuer, key='admin'):
for did in kwargs['dids']:
for rule in did.get('rules', []):
if rule['account'] != issuer:
return False
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_attach_dids(issuer, kwargs):
"""
    Checks if an account can append a data identifier to another data identifier.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'\
or has_account_attribute(account=issuer, key='admin')\
or rucio.core.scope.is_scope_owner(scope=kwargs['scope'], account=issuer)\
or kwargs['scope'] == 'mock'
def perm_attach_dids_to_dids(issuer, kwargs):
"""
    Checks if an account can append a data identifier to another data identifier.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
else:
attachments = kwargs['attachments']
scopes = [did['scope'] for did in attachments]
scopes = list(set(scopes))
for scope in scopes:
if not rucio.core.scope.is_scope_owner(scope, issuer):
return False
return True
def perm_create_did_sample(issuer, kwargs):
"""
Checks if an account can create a sample of a data identifier collection.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'\
or has_account_attribute(account=issuer, key='admin')\
or rucio.core.scope.is_scope_owner(scope=kwargs['scope'], account=issuer)\
or kwargs['scope'] == 'mock'
def perm_del_rule(issuer, kwargs):
"""
Checks if an issuer can delete a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_update_rule(issuer, kwargs):
"""
Checks if an issuer can update a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_approve_rule(issuer, kwargs):
"""
Checks if an issuer can approve a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_reduce_rule(issuer, kwargs):
"""
Checks if an issuer can reduce a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_move_rule(issuer, kwargs):
"""
Checks if an issuer can move a replication rule.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_update_subscription(issuer, kwargs):
"""
Checks if an account can update a subscription.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
return False
def perm_detach_dids(issuer, kwargs):
"""
    Checks if an account can detach a data identifier from another data identifier.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return perm_attach_dids(issuer, kwargs)
def perm_set_metadata(issuer, kwargs):
"""
    Checks if an account can set metadata on a data identifier.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin') or rucio.core.scope.is_scope_owner(scope=kwargs['scope'], account=issuer)
def perm_set_status(issuer, kwargs):
"""
    Checks if an account can set status on a data identifier.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if kwargs.get('open', False):
if issuer != 'root' and not has_account_attribute(account=issuer, key='admin'):
return False
return issuer == 'root' or has_account_attribute(account=issuer, key='admin') or rucio.core.scope.is_scope_owner(scope=kwargs['scope'], account=issuer)
def perm_add_protocol(issuer, kwargs):
"""
Checks if an account can add a protocol to an RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_del_protocol(issuer, kwargs):
"""
Checks if an account can delete protocols from an RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_update_protocol(issuer, kwargs):
"""
Checks if an account can update protocols of an RSE.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_declare_bad_file_replicas(issuer, kwargs):
"""
Checks if an account can declare bad file replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_declare_suspicious_file_replicas(issuer, kwargs):
"""
Checks if an account can declare suspicious file replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return True
def perm_add_replicas(issuer, kwargs):
"""
Checks if an account can add replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return str(kwargs.get('rse', '')).endswith('SCRATCHDISK')\
or str(kwargs.get('rse', '')).endswith('USERDISK')\
or str(kwargs.get('rse', '')).endswith('MOCK')\
or str(kwargs.get('rse', '')).endswith('LOCALGROUPDISK')\
or issuer == 'root'\
or has_account_attribute(account=issuer, key='admin')
def perm_skip_availability_check(issuer, kwargs):
"""
    Checks if an account can skip the availability check to add/delete file replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_delete_replicas(issuer, kwargs):
"""
Checks if an account can delete replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return False
def perm_update_replicas_states(issuer, kwargs):
"""
Checks if an account can delete replicas.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_queue_requests(issuer, kwargs):
"""
Checks if an account can submit transfer or deletion requests on destination RSEs for data identifiers.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_query_request(issuer, kwargs):
"""
Checks if an account can query a request.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_get_request_by_did(issuer, kwargs):
"""
Checks if an account can get a request by DID.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return True
def perm_cancel_request(issuer, kwargs):
"""
Checks if an account can cancel a request.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_get_next(issuer, kwargs):
"""
Checks if an account can retrieve the next request matching the request type and state.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
def perm_set_rse_usage(issuer, kwargs):
"""
Checks if an account can set RSE usage information.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root'
def perm_set_rse_limits(issuer, kwargs):
"""
Checks if an account can set RSE limits.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_set_account_limit(issuer, kwargs):
"""
Checks if an account can set an account limit.
    :param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
# Check if user is a country admin
admin_in_country = []
for kv in list_account_attributes(account=issuer):
if kv['key'].startswith('country-') and kv['value'] == 'admin':
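            # e.g. the attribute key 'country-de' marks a country admin for 'de'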
admin_in_country.append(kv['key'].partition('-')[2])
if admin_in_country and list_rse_attributes(rse=kwargs['rse'], rse_id=None).get('country') in admin_in_country:
return True
return False
def perm_delete_account_limit(issuer, kwargs):
"""
Checks if an account can delete an account limit.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
return True
# Check if user is a country admin
admin_in_country = []
for kv in list_account_attributes(account=issuer):
if kv['key'].startswith('country-') and kv['value'] == 'admin':
admin_in_country.append(kv['key'].partition('-')[2])
if admin_in_country and list_rse_attributes(rse=kwargs['rse'], rse_id=None).get('country') in admin_in_country:
return True
return False
def perm_config(issuer, kwargs):
"""
Checks if an account can read/write the configuration.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_get_account_usage(issuer, kwargs):
"""
Checks if an account can get the account usage of an account.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
if issuer == 'root' or has_account_attribute(account=issuer, key='admin') or kwargs.get('account') == issuer:
return True
# Check if user is a country admin
for kv in list_account_attributes(account=issuer):
if kv['key'].startswith('country-') and kv['value'] == 'admin':
return True
return False
def perm_add_account_attribute(issuer, kwargs):
"""
Checks if an account can add attributes to accounts.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_del_account_attribute(issuer, kwargs):
"""
Checks if an account can delete attributes from accounts.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return perm_add_account_attribute(issuer, kwargs)
def perm_list_heartbeats(issuer, kwargs):
"""
Checks if an account can list heartbeats.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root'
def perm_resurrect(issuer, kwargs):
"""
Checks if an account can resurrect DIDs.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_update_lifetime_exceptions(issuer, kwargs):
"""
Checks if an account can approve/reject Lifetime Model exceptions.
:param issuer: Account identifier which issues the command.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root' or has_account_attribute(account=issuer, key='admin')
def perm_get_ssh_challenge_token(issuer, kwargs):
"""
Checks if an account can request a challenge token.
:param issuer: Account identifier which issues the command.
:returns: True if account is allowed to call the API call, otherwise False
"""
return True
def perm_get_signed_url(issuer, kwargs):
"""
Checks if an account can request a signed URL.
:param issuer: Account identifier which issues the command.
:returns: True if account is allowed to call the API call, otherwise False
"""
return issuer == 'root'
def perm_add_bad_pfns(issuer, kwargs):
"""
Checks if an account can declare bad PFNs.
:param issuer: Account identifier which issues the command.
:param kwargs: List of arguments for the action.
:returns: True if account is allowed, otherwise False
"""
return issuer == 'root'
| 35.25895 | 155 | 0.693167 |
23d195cb83a5fed64c038221fdf793c9a71a0448 | 23,636 | py | Python | src/prefect/agent/agent.py | pcieslinski/prefect | d934da5c9abe65f9cbb8f95d263ef336f3e53f6f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-08-01T15:44:32.000Z | 2020-08-01T15:44:32.000Z | src/prefect/agent/agent.py | pcieslinski/prefect | d934da5c9abe65f9cbb8f95d263ef336f3e53f6f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/prefect/agent/agent.py | pcieslinski/prefect | d934da5c9abe65f9cbb8f95d263ef336f3e53f6f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-04T13:22:11.000Z | 2020-05-04T13:22:11.000Z | import functools
import logging
import math
import os
import signal
import sys
import threading
import time
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Any, Generator, Iterable, Set, Optional, cast
from urllib.parse import urlparse
import pendulum
from tornado import web
from tornado.ioloop import IOLoop
from prefect import config
from prefect.client import Client
from prefect.engine.state import Failed, Submitted
from prefect.serialization import state
from prefect.utilities.context import context
from prefect.utilities.exceptions import AuthorizationError
from prefect.utilities.graphql import GraphQLResult, with_args
ascii_name = r"""
____ __ _ _ _
| _ \ _ __ ___ / _| ___ ___| |_ / \ __ _ ___ _ __ | |_
| |_) | '__/ _ \ |_ / _ \/ __| __| / _ \ / _` |/ _ \ '_ \| __|
| __/| | | __/ _| __/ (__| |_ / ___ \ (_| | __/ | | | |_
|_| |_| \___|_| \___|\___|\__| /_/ \_\__, |\___|_| |_|\__|
|___/
"""
# Event to notify agent process to start looking for available flow runs.
AGENT_WAKE_EVENT = threading.Event()
@contextmanager
def exit_handler(agent: "Agent") -> Generator:
exit_event = threading.Event()
def _exit_handler(*args: Any, **kwargs: Any) -> None:
agent.logger.info("Keyboard Interrupt received: Agent is shutting down.")
exit_event.set()
AGENT_WAKE_EVENT.set()
original = signal.getsignal(signal.SIGINT)
try:
signal.signal(signal.SIGINT, _exit_handler)
yield exit_event
except SystemExit:
pass
finally:
signal.signal(signal.SIGINT, original)
class HealthHandler(web.RequestHandler):
"""Respond to /api/health"""
def get(self) -> None:
# Empty json blob, may add more info later
self.write({})
class PokeHandler(web.RequestHandler):
"""Respond to /api/poke
The handler is expected to be called by the user to notify the agent of available
flow runs waiting for execution.
"""
def get(self) -> None:
# Wake up agent that might be waiting for interval loop to complete.
AGENT_WAKE_EVENT.set()
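# Illustrative usage sketch (not part of the original file): when the agent is
# started with an agent_address such as "http://0.0.0.0:8080", an external
# process can wake it early with a plain GET request, for example
#   curl http://localhost:8080/api/poke
# The host and port above are assumptions chosen for the example.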
class Agent:
"""
Base class for Agents. Information on using the Prefect agents can be found at
https://docs.prefect.io/orchestration/agents/overview.html
This Agent class is a standard point for executing Flows in Prefect Cloud. It is meant to
have subclasses which inherit functionality from this class. The only piece that the
subclasses should implement is the `deploy_flow` function, which specifies how to run a
Flow on the given platform. It is built in this way to keep Prefect Cloud logic standard
but allows for platform specific customizability.
In order for this to operate `PREFECT__CLOUD__AGENT__AUTH_TOKEN` must be set as an
environment variable or in your user configuration file.
Args:
- name (str, optional): An optional name to give this agent. Can also be set through
the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- labels (List[str], optional): a list of labels, which are arbitrary string
identifiers used by Prefect Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that will
be set on each flow run that this agent submits for execution
- max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud
for flow runs; defaults to infinite
- agent_address (str, optional): Address to serve internal api at. Currently this is
just health checks for use by an orchestration layer. Leave blank for no api server
(default).
- no_cloud_logs (bool, optional): Disable logging to a Prefect backend for this agent
and all deployed flow runs
"""
def __init__(
self,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
max_polls: int = None,
agent_address: str = None,
no_cloud_logs: bool = False,
) -> None:
self.name = name or config.cloud.agent.get("name", "agent")
self.labels = labels or list(config.cloud.agent.get("labels", []))
self.env_vars = env_vars or config.cloud.agent.get("env_vars", dict())
self.max_polls = max_polls
self.log_to_cloud = False if no_cloud_logs else True
self.agent_address = agent_address or config.cloud.agent.get(
"agent_address", ""
)
self._api_server = None # type: ignore
self._api_server_loop = None # type: Optional[IOLoop]
self._api_server_thread = None # type: Optional[threading.Thread]
logger = logging.getLogger(self.name)
logger.setLevel(config.cloud.agent.get("level"))
if not any([isinstance(h, logging.StreamHandler) for h in logger.handlers]):
ch = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(context.config.logging.format)
formatter.converter = time.gmtime # type: ignore
ch.setFormatter(formatter)
logger.addHandler(ch)
self.logger = logger
self.submitting_flow_runs = set() # type: Set[str]
self.logger.debug("Verbose logs enabled")
self.logger.debug(f"Environment variables: {[*self.env_vars]}")
self.logger.debug(f"Max polls: {self.max_polls}")
self.logger.debug(f"Agent address: {self.agent_address}")
self.logger.debug(f"Log to Cloud: {self.log_to_cloud}")
token = config.cloud.agent.get("auth_token")
self.logger.debug(f"Prefect backend: {config.backend}")
self.client = Client(api_token=token)
def _verify_token(self, token: str) -> None:
"""
Checks whether a token with a `RUNNER` scope was provided
Args:
- token (str): The provided agent token to verify
Raises:
- AuthorizationError: if token is empty or does not have a RUNNER role
"""
if not token:
raise AuthorizationError("No agent API token provided.")
# Check if RUNNER role
result = self.client.graphql(query="query { auth_info { api_token_scope } }")
if (
not result.data # type: ignore
or result.data.auth_info.api_token_scope != "RUNNER" # type: ignore
):
raise AuthorizationError("Provided token does not have a RUNNER scope.")
def _register_agent(self) -> str:
"""
Register this agent with Prefect Cloud and retrieve agent ID
Returns:
- The agent ID as a string
"""
agent_id = self.client.register_agent(
agent_type=type(self).__name__, name=self.name, labels=self.labels # type: ignore
)
self.logger.debug(f"Agent ID: {agent_id}")
return agent_id
def start(self, _loop_intervals: dict = None) -> None:
"""
The main entrypoint to the agent. This function loops and constantly polls for
new flow runs to deploy
Args:
- _loop_intervals (dict, optional): Exposed for testing only.
"""
if config.backend == "cloud":
self._verify_token(self.client.get_auth_token())
self.client.attach_headers({"X-PREFECT-AGENT-ID": self._register_agent()})
try:
self.setup()
with exit_handler(self) as exit_event:
# Loop intervals for query sleep backoff
loop_intervals = _loop_intervals or {
0: 0.25,
1: 0.5,
2: 1.0,
3: 2.0,
4: 4.0,
5: 8.0,
6: 10.0,
}
index = 0
remaining_polls = math.inf if self.max_polls is None else self.max_polls
# the max_workers default changed in Python 3.8. For stable results the
# Python 3.8 default behaviour is used here.
max_workers = min(32, (os.cpu_count() or 1) + 4)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
self.logger.debug("Max Workers: {}".format(max_workers))
while not exit_event.is_set() and remaining_polls:
# Reset the event in case it was set by poke handler.
AGENT_WAKE_EVENT.clear()
self.heartbeat()
if self.agent_process(executor):
index = 0
elif index < max(loop_intervals.keys()):
index += 1
remaining_polls -= 1
self.logger.debug(
"Next query for flow runs in {} seconds".format(
loop_intervals[index]
)
)
# Wait for loop interval timeout or agent to be poked by
# external process before querying for flow runs again.
AGENT_WAKE_EVENT.wait(timeout=loop_intervals[index])
finally:
self.cleanup()
def setup(self) -> None:
self.agent_connect()
if self.agent_address:
parsed = urlparse(self.agent_address)
if not parsed.port:
raise ValueError("Must specify port in agent address")
port = cast(int, parsed.port)
hostname = parsed.hostname or ""
app = web.Application(
[("/api/health", HealthHandler), ("/api/poke", PokeHandler)]
)
def run() -> None:
self.logger.debug(
f"Agent API server listening on port {self.agent_address}"
)
self._api_server = app.listen(port, address=hostname) # type: ignore
self._api_server_loop = IOLoop.current()
self._api_server_loop.start() # type: ignore
self._api_server_thread = threading.Thread(
name="api-server", target=run, daemon=True
)
self._api_server_thread.start()
def cleanup(self) -> None:
self.on_shutdown()
if self._api_server is not None:
self.logger.debug("Stopping agent API server")
self._api_server.stop()
if self._api_server_loop is not None:
self.logger.debug("Stopping agent API server loop")
def stop_server() -> None:
try:
loop = cast(IOLoop, self._api_server_loop)
loop.stop()
except Exception:
pass
self._api_server_loop.add_callback(stop_server)
if self._api_server_thread is not None:
self.logger.debug("Joining agent API threads")
# Give the server a small period to shutdown nicely, otherwise it
# will terminate on exit anyway since it's a daemon thread.
self._api_server_thread.join(timeout=1)
def on_shutdown(self) -> None:
"""
Invoked when the event loop is exiting and the agent is shutting down. Intended
as a hook for child classes to optionally implement.
"""
def agent_connect(self) -> None:
"""
Verify agent connection to Prefect API by querying
"""
print(ascii_name)
self.logger.info(
"Starting {} with labels {}".format(type(self).__name__, self.labels)
)
self.logger.info(
"Agent documentation can be found at https://docs.prefect.io/orchestration/"
)
self.logger.info(
"Agent connecting to the Prefect API at {}".format(config.cloud.api)
)
try:
self.client.graphql(query="query { hello }")
except Exception as exc:
self.logger.error(
"There was an error connecting to {}".format(config.cloud.api)
)
self.logger.error(exc)
self.logger.info("Waiting for flow runs...")
def deploy_and_update_flow_run(self, flow_run: "GraphQLResult") -> None:
"""
Deploy a flow run and update Cloud with the resulting deployment info.
If any errors occur when submitting the flow run, capture the error and log to Cloud.
Args:
- flow_run (GraphQLResult): The specific flow run to deploy
"""
# Deploy flow run and mark failed if any deployment error
try:
self.update_state(flow_run)
deployment_info = self.deploy_flow(flow_run)
if getattr(flow_run, "id", None):
self.client.write_run_logs(
[
dict(
flow_run_id=getattr(flow_run, "id"), # type: ignore
name=self.name,
message="Submitted for execution: {}".format(
deployment_info
),
level="INFO",
)
]
)
except Exception as exc:
# if the state update failed, we don't want to follow up with another state update
if "State update failed" in str(exc):
self.logger.debug("Updating Flow Run state failed: {}".format(str(exc)))
return
self.logger.error(
"Logging platform error for flow run {}".format(
getattr(flow_run, "id", "UNKNOWN") # type: ignore
)
)
if getattr(flow_run, "id", None):
self.client.write_run_logs(
[
dict(
flow_run_id=getattr(flow_run, "id"), # type: ignore
name=self.name,
message=str(exc),
level="ERROR",
)
]
)
self.mark_failed(flow_run=flow_run, exc=exc)
def on_flow_run_deploy_attempt(self, fut: "Future", flow_run_id: str) -> None:
"""
Indicates that a flow run deploy attempt has completed (successfully or otherwise).
This is intended to be a future callback hook, called in the agent's main thread
when the background thread has completed the deploy_and_update_flow_run() call, either
successfully, in error, or cancelled. In all cases the agent should be open to
attempting to deploy the flow run if the flow run id is still in the Cloud run queue.
Args:
- fut (Future): a callback requirement, the future which has completed or been
cancelled.
- flow_run_id (str): the id of the flow run that the future represents.
"""
self.submitting_flow_runs.remove(flow_run_id)
self.logger.debug("Completed flow run submission (id: {})".format(flow_run_id))
def agent_process(self, executor: "ThreadPoolExecutor") -> bool:
"""
Full process for finding flow runs, updating states, and deploying.
Args:
- executor (ThreadPoolExecutor): the interface to submit flow deployments in
background threads
Returns:
- bool: whether or not flow runs were found
"""
flow_runs = None
try:
flow_runs = self.query_flow_runs()
if flow_runs:
self.logger.info(
"Found {} flow run(s) to submit for execution.".format(
len(flow_runs)
)
)
for flow_run in flow_runs:
fut = executor.submit(self.deploy_and_update_flow_run, flow_run)
self.submitting_flow_runs.add(flow_run.id)
fut.add_done_callback(
functools.partial(
self.on_flow_run_deploy_attempt, flow_run_id=flow_run.id
)
)
except Exception as exc:
self.logger.error(exc)
return bool(flow_runs)
def query_flow_runs(self) -> list:
"""
Query Prefect Cloud for flow runs which need to be deployed and executed
Returns:
- list: A list of GraphQLResult flow run objects
"""
self.logger.debug("Querying for flow runs")
# keep a copy of what was currently running before the query (future callbacks may be
# updating this set)
currently_submitting_flow_runs = self.submitting_flow_runs.copy()
# Get scheduled flow runs from queue
mutation = {
"mutation($input: get_runs_in_queue_input!)": {
"get_runs_in_queue(input: $input)": {"flow_run_ids"}
}
}
now = pendulum.now("UTC")
result = self.client.graphql(
mutation,
variables={
"input": {"before": now.isoformat(), "labels": list(self.labels)}
},
)
# we queried all of the available flow runs, however, some may have already been pulled
# by this agent and are in the process of being submitted in the background. We do not
# want to act on these "duplicate" flow runs until we've been assured that the background
# thread has attempted to submit the work (successful or otherwise).
flow_run_ids = set(result.data.get_runs_in_queue.flow_run_ids) # type: ignore
if flow_run_ids:
msg = "Found flow runs {}".format(
result.data.get_runs_in_queue.flow_run_ids
)
else:
msg = "No flow runs found"
already_submitting = flow_run_ids & currently_submitting_flow_runs
target_flow_run_ids = flow_run_ids - already_submitting
if already_submitting:
msg += " ({} already submitting: {})".format(
len(already_submitting), list(already_submitting)
)
self.logger.debug(msg)
# Query metadata for flow runs found in queue
query = {
"query": {
with_args(
"flow_run",
{
# match flow runs in the flow_run_ids list
"where": {
"id": {"_in": list(target_flow_run_ids)},
"_or": [
# who are EITHER scheduled...
{"state": {"_eq": "Scheduled"}},
# OR running with task runs scheduled to start more than 3
# seconds ago
{
"state": {"_eq": "Running"},
"task_runs": {
"state_start_time": {
"_lte": str(now.subtract(seconds=3)) # type: ignore
}
},
},
],
}
},
): {
"id": True,
"version": True,
"state": True,
"serialized_state": True,
"parameters": True,
"flow": {"id", "name", "environment", "storage", "version"},
with_args(
"task_runs",
{
"where": {
"state_start_time": {
"_lte": str(now.subtract(seconds=3)) # type: ignore
}
}
},
): {"id", "version", "task_id", "serialized_state"},
}
}
}
if target_flow_run_ids:
self.logger.debug("Querying flow run metadata")
result = self.client.graphql(query)
return result.data.flow_run # type: ignore
else:
return []
def update_state(self, flow_run: GraphQLResult) -> None:
"""
After a flow run is grabbed this function sets the state to Submitted so it
won't be picked up by any other processes
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
"""
self.logger.debug(
"Updating states for flow run {}".format(flow_run.id) # type: ignore
)
# Set flow run state to `Submitted` if it is currently `Scheduled`
if state.StateSchema().load(flow_run.serialized_state).is_scheduled():
self.logger.debug(
"Flow run {} is in a Scheduled state, updating to Submitted".format(
flow_run.id # type: ignore
)
)
self.client.set_flow_run_state(
flow_run_id=flow_run.id,
version=flow_run.version,
state=Submitted(
message="Submitted for execution",
state=state.StateSchema().load(flow_run.serialized_state),
),
)
# Set task run states to `Submitted` if they are currently `Scheduled`
for task_run in flow_run.task_runs:
if state.StateSchema().load(task_run.serialized_state).is_scheduled():
self.logger.debug(
"Task run {} is in a Scheduled state, updating to Submitted".format(
task_run.id # type: ignore
)
)
self.client.set_task_run_state(
task_run_id=task_run.id,
version=task_run.version,
state=Submitted(
message="Submitted for execution.",
state=state.StateSchema().load(task_run.serialized_state),
),
)
def mark_failed(self, flow_run: GraphQLResult, exc: Exception) -> None:
"""
Mark a flow run as `Failed`
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- exc (Exception): An exception that was raised to use as the `Failed`
message
"""
self.client.set_flow_run_state(
flow_run_id=flow_run.id,
version=flow_run.version,
state=Failed(message=str(exc)),
)
self.logger.error("Error while deploying flow: {}".format(repr(exc)))
def deploy_flow(self, flow_run: GraphQLResult) -> str:
"""
Meant to be overridden by a platform specific deployment option
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: Information about the deployment
Raises:
- ValueError: if deployment attempted on unsupported Storage type
"""
raise NotImplementedError()
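# Minimal sketch of an override (hypothetical, not part of this file):
#   class EchoAgent(Agent):
#       def deploy_flow(self, flow_run):
#           # a real agent would start the run on its platform here
#           return "started flow run {}".format(flow_run.id)
# The returned string is what deploy_and_update_flow_run() above logs to the
# backend as "Submitted for execution: ...".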
def heartbeat(self) -> None:
"""
Meant to be overridden by a platform specific heartbeat option
"""
if __name__ == "__main__":
Agent().start()
| 38 | 97 | 0.549839 |
b47779674eff0bb5df2e75ea3b3c1ba6427db391 | 6,920 | py | Python | src/model/model_helper.py | dimanbeliy1/script-server | 1eebaeeca24bb2b6161275b97859a96b0ffb5391 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/model/model_helper.py | dimanbeliy1/script-server | 1eebaeeca24bb2b6161275b97859a96b0ffb5391 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | src/model/model_helper.py | dimanbeliy1/script-server | 1eebaeeca24bb2b6161275b97859a96b0ffb5391 | [
"Apache-2.0",
"CC0-1.0"
] | null | null | null | import logging
import os
import re
import utils.env_utils as env_utils
from config.constants import FILE_TYPE_DIR, FILE_TYPE_FILE
from utils.string_utils import is_blank
ENV_VAR_PREFIX = '$$'
SECURE_MASK = '*' * 6
LOGGER = logging.getLogger('script_server.model_helper')
def resolve_env_vars(value, *, full_match=False):
if not isinstance(value, str) or is_empty(value):
return value
if full_match:
if value.startswith(ENV_VAR_PREFIX):
return env_utils.read_variable(value[2:])
return value
def resolve_var(match):
var_match = match.group()
var_name = var_match[2:]
resolved = env_utils.read_variable(var_name, fail_on_missing=False)
if resolved is not None:
return resolved
return var_match
pattern = re.escape(ENV_VAR_PREFIX) + r'\w+'
return re.sub(pattern, resolve_var, value)
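# Examples of the substitution above (illustrative; assumes the referenced
# environment variables exist):
#   resolve_env_vars('$$HOME/logs')               -> e.g. '/home/user/logs'
#   resolve_env_vars('$$SECRET', full_match=True) -> value read via env_utils.read_variable('SECRET')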
def read_obligatory(values_dict, key, error_suffix=''):
value = values_dict.get(key)
if is_empty(value):
raise Exception('"' + key + '" is required attribute' + error_suffix)
return value
def read_list(values_dict, key, default=None):
"""
Reads value from values_dict as a list
If value is a list, then list is returned
If value is missing, then default value is returned (or an empty list if not specified)
If value is a dictionary, then an error is raised
Otherwise, a single-element list containing the value is returned
"""
value = values_dict.get(key)
if value is None:
if default is not None:
return default
return []
if isinstance(value, list):
return value
if isinstance(value, dict):
raise Exception('"' + key + '" has invalid type. List expected, got dictionary')
return [value]
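# Examples of the behaviour described above (illustrative only):
#   read_list({'a': 'x'}, 'a')            -> ['x']
#   read_list({'a': ['x', 'y']}, 'a')     -> ['x', 'y']
#   read_list({}, 'a', default=['z'])     -> ['z']
#   read_list({'a': {'k': 'v'}}, 'a')     -> raises Exception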
def read_dict(values_dict, key, default=None):
"""
Reads value from values_dict as a dictionary
If value is a dict, then dict is returned
If value is missing, then default value is returned (or an empty dict if not specified)
Otherwise an error is raised
"""
value = values_dict.get(key)
if value is None:
if default is not None:
return default
return {}
if isinstance(value, dict):
return value
raise Exception('"' + key + '" has invalid type. Dict expected')
def read_bool_from_config(key, config_obj, *, default=None):
value = config_obj.get(key)
if value is None:
return default
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.lower() == 'true'
raise Exception('"' + key + '" field should be true or false')
def read_bool(value):
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise Exception('Invalid value, should be bool or string. value=' + repr(value))
return value.lower() == 'true'
def read_int_from_config(key, config_obj, *, default=None):
value = config_obj.get(key)
if value is None:
return default
if isinstance(value, int) and not isinstance(value, bool):
return value
if isinstance(value, str):
if value.strip() == '':
return default
try:
return int(value)
except ValueError as e:
raise InvalidValueException(key, 'Invalid %s value: integer expected, but was: %s' % (key, value)) from e
raise InvalidValueTypeException('Invalid %s value: integer expected, but was: %s' % (key, repr(value)))
def read_str_from_config(config_obj, key, *, default=None, blank_to_none=False):
"""
Reads string value from a config by the key
If the value is missing, returns specified default value
If the value is not a string, InvalidValueTypeException is thrown
:param config_obj: where to read value from
:param key: key to read value from
:param default: default value, if config value is missing
:param blank_to_none: if value is blank, treat it as null
:return: config_obj[key] if non None, default otherwise
"""
value = config_obj.get(key)
if blank_to_none and isinstance(value, str) and is_blank(value):
value = None
if value is None:
return default
if isinstance(value, str):
return value
raise InvalidValueTypeException('Invalid %s value: string expected, but was: %s' % (key, repr(value)))
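# Examples of the behaviour described above (illustrative, assuming is_blank()
# treats whitespace-only strings as blank):
#   read_str_from_config({'name': 'abc'}, 'name')                                  -> 'abc'
#   read_str_from_config({'name': '  '}, 'name', default='x', blank_to_none=True)  -> 'x'
#   read_str_from_config({'port': 8080}, 'port')                                   -> raises InvalidValueTypeException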
def is_empty(value):
return (not value) and (value != 0) and (value is not False)
def fill_parameter_values(parameter_configs, template, values):
result = template
for parameter_config in parameter_configs:
if parameter_config.secure or parameter_config.no_value:
continue
parameter_name = parameter_config.name
value = values.get(parameter_name)
if value is None:
value = ''
if not isinstance(value, str):
value = str(value)
result = result.replace('${' + parameter_name + '}', str(value))
return result
def replace_auth_vars(text, username, audit_name):
result = text
if not username:
username = ''
if not audit_name:
audit_name = ''
result = result.replace('${auth.username}', str(username))
result = result.replace('${auth.audit_name}', str(audit_name))
return result
def normalize_extension(extension):
return re.sub(r'^\.', '', extension).lower()
def list_files(dir, file_type=None, file_extensions=None):
if not os.path.exists(dir) or not os.path.isdir(dir):
raise InvalidFileException(dir, 'Directory not found')
result = []
if not is_empty(file_extensions):
file_type = FILE_TYPE_FILE
sorted_files = sorted(os.listdir(dir), key=lambda s: s.casefold())
for file in sorted_files:
file_path = os.path.join(dir, file)
if file_type:
if file_type == FILE_TYPE_DIR and not os.path.isdir(file_path):
continue
elif file_type == FILE_TYPE_FILE and not os.path.isfile(file_path):
continue
if file_extensions and not os.path.isdir(file_path):
_, extension = os.path.splitext(file_path)
if normalize_extension(extension) not in file_extensions:
continue
result.append(file)
return result
class InvalidFileException(Exception):
def __init__(self, path, message) -> None:
super().__init__(message)
self.path = path
class InvalidValueException(Exception):
def __init__(self, param_name, validation_error) -> None:
super().__init__(validation_error)
self.param_name = param_name
class InvalidValueTypeException(Exception):
def __init__(self, message) -> None:
super().__init__(message)
class AccessProhibitedException(Exception):
def __init__(self, message) -> None:
super().__init__(message)
| 27.03125 | 117 | 0.655202 |
2779cabc9c4450910f1951481d98406bcc5c838d | 17,520 | py | Python | spexxy/weight/fromgrid.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 4 | 2019-05-13T21:36:31.000Z | 2021-09-06T01:56:36.000Z | spexxy/weight/fromgrid.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 2 | 2020-02-12T14:36:39.000Z | 2020-07-14T11:43:10.000Z | spexxy/weight/fromgrid.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 1 | 2019-11-08T09:26:23.000Z | 2019-11-08T09:26:23.000Z | import os
import numpy as np
import pandas as pd
from typing import List, Union, Tuple
from .weight import Weight
from .fromgridnearest import WeightFromGridNearest
from ..data import Spectrum
class WeightFromGrid(Weight):
"""
This class loads the weights from a grid depending on the initial values of the fit parameters by linear
interpolation. It returns an array containing the weights.
"""
def __init__(self, filename, initial: float = 0., max_line_depth: float = 0.5, center_weight: float = 1.,
max_step: int = 1, mask_lines: Union[bool, str, List] = True, max_change=(300, 0.3), *args, **kwargs):
"""
Initializes a new weight.
Args:
filename: Name of grid file.
initial: Initial value for the whole weight array.
max_line_depth: The central pixel of lines with a larger line depth is masked out.
center_weight: Factor that increases the weight of the central pixel of each line.
max_step: In iteration steps <= max_step new weights are loaded from the grid.
mask_lines: List of absorption lines that are always masked out in their centers.
max_change: Maximum allowed change of Teff (first value) and of the other fit parameters (second value) before new weights are loaded from the grid.
"""
Weight.__init__(self, *args, **kwargs)
# expand filename
filename = os.path.expandvars(filename)
self.filename = filename
self._initial = initial
self._max_line_depth = max_line_depth
self._center_weight = center_weight
self._max_step = max_step
self._max_change = sorted(max_change, reverse=True)
if mask_lines:
if isinstance(mask_lines, bool):
self._mask_lines = 'default'
elif isinstance(mask_lines, list):
self._mask_lines = []
for line in mask_lines:
if len(line) == 2:
self._mask_lines.append(line + [-0.5, 6.5])
else:
self._mask_lines.append(line)
elif isinstance(mask_lines, str):
df = pd.read_csv(os.path.expandvars(mask_lines))
df.loc[df['logg_min'].isna(), 'logg_min'] = -0.5
df.loc[df['logg_max'].isna(), 'logg_max'] = 6.5
self._mask_lines = df.to_numpy()
else:
self._mask_lines = mask_lines
# get grid's root path
self._root = os.path.dirname(filename)
# load CSV
self._data = pd.read_csv(filename, index_col=False)
# get all parameters, by removing 'Filename' from list of columns
self._parameters = list(self._data.columns)
self._parameters.remove('Filename')
# we assume that all parameters are floats, so treat them as such
for name in self._parameters:
self._data[name] = self._data[name].apply(lambda x: float(x))
# get grid axes
self._axes = [sorted(self._data[p].unique()) for p in self._parameters]
# remove axes that contain only a single value
for i, p in enumerate(self._parameters):
if len(self._axes[i]) <= 1:
del self._axes[i]
self._parameters.remove(p)
# initialize step counter
self._step = 1
# values of the fit parameter from previous iteration step
self._previous_values = None
# save weight array
self._weights = None
# save initial parameters
self._initial_values = None
self._logg = None
def __call__(self, spectrum: Spectrum, filename: str) -> np.ndarray:
"""
Creates and returns weight array.
Args:
spectrum: Spectrum to create weight for.
filename: Name of spectrum file.
Returns:
Array containing the weight for given spectrum.
"""
# save initial values
if self._initial_values is None:
self._initial_values = {}
for cmp in self.objects['init_iter'].values():
for param_name in cmp.param_names:
self._initial_values[param_name] = cmp[param_name]
if param_name == 'logg' and self._logg is None:
self._logg = cmp[param_name]
break
# load new weights if the fit parameters changed significantly
new_weights = False
if self._previous_values is not None:
for param in self._parameters:
if new_weights:
break
for cmp in self.objects['init_iter'].values():
for param_name in cmp.param_names:
if param.lower() != param_name.lower():
continue
if param.lower() == 'teff':
# did Teff change significantly?
new_weights = abs(
self._previous_values[self._parameters.index(param)] - cmp[param_name]) > self._max_change[0]
else:
# did FeH, Alpha or logg change significantly?
new_weights = abs(
self._previous_values[self._parameters.index(param)] - cmp[param_name]) > self._max_change[1]
# are current parameter values identical with initial values?
if self._step > 1:
tmp = []
for cmp in self.objects['init_iter'].values():
for param_name in cmp.param_names:
tmp.append(cmp[param_name] == self._initial_values[param_name])
break
# component is reset to initial values if the fit restarts with a damping factor, in that case the iteration
# step needs to be reset as well
if np.all(tmp):
self._step = 1
# load new weights if max_step has not been reached or fit parameters changed significantly
if (self._step <= self._max_step) or new_weights:
if new_weights:
self._step = 1
# get parameters from component
params = []
for param in self._parameters:
for cmp in self.objects['init_iter'].values():
for param_name in cmp.param_names:
if param.lower() != param_name.lower():
continue
params.append(cmp[param_name])
break
# save current parameters for next step
self._previous_values = params.copy()
# interpolate weight for given values, use nearest neighbour if values are outside of the grid
try:
self._weight_table = self._interpolate(tuple(params))
except KeyError:
self._weight_table = None
if self._mask_lines == 'default':
w = WeightFromGridNearest(self.filename, self._initial, self._max_line_depth, self._center_weight,
self._max_step, objects=self.objects)
else:
w = WeightFromGridNearest(self.filename, self._initial, self._max_line_depth, self._center_weight,
self._max_step, self._mask_lines, objects=self.objects)
self._weights = {step: w(spectrum, filename) for step in range(1, 6)}
if self._weight_table is None:
if self._step <= 5:
w = self._weights[self._step]
self._step += 1
return w
return self._weights[5]
w = self._get_weight_array(self._weight_table, spectrum)
self._step += 1
return w
def _interpolate(self, params: Tuple, axis: int = None) -> pd.DataFrame:
# no axis given, start at latest
if axis is None:
axis = len(self._axes) - 1
if params[axis] < min(self._axes[axis]) or params[axis] > max(self._axes[axis]):
raise KeyError('Requested parameters are outside the grid.')
# let's get all possible values for the given axis
axisValues = self._axes[axis].copy()
# if params[axis] is on axis; return it directly
if params[axis] in axisValues:
if axis == 0:
return self._load_weight_table(params)
else:
return self._interpolate(tuple(params), axis - 1)
# find the next lower and the next higher axis value
p_lower = self._neighbour(tuple(params), axis, 0)
p_higher = self._neighbour(tuple(params), axis, 1)
if p_lower is None or p_higher is None:
raise KeyError('No direct neighbours found in grid.')
# get axis values
x_lower = p_lower[axis]
x_higher = p_higher[axis]
# get data for p_lower and p_higher
if axis == 0:
lower_data = self._load_weight_table(p_lower)
higher_data = self._load_weight_table(p_higher)
else:
lower_data = self._interpolate(p_lower)
higher_data = self._interpolate(p_higher)
# interpolate
f = (params[axis] - x_lower) / (x_higher - x_lower)
# add interpolation weight to table
lower_data['w'] = 1. - f
higher_data['w'] = f
df = pd.concat([lower_data, higher_data])
df = df.sort_values(by=['wave_center'])
cols = list(df.columns)
cols.remove('wave_center')
# assign identical values to wave centers less than 0.3 Angstrom apart
centers = df['wave_center'].values
while True:
delta = np.ediff1d(centers)
mask = (np.abs(delta) <= 0.3) & (delta != 0.)
centers[1:][mask] = centers[:-1][mask]
if not np.any(mask):
break
# average lines with identical wave centers together
df['wave_center'] = centers
df_grouped = df.groupby('wave_center')
df = df_grouped.filter(lambda x: len(x) == 1).copy()
df['weights'] *= df['w'].values
# average lines showing up in both tables
df2 = df_grouped.filter(lambda x: (len(x) == 2) & (x['w'].sum() == 1)).copy()
w = df2['w'].values[:, None]
df2.loc[:, cols] *= w
df2 = df2.groupby('wave_center').sum()
df2 = df2.reset_index()
cols.insert(1, 'wave_center')
df2 = df2.loc[:, cols]
df = pd.concat([df, df2]).sort_values('wave_center')
df = df.drop(columns=['w'])
df['step'] = df['step'].apply(lambda x: np.around(x, decimals=0))
return df
def _load_weight_table(self, params: Tuple) -> pd.DataFrame:
"""
Load CSV file containing the weights for the given set of parameters.
"""
grid = self._data.copy()
for i, p in enumerate(self._parameters):
grid = grid[grid[p] == params[i]]
filename = grid.Filename.values[0]
return pd.read_csv(os.path.join(self._root, filename))
def _get_weight_array(self, df: pd.DataFrame, spectrum: Spectrum):
"""
Create weight array from the given table for the given spectrum.
Args:
df: Table containing the weights for each absorption line considered in that fitting mask.
spectrum: Spectrum for which the weight array is created.
Returns:
Weight array.
"""
# consider only weights for iteration steps less than or equal to the given one
df = df[df['step'] <= self._step]
df = df[df['weights'] > 10.]
# initialize weight array
weights = np.zeros(spectrum.wave.shape) + self._initial
# write weights to array
for i, row in df.iterrows():
if isinstance(self._mask_lines, list) or isinstance(self._mask_lines, np.ndarray):
if self._mask_centers(row, self._mask_lines, weights, spectrum):
continue
elif self._mask_lines == 'default':
if self._mask_default_lines(row, weights, spectrum):
continue
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
# if line depth larger than given threshold mask out the central region otherwise increase weight of
# central pixel by given factor
if row['line_depth'] > self._max_line_depth:
# if region spans more than 10 wavelength pixel mask out the 3 central pixel otherwise only the central
# one
if (row['wave_end'] - row['wave_start']) >= 12:
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i-1:i+2] = 0
else:
weights[np.argmin(np.abs(spectrum.wave - row['wave_center']))] = 0
else:
weights[np.argmin(np.abs(spectrum.wave - row['wave_center']))] *= self._center_weight
return weights
def _neighbour(self, params: Tuple, axis: int, distance: int = 1):
"""Finds a neighbour on the given axis for the given value in the given distance.
Args:
params: Parameter tuple to search neighbour from.
axis: Axis to search for
distance: Distance in which to find neighbour.
>0: Find larger neighbours, i.e. 1 the next larger value, 2 the one after that, etc
<=0: Find smaller neighbours, i.e. 0 the next smaller value (or the value itself), -1 the one before that, etc
Returns:
New parameter tuple with neighbour on the given axis.
"""
# find neighbour in axis
values = self._axes[axis]
value = None
# loop all values
for i in range(len(values)):
# found value?
if values[i] <= params[axis] < values[i + 1]:
# index of neighbour
ii = i + distance
# does it exist?
if 0 <= ii < len(values):
value = values[ii]
if value is None:
return None
# create new tuple
p = list(params)
p[axis] = value
return tuple(p)
def _mask_default_lines(self, row: pd.Series, weights: np.ndarray, spectrum: Spectrum):
# Halpha
if (row['wave_center'] < 6566.) & (row['wave_center'] > 6557.) & (self._logg <= 3.5):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i - 1:i + 2] = 0
return True
elif (row['wave_center'] < 6566.) & (row['wave_center'] > 6557.) & (self._logg > 3.5):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i] = 0
return True
# Hbeta
if (row['wave_center'] < 4867.) & (row['wave_center'] > 4857.):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i] = 0
return True
# FeI line
if (row['wave_center'] < 5272.) and (row['wave_center'] > 5267.):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i - 1:i + 2] = 0
return True
# Ca triplet
if (row['wave_center'] < 8508.) and (row['wave_center'] > 8490.):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i - 2:i + 3] = 0
return True
if (row['wave_center'] < 8553.) and (row['wave_center'] > 8530.):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i - 2:i + 3] = 0
return True
if (row['wave_center'] < 8672.) and (row['wave_center'] > 8651.):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
weights[i - 2:i + 3] = 0
return True
return False
def _mask_centers(self, row: pd.Series, lines: Union[list, np.ndarray], weights: np.ndarray, spectrum: Spectrum):
for center, npix, logg_min, logg_max in lines:
if (row['wave_start'] < center) and (row['wave_end'] > center) and (self._logg < logg_max) and (
self._logg >= logg_min):
weights[(spectrum.wave >= row['wave_start']) & (spectrum.wave <= row['wave_end'])] += row['weights']
i = np.argmin(np.abs(spectrum.wave - row['wave_center']))
if npix % 2 == 0:
weights[int(i-npix//2):int(i+npix//2)] = 0
else:
weights[int(i-npix//2):int(i+npix//2+1)] = 0
return True
return False
__all__ = ['WeightFromGrid']
| 38.33698 | 125 | 0.557763 |
3608024ce81f9c424686172ab9d789549b379e65 | 4,518 | py | Python | Code/YOLO/darkflow/darkflow/net/yolo/misc.py | dnyaneshb25/dnyaneshbhonde | d4d253107ca3d15a8b20705a0b2bd782a47ae38b | [
"Apache-2.0"
] | 49 | 2018-02-28T21:50:06.000Z | 2022-03-14T15:18:44.000Z | Code/YOLO/darkflow/darkflow/net/yolo/misc.py | dnyaneshb25/dnyaneshbhonde | d4d253107ca3d15a8b20705a0b2bd782a47ae38b | [
"Apache-2.0"
] | 13 | 2020-01-28T22:23:29.000Z | 2022-03-11T23:26:27.000Z | Code/YOLO/darkflow/darkflow/net/yolo/misc.py | dnyaneshb25/dnyaneshbhonde | d4d253107ca3d15a8b20705a0b2bd782a47ae38b | [
"Apache-2.0"
] | 22 | 2021-03-24T10:21:51.000Z | 2022-03-30T06:30:29.000Z | import pickle
import numpy as np
import cv2
import os
labels20 = ["aeroplane", "bicycle", "bird", "boat", "bottle",
"bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
"train", "tvmonitor"]
# 8, 14, 15, 19
voc_models = ['yolo-full', 'yolo-tiny', 'yolo-small', # <- v1
'yolov1', 'tiny-yolov1', # <- v1.1
'tiny-yolo-voc', 'yolo-voc'] # <- v2
coco_models = ['tiny-coco', 'yolo-coco', # <- v1.1
'yolo', 'tiny-yolo'] # <- v2
coco_names = 'coco.names'
nine_names = '9k.names'
def labels(meta, FLAGS):
model = os.path.basename(meta['name'])
if model in voc_models:
print("Model has a VOC model name, loading VOC labels.")
meta['labels'] = labels20
else:
file = FLAGS.labels
if model in coco_models:
print("Model has a coco model name, loading coco labels.")
file = os.path.join(FLAGS.config, coco_names)
elif model == 'yolo9000':
print("Model has name yolo9000, loading yolo9000 labels.")
file = os.path.join(FLAGS.config, nine_names)
with open(file, 'r') as f:
meta['labels'] = list()
labs = [l.strip() for l in f.readlines()]
for lab in labs:
if lab == '----': break
meta['labels'] += [lab]
if len(meta['labels']) == 0:
meta['labels'] = labels20
def is_inp(self, name):
return name.lower().endswith(('.jpg', '.jpeg', '.png'))
def show(im, allobj, S, w, h, cellx, celly):
for obj in allobj:
a = obj[5] % S
b = obj[5] // S
cx = a + obj[1]
cy = b + obj[2]
centerx = cx * cellx
centery = cy * celly
ww = obj[3]**2 * w
hh = obj[4]**2 * h
cv2.rectangle(im,
(int(centerx - ww/2), int(centery - hh/2)),
(int(centerx + ww/2), int(centery + hh/2)),
(0,0,255), 2)
cv2.imshow('result', im)
cv2.waitKey()
cv2.destroyAllWindows()
def show2(im, allobj):
for obj in allobj:
cv2.rectangle(im,
(obj[1], obj[2]),
(obj[3], obj[4]),
(0,0,255),2)
cv2.imshow('result', im)
cv2.waitKey()
cv2.destroyAllWindows()
_MVA = .05
def profile(self, net):
pass
# data = self.parse(exclusive = True)
# size = len(data); batch = self.FLAGS.batch
# all_inp_ = [x[0] for x in data]
# net.say('Will cycle through {} examples {} times'.format(
# len(all_inp_), net.FLAGS.epoch))
# fetch = list(); mvave = list(); names = list();
# this = net.top
# conv_lay = ['convolutional', 'connected', 'local', 'conv-select']
# while this.inp is not None:
# if this.lay.type in conv_lay:
# fetch = [this.out] + fetch
# names = [this.lay.signature] + names
# mvave = [None] + mvave
# this = this.inp
# print(names)
# total = int(); allofthem = len(all_inp_) * net.FLAGS.epoch
# batch = min(net.FLAGS.batch, len(all_inp_))
# for count in range(net.FLAGS.epoch):
# net.say('EPOCH {}'.format(count))
# for j in range(len(all_inp_)/batch):
# inp_feed = list(); new_all = list()
# all_inp = all_inp_[j*batch: (j*batch+batch)]
# for inp in all_inp:
# new_all += [inp]
# this_inp = os.path.join(net.FLAGS.dataset, inp)
# this_inp = net.framework.preprocess(this_inp)
# expanded = np.expand_dims(this_inp, 0)
# inp_feed.append(expanded)
# all_inp = new_all
# feed_dict = {net.inp : np.concatenate(inp_feed, 0)}
# out = net.sess.run(fetch, feed_dict)
# for i, o in enumerate(out):
# oi = out[i];
# dim = len(oi.shape) - 1
# ai = mvave[i];
# mi = np.mean(oi, tuple(range(dim)))
# vi = np.var(oi, tuple(range(dim)))
# if ai is None: mvave[i] = [mi, vi]
# elif 'banana ninja yada yada':
# ai[0] = (1 - _MVA) * ai[0] + _MVA * mi
# ai[1] = (1 - _MVA) * ai[1] + _MVA * vi
# total += len(inp_feed)
# net.say('{} / {} = {}%'.format(
# total, allofthem, 100. * total / allofthem))
# with open('profile', 'wb') as f:
# pickle.dump([mvave], f, protocol = -1)
| 34.48855 | 71 | 0.501992 |
f4654039f689ac2701894762814f6855442623e5 | 199 | py | Python | admitad/__init__.py | Picasel/django-admitad | 7df25dffa5bb7b8c57a53afaa274ea4dca3bc420 | [
"MIT"
] | 3 | 2019-01-29T05:45:42.000Z | 2020-09-22T14:42:33.000Z | admitad/__init__.py | Picasel/django-admitad | 7df25dffa5bb7b8c57a53afaa274ea4dca3bc420 | [
"MIT"
] | 8 | 2018-11-27T11:39:39.000Z | 2019-06-29T07:55:02.000Z | admitad/__init__.py | k0t3n/django-admitad | 7df25dffa5bb7b8c57a53afaa274ea4dca3bc420 | [
"MIT"
] | null | null | null | default_app_config = 'admitad.apps.AdmitadConfig'
REQUIRED_SETTINGS = ['ADMITAD_COMPAIN_CODE', 'ADMITAD_POSTBACK_URL', 'ADMITAD_POSTBACK_KEY']
DOCS_URL = 'https://github.com/Picasel/django-admitad'
| 39.8 | 92 | 0.809045 |
016236f4ac716c1dfdfeea345f75657d33d15359 | 178 | py | Python | app/main/__init__.py | hwc0919/TsyDigitalRailway | 34d682f698b0b8bf066719c06fa431d2ab1436a0 | [
"CC0-1.0"
] | null | null | null | app/main/__init__.py | hwc0919/TsyDigitalRailway | 34d682f698b0b8bf066719c06fa431d2ab1436a0 | [
"CC0-1.0"
] | null | null | null | app/main/__init__.py | hwc0919/TsyDigitalRailway | 34d682f698b0b8bf066719c06fa431d2ab1436a0 | [
"CC0-1.0"
] | null | null | null | # Do not change the code order!!! Do not use the IDE's code formatting feature!!!
from flask import Blueprint # line 1
main = Blueprint('main', __name__) # line 2
if True:
from . import views, errors # line 3
| 19.777778 | 46 | 0.606742 |
a4378aa624b873c50cc704af592a0e957abb6ce3 | 609 | py | Python | src/utilities/common.py | choobinejad/beltway-wifi | 5489807e4368a85dbeb59f4df509bec2c60d05bd | [
"MIT"
] | null | null | null | src/utilities/common.py | choobinejad/beltway-wifi | 5489807e4368a85dbeb59f4df509bec2c60d05bd | [
"MIT"
] | 9 | 2019-02-15T15:17:10.000Z | 2019-03-11T15:24:55.000Z | src/utilities/common.py | choobinejad/beltway-wifi | 5489807e4368a85dbeb59f4df509bec2c60d05bd | [
"MIT"
] | null | null | null | import sys
def progress(count, total, status='', visual=True):
bar_len = 100
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
if visual:
sys.stdout.write('[{}] {}{} ...{}\r'.format(bar, percents, '%', status))
sys.stdout.flush()
else:
return '[{}] {}{} ...{}\r'.format(bar, percents, '%', status)
def frange(start, stop, step, precision=5):
while start < stop:
yield int(start * 10**precision) / 10**precision
start += step
| 27.681818 | 80 | 0.563218 |
60d0a5abf7eea0dc75e48536f49072d8202f55c7 | 850 | py | Python | ssd_keras/testing_utils/videotest_example.py | shingte/CarND-Vehicle-Detection | 38b0f9645f1300836f5877a33c612f004e0aaa5b | [
"MIT"
] | 4 | 2020-03-24T02:16:08.000Z | 2021-11-25T17:47:49.000Z | ssd_keras/testing_utils/videotest_example.py | shingte/CarND-Vehicle-Detection | 38b0f9645f1300836f5877a33c612f004e0aaa5b | [
"MIT"
] | null | null | null | ssd_keras/testing_utils/videotest_example.py | shingte/CarND-Vehicle-Detection | 38b0f9645f1300836f5877a33c612f004e0aaa5b | [
"MIT"
] | 4 | 2020-09-18T01:08:25.000Z | 2021-11-25T17:48:47.000Z | import keras
import pickle
from videotest import VideoTest
import sys
sys.path.append("..")
from ssd import SSD300 as SSD
input_shape = (300,300,3)
# Change this if you run with other classes than VOC
class_names = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"];
NUM_CLASSES = len(class_names)
model = SSD(input_shape, num_classes=NUM_CLASSES)
# Change this path if you want to use your own trained weights
model.load_weights('../weights_SSD300.hdf5')
vid_test = VideoTest(class_names, model, input_shape)
# To test on webcam 0, remove the parameter (or change it to another number
# to test on that webcam)
vid_test.run('path/to/your/video.mkv')
| 34 | 225 | 0.695294 |
e66e1ea7d3ca9e7f5292259f785a79db7eb455b0 | 1,157 | py | Python | programming_languages/plc_script/tests/addition_multiplication_chain_test.py | playandlearntocode/main | 2f2374b92b9af3fc11e2aedf13bf8c2240ece906 | [
"MIT"
] | 2 | 2022-01-25T10:34:08.000Z | 2022-03-16T17:17:09.000Z | programming_languages/plc_script/tests/addition_multiplication_chain_test.py | playandlearntocode/main | 2f2374b92b9af3fc11e2aedf13bf8c2240ece906 | [
"MIT"
] | null | null | null | programming_languages/plc_script/tests/addition_multiplication_chain_test.py | playandlearntocode/main | 2f2374b92b9af3fc11e2aedf13bf8c2240ece906 | [
"MIT"
] | 3 | 2022-01-24T23:56:22.000Z | 2022-02-24T20:46:11.000Z | from parsing.ll1 import LL1
def test():
s = '''
2 + 3 * 4 +7;
'''
ll1 = LL1(s)
ast = ll1.parse()
expected = {
'type': 'addition_expression',
'content': {
'left': {
'type': 'addition_expression',
'content': {
'left': {
'type': 'number',
'value': '2'
},
'right': {
'type': 'multiplication_expression',
'content': {
'left': {
'type': 'number',
'value': '3'
},
'right': {
'type': 'number',
'value': '4'
}
}
}
}
},
'right': {
'type': 'number',
'value': '7'
}
}
}
print('ast:')
print(ast)
assert (ast == expected)
| 22.686275 | 60 | 0.242869 |
c57ba1c1451f6db9567d4126bf9d6cac81db26e9 | 869 | py | Python | src/openbiolink/graph_creation/metadata_infile/onto/inMetaOntoUberonIsA.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 97 | 2019-11-26T09:53:18.000Z | 2022-03-19T10:33:10.000Z | src/openbiolink/graph_creation/metadata_infile/onto/inMetaOntoUberonIsA.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 67 | 2019-12-09T21:01:52.000Z | 2021-12-21T15:19:41.000Z | src/openbiolink/graph_creation/metadata_infile/onto/inMetaOntoUberonIsA.py | jerryhluo/OpenBioLink | 6fc073af978daec0b0db5938b73beed37f57f495 | [
"MIT"
] | 20 | 2020-01-13T23:02:25.000Z | 2022-03-16T21:43:31.000Z | from openbiolink.edgeType import EdgeType
from openbiolink.graph_creation.metadata_infile.infileMetadata import InfileMetadata
from openbiolink.graph_creation.types.infileType import InfileType
from openbiolink.namespace import *
from openbiolink.nodeType import NodeType
class InMetaOntoUberonIsA(InfileMetadata):
CSV_NAME = "DB_ONTO_UBERON_IS_A_ontology.csv"
USE_COLS = ["ID", "IS_A"]
NODE1_COL = 0
NODE2_COL = 1
QSCORE_COL = None
SOURCE = "UBERON"
NODE1_TYPE = NodeType.ANATOMY
NODE1_NAMESPACE = Namespace(Namespaces.MULTI)
NODE2_TYPE = NodeType.ANATOMY
NODE2_NAMESPACE = Namespace(Namespaces.MULTI)
EDGE_TYPE = EdgeType.IS_A
MAPPING_SEP = ";"
INFILE_TYPE = InfileType.IN_ONTO_UBERON_IS_A
def __init__(self):
super().__init__(csv_name=self.CSV_NAME, cols=self.USE_COLS, infileType=self.INFILE_TYPE)
| 34.76 | 97 | 0.7687 |
d7290eced4b9408849085c8cf486e217bf9e574c | 857 | py | Python | torrt/trackers/test.py | st7105/torrt | 97a3cb20a8caec5bba2132543343a82eb13aa182 | [
"BSD-3-Clause"
] | null | null | null | torrt/trackers/test.py | st7105/torrt | 97a3cb20a8caec5bba2132543343a82eb13aa182 | [
"BSD-3-Clause"
] | null | null | null | torrt/trackers/test.py | st7105/torrt | 97a3cb20a8caec5bba2132543343a82eb13aa182 | [
"BSD-3-Clause"
] | null | null | null | from typing import List
from ..base_tracker import GenericPublicTracker
class TestTracker(GenericPublicTracker):
alias: str = 'test.st7105.ru'
def get_id_from_link(self, url: str) -> str:
"""Returns forum thread identifier from full thread URL."""
splitted = url.rstrip('/').split('/')
result = splitted[-1]
if not result.isdigit(): # URL contains SEO name in the last chunk
for result in splitted:
if result.isdigit():
break
return result
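# Example (illustrative URLs): for 'https://test.st7105.ru/topic/12345' the last
# chunk '12345' is returned directly; for 'https://test.st7105.ru/12345/some-seo-name'
# the first numeric chunk '12345' is picked instead.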
def get_download_link(self, url: str) -> str:
"""Tries to find .torrent file download link at forum thread page and return that one."""
page_soup = self.get_torrent_page(url)
download_link = self.find_links(url, page_soup, definite=r'\.torrent')
return download_link or ''
| 28.566667 | 97 | 0.631272 |
4aff2a6f91a6211050b7af4971c19ef152ded31d | 73 | py | Python | testing/sonar_test.py | hmallen/SonarMap | fb0dfad87af6834f0d96b4b4376cc21432ea1416 | [
"MIT"
] | null | null | null | testing/sonar_test.py | hmallen/SonarMap | fb0dfad87af6834f0d96b4b4376cc21432ea1416 | [
"MIT"
] | null | null | null | testing/sonar_test.py | hmallen/SonarMap | fb0dfad87af6834f0d96b4b4376cc21432ea1416 | [
"MIT"
] | null | null | null | from gpiozero import DistanceSensor
if __name__ == '__main__':
pass | 14.6 | 35 | 0.739726 |
5ac7a3fe3088d40a8eba1a29e5cb5e758ce5520f | 8,567 | py | Python | trace-viewer/tracing/third_party/tvcm/tvcm/module.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | trace-viewer/tracing/third_party/tvcm/tvcm/module.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | trace-viewer/tracing/third_party/tvcm/tvcm/module.py | RSB4760/apq8016_external_chromium-trace | 45b575bb05b3714142a9d67b9bd153740ef99226 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains the Module class and other classes for resources.
The Module class represents a module in the trace viewer system. A module has
a name, and may require a variety of other resources, such as stylesheets,
template objects, raw JavaScript, or other modules.
Other resources include HTML templates, raw JavaScript files, and stylesheets.
"""
import os
import inspect
import codecs
from tvcm import js_utils
class DepsException(Exception):
"""Exceptions related to module dependency resolution."""
def __init__(self, fmt, *args):
from tvcm import style_sheet as style_sheet_module
context = []
frame = inspect.currentframe()
while frame:
frame_locals = frame.f_locals
module_name = None
if 'self' in frame_locals:
s = frame_locals['self']
if isinstance(s, Module):
module_name = s.name
if isinstance(s, style_sheet_module.StyleSheet):
module_name = s.name + '.css'
if not module_name:
if 'module' in frame_locals:
module = frame_locals['module']
if isinstance(module, Module):
module_name = module.name
elif 'm' in frame_locals:
module = frame_locals['m']
if isinstance(module, Module):
module_name = module.name
if module_name:
if len(context):
if context[-1] != module_name:
context.append(module_name)
else:
context.append(module_name)
frame = frame.f_back
context.reverse()
self.context = context
context_str = '\n'.join(' %s' % x for x in context)
Exception.__init__(
self, 'While loading:\n%s\nGot: %s' % (context_str, (fmt % args)))
class ModuleDependencyMetadata(object):
def __init__(self):
self.dependent_module_names = []
self.dependent_raw_script_relative_paths = []
self.style_sheet_names = []
def AppendMetdata(self, other):
self.dependent_module_names += other.dependent_module_names
self.dependent_raw_script_relative_paths += \
other.dependent_raw_script_relative_paths
self.style_sheet_names += other.style_sheet_names
_next_module_id = 1
class Module(object):
"""Represents a JavaScript module.
Interesting properties include:
name: Module name, may include a namespace, e.g. 'tvcm.foo'.
filename: The filename of the actual module.
contents: The text contents of the module.
dependent_modules: Other modules that this module depends on.
In addition to these properties, a Module also contains lists of other
resources that it depends on.
"""
def __init__(self, loader, name, resource, load_resource=True):
assert isinstance(name, basestring), 'Got %s instead' % repr(name)
global _next_module_id
self._id = _next_module_id
_next_module_id += 1
self.loader = loader
self.name = name
self.resource = resource
if load_resource:
f = codecs.open(self.filename, mode='r', encoding='utf-8')
self.contents = f.read()
f.close()
else:
self.contents = None
# Dependency metadata, set up during Parse().
self.dependency_metadata = None
# Actual dependencies, set up during load().
self.dependent_modules = []
self.dependent_raw_scripts = []
self.style_sheets = []
# Caches.
self._all_dependent_modules_recursive = None
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
@property
def id(self):
return self._id
@property
def filename(self):
return self.resource.absolute_path
def isComponent(self):
ref = os.path.join('third_party', 'components')
return ref in self.filename
def Parse(self):
"""Parses self.contents and fills in the module's dependency metadata."""
raise NotImplementedError()
def GetTVCMDepsModuleType(self):
"""Returns the tvcm.setModuleInfo type for this module"""
raise NotImplementedError()
def AppendJSContentsToFile(self,
f,
use_include_tags_for_scripts,
dir_for_include_tag_root):
"""Appends the js for this module to the provided file."""
for dependent_raw_script in self.dependent_raw_scripts:
if use_include_tags_for_scripts:
rel_filename = os.path.relpath(dependent_raw_script.filename,
dir_for_include_tag_root)
f.write("""<include src="%s">\n""" % rel_filename)
else:
f.write(js_utils.EscapeJSIfNeeded(dependent_raw_script.contents))
f.write('\n')
def AppendHTMLContentsToFile(self, f, ctl, minify=False):
"""Appends the HTML for this module [without links] to the provided file."""
pass
def Load(self):
"""Loads the sub-resources that this module depends on from its dependency
metadata.
Raises:
DepsException: There was a problem finding one of the dependencies.
Exception: There was a problem parsing a module that this one depends on.
"""
assert self.name, 'Module name must be set before dep resolution.'
assert self.filename, 'Module filename must be set before dep resolution.'
assert self.name in self.loader.loaded_modules, (
'Module must be registered in resource loader before loading.')
metadata = self.dependency_metadata
for name in metadata.dependent_module_names:
module = self.loader.LoadModule(module_name=name)
self.dependent_modules.append(module)
for path in metadata.dependent_raw_script_relative_paths:
raw_script = self.loader.LoadRawScript(path)
self.dependent_raw_scripts.append(raw_script)
for name in metadata.style_sheet_names:
style_sheet = self.loader.LoadStyleSheet(name)
self.style_sheets.append(style_sheet)
@property
def all_dependent_modules_recursive(self):
if self._all_dependent_modules_recursive:
return self._all_dependent_modules_recursive
self._all_dependent_modules_recursive = set(self.dependent_modules)
for dependent_module in self.dependent_modules:
self._all_dependent_modules_recursive.update(
dependent_module.all_dependent_modules_recursive)
return self._all_dependent_modules_recursive
def ComputeLoadSequenceRecursive(self, load_sequence, already_loaded_set,
depth=0):
"""Recursively builds up a load sequence list.
Args:
load_sequence: A list which will be incrementally built up.
already_loaded_set: A set of modules that has already been added to the
load sequence list.
depth: The depth of recursion. If it too deep, that indicates a loop.
"""
if depth > 32:
      raise Exception('Include loop detected on %s' % self.name)
for dependent_module in self.dependent_modules:
if dependent_module.name in already_loaded_set:
continue
dependent_module.ComputeLoadSequenceRecursive(
load_sequence, already_loaded_set, depth + 1)
if self.name not in already_loaded_set:
already_loaded_set.add(self.name)
load_sequence.append(self)
def GetAllDependentFilenamesRecursive(self, include_raw_scripts=True):
dependent_filenames = []
visited_modules = set()
def Get(module):
module.AppendDirectlyDependentFilenamesTo(
dependent_filenames, include_raw_scripts)
visited_modules.add(module)
for m in module.dependent_modules:
if m in visited_modules:
continue
Get(m)
Get(self)
return dependent_filenames
def AppendDirectlyDependentFilenamesTo(
self, dependent_filenames, include_raw_scripts=True):
dependent_filenames.append(self.resource.absolute_path)
if include_raw_scripts:
for raw_script in self.dependent_raw_scripts:
dependent_filenames.append(raw_script.resource.absolute_path)
for style_sheet in self.style_sheets:
style_sheet.AppendDirectlyDependentFilenamesTo(dependent_filenames)
class RawScript(object):
"""Represents a raw script resource referenced by a module via the
tvcm.requireRawScript(xxx) directive."""
def __init__(self, resource):
self.resource = resource
@property
def filename(self):
return self.resource.absolute_path
@property
def contents(self):
return self.resource.contents
def __repr__(self):
return 'RawScript(%s)' % self.filename
| 32.206767 | 80 | 0.698844 |
d38161d7e8d89422b5aa58476c8c44edb5813e00 | 2,746 | py | Python | userena/tests/tests_middleware.py | jdavidagudelo/django-userena-ce | 970ca25ca367112625933bd61a0ba745b052692e | [
"BSD-3-Clause"
] | 86 | 2018-03-09T22:24:39.000Z | 2021-12-12T22:30:33.000Z | userena/tests/tests_middleware.py | jdavidagudelo/django-userena-ce | 970ca25ca367112625933bd61a0ba745b052692e | [
"BSD-3-Clause"
] | 113 | 2018-02-25T12:24:13.000Z | 2022-02-22T17:59:51.000Z | userena/tests/tests_middleware.py | jdavidagudelo/django-userena-ce | 970ca25ca367112625933bd61a0ba745b052692e | [
"BSD-3-Clause"
] | 19 | 2018-08-16T18:13:48.000Z | 2021-12-11T18:14:30.000Z | from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.test import TestCase
from userena.tests.profiles.models import Profile
from userena.middleware import UserenaLocaleMiddleware
from userena import settings as userena_settings
from userena.utils import get_user_profile, get_profile_model
User = get_user_model()
def has_profile(user):
"""Test utility function to check if user has profile"""
profile_model = get_profile_model()
try:
profile = user.get_profile()
except AttributeError:
related_name = profile_model._meta.get_field("user").related_query_name()
profile = getattr(user, related_name, None)
except profile_model.DoesNotExist:
profile = None
return bool(profile)
class UserenaLocaleMiddlewareTests(TestCase):
""" Test the ``UserenaLocaleMiddleware`` """
fixtures = ["users", "profiles"]
def _get_request_with_user(self, user):
""" Fake a request with an user """
request = HttpRequest()
request.META = {"SERVER_NAME": "testserver", "SERVER_PORT": 80}
request.method = "GET"
request.session = {}
# Add user
request.user = user
return request
def test_preference_user(self):
""" Test the language preference of two users """
users = ((1, "nl"), (2, "en"))
for pk, lang in users:
user = User.objects.get(pk=pk)
profile = get_user_profile(user=user)
req = self._get_request_with_user(user)
# Check that the user has this preference
self.assertEqual(profile.language, lang)
# Request should have a ``LANGUAGE_CODE`` with dutch
UserenaLocaleMiddleware().process_request(req)
self.assertEqual(req.LANGUAGE_CODE, lang)
def test_without_profile(self):
""" Middleware should do nothing when a user has no profile """
# Delete the profile
Profile.objects.get(pk=1).delete()
user = User.objects.get(pk=1)
# User shouldn't have a profile
self.assertFalse(has_profile(user))
req = self._get_request_with_user(user)
UserenaLocaleMiddleware().process_request(req)
self.assertFalse(hasattr(req, "LANGUAGE_CODE"))
def test_without_language_field(self):
""" Middleware should do nothing if the profile has no language field """
userena_settings.USERENA_LANGUAGE_FIELD = "non_existant_language_field"
user = User.objects.get(pk=1)
req = self._get_request_with_user(user)
# Middleware should do nothing
UserenaLocaleMiddleware().process_request(req)
self.assertFalse(hasattr(req, "LANGUAGE_CODE"))
| 32.690476 | 81 | 0.671886 |
afdbb10ad7b4559ed0188dc18f85f27700cb637d | 53 | py | Python | factoids/__init__.py | micole/slackbucket | e3cd4cd8db086011548a4c3bfeb233d4ded2270a | [
"MIT"
] | 2 | 2018-02-12T19:11:05.000Z | 2018-02-15T14:35:03.000Z | factoids/__init__.py | micole/slackbucket | e3cd4cd8db086011548a4c3bfeb233d4ded2270a | [
"MIT"
] | 1 | 2021-06-01T21:52:25.000Z | 2021-06-01T21:52:25.000Z | factoids/__init__.py | micole/slackbucket | e3cd4cd8db086011548a4c3bfeb233d4ded2270a | [
"MIT"
] | 1 | 2018-02-12T19:17:54.000Z | 2018-02-12T19:17:54.000Z | from .plugin import Factoids
__all__ = ['Factoids']
| 13.25 | 28 | 0.735849 |
5ae2d9263e6dc5035f538d58c970586b132bd1a2 | 5,362 | py | Python | molar/backend/api/api_v1/endpoints/database.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | 4 | 2021-07-20T18:49:44.000Z | 2021-10-15T00:58:12.000Z | molar/backend/api/api_v1/endpoints/database.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | null | null | null | molar/backend/api/api_v1/endpoints/database.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | 2 | 2022-01-07T17:57:42.000Z | 2022-01-13T21:00:20.000Z | # std
from typing import List
# external
import alembic
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException
import sqlalchemy
from sqlalchemy.orm import Session
# molar
from molar import install
from molar.backend import alembic_utils, database, schemas
from molar.backend.api import deps
from molar.backend.core.config import settings
from molar.backend.crud import CRUDInterface
from molar.backend.database.query import INFORMATION_QUERY
from molar.backend.utils import send_database_created_email
router = APIRouter()
@router.post("/request", response_model=schemas.Msg)
def database_creation_request(
database_in: schemas.DatabaseCreate,
db: Session = Depends(deps.get_main_db),
crud: CRUDInterface = Depends(deps.get_main_crud),
):
if database_in.database_name in ["molar_main", "main"]:
raise HTTPException(
status_code=401, detail="This database name is already taken"
)
try:
crud.molar_database.create(db, obj_in=database_in)
except sqlalchemy.exc.IntegrityError:
raise HTTPException(
status_code=401, detail="This database name is already taken"
)
return {"msg": "Database request created"}
@router.get("/requests")
def get_database_requests(
db: Session = Depends(deps.get_main_db),
current_user=Depends(deps.get_main_current_active_superuser),
):
out = db.query(database.main.models.molar_database).all()
return out
@router.get("/information", response_model=List[schemas.DatabaseInformation])
def get_database_information(
db: Session = Depends(deps.get_db),
current_user=Depends(deps.get_current_active_user),
):
results = db.execute(INFORMATION_QUERY).all()
column_names = [
"table_name",
"column_name",
"type",
"subtype",
"is_nullable",
"constraint_name",
"constraint_type",
"references",
]
return [dict(zip(column_names, result)) for result in results]
@router.put("/approve/{database_name}", response_model=schemas.Msg)
def approve_database(
database_name: str,
db: Session = Depends(deps.get_main_db),
crud: CRUDInterface = Depends(deps.get_main_crud),
current_user=Depends(deps.get_main_current_active_superuser),
):
db_obj = crud.molar_database.get_by_database_name(db, database_name=database_name)
if not db_obj:
raise HTTPException(status_code=404, detail="Database request not found!")
crud.molar_database.approve(db, db_obj=db_obj)
alembic_config = alembic_utils.get_alembic_config()
try:
install.install_molar_database(
alembic_config=alembic_config,
hostname=settings.POSTGRES_SERVER,
postgres_username=settings.POSTGRES_USER,
postgres_password=settings.POSTGRES_PASSWORD,
new_database_name=db_obj.database_name,
superuser_fullname=db_obj.superuser_fullname,
superuser_email=db_obj.superuser_email,
superuser_hashed_password=db_obj.superuser_password,
revisions=db_obj.alembic_revisions,
)
except alembic.util.exc.CommandError as err:
raise HTTPException(
status_code=400,
detail=f"There was an issue during the alembic migration: {str(err)}",
)
except sqlalchemy.exc.ProgrammingError as err:
raise HTTPException(status_code=400, detail="Database already exists!")
if settings.EMAILS_ENABLED:
send_database_created_email(
email_to=db_obj.superuser_email, database=db_obj.database_name
)
return {"msg": f"Database {database_name} created."}
@router.delete("/request/{database_name}", response_model=schemas.Msg)
def remove_database_requests(
database_name: str,
db: Session = Depends(deps.get_main_db),
crud: CRUDInterface = Depends(deps.get_main_crud),
crud_user=Depends(deps.get_main_current_active_superuser),
):
db_obj = crud.molar_database.get_by_database_name(db, database_name=database_name)
if not db_obj:
raise HTTPException(status_code=404, detail="Database request not found!")
if db_obj.is_approved:
raise HTTPException(
status_code=403,
detail="This database request has been approved and therefore cannot be removed",
)
crud.molar_database.remove(db, id=db_obj.id)
return {"msg": "Database request removed"}
@router.delete("/{database_name}", response_model=schemas.Msg)
def remove_a_database(
database_name: str,
background_tasks: BackgroundTasks,
db: Session = Depends(deps.get_main_db),
crud: CRUDInterface = Depends(deps.get_main_crud),
crud_user=Depends(deps.get_main_current_active_superuser),
):
db_obj = crud.molar_database.get_by_database_name(db, database_name=database_name)
if not db_obj:
raise HTTPException(status_code=404, detail="Database not found!")
crud.molar_database.remove_by_database_name(db, database_name=database_name)
database.close_database(database_name)
background_tasks.add_task(
install.drop_database,
hostname=settings.POSTGRES_SERVER,
postgres_username=settings.POSTGRES_USER,
postgres_password=settings.POSTGRES_PASSWORD,
database=database_name,
)
return {"msg": "The database has been scheduled for deletion!"}
| 35.045752 | 93 | 0.723051 |
2edef990ae39ad66a9cdc1b9a98a3ccde60a48e0 | 660 | py | Python | shapes/arc_brezenhem.py | dimayasha7123/CGLabs | f56ee6ab1170b24467e5c3ee9782aeb6087cbe08 | [
"WTFPL"
] | null | null | null | shapes/arc_brezenhem.py | dimayasha7123/CGLabs | f56ee6ab1170b24467e5c3ee9782aeb6087cbe08 | [
"WTFPL"
] | null | null | null | shapes/arc_brezenhem.py | dimayasha7123/CGLabs | f56ee6ab1170b24467e5c3ee9782aeb6087cbe08 | [
"WTFPL"
] | null | null | null | import math
def arc_brezenhem(x0, y0, r, angF, angS):
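    """Return the integer points of the arc of a circle of radius r centred at
    (x0, y0), between the angles angF and angS given in degrees, stepping from
    the larger angle towards the smaller one with a Bresenham-style error term."""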
if angF > angS:
angF, angS = angS, angF
angF *= math.pi / 180
angS *= math.pi / 180
x = int(r * math.cos(angS))
y = int(r * math.sin(angS))
delta = 2 * (1 - r)
output = []
while y >= int(r * math.sin(angF)):
output.append((x + x0, y + y0))
if delta < 0 and 2 * delta + 2 * y - 1 <= 0:
x += 1
delta += 2 * x - 1
elif delta > 0 and 2 * delta - 2 * x - 1 > 0:
y -= 1
delta += 1 - 2 * y
else:
x += 1
y -= 1
delta += 2 * x - 2 * y + 2
return output
| 25.384615 | 53 | 0.409091 |
2600bee0a7f45b6c44ac77911a2225b87abef8bf | 3,108 | py | Python | fastNLP/modules/decoder/utils.py | sleepy-owl/fastNLP | 22c6e6d59ccea011e325606ef10201ccb61cdc7a | [
"Apache-2.0"
] | 2,693 | 2018-03-08T03:09:20.000Z | 2022-03-30T07:38:42.000Z | fastNLP/modules/decoder/utils.py | stratoes/fastNLP | a8a458230489710ab945b37ec22e93315230f2de | [
"Apache-2.0"
] | 291 | 2018-07-21T07:43:17.000Z | 2022-03-07T13:06:58.000Z | fastNLP/modules/decoder/utils.py | stratoes/fastNLP | a8a458230489710ab945b37ec22e93315230f2de | [
"Apache-2.0"
] | 514 | 2018-03-09T06:54:25.000Z | 2022-03-26T20:11:44.000Z | r"""undocumented"""
__all__ = [
"viterbi_decode"
]
import torch
def viterbi_decode(logits, transitions, mask=None, unpad=False):
r"""
    Given an emission (feature) score matrix and a transition score matrix, find the best tag path and its score.
    :param torch.FloatTensor logits: batch_size x max_len x num_tags, the emission (feature) score matrix.
    :param torch.FloatTensor transitions: n_tags x n_tags, where the value at [i, j] is the score of the transition
        from tag i to tag j; or (n_tags+2) x (n_tags+2), where n_tags is the index of the start tag and n_tags+1 is
        the index of the end tag. To forbid the transition i->j, set transitions[i, j] to a very small negative
        number, e.g. -10000000.0.
    :param torch.ByteTensor mask: batch_size x max_len, positions where the value is 0 are treated as padding; if
        None, the input is assumed to have no padding.
    :param bool unpad: whether to strip the padding from the result. If False, a batch_size x max_len tensor is
        returned; if True, a List[List[int]] is returned, where each inner List[int] contains the labels of one
        sequence with the padding removed, i.e. its length is the valid length of that sample.
    :return: a tuple (paths, scores).
            paths: the decoded paths, in the form selected by the unpad argument.
            scores: torch.FloatTensor of shape (batch_size,), the score of the best path for each sample.
"""
batch_size, seq_len, n_tags = logits.size()
if transitions.size(0) == n_tags+2:
include_start_end_trans = True
elif transitions.size(0) == n_tags:
include_start_end_trans = False
else:
raise RuntimeError("The shapes of transitions and feats are not " \
"compatible.")
logits = logits.transpose(0, 1).data # L, B, H
if mask is not None:
mask = mask.transpose(0, 1).data.eq(True) # L, B
else:
mask = logits.new_ones((seq_len, batch_size), dtype=torch.uint8).eq(1)
trans_score = transitions[:n_tags, :n_tags].view(1, n_tags, n_tags).data
# dp
vpath = logits.new_zeros((seq_len, batch_size, n_tags), dtype=torch.long)
vscore = logits[0]
if include_start_end_trans:
vscore += transitions[n_tags, :n_tags]
for i in range(1, seq_len):
prev_score = vscore.view(batch_size, n_tags, 1)
cur_score = logits[i].view(batch_size, 1, n_tags)
score = prev_score + trans_score + cur_score
best_score, best_dst = score.max(1)
vpath[i] = best_dst
vscore = best_score.masked_fill(mask[i].eq(False).view(batch_size, 1), 0) + \
vscore.masked_fill(mask[i].view(batch_size, 1), 0)
if include_start_end_trans:
vscore += transitions[:n_tags, n_tags + 1].view(1, -1)
# backtrace
batch_idx = torch.arange(batch_size, dtype=torch.long, device=logits.device)
seq_idx = torch.arange(seq_len, dtype=torch.long, device=logits.device)
lens = (mask.long().sum(0) - 1)
# idxes [L, B], batched idx from seq_len-1 to 0
idxes = (lens.view(1, -1) - seq_idx.view(-1, 1)) % seq_len
ans = logits.new_empty((seq_len, batch_size), dtype=torch.long)
ans_score, last_tags = vscore.max(1)
ans[idxes[0], batch_idx] = last_tags
for i in range(seq_len - 1):
last_tags = vpath[idxes[i], batch_idx, last_tags]
ans[idxes[i + 1], batch_idx] = last_tags
ans = ans.transpose(0, 1)
if unpad:
paths = []
for idx, seq_len in enumerate(lens):
paths.append(ans[idx, :seq_len + 1].tolist())
else:
paths = ans
return paths, ans_score
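# A minimal usage sketch (not part of the fastNLP API; shapes follow the docstring above):
# batch_size=2, max_len=3, n_tags=4, with one padding position in the second sequence.
if __name__ == "__main__":
    example_logits = torch.randn(2, 3, 4)
    example_transitions = torch.randn(4, 4)
    example_mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.uint8)
    example_paths, example_scores = viterbi_decode(
        example_logits, example_transitions, mask=example_mask, unpad=True)
    print(example_paths)   # two tag lists, of lengths 3 and 2
    print(example_scores)  # FloatTensor of shape (2,): score of each best path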
| 38.85 | 103 | 0.651223 |
8e90ea4e385a8a80d6b8417f107b3a896ee4f7ec | 3,727 | py | Python | cryptfiles/tests/test_crypter.py | wlsouza/cryptfiles | 1fe22a081f2d89a1f14c95107b01891ead60aada | [
"MIT"
] | 1 | 2021-11-29T20:08:55.000Z | 2021-11-29T20:08:55.000Z | cryptfiles/tests/test_crypter.py | wlsouza/cryptfiles | 1fe22a081f2d89a1f14c95107b01891ead60aada | [
"MIT"
] | null | null | null | cryptfiles/tests/test_crypter.py | wlsouza/cryptfiles | 1fe22a081f2d89a1f14c95107b01891ead60aada | [
"MIT"
] | null | null | null | import pytest
from unittest import mock
from cryptfiles.tests.utils import create_a_random_test_file
from cryptfiles.app import Crypter
@pytest.fixture
def encryptmode_crypter(testfiles_path) -> Crypter:
return Crypter(mode="encrypt",target=testfiles_path,key_scan=False)
@pytest.fixture
def decryptmode_crypter(testfiles_path) -> Crypter:
return Crypter(mode="decrypt",target=testfiles_path,key_scan=False)
#region locate_files method
def test_locate_files_method_must_return_a_list_with_file_paths_if_exist_files(encryptmode_crypter):
expected = [
create_a_random_test_file(encryptmode_crypter.target,"jpg"),
create_a_random_test_file(encryptmode_crypter.target,"txt")
]
result = encryptmode_crypter.locate_files()
assert sorted(result) == sorted(expected)
def test_locate_files_method_must_return_a_empty_list_if_not_exist_files(encryptmode_crypter):
result = encryptmode_crypter.locate_files()
assert result == []
def test_locate_files_method_must_list_just_files_with_valid_extension(encryptmode_crypter):
expected = [
create_a_random_test_file(encryptmode_crypter.target,"jpg"),
create_a_random_test_file(encryptmode_crypter.target,"txt"),
]
create_a_random_test_file(encryptmode_crypter.target,"notallowedextension")
result = encryptmode_crypter.locate_files()
assert sorted(result) == sorted(expected)
def test_locate_files_method_work_with_a_specific_file_instead_a_directory(testfiles_path):
expected = [
create_a_random_test_file(testfiles_path,"txt")
]
crypter = Crypter(mode="encrypt",target=expected[0], key_scan=False)
result = crypter.locate_files()
assert result == expected
def test_locate_files_method_returns_empty_list_if_specific_file_does_not_exist(testfiles_path):
crypter = Crypter(mode="encrypt",target=f"{testfiles_path}/test_non_existent_file.txt",key_scan=False)
result = crypter.locate_files()
assert result == []
def test_locate_files_method_returns_empty_list_if_specific_file_extension_is_not_allowed(testfiles_path):
file_path = create_a_random_test_file(testfiles_path,"notallowedextension")
crypter = Crypter(mode="encrypt",target=file_path, key_scan=False)
result = crypter.locate_files()
assert result == []
#endregion
#region is_ext_allowed method
def test_is_ext_allowed_method_returns_true_when_file_ext_is_in_allowed_extensions_list_of_crypter(encryptmode_crypter):
ext = encryptmode_crypter.allowed_extensions[0]
file_path = create_a_random_test_file(encryptmode_crypter.target,ext)
result = encryptmode_crypter.is_ext_allowed(file_path)
assert result == True
def test_is_ext_allowed_method_returns_false_when_file_ext_is_not_in_allowed_extensions_list_of_crypter(encryptmode_crypter):
file_path = create_a_random_test_file(encryptmode_crypter.target,"notallowedextension")
result = encryptmode_crypter.is_ext_allowed(file_path)
assert result == False
#endregion
@mock.patch.object(Crypter, "encrypt_files")
def test_if_execute_selected_mode_method_calls_encrypt_files_method_passing_the_files_list_when_crypter_class_is_instanciated_in_encrypt_mode(encrypt_files, encryptmode_crypter):
files = [mock.Mock(), mock.Mock()]
encryptmode_crypter.execute_selected_mode(files)
encrypt_files.assert_called_with(files)
@mock.patch.object(Crypter, "decrypt_files")
def test_if_execute_selected_mode_method_calls_encrypt_files_method_passing_the_files_list_when_crypter_class_is_instanciated_in_decrypt_mode(decrypt_files, decryptmode_crypter):
files = [mock.Mock(), mock.Mock()]
decryptmode_crypter.execute_selected_mode(files)
decrypt_files.assert_called_with(files)
encryptmode_crypter | 45.45122 | 178 | 0.821572 |
21018eb479152c66ba82c9d4310d81494755757c | 3,980 | py | Python | allure-pytest/test/steps/nested_steps_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | null | null | null | allure-pytest/test/steps/nested_steps_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | null | null | null | allure-pytest/test/steps/nested_steps_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | null | null | null | """
>>> getfixture('inject_matchers')
>>> allure_report = getfixture('allure_report')
>>> assert_that(allure_report,
... all_of(
... has_property('test_cases', has_length(32)),
... has_property('test_groups', has_length(0))
... )) # doctest: +SKIP
>>> for test_case_name in ['test_nested_steps_inside_test',
... 'test_nested_steps_outside_test',
... 'test_mixed_nested_steps',
... 'test_again_mixed_nested_steps']:
...
... for first_fail_before_second, first_fail_after_second, second_fail in fruit_machine:
... test_case = '{name}[{first_fail_before}-{first_fail_after}-{second_fail}]'.format(
... name=test_case_name,
... first_fail_before=first_fail_before_second,
... first_fail_after=first_fail_after_second,
... second_fail=second_fail)
...
... if first_fail_before_second:
... assert_that(allure_report,
... has_test_case(test_case,
... has_step('First step',
... with_status('failed')
... )),
... is_not(has_step('Second step'))
... )
...
... if not first_fail_before_second:
... assert_that(allure_report,
... has_test_case(test_case,
... has_step('First step',
... with_status('failed' if (first_fail_after_second or second_fail) else 'passed'),
... has_step('Second step',
... with_status('failed' if second_fail else 'passed')
... )
... )
... ))
"""
import pytest
import allure
from itertools import product
fruit_machine = [variants for variants in product([True, False], [True, False], [True, False])]
@pytest.mark.parametrize("first_fail_before_second, first_fail_after_second, second_fail", fruit_machine)
def test_nested_steps_inside_test(first_fail_before_second, first_fail_after_second, second_fail):
with allure.step('First step'):
assert not first_fail_before_second
with allure.step('Second step'):
assert not second_fail
assert not first_fail_after_second
@allure.step("Second step")
def second_step(second_fail):
assert not second_fail
@allure.step("First step")
def another_first_step(first_fail_before_second, first_fail_after_second, second_fail):
assert not first_fail_before_second
second_step(second_fail)
assert not first_fail_after_second
@pytest.mark.parametrize("first_fail_before_second, first_fail_after_second, second_fail", fruit_machine)
def test_nested_steps_outside_test(first_fail_before_second, first_fail_after_second, second_fail):
another_first_step(first_fail_before_second, first_fail_after_second, second_fail)
@allure.step("First step")
def yet_another_first_step(first_fail_before_second, first_fail_after_second, second_fail):
assert not first_fail_before_second
with allure.step('Second step'):
assert not second_fail
assert not first_fail_after_second
@pytest.mark.parametrize("first_fail_before_second, first_fail_after_second, second_fail", fruit_machine)
def test_mixed_nested_steps(first_fail_before_second, first_fail_after_second, second_fail):
yet_another_first_step(first_fail_before_second, first_fail_after_second, second_fail)
@pytest.mark.parametrize("first_fail_before_second, first_fail_after_second, second_fail", fruit_machine)
def test_again_mixed_nested_steps(first_fail_before_second, first_fail_after_second, second_fail):
with allure.step('First step'):
assert not first_fail_before_second
second_step(second_fail)
assert not first_fail_after_second
| 40.612245 | 116 | 0.654271 |
f9ca47f116bf07dfb1bfc5f547ceda115862f284 | 8,217 | py | Python | tests/web/classes/test_elements.py | DavidCain/python-slackclient | 687695be8527fd0a864a87a2b03066e508c6588a | [
"MIT"
] | null | null | null | tests/web/classes/test_elements.py | DavidCain/python-slackclient | 687695be8527fd0a864a87a2b03066e508c6588a | [
"MIT"
] | 1 | 2019-10-03T13:53:08.000Z | 2019-10-03T13:53:08.000Z | tests/web/classes/test_elements.py | DavidCain/python-slackclient | 687695be8527fd0a864a87a2b03066e508c6588a | [
"MIT"
] | null | null | null | import unittest
from slack.errors import SlackObjectFormationError
from slack.web.classes.elements import (
ButtonElement,
ChannelSelectElement,
ConversationSelectElement,
ExternalDataSelectElement,
ImageElement,
LinkButtonElement,
SelectElement,
UserSelectElement,
)
from slack.web.classes.objects import ConfirmObject, Option
from . import STRING_3001_CHARS, STRING_301_CHARS
class InteractiveElementTests(unittest.TestCase):
def test_action_id(self):
with self.assertRaises(SlackObjectFormationError):
ButtonElement(
text="click me!", action_id=STRING_301_CHARS, value="clickable button"
).to_dict()
class ButtonElementTests(unittest.TestCase):
def test_json(self):
self.assertDictEqual(
ButtonElement(
text="button text", action_id="some_button", value="button_123"
).to_dict(),
{
"text": {"emoji": True, "text": "button text", "type": "plain_text"},
"action_id": "some_button",
"value": "button_123",
"type": "button",
},
)
confirm = ConfirmObject(title="really?", text="are you sure?")
self.assertDictEqual(
ButtonElement(
text="button text",
action_id="some_button",
value="button_123",
style="primary",
confirm=confirm,
).to_dict(),
{
"text": {"emoji": True, "text": "button text", "type": "plain_text"},
"action_id": "some_button",
"value": "button_123",
"type": "button",
"style": "primary",
"confirm": confirm.to_dict(),
},
)
def test_text_length(self):
with self.assertRaises(SlackObjectFormationError):
ButtonElement(
text=STRING_301_CHARS, action_id="button", value="click_me"
).to_dict()
def test_value_length(self):
with self.assertRaises(SlackObjectFormationError):
ButtonElement(
text="Button", action_id="button", value=STRING_3001_CHARS
).to_dict()
def test_invalid_style(self):
with self.assertRaises(SlackObjectFormationError):
ButtonElement(
text="Button", action_id="button", value="button", style="invalid"
).to_dict()
class LinkButtonElementTests(unittest.TestCase):
def test_json(self):
button = LinkButtonElement(text="button text", url="http://google.com")
self.assertDictEqual(
button.to_dict(),
{
"text": {"emoji": True, "text": "button text", "type": "plain_text"},
"url": "http://google.com",
"type": "button",
"value": "",
"action_id": button.action_id,
},
)
def test_url_length(self):
with self.assertRaises(SlackObjectFormationError):
LinkButtonElement(text="Button", url=STRING_3001_CHARS).to_dict()
class ImageElementTests(unittest.TestCase):
def test_json(self):
self.assertDictEqual(
ImageElement(
image_url="http://google.com", alt_text="not really an image"
).to_dict(),
{
"image_url": "http://google.com",
"alt_text": "not really an image",
"type": "image",
},
)
def test_image_url_length(self):
with self.assertRaises(SlackObjectFormationError):
ImageElement(image_url=STRING_3001_CHARS, alt_text="text").to_dict()
def test_alt_text_length(self):
with self.assertRaises(SlackObjectFormationError):
ImageElement(
image_url="http://google.com", alt_text=STRING_3001_CHARS
).to_dict()
class SelectElementTests(unittest.TestCase):
option_one = Option.from_single_value("one")
option_two = Option.from_single_value("two")
options = [option_one, option_two, Option.from_single_value("three")]
def test_json(self):
self.maxDiff = None
self.assertDictEqual(
SelectElement(
placeholder="selectedValue",
action_id="dropdown",
options=self.options,
initial_option=self.option_two,
).to_dict(),
{
"placeholder": {
"emoji": True,
"text": "selectedValue",
"type": "plain_text",
},
"action_id": "dropdown",
"options": [o.to_dict("block") for o in self.options],
"initial_option": self.option_two.to_dict(),
"type": "static_select",
},
)
self.assertDictEqual(
SelectElement(
placeholder="selectedValue",
action_id="dropdown",
options=self.options,
confirm=ConfirmObject(title="title", text="text"),
).to_dict(),
{
"placeholder": {
"emoji": True,
"text": "selectedValue",
"type": "plain_text",
},
"action_id": "dropdown",
"options": [o.to_dict("block") for o in self.options],
"confirm": ConfirmObject(title="title", text="text").to_dict("block"),
"type": "static_select",
},
)
def test_options_length(self):
with self.assertRaises(SlackObjectFormationError):
SelectElement(
placeholder="select",
action_id="selector",
options=[self.option_one] * 101,
).to_dict()
class ExternalDropdownElementTests(unittest.TestCase):
def test_json(self):
self.assertDictEqual(
ExternalDataSelectElement(
placeholder="selectedValue", action_id="dropdown", min_query_length=5
).to_dict(),
{
"placeholder": {
"emoji": True,
"text": "selectedValue",
"type": "plain_text",
},
"action_id": "dropdown",
"min_query_length": 5,
"type": "external_select",
},
)
self.assertDictEqual(
ExternalDataSelectElement(
placeholder="selectedValue",
action_id="dropdown",
confirm=ConfirmObject(title="title", text="text"),
).to_dict(),
{
"placeholder": {
"emoji": True,
"text": "selectedValue",
"type": "plain_text",
},
"action_id": "dropdown",
"confirm": ConfirmObject(title="title", text="text").to_dict("block"),
"type": "external_select",
},
)
class DynamicDropdownTests(unittest.TestCase):
dynamic_types = {UserSelectElement, ConversationSelectElement, ChannelSelectElement}
def test_json(self):
for dropdown_type in self.dynamic_types:
with self.subTest(dropdown_type=dropdown_type):
self.assertDictEqual(
dropdown_type(
placeholder="abc",
action_id="dropdown",
# somewhat silly abuse of kwargs ahead:
**{f"initial_{dropdown_type.initial_object_type}": "def"},
).to_dict(),
{
"placeholder": {
"emoji": True,
"text": "abc",
"type": "plain_text",
},
"action_id": "dropdown",
f"initial_{dropdown_type.initial_object_type}": "def",
"type": f"{dropdown_type.initial_object_type}s_select",
},
)
| 34.52521 | 88 | 0.515517 |
b2f7fb8b602bfdd673abb75e7f89ca8dc32301c9 | 1,400 | py | Python | coretabs ATM py/withdraw.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | coretabs ATM py/withdraw.py | attia7/Test | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | 11 | 2020-03-24T17:40:26.000Z | 2022-01-13T01:42:38.000Z | coretabs ATM py/withdraw.py | attia7/AttiaGit | c74f09816ba2e0798b0533e31ea8b72249dec598 | [
"MIT"
] | null | null | null | balance = 700
papers = [100, 50, 10, 5, 4, 3, 2, 1]
def withdraw(balance, request):
    if balance < request:
        print('Sorry, you tried to withdraw {0}, but your balance is only {1}'.format(request, balance))
else:
print ('your balance >>', balance)
orgnal_request = request
while request > 0:
for i in papers:
while request >= i:
print('give', i)
request-=i
balance -= orgnal_request
return balance
def withdraw1(balance, request):
give = 0
    if balance < request:
        print('Sorry, you tried to withdraw {0}, but your balance is only {1}'.format(request, balance))
else:
print ('your balance >>', balance)
balance -= request
while request > 0:
if request >= 100:
give = 100
elif request >= 50:
give = 50
elif request >= 10:
give = 10
elif request >= 5:
give = 5
else :
give = request
print('give',give)
request -= give
return balance
balance = withdraw(balance, 777)
balance = withdraw(balance, 276)
balance = withdraw1(balance, 276)
balance = withdraw(balance, 34)
balance = withdraw1(balance, 5)
balance = withdraw1(balance, 500) | 30.434783 | 103 | 0.512857 |
22f17720d9d324a4e80ec0068131433036c82c68 | 53,929 | py | Python | Bio/Emboss/Applications.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | Bio/Emboss/Applications.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | Bio/Emboss/Applications.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # Copyright 2001-2009 Brad Chapman.
# Revisions copyright 2009-2010 by Peter Cock.
# Revisions copyright 2009 by David Winter.
# Revisions copyright 2009-2010 by Leighton Pritchard.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to interact with and run various EMBOSS programs.
These classes follow the AbstractCommandline interfaces for running
programs.
"""
from __future__ import print_function
from Bio.Application import _Option, _Switch, AbstractCommandline
__docformat__ = "restructuredtext en"
class _EmbossMinimalCommandLine(AbstractCommandline):
"""Base Commandline object for EMBOSS wrappers (PRIVATE).
This is provided for subclassing, it deals with shared options
common to all the EMBOSS tools:
- auto Turn off prompts
- stdout Write standard output
- filter Read standard input, write standard output
- options Prompt for standard and additional values
- debug Write debug output to program.dbg
- verbose Report some/full command line options
- help Report command line options. More
information on associated and general
qualifiers can be found with -help -verbose
- warning Report warnings
- error Report errors
- fatal Report fatal errors
- die Report dying program messages
"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
extra_parameters = [
_Switch(["-auto", "auto"],
"""Turn off prompts.
Automatic mode disables prompting, so we recommend you set
this argument all the time when calling an EMBOSS tool from
Biopython.
"""),
_Switch(["-stdout", "stdout"],
"Write standard output."),
_Switch(["-filter", "filter"],
"Read standard input, write standard output."),
_Switch(["-options", "options"],
"""Prompt for standard and additional values.
If you are calling an EMBOSS tool from within Biopython,
we DO NOT recommend using this option.
"""),
_Switch(["-debug", "debug"],
"Write debug output to program.dbg."),
_Switch(["-verbose", "verbose"],
"Report some/full command line options"),
_Switch(["-help", "help"],
"""Report command line options.
More information on associated and general qualifiers can
be found with -help -verbose
"""),
_Switch(["-warning", "warning"],
"Report warnings."),
_Switch(["-error", "error"],
"Report errors."),
_Switch(["-die", "die"],
"Report dying program messages."),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
AbstractCommandline.__init__(self, cmd, **kwargs)
class _EmbossCommandLine(_EmbossMinimalCommandLine):
"""Base Commandline object for EMBOSS wrappers (PRIVATE).
This is provided for subclassing, it deals with shared options
common to all the EMBOSS tools plus:
- outfile Output filename
"""
def __init__(self, cmd=None, **kwargs):
assert cmd is not None
extra_parameters = [
_Option(["-outfile", "outfile"],
"Output filename",
filename=True),
]
try:
# Insert extra parameters - at the start just in case there
# are any arguments which must come last:
self.parameters = extra_parameters + self.parameters
except AttributeError:
# Should we raise an error? The subclass should have set this up!
self.parameters = extra_parameters
_EmbossMinimalCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
# Check the outfile, filter, or stdout option has been set.
# We can't simply do this via the required flag for the outfile
# output - this seems the simplest solution.
if not (self.outfile or self.filter or self.stdout):
raise ValueError("You must either set outfile (output filename), "
"or enable filter or stdout (output to stdout).")
return _EmbossMinimalCommandLine._validate(self)
class Primer3Commandline(_EmbossCommandLine):
"""Commandline object for the Primer3 interface from EMBOSS.
The precise set of supported arguments depends on your version of EMBOSS.
This version accepts arguments current at EMBOSS 6.1.0, but in order to
    remain backwards compatible it also supports the old argument names as well.
e.g. Using EMBOSS 6.1.0 or later,
>>> cline = Primer3Commandline(sequence="mysequence.fas", auto=True, hybridprobe=True)
>>> cline.explainflag = True
>>> cline.osizeopt=20
>>> cline.psizeopt=200
>>> cline.outfile = "myresults.out"
>>> cline.bogusparameter = 1967 # Invalid parameter
Traceback (most recent call last):
...
ValueError: Option name bogusparameter was not found.
>>> print(cline)
eprimer3 -auto -outfile=myresults.out -sequence=mysequence.fas -hybridprobe=True -psizeopt=200 -osizeopt=20 -explainflag=True
The equivalent for anyone still using an older version of EMBOSS would be:
>>> cline = Primer3Commandline(sequence="mysequence.fas", auto=True, hybridprobe=True)
>>> cline.explainflag = True
>>> cline.oligosize=20 # Old EMBOSS, instead of osizeopt
>>> cline.productosize=200 # Old EMBOSS, instead of psizeopt
>>> cline.outfile = "myresults.out"
>>> print(cline)
eprimer3 -auto -outfile=myresults.out -sequence=mysequence.fas -hybridprobe=True -productosize=200 -oligosize=20 -explainflag=True
"""
def __init__(self, cmd="eprimer3", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence to choose primers from.",
is_required=True),
_Option(["-task", "task"],
"Tell eprimer3 what task to perform."),
_Option(["-hybridprobe", "hybridprobe"],
"Find an internal oligo to use as a hyb probe."),
_Option(["-numreturn", "numreturn"],
"Maximum number of primer pairs to return."),
_Option(["-includedregion", "includedregion"],
"Subregion of the sequence in which to pick primers."),
_Option(["-target", "target"],
"Sequence to target for flanking primers."),
_Option(["-excludedregion", "excludedregion"],
"Regions to exclude from primer picking."),
_Option(["-forwardinput", "forwardinput"],
"Sequence of a forward primer to check."),
_Option(["-reverseinput", "reverseinput"],
"Sequence of a reverse primer to check."),
_Option(["-gcclamp", "gcclamp"],
"The required number of Gs and Cs at the 3' of each primer."),
_Option(["-osize", "osize"],
"Optimum length of a primer oligo."),
_Option(["-minsize", "minsize"],
"Minimum length of a primer oligo."),
_Option(["-maxsize", "maxsize"],
"Maximum length of a primer oligo."),
_Option(["-otm", "otm"],
"""Melting temperature for primer oligo (OBSOLETE).
Option replaced in EMBOSS 6.6.0 by -opttm
"""),
_Option(["-opttm", "opttm"],
"""Optimum melting temperature for a primer oligo.
Option added in EMBOSS 6.6.0, replacing -otm
"""),
_Option(["-mintm", "mintm"],
"Minimum melting temperature for a primer oligo."),
_Option(["-maxtm", "maxtm"],
"Maximum melting temperature for a primer oligo."),
_Option(["-maxdifftm", "maxdifftm"],
"Maximum difference in melting temperatures between "
"forward and reverse primers."),
_Option(["-ogcpercent", "ogcpercent"],
"Optimum GC% for a primer."),
_Option(["-mingc", "mingc"],
"Minimum GC% for a primer."),
_Option(["-maxgc", "maxgc"],
"Maximum GC% for a primer."),
_Option(["-saltconc", "saltconc"],
"Millimolar salt concentration in the PCR."),
_Option(["-dnaconc", "dnaconc"],
"Nanomolar concentration of annealing oligos in the PCR."),
_Option(["-maxpolyx", "maxpolyx"],
"Maximum allowable mononucleotide repeat length in a primer."),
# Primer length:
_Option(["-productosize", "productosize"],
"""Optimum size for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -psizeopt
"""),
_Option(["-psizeopt", "psizeopt"],
"""Optimum size for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productosize
"""),
_Option(["-productsizerange", "productsizerange"],
"""Acceptable range of length for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -prange
"""),
_Option(["-prange", "prange"],
"""Acceptable range of length for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productsizerange
"""),
# Primer temperature:
_Option(["-productotm", "productotm"],
"""Optimum melting temperature for the PCR product (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ptmopt
"""),
_Option(["-ptmopt", "ptmopt"],
"""Optimum melting temperature for the PCR product.
Option added in EMBOSS 6.1.0, replacing -productotm
"""),
_Option(["-productmintm", "productmintm"],
"""Minimum allowed melting temperature for the amplicon (OBSOLETE)
Option replaced in EMBOSS 6.1.0 by -ptmmin
"""),
_Option(["-ptmmin", "ptmmin"],
"""Minimum allowed melting temperature for the amplicon."),
Option added in EMBOSS 6.1.0, replacing -productmintm
"""),
_Option(["-productmaxtm", "productmaxtm"],
"""Maximum allowed melting temperature for the amplicon (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ptmmax
"""),
_Option(["-ptmmax", "ptmmax"],
"""Maximum allowed melting temperature for the amplicon."),
Option added in EMBOSS 6.1.0, replacing -productmaxtm
"""),
# Note to self, should be -oexcludedregion not -oexcluderegion
_Option(["-oexcludedregion", "oexcludedregion"],
"""Do not pick internal oligos in this region."),
Option added in EMBOSS 6.1.0, replacing -oligoexcludedregion.
"""),
_Option(["-oligoexcludedregion", "oligoexcludedregion"],
"""Do not pick internal oligos in this region (OBSOLETE)."),
Option replaced in EMBOSS 6.1.0 by -oexcluderegion.
"""),
_Option(["-oligoinput", "oligoinput"],
"Sequence of the internal oligo."),
# Oligo length:
_Option(["-oligosize", "oligosize"],
"""Optimum length of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -osizeopt.
"""),
_Option(["-osizeopt", "osizeopt"],
"""Optimum length of internal oligo.
Option added in EMBOSS 6.1.0, replaces -oligosize
"""),
_Option(["-oligominsize", "oligominsize"],
"""Minimum length of internal oligo (OBSOLETE)."),
Option replaced in EMBOSS 6.1.0 by -ominsize.
"""),
_Option(["-ominsize", "ominsize"],
"""Minimum length of internal oligo."
Option added in EMBOSS 6.1.0, replaces -oligominsize
"""),
_Option(["-oligomaxsize", "oligomaxsize"],
"""Maximum length of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -omaxsize.
"""),
_Option(["-omaxsize", "omaxsize"],
"""Maximum length of internal oligo.
Option added in EMBOSS 6.1.0, replaces -oligomaxsize
"""),
# Oligo GC temperature:
_Option(["-oligotm", "oligotm"],
"""Optimum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmopt.
"""),
_Option(["-otmopt", "otmopt"],
"""Optimum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0.
"""),
_Option(["-oligomintm", "oligomintm"],
"""Minimum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmmin.
"""),
_Option(["-otmmin", "otmmin"],
"""Minimum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomintm
"""),
_Option(["-oligomaxtm", "oligomaxtm"],
"""Maximum melting temperature of internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -otmmax.
"""),
_Option(["-otmmax", "otmmax"],
"""Maximum melting temperature of internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomaxtm
"""),
# Oligo GC percent:
_Option(["-oligoogcpercent", "oligoogcpercent"],
"""Optimum GC% for internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ogcopt.
"""),
_Option(["-ogcopt", "ogcopt"],
"""Optimum GC% for internal oligo."
Option added in EMBOSS 6.1.0, replacing -oligoogcpercent
"""),
_Option(["-oligomingc", "oligomingc"],
"""Minimum GC% for internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -ogcmin.
"""),
_Option(["-ogcmin", "ogcmin"],
"""Minimum GC% for internal oligo.
Option added in EMBOSS 6.1.0, replacing -oligomingc
"""),
_Option(["-oligomaxgc", "oligomaxgc"],
"""Maximum GC% for internal oligo.
Option replaced in EMBOSS 6.1.0 by -ogcmax
"""),
_Option(["-ogcmax", "ogcmax"],
"""Maximum GC% for internal oligo."),
Option added in EMBOSS 6.1.0, replacing -oligomaxgc
"""),
# Oligo salt concentration:
_Option(["-oligosaltconc", "oligosaltconc"],
"""Millimolar concentration of salt in the hybridisation."),
Option replaced in EMBOSS 6.1.0 by -osaltconc
"""),
_Option(["-osaltconc", "osaltconc"],
"""Millimolar concentration of salt in the hybridisation."),
Option added in EMBOSS 6.1.0, replacing -oligosaltconc
"""),
_Option(["-oligodnaconc", "oligodnaconc"],
"""Nanomolar concentration of internal oligo in the hybridisation.
Option replaced in EMBOSS 6.1.0 by -odnaconc
"""),
_Option(["-odnaconc", "odnaconc"],
"""Nanomolar concentration of internal oligo in the hybridisation.
Option added in EMBOSS 6.1.0, replacing -oligodnaconc
"""),
# Oligo self complementarity
_Option(["-oligoselfany", "oligoselfany"],
"""Maximum allowable alignment score for self-complementarity (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -oanyself
"""),
_Option(["-oanyself", "oanyself"],
"""Maximum allowable alignment score for self-complementarity."),
Option added in EMBOSS 6.1.0, replacing -oligoselfany
"""),
_Option(["-oligoselfend", "oligoselfend"],
"""Maximum allowable 3`-anchored global alignment score "
for self-complementarity (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -oendself
"""),
_Option(["-oendself", "oendself"],
"""Max 3`-anchored self-complementarity global alignment score.
Option added in EMBOSS 6.1.0, replacing -oligoselfend
"""),
_Option(["-oligomaxpolyx", "oligomaxpolyx"],
"""Maximum length of mononucleotide repeat in internal oligo (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -opolyxmax
"""),
_Option(["-opolyxmax", "opolyxmax"],
"""Maximum length of mononucleotide repeat in internal oligo."),
Option added in EMBOSS 6.1.0, replacing -oligomaxpolyx
"""),
_Option(["-mispriminglibraryfile", "mispriminglibraryfile"],
"File containing library of sequences to avoid amplifying"),
_Option(["-maxmispriming", "maxmispriming"],
"Maximum allowed similarity of primers to sequences in "
"library specified by -mispriminglibrary"),
_Option(["-oligomaxmishyb", "oligomaxmishyb"],
"""Maximum alignment score for hybridisation of internal oligo to
library specified by -oligomishyblibraryfile (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -omishybmax
"""),
_Option(["-omishybmax", "omishybmax"],
"""Maximum alignment score for hybridisation of internal oligo to
library specified by -mishyblibraryfile.
Option added in EMBOSS 6.1.0, replacing -oligomaxmishyb
"""),
_Option(["-oligomishyblibraryfile", "oligomishyblibraryfile"],
"""Library file of seqs to avoid internal oligo hybridisation (OBSOLETE).
Option replaced in EMBOSS 6.1.0 by -mishyblibraryfile
"""),
_Option(["-mishyblibraryfile", "mishyblibraryfile"],
"""Library file of seqs to avoid internal oligo hybridisation.
Option added in EMBOSS 6.1.0, replacing -oligomishyblibraryfile
"""),
_Option(["-explainflag", "explainflag"],
"Produce output tags with eprimer3 statistics"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class PrimerSearchCommandline(_EmbossCommandLine):
"""Commandline object for the primersearch program from EMBOSS.
"""
def __init__(self, cmd="primersearch", **kwargs):
self.parameters = [
_Option(["-seqall", "-sequences", "sequences", "seqall"],
"Sequence to look for the primer pairs in.",
is_required=True),
# When this wrapper was written primersearch used -sequences
# as the argument name. Since at least EMBOSS 5.0 (and
# perhaps earlier) this has been -seqall instead.
_Option(["-infile", "-primers", "primers", "infile"],
"File containing the primer pairs to search for.",
filename=True,
is_required=True),
# When this wrapper was written primersearch used -primers
# as the argument name. Since at least EMBOSS 5.0 (and
# perhaps earlier) this has been -infile instead.
_Option(["-mismatchpercent", "mismatchpercent"],
"Allowed percentage mismatch (any integer value, default 0).",
is_required=True),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FDNADistCommandline(_EmbossCommandLine):
"""Commandline object for the fdnadist program from EMBOSS.
fdnadist is an EMBOSS wrapper for the PHYLIP program dnadist for
    calculating distance matrices from DNA sequence files.
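    A minimal usage sketch (the filenames are placeholders):

    >>> cline = FDNADistCommandline(sequence="alignment.phy", auto=True)
    >>> cline.method = "j"
    >>> cline.outfile = "dnadist.out"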
"""
def __init__(self, cmd="fdnadist", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-method", "method"],
"sub. model [f,k,j,l,s]",
is_required=True),
_Option(["-gamma", "gamma"],
"gamma [g, i,n]"),
_Option(["-ncategories", "ncategories"],
"number of rate catergories (1-9)"),
_Option(["-rate", "rate"],
"rate for each category"),
_Option(["-categories", "categories"],
"File of substitution rate categories"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-gammacoefficient", "gammacoefficient"],
"value for gamma (> 0.001)"),
_Option(["-invarfrac", "invarfrac"],
"proportoin of invariant sites"),
_Option(["-ttratio", "ttratio"],
"ts/tv ratio"),
_Option(["-freqsfrom", "freqsfrom"],
"use emprical base freqs"),
_Option(["-basefreq", "basefreq"],
"specify basefreqs"),
_Option(["-lower", "lower"],
"lower triangle matrix (y/N)")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FTreeDistCommandline(_EmbossCommandLine):
"""Commandline object for the ftreedist program from EMBOSS.
ftreedist is an EMBOSS wrapper for the PHYLIP program treedist used for
    calculating distance measures between phylogenetic trees.
"""
def __init__(self, cmd="ftreedist", **kwargs):
self.parameters = [
_Option(["-intreefile", "intreefile"],
"tree file to score (phylip)",
filename=True,
is_required=True),
_Option(["-dtype", "dtype"],
"distance type ([S]ymetric, [b]ranch score)"),
_Option(["-pairing", "pairing"],
"tree pairing method ([A]djacent pairs, all [p]ossible pairs)"),
_Option(["-style", "style"],
"output style - [V]erbose, [f]ill, [s]parse"),
_Option(["-noroot", "noroot"],
"treat trees as rooted [N/y]"),
_Option(["-outgrno", "outgrno"],
"which taxon to root the trees with (starts from 0)")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FNeighborCommandline(_EmbossCommandLine):
"""Commandline object for the fneighbor program from EMBOSS.
fneighbor is an EMBOSS wrapper for the PHYLIP program neighbor used for
    calculating neighbor-joining or UPGMA trees from distance matrices.
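    A minimal usage sketch (the filenames are placeholders):

    >>> cline = FNeighborCommandline(datafile="dnadist.out", auto=True)
    >>> cline.outfile = "fneighbor.out"
    >>> cline.outtreefile = "fneighbor.tree"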
"""
def __init__(self, cmd="fneighbor", **kwargs):
self.parameters = [
_Option(["-datafile", "datafile"],
"dist file to use (phylip)",
filename=True,
is_required=True),
_Option(["-matrixtype", "matrixtype"],
"is martrix [S]quare pr [u]pper or [l]ower"),
_Option(["-treetype", "treetype"],
"nj or UPGMA tree (n/u)"),
_Option(["-outgrno", "outgrno"],
"taxon to use as OG"),
_Option(["-jumble", "jumble"],
"randommise input order (Y/n)"),
_Option(["-seed", "seed"],
"provide a random seed"),
_Option(["-trout", "trout"],
"write tree (Y/n)"),
_Option(["-outtreefile", "outtreefile"],
"filename for output tree"),
_Option(["-progress", "progress"],
"print progress (Y/n)"),
_Option(["-treeprint", "treeprint"],
"print tree (Y/n)")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FSeqBootCommandline(_EmbossCommandLine):
"""Commandline object for the fseqboot program from EMBOSS.
fseqboot is an EMBOSS wrapper for the PHYLIP program seqboot used to
pseudo-sample alignment files.
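    A minimal usage sketch (the filenames are placeholders):

    >>> cline = FSeqBootCommandline(sequence="alignment.phy", auto=True)
    >>> cline.reps = 100
    >>> cline.outfile = "fseqboot.out"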
"""
def __init__(self, cmd="fseqboot", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to sample (phylip)",
filename=True,
is_required=True),
_Option(["-categories", "catergories"],
"file of input categories"),
_Option(["-weights", "weights"],
" weights file"),
_Option(["-test", "test"],
"specify operation, default is bootstrap"),
_Option(["-regular", "regular"],
"absolute number to resample"),
_Option(["-fracsample", "fracsample"],
"fraction to resample"),
_Option(["-rewriteformat", "rewriteformat"],
"output format ([P]hyilp, [n]exus, [x]ml"),
_Option(["-seqtype", "seqtype"],
"output format ([D]na, [p]rotein, [r]na"),
_Option(["-blocksize", "blocksize"],
"print progress (Y/n)"),
_Option(["-reps", "reps"],
"how many replicates, defaults to 100)"),
_Option(["-justweights", "jusweights"],
"what to write out [D]atasets of just [w]eights"),
_Option(["-seed", "seed"],
"specify random seed"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]"), ]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FDNAParsCommandline(_EmbossCommandLine):
"""Commandline object for the fdnapars program from EMBOSS.
fdnapars is an EMBOSS version of the PHYLIP program dnapars, for
    estimating trees from DNA sequences using parsimony. Calling this command
without providing a value for the option "-intreefile" will invoke
"interactive mode" (and as a result fail if called with subprocess) if
"-auto" is not set to true.
"""
def __init__(self, cmd="fdnapars", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-intreefile", "intreefile"],
"Phylip tree file"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-maxtrees", "maxtrees"],
"max trees to save during run"),
_Option(["-thorough", "thorough"],
"more thorough search (Y/n)"),
_Option(["-rearrange", "rearrange"],
"Rearrange on jsut 1 best tree (Y/n)"),
_Option(["-transversion", "transversion"],
"Use tranversion parsimony (y/N)"),
_Option(["-njumble", "njumble"],
"number of times to randomise input order (default is 0)"),
_Option(["-seed", "seed"],
"provide random seed"),
_Option(["-outgrno", "outgrno"],
"Specify outgroup"),
_Option(["-thresh", "thresh"],
"Use threshold parsimony (y/N)"),
_Option(["-threshold", "threshold"],
"Threshold value"),
_Option(["-trout", "trout"],
"Write trees to file (Y/n)"),
_Option(["-outtreefile", "outtreefile"],
"filename for output tree"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
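# Illustrative usage sketch (hypothetical filenames). As the docstring notes,
# supplying -intreefile avoids dropping into interactive mode:
#
#     fdnapars_cline = FDNAParsCommandline(sequence="align.phylip",
#                                          intreefile="start.tree",
#                                          outfile="fdnapars.out",
#                                          outtreefile="fdnapars.tree")
#     print(fdnapars_cline)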
class FProtParsCommandline(_EmbossCommandLine):
"""Commandline object for the fdnapars program from EMBOSS.
fprotpars is an EMBOSS version of the PHYLIP program protpars, for
estimating trees from protein sequences using parsiomny. Calling this
command without providing a value for the option "-intreefile" will invoke
"interactive mode" (and as a result fail if called with subprocess) if
"-auto" is not set to true.
"""
def __init__(self, cmd="fprotpars", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-intreefile", "intreefile"],
"Phylip tree file to score"),
_Option(["-outtreefile", "outtreefile"],
"phylip tree output file",
filename=True,
is_required=True),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-whichcode", "whichcode"],
"which genetic code, [U,M,V,F,Y]]"),
_Option(["-njumble", "njumble"],
"number of times to randomise input order (default is 0)"),
_Option(["-seed", "seed"],
"provide random seed"),
_Option(["-outgrno", "outgrno"],
"Specify outgroup"),
_Option(["-thresh", "thresh"],
"Use threshold parsimony (y/N)"),
_Option(["-threshold", "threshold"],
"Threshold value"),
_Option(["-trout", "trout"],
"Write trees to file (Y/n)"),
_Option(["-dotdiff", "dotdiff"],
"Use dot-differencing? [Y/n]")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FProtDistCommandline(_EmbossCommandLine):
"""Commandline object for the fprotdist program from EMBOSS.
fprotdist is an EMBOSS wrapper for the PHYLIP program protdist used to
    compute distance matrices from protein sequences.
"""
def __init__(self, cmd="fprotdist", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"seq file to use (phylip)",
filename=True,
is_required=True),
_Option(["-ncategories", "ncategories"],
"number of rate catergories (1-9)"),
_Option(["-rate", "rate"],
"rate for each category"),
_Option(["-catergories", "catergories"],
"file of rates"),
_Option(["-weights", "weights"],
"weights file"),
_Option(["-method", "method"],
"sub. model [j,h,d,k,s,c]"),
_Option(["-gamma", "gamma"],
"gamma [g, i,c]"),
_Option(["-gammacoefficient", "gammacoefficient"],
"value for gamma (> 0.001)"),
_Option(["-invarcoefficient", "invarcoefficient"],
"float for variation of substitution rate among sites"),
_Option(["-aacateg", "aacateg"],
"Choose the category to use [G,C,H]"),
_Option(["-whichcode", "whichcode"],
"genetic code [c,m,v,f,y]"),
_Option(["-ease", "ease"],
"Pob change catergory (float between -0 and 1)"),
_Option(["-ttratio", "ttratio"],
"Transition/transversion ratio (0-1)"),
_Option(["-basefreq", "basefreq"],
"DNA base frequencies (space separated list)")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FConsenseCommandline(_EmbossCommandLine):
"""Commandline object for the fconsense program from EMBOSS.
fconsense is an EMBOSS wrapper for the PHYLIP program consense used to
calculate consensus trees.
"""
def __init__(self, cmd="fconsense", **kwargs):
self.parameters = [
_Option(["-intreefile", "intreefile"],
"file with phylip trees to make consensus from",
filename=True,
is_required=True),
_Option(["-method", "method"],
"consensus method [s, mr, MRE, ml]"),
_Option(["-mlfrac", "mlfrac"],
"cut-off freq for a branch to appear in consensus (0.5-1.0)"),
_Option(["-root", "root"],
"treat trees as rooted (YES, no)"),
_Option(["-outgrno", "outgrno"],
"OTU to use as outgroup (starts from 0)"),
_Option(["-trout", "trout"],
"treat trees as rooted (YES, no)"),
_Option(["-outtreefile", "outtreefile"],
"Phylip tree output file (optional)")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
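# Illustrative usage sketch (hypothetical filenames): build a consensus tree
# from a file of Phylip/Newick trees, e.g. bootstrap replicates.
#
#     fconsense_cline = FConsenseCommandline(intreefile="bootstrap.trees",
#                                            outfile="fconsense.out",
#                                            outtreefile="consensus.tree")
#     print(fconsense_cline)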
class WaterCommandline(_EmbossCommandLine):
"""Commandline object for the water program from EMBOSS.
"""
def __init__(self, cmd="water", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
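# Illustrative usage sketch in the same spirit as the doctests elsewhere in
# this module (filenames are hypothetical):
#
#     water_cline = WaterCommandline(asequence="alpha.faa", bsequence="beta.faa",
#                                    gapopen=10, gapextend=0.5,
#                                    outfile="water.txt")
#     print(water_cline)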
class NeedleCommandline(_EmbossCommandLine):
"""Commandline object for the needle program from EMBOSS.
"""
def __init__(self, cmd="needle", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-endweight", "endweight"],
"Apply And gap penalties"),
_Option(["-endopen", "endopen"],
"The score taken away when an end gap is created."),
_Option(["-endextend", "endextend"],
"The score added to the end gap penality for each base or "
"residue in the end gap."),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
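# Illustrative usage sketch (hypothetical filenames): a global alignment with
# the same required options as water.
#
#     needle_cline = NeedleCommandline(asequence="alpha.faa", bsequence="beta.faa",
#                                      gapopen=10, gapextend=0.5,
#                                      outfile="needle.txt")
#     print(needle_cline)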
class NeedleallCommandline(_EmbossCommandLine):
"""Commandline object for the needleall program from EMBOSS.
"""
def __init__(self, cmd="needleall", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-minscore", "minscore"],
"Exclude alignments with scores below this threshold score."),
_Option(["-errorfile", "errorfile"],
"Error file to be written to."),
_Option(["-endweight", "endweight"],
"Apply And gap penalties"),
_Option(["-endopen", "endopen"],
"The score taken away when an end gap is created."),
_Option(["-endextend", "endextend"],
"The score added to the end gap penality for each base or "
"residue in the end gap."),
_Switch(["-nobrief", "nobrief"],
"Display extended identity and similarity"),
_Switch(["-brief", "brief"],
"Display brief identity and similarity"),
_Option(["-similarity", "similarity"],
"Display percent identity and similarity"),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class StretcherCommandline(_EmbossCommandLine):
"""Commandline object for the stretcher program from EMBOSS.
"""
def __init__(self, cmd="stretcher", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to align",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to align",
filename=True,
is_required=True),
_Option(["-gapopen", "gapopen"],
"Gap open penalty",
is_required=True,
checker_function=lambda value: isinstance(value, int)),
_Option(["-gapextend", "gapextend"],
"Gap extension penalty",
is_required=True,
checker_function=lambda value: isinstance(value, int)),
_Option(["-datafile", "datafile"],
"Matrix file",
filename=True),
_Option(["-snucleotide", "snucleotide"],
"Sequences are nucleotide (boolean)"),
_Option(["-sprotein", "sprotein"],
"Sequences are protein (boolean)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class FuzznucCommandline(_EmbossCommandLine):
"""Commandline object for the fuzznuc program from EMBOSS.
"""
def __init__(self, cmd="fuzznuc", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence database USA",
is_required=True),
_Option(["-pattern", "pattern"],
"Search pattern, using standard IUPAC one-letter codes",
is_required=True),
_Option(["-mismatch", "mismatch"],
"Number of mismatches",
is_required=True),
_Option(["-complement", "complement"],
"Search complementary strand"),
_Option(["-rformat", "rformat"],
"Specify the report format to output in.")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
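# Illustrative usage sketch: search for an EcoRI site (GAATTC) with no
# mismatches; the sequence filename is hypothetical.
#
#     fuzznuc_cline = FuzznucCommandline(sequence="genome.fasta",
#                                        pattern="GAATTC", mismatch=0,
#                                        outfile="fuzznuc.out")
#     print(fuzznuc_cline)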
class Est2GenomeCommandline(_EmbossCommandLine):
"""Commandline object for the est2genome program from EMBOSS.
"""
def __init__(self, cmd="est2genome", **kwargs):
self.parameters = [
_Option(["-est", "est"],
"EST sequence(s)",
is_required=True),
_Option(["-genome", "genome"],
"Genomic sequence",
is_required=True),
_Option(["-match", "match"],
"Score for matching two bases"),
_Option(["-mismatch", "mismatch"],
"Cost for mismatching two bases"),
_Option(["-gappenalty", "gappenalty"],
"Cost for deleting a single base in either sequence, "
"excluding introns"),
_Option(["-intronpenalty", "intronpenalty"],
"Cost for an intron, independent of length."),
_Option(["-splicepenalty", "splicepenalty"],
"Cost for an intron, independent of length "
"and starting/ending on donor-acceptor sites"),
_Option(["-minscore", "minscore"],
"Exclude alignments with scores below this threshold score."),
_Option(["-reverse", "reverse"],
"Reverse the orientation of the EST sequence"),
_Option(["-splice", "splice"],
"Use donor and acceptor splice sites."),
_Option(["-mode", "mode"],
"This determines the comparion mode. 'both', 'forward' "
"'reverse'"),
_Option(["-best", "best"],
"You can print out all comparisons instead of just the best"),
_Option(["-space", "space"],
"for linear-space recursion."),
_Option(["-shuffle", "shuffle"],
"Shuffle"),
_Option(["-seed", "seed"],
"Random number seed"),
_Option(["-align", "align"],
"Show the alignment."),
_Option(["-width", "width"],
"Alignment width")
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
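# Illustrative usage sketch (hypothetical filenames): align an EST against a
# genomic sequence, spliced at donor/acceptor sites.
#
#     est2genome_cline = Est2GenomeCommandline(est="est.fasta",
#                                               genome="genomic.fasta",
#                                               outfile="est2genome.out")
#     print(est2genome_cline)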
class ETandemCommandline(_EmbossCommandLine):
"""Commandline object for the etandem program from EMBOSS.
"""
def __init__(self, cmd="etandem", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-minrepeat", "minrepeat"],
"Minimum repeat size",
is_required=True),
_Option(["-maxrepeat", "maxrepeat"],
"Maximum repeat size",
is_required=True),
_Option(["-threshold", "threshold"],
"Threshold score"),
_Option(["-mismatch", "mismatch"],
"Allow N as a mismatch"),
_Option(["-uniform", "uniform"],
"Allow uniform consensus"),
_Option(["-rformat", "rformat"],
"Output report format")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class EInvertedCommandline(_EmbossCommandLine):
"""Commandline object for the einverted program from EMBOSS.
"""
def __init__(self, cmd="einverted", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-gap", "gap"],
"Gap penalty",
filename=True,
is_required=True),
_Option(["-threshold", "threshold"],
"Minimum score threshold",
is_required=True),
_Option(["-match", "match"],
"Match score",
is_required=True),
_Option(["-mismatch", "mismatch"],
"Mismatch score",
is_required=True),
_Option(["-maxrepeat", "maxrepeat"],
"Maximum separation between the start and end of repeat"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class PalindromeCommandline(_EmbossCommandLine):
"""Commandline object for the palindrome program from EMBOSS.
"""
def __init__(self, cmd="palindrome", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Sequence",
filename=True,
is_required=True),
_Option(["-minpallen", "minpallen"],
"Minimum palindrome length",
is_required=True),
_Option(["-maxpallen", "maxpallen"],
"Maximum palindrome length",
is_required=True),
_Option(["-gaplimit", "gaplimit"],
"Maximum gap between repeats",
is_required=True),
_Option(["-nummismatches", "nummismatches"],
"Number of mismatches allowed",
is_required=True),
_Option(["-overlap", "overlap"],
"Report overlapping matches",
is_required=True),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class TranalignCommandline(_EmbossCommandLine):
"""Commandline object for the tranalign program from EMBOSS.
"""
def __init__(self, cmd="tranalign", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"Nucleotide sequences to be aligned.",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Protein sequence alignment",
filename=True,
is_required=True),
_Option(["-outseq", "outseq"],
"Output sequence file.",
filename=True,
is_required=True),
_Option(["-table", "table"],
"Code to use")]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class DiffseqCommandline(_EmbossCommandLine):
"""Commandline object for the diffseq program from EMBOSS.
"""
def __init__(self, cmd="diffseq", **kwargs):
self.parameters = [
_Option(["-asequence", "asequence"],
"First sequence to compare",
filename=True,
is_required=True),
_Option(["-bsequence", "bsequence"],
"Second sequence to compare",
filename=True,
is_required=True),
_Option(["-wordsize", "wordsize"],
"Word size to use for comparisons (10 default)",
is_required=True),
_Option(["-aoutfeat", "aoutfeat"],
"File for output of first sequence's features",
filename=True,
is_required=True),
_Option(["-boutfeat", "boutfeat"],
"File for output of second sequence's features",
filename=True,
is_required=True),
_Option(["-rformat", "rformat"],
"Output report file format")
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
class IepCommandline(_EmbossCommandLine):
"""Commandline for EMBOSS iep: calculated isoelectric point and charge.
Example:
>>> from Bio.Emboss.Applications import IepCommandline
>>> iep_cline = IepCommandline(sequence="proteins.faa",
... outfile="proteins.txt")
>>> print(iep_cline)
iep -outfile=proteins.txt -sequence=proteins.faa
You would typically run the command line with iep_cline() or via the
Python subprocess module, as described in the Biopython tutorial.
"""
def __init__(self, cmd="iep", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Protein sequence(s) filename",
filename=True,
is_required=True),
_Option(["-amino", "amino"],
"""Number of N-termini
Integer 0 (default) or more.
"""),
_Option(["-carboxyl", "carboxyl"],
"""Number of C-termini
Integer 0 (default) or more.
"""),
_Option(["-lysinemodified", "lysinemodified"],
"""Number of modified lysines
Integer 0 (default) or more.
"""),
_Option(["-disulphides", "disulphides"],
"""Number of disulphide bridges
Integer 0 (default) or more.
"""),
# Should we implement the -termini switch as well?
_Option(["-notermini", "notermini"],
"Exclude (True) or include (False) charge at N and C terminus."),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
# seqret uses -outseq, not -outfile, so use the base class:
class SeqretCommandline(_EmbossMinimalCommandLine):
"""Commandline object for the seqret program from EMBOSS.
This tool allows you to interconvert between different sequence file
formats (e.g. GenBank to FASTA). Combining Biopython's Bio.SeqIO module
with seqret using a suitable intermediate file format can allow you to
read/write to an even wider range of file formats.
This wrapper currently only supports the core functionality, things like
feature tables (in EMBOSS 6.1.0 onwards) are not yet included.
"""
def __init__(self, cmd="seqret", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Input sequence(s) filename",
filename=True),
_Option(["-outseq", "outseq"],
"Output sequence file.",
filename=True),
_Option(["-sformat", "sformat"],
"Input sequence(s) format (e.g. fasta, genbank)"),
_Option(["-osformat", "osformat"],
"Output sequence(s) format (e.g. fasta, genbank)"),
]
_EmbossMinimalCommandLine.__init__(self, cmd, **kwargs)
def _validate(self):
# Check the outfile, filter, or stdout option has been set.
# We can't simply do this via the required flag for the outfile
# output - this seems the simplest solution.
if not (self.outseq or self.filter or self.stdout):
raise ValueError("You must either set outfile (output filename), "
"or enable filter or stdout (output to stdout).")
        if not (self.sequence or self.filter or self.stdin):
raise ValueError("You must either set sequence (input filename), "
"or enable filter or stdin (input from stdin).")
return _EmbossMinimalCommandLine._validate(self)
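# Illustrative usage sketch (hypothetical filenames): convert a GenBank file
# to FASTA. Note that seqret writes to -outseq rather than -outfile, which is
# why this wrapper validates its own output options above.
#
#     seqret_cline = SeqretCommandline(sequence="genes.gbk", outseq="genes.fasta",
#                                      sformat="genbank", osformat="fasta")
#     print(seqret_cline)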
class SeqmatchallCommandline(_EmbossCommandLine):
""" Commandline object for the seqmatchall program from EMBOSS
e.g.
>>> cline = SeqmatchallCommandline(sequence="opuntia.fasta", outfile="opuntia.txt")
>>> cline.auto = True
>>> cline.wordsize = 18
>>> cline.aformat = "pair"
>>> print(cline)
seqmatchall -auto -outfile=opuntia.txt -sequence=opuntia.fasta -wordsize=18 -aformat=pair
"""
def __init__(self, cmd="seqmatchall", **kwargs):
self.parameters = [
_Option(["-sequence", "sequence"],
"Readable set of sequences",
filename=True,
is_required=True),
_Option(["-wordsize", "wordsize"],
"Word size (Integer 2 or more, default 4)"),
_Option(["-aformat", "aformat"],
"Display output in a different specified output format"),
]
_EmbossCommandLine.__init__(self, cmd, **kwargs)
def _test():
"""Run the Bio.Emboss.Applications module doctests."""
import doctest
doctest.testmod()
if __name__ == "__main__":
# Run the doctests
_test()
| 42.564325 | 134 | 0.541341 |
1b387bf13fd27f6c7f0caca5558378790be870c8 | 22,373 | py | Python | contrib/devtools/copyright_header.py | SigxChain/sigx | 793d43fcd913853c3ed64ed8a5b9e42edc30d6fb | [
"MIT"
] | null | null | null | contrib/devtools/copyright_header.py | SigxChain/sigx | 793d43fcd913853c3ed64ed8a5b9e42edc30d6fb | [
"MIT"
] | 1 | 2019-09-27T08:26:37.000Z | 2019-09-27T08:26:37.000Z | contrib/devtools/copyright_header.py | anonymouszar/vestx | 466a8e2fd4c1b544e72051115908f3384fedbbe3 | [
"MIT"
] | 9 | 2019-04-03T19:31:36.000Z | 2019-11-24T22:19:03.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_vestx_NativeSecp256k1.c',
'src/secp256k1/src/java/org_vestx_NativeSecp256k1.h',
'src/secp256k1/src/java/org_vestx_Secp256k1Context.c',
'src/secp256k1/src/java/org_vestx_Secp256k1Context.h',
# univalue:
'src/univalue/test/object.cpp',
'src/univalue/lib/univalue_escapes.h',
# auto generated:
'src/qt/vestxstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
out = subprocess.check_output(GIT_LS_CMD.split(' '))
return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
filenames = call_git_ls()
return sorted([filename for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile('%s %s %s' % (copyright_style, year_style, name))
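# For illustration, the three pattern families compiled below match header
# lines such as (example years/holders only):
#
#     Copyright (c) 2009-2017 The Bitcoin Core developers    (dominant style)
#     Copyright (c) 2014, 2015 BitPay Inc.                    (year list style)
#     Copyright 2014 BitPay Inc.                               (no "(c)" style)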
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Bitcoin Core developers\n",
"The Bitcoin Core developers \n",
"VESTX Developers\n",
"the VESTX developers\n",
"The VESTX developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
return open(os.path.abspath(filename), 'r').read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
original_cwd = os.getcwd()
os.chdir(base_directory)
filenames = get_filenames_to_examine()
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a vestx source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
sys.exit("*** unknown argument: %s" % argv[2])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
git_log_lines = call_git_log(filename)
if len(git_log_lines) == 0:
return [datetime.date.today().year]
# timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
f = open(os.path.abspath(filename), 'r')
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
f = open(os.path.abspath(filename), 'w')
f.write(''.join(file_lines))
f.close()
################################################################################
# update header years execution
################################################################################
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
index = 0
for line in file_lines:
if UPDATEABLE_LINE_COMPILED.search(line) is not None:
return index, line
index = index + 1
return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
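# For illustration (hypothetical input):
#
#     create_updated_copyright_line(
#         "// Copyright (c) 2009-2015 The Bitcoin Core developers\n", "2017")
#
# returns "// Copyright (c) 2009-2017 The Bitcoin Core developers\n", while a
# line whose end year already equals the last git change year is returned
# unchanged.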
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
original_cwd = os.getcwd()
os.chdir(base_directory)
for filename in get_filenames_to_examine():
update_updatable_copyright(filename)
os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a vestx source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_python_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_python_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style == 'python':
insert_python_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the vestx repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The VESTX
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
| 36.497553 | 92 | 0.600679 |
7415bc832b540beb2d4de23e5036c732e0852aa2 | 16,102 | py | Python | icdar2013_eval.py | outsidercsy/PeleeNet_Detection_pytorch | 675b5c0bd75ff880e47d605df0dc944db0756873 | [
"MIT"
] | 3 | 2019-10-30T00:43:46.000Z | 2020-01-10T13:32:45.000Z | icdar2013_eval.py | outsidercsy/PeleeNet_Detection_pytorch | 675b5c0bd75ff880e47d605df0dc944db0756873 | [
"MIT"
] | null | null | null | icdar2013_eval.py | outsidercsy/PeleeNet_Detection_pytorch | 675b5c0bd75ff880e47d605df0dc944db0756873 | [
"MIT"
] | 2 | 2019-11-19T01:56:15.000Z | 2020-05-24T01:44:44.000Z | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import ICDAR2013Detection, ICDAR2013AnnotationTransform, ICDAR2013_ROOT####
# from data import VOC_CLASSES as labelmap
import torch.utils.data as data
# from ssd import build_ssd
from net import PeleeNet####
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
from data import icdar2013
# if sys.version_info[0] == 2:
# import xml.etree.cElementTree as ET
# else:
# import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/ssd300_mAP_77.43_v2.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--icdar2013_root', default=ICDAR2013_ROOT,####
help='Location of ICDAR2013 root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
print('trained_model is', args.trained_model)####
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
# annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
# imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
# imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets',
# 'Main', '{:s}.txt')
# YEAR = '2007'
# devkit_path = args.voc_root + 'VOC' + YEAR
dataset_mean = (104, 117, 123)
# set_type = 'test'
# class Timer(object):
# """A simple timer."""
# def __init__(self):
# self.total_time = 0.
# self.calls = 0
# self.start_time = 0.
# self.diff = 0.
# self.average_time = 0.
# def tic(self):
# # using time.time instead of time.clock because time time.clock
# # does not normalize for multithreading
# self.start_time = time.time()
# def toc(self, average=True):
# self.diff = time.time() - self.start_time
# self.total_time += self.diff
# self.calls += 1
# self.average_time = self.total_time / self.calls
# if average:
# return self.average_time
# else:
# return self.diff
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
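# A small illustrative check (toy numbers only):
#
#     rec = np.array([0.2, 0.4, 0.6])
#     prec = np.array([1.0, 0.8, 0.6])
#     voc_ap(rec, prec, use_07_metric=True)   # 11-point interpolated AP
#     voc_ap(rec, prec, use_07_metric=False)  # area under the PR curve
#
# The two conventions generally give slightly different values for the same
# precision/recall data.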
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
if any(lines) == 1:
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap, sorted_scores####
def test_net(save_folder, net, cuda, dataset, transform, top_k,####top_k is useless
im_size=304, thresh=0.05):####
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
top_bboxes = {}####pred
recs = {} ####gt
# timers
# _t = {'im_detect': Timer(), 'misc': Timer()}
# output_dir = get_output_dir('ssd300_120000', set_type)
# det_file = os.path.join(output_dir, 'detections.pkl')
f_imagesetfile=open('imagesetfile','w')
f_text_detection=open('text_detection','w')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
gt[:, 0] *= w
gt[:, 2] *= w
gt[:, 1] *= h
gt[:, 3] *= h
image_id = dataset.pull_image_file_name(i)####
print('processing',image_id)########
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
# _t['im_detect'].tic()
detections = net(x).data####detections is (num, self.num_classes, self.top_k, 5)
# detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
dets = detections[0, 1, :]####j to 1
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
# cls_dets = np.hstack((boxes.cpu().numpy(),
# scores[:, np.newaxis])).astype(np.float32,
# copy=False)
cls_dets = np.hstack(( scores[:, np.newaxis], ####first confidence then location
boxes.cpu().numpy() )).astype(np.float32, copy=False)
top_bboxes[image_id] = cls_dets
####debug
if i < 50:
debug = True
else:
debug = False
if debug:
imgpath = os.path.join('/workspace2/csy','data','ICDAR2013_dataset', 'Challenge2_Test_Task12_Images', '%s')
im_origin = cv2.imread(imgpath % image_id)
keep_inds = (top_bboxes[image_id][:, 0] > 0.18)
for score_and_bbox in top_bboxes[image_id][keep_inds]:
score = score_and_bbox[0].astype(np.float32)
bbox = score_and_bbox[1:].astype(np.int32)
cv2.rectangle(im_origin,
(bbox[0], bbox[1]),
(bbox[2], bbox[3]),
[0,0,255], 1
)
cv2.putText(im_origin, str(score),
(bbox[0], bbox[1]),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), thickness=1
)
debug_file = os.path.join(save_folder, image_id)
cv2.imwrite(debug_file,im_origin) #########
########
f_imagesetfile.write(image_id)
f_imagesetfile.write('\n')
box_num=top_bboxes[image_id].shape[0]
for box_ind in range(box_num):
            f_text_detection.write(image_id) #### write the image_id
            f_text_detection.write(' ')
            f_text_detection.write(str(top_bboxes[image_id][box_ind,0])) #### write the confidence first
            f_text_detection.write(' ')
            for i in range(1,5): #### then write the bbox coordinates
f_text_detection.write(str(top_bboxes[image_id][box_ind,i]))
f_text_detection.write(' ')
f_text_detection.write('\n')
objects=[]
for i in range(gt.shape[0]):
obj_struct = {}
obj_struct['name'] = 'text'
obj_struct['pose'] = ' '
obj_struct['truncated'] = '0'
obj_struct['difficult'] = '0'
obj_struct['bbox']=list(gt[i][0:4])
objects.append(obj_struct)
recs[image_id]=objects
f_imagesetfile.close()
f_text_detection.close()
with open('annots.pkl','wb') as f:
pickle.dump(recs, f)
rec, prec, ap, sorted_scores = voc_eval(detpath="{}_detection",####
annopath='',
imagesetfile='imagesetfile',
classname='text',
cachedir=os.getcwd(),
ovthresh=0.5,
use_07_metric=False)
print('rec,prec,ap=',rec, prec, ap)
F2_index = np.argmax(2*prec*rec/(prec+rec))
F2 = np.max(2*prec*rec/(prec+rec))
print('F2_corresponding score = ', sorted_scores[F2_index])
print('F2 coresponding rec prec = ', rec[F2_index], prec[F2_index])
print('F2=',F2)
if __name__ == '__main__':
# load net
# num_classes = 1+1 # +1 for background
net = PeleeNet('test', icdar2013) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
dataset = ICDAR2013Detection(args.icdar2013_root, 'test',
BaseTransform(304, dataset_mean),####
ICDAR2013AnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(304, dataset_mean), args.top_k, 304,####
thresh=args.confidence_threshold)
| 34.259574 | 120 | 0.550491 |
1cce0638b2241e788a43951c2ed4fa252db27334 | 3,554 | py | Python | wagtail/admin/widgets/datetime.py | wgarlock/wagtail | 1bfc13952f5ffc0e40a4435d15a5aefd70984430 | [
"BSD-3-Clause"
] | 2 | 2021-03-18T21:41:05.000Z | 2021-03-18T21:41:08.000Z | wagtail/admin/widgets/datetime.py | wgarlock/wagtail | 1bfc13952f5ffc0e40a4435d15a5aefd70984430 | [
"BSD-3-Clause"
] | 13 | 2015-05-08T12:27:10.000Z | 2020-01-23T14:45:57.000Z | wagtail/admin/widgets/datetime.py | wgarlock/wagtail | 1bfc13952f5ffc0e40a4435d15a5aefd70984430 | [
"BSD-3-Clause"
] | 1 | 2021-02-15T18:59:53.000Z | 2021-02-15T18:59:53.000Z | import json
from django import forms
from django.conf import settings
from django.forms import widgets
from django.utils.formats import get_format
from wagtail.admin.datetimepicker import to_datetimepicker_format
from wagtail.admin.staticfiles import versioned_static
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATETIME_FORMAT = '%Y-%m-%d %H:%M'
DEFAULT_TIME_FORMAT = '%H:%M'
class AdminDateInput(widgets.DateInput):
template_name = 'wagtailadmin/widgets/date_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATE_FORMAT', DEFAULT_DATE_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
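# A minimal usage sketch (the form and field names below are hypothetical,
# not part of Wagtail):
#
#     class EventForm(forms.Form):
#         starts_on = forms.DateField(widget=AdminDateInput())
#
# The widget attaches the admin date picker and falls back to the optional
# WAGTAIL_DATE_FORMAT setting when no explicit format is supplied.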
class AdminTimeInput(widgets.TimeInput):
template_name = 'wagtailadmin/widgets/time_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
if attrs:
default_attrs.update(attrs)
fmt = format
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_TIME_FORMAT', DEFAULT_TIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'format': self.js_format,
'formatTime': self.js_format
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
class AdminDateTimeInput(widgets.DateTimeInput):
template_name = 'wagtailadmin/widgets/datetime_input.html'
def __init__(self, attrs=None, format=None, time_format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATETIME_FORMAT', DEFAULT_DATETIME_FORMAT)
time_fmt = time_format
if time_fmt is None:
time_fmt = getattr(settings, 'WAGTAIL_TIME_FORMAT', DEFAULT_TIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
self.js_time_format = to_datetimepicker_format(time_fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
'formatTime': self.js_time_format
}
context['widget']['config_json'] = json.dumps(config)
return context
@property
def media(self):
return forms.Media(js=[
versioned_static('wagtailadmin/js/date-time-chooser.js'),
])
| 31.451327 | 87 | 0.647721 |
d0d306fe5fb3c08912755f0024976b2f2c967f53 | 1,400 | py | Python | radloggerpy/tests/cli/device/test_device.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/cli/device/test_device.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | radloggerpy/tests/cli/device/test_device.py | Dantali0n/RadLoggerPy | c630ce730519001ee39fb3a02dd3652943a23067 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
# Copyright (c) 2020 Dantali0n
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from radloggerpy.cli.v1.device import device
from radloggerpy.tests import base
from radloggerpy.types.device_interfaces import INTERFACE_CHOICES
class TestDeviceCommand(base.TestCase):
class DevCommandExp(device.DeviceCommand):
_arguments = None
@property
def arguments(self):
if self._arguments is None:
self._arguments = super().arguments
return self._arguments
def setUp(self):
super(TestDeviceCommand, self).setUp()
def test_add_interfaces(self):
dev_command = TestDeviceCommand.DevCommandExp()
dev_command._add_interfaces()
self.assertItemsEqual(
INTERFACE_CHOICES.values(), dev_command.arguments[
'--interface'].kwargs()['choices']
)
| 30.434783 | 75 | 0.699286 |
232688ba3287c238bbdee33b1efb3faac73f2a7f | 1,678 | py | Python | examples/ElasticsearchDomain.py | mng12689/troposphere | f0c6c7be8963de2ad3537b1864b62b02a7b486fc | [
"BSD-2-Clause"
] | 1 | 2021-04-03T22:24:36.000Z | 2021-04-03T22:24:36.000Z | examples/ElasticsearchDomain.py | cartermeyers/troposphere | 4b42fa0d65f73cec28184b5349aa198fb8ee5b2e | [
"BSD-2-Clause"
] | null | null | null | examples/ElasticsearchDomain.py | cartermeyers/troposphere | 4b42fa0d65f73cec28184b5349aa198fb8ee5b2e | [
"BSD-2-Clause"
] | 5 | 2020-05-10T13:50:32.000Z | 2021-09-09T09:06:54.000Z | # Converted from Elasticsearch Domain example located at:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#d0e51519
from troposphere import Template, constants
from troposphere.elasticsearch import Domain, EBSOptions, VPCOptions
from troposphere.elasticsearch import ElasticsearchClusterConfig
from troposphere.elasticsearch import SnapshotOptions
templ = Template()
templ.set_description('Elasticsearch Domain example')
es_domain = templ.add_resource(Domain(
'ElasticsearchDomain',
DomainName="ExampleElasticsearchDomain",
ElasticsearchClusterConfig=ElasticsearchClusterConfig(
DedicatedMasterEnabled=True,
InstanceCount=2,
ZoneAwarenessEnabled=True,
InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterCount=3
),
EBSOptions=EBSOptions(EBSEnabled=True,
Iops=0,
VolumeSize=20,
VolumeType="gp2"),
SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
AccessPolicies={'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Principal': {
'AWS': '*'
},
'Action': 'es:*',
'Resource': '*'
}]},
AdvancedOptions={"rest.action.multi.allow_explicit_index": True},
VPCOptions=VPCOptions(
SubnetIds=["subnet-4f2bb123"],
SecurityGroupIds=["sg-04cf048c"]
)
))
print(templ.to_json())
| 35.702128 | 111 | 0.634684 |
a120ab8b7dfcba7c55fd79fd56645e4b95e5c202 | 6,707 | py | Python | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v16r_1_00b/brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import sec_path
class show_mpls_lsp_sec_path_info(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /brocade_mpls_rpc/show-mpls-bypass-bypass-lsp-extensive/output/bypass-lsp/show-mpls-lsp-extensive-info/show-mpls-lsp-sec-path-info. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__sec_path',)
_yang_name = 'show-mpls-lsp-sec-path-info'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__sec_path = YANGDynClass(base=YANGListType("lsp_sec_path_path_name",sec_path.sec_path, yang_name="sec-path", rest_name="sec-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-sec-path-path-name', extensions=None), is_container='list', yang_name="sec-path", rest_name="sec-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_mpls_rpc', u'show-mpls-bypass-bypass-lsp-extensive', u'output', u'bypass-lsp', u'show-mpls-lsp-extensive-info', u'show-mpls-lsp-sec-path-info']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'show-mpls-bypass-bypass-lsp-extensive', u'output', u'bypass-lsp']
def _get_sec_path(self):
"""
Getter method for sec_path, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path (list)
"""
return self.__sec_path
def _set_sec_path(self, v, load=False):
"""
Setter method for sec_path, mapped from YANG variable /brocade_mpls_rpc/show_mpls_bypass_bypass_lsp_extensive/output/bypass_lsp/show_mpls_lsp_extensive_info/show_mpls_lsp_sec_path_info/sec_path (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sec_path is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sec_path() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("lsp_sec_path_path_name",sec_path.sec_path, yang_name="sec-path", rest_name="sec-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-sec-path-path-name', extensions=None), is_container='list', yang_name="sec-path", rest_name="sec-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sec_path must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("lsp_sec_path_path_name",sec_path.sec_path, yang_name="sec-path", rest_name="sec-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-sec-path-path-name', extensions=None), is_container='list', yang_name="sec-path", rest_name="sec-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
})
self.__sec_path = t
if hasattr(self, '_set'):
self._set()
def _unset_sec_path(self):
self.__sec_path = YANGDynClass(base=YANGListType("lsp_sec_path_path_name",sec_path.sec_path, yang_name="sec-path", rest_name="sec-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='lsp-sec-path-path-name', extensions=None), is_container='list', yang_name="sec-path", rest_name="sec-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
sec_path = __builtin__.property(_get_sec_path, _set_sec_path)
_pyangbind_elements = {'sec_path': sec_path, }
| 54.08871 | 582 | 0.733115 |
58e4fdd8a631a0cee39152ec20f3987a801b5de8 | 2,759 | py | Python | frappe/email/test_smtp.py | ssuda777/frappe | d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e | [
"MIT"
] | 3 | 2019-01-01T10:46:15.000Z | 2019-01-01T10:46:18.000Z | frappe/email/test_smtp.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 5 | 2021-04-28T06:55:26.000Z | 2022-02-10T07:59:06.000Z | frappe/email/test_smtp.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 2 | 2021-08-24T00:57:20.000Z | 2021-08-24T01:30:40.000Z | # Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# License: The MIT License
import unittest
import frappe
from frappe.email.smtp import SMTPServer
from frappe.email.doctype.email_account.email_account import EmailAccount
class TestSMTP(unittest.TestCase):
def test_smtp_ssl_session(self):
for port in [None, 0, 465, "465"]:
make_server(port, 1, 0)
def test_smtp_tls_session(self):
for port in [None, 0, 587, "587"]:
make_server(port, 0, 1)
def test_get_email_account(self):
existing_email_accounts = frappe.get_all("Email Account", fields = ["name", "enable_outgoing", "default_outgoing", "append_to"])
unset_details = {
"enable_outgoing": 0,
"default_outgoing": 0,
"append_to": None
}
for email_account in existing_email_accounts:
frappe.db.set_value('Email Account', email_account['name'], unset_details)
# remove mail_server config so that test@example.com is not created
mail_server = frappe.conf.get('mail_server')
del frappe.conf['mail_server']
frappe.local.outgoing_email_account = {}
		frappe.local.outgoing_email_account = {}
create_email_account(email_id="default_outgoing_enabled@gmail.com", password="password", enable_outgoing = 1, default_outgoing=1)
self.assertEqual(EmailAccount.find_outgoing().email_id, "default_outgoing_enabled@gmail.com")
frappe.local.outgoing_email_account = {}
# highest preference given to email account with append_to matching
create_email_account(email_id="append_to@gmail.com", password="password", enable_outgoing = 1, default_outgoing=1, append_to="Blog Post")
self.assertEqual(EmailAccount.find_outgoing(match_by_doctype="Blog Post").email_id, "append_to@gmail.com")
# add back the mail_server
frappe.conf['mail_server'] = mail_server
for email_account in existing_email_accounts:
set_details = {
"enable_outgoing": email_account['enable_outgoing'],
"default_outgoing": email_account['default_outgoing'],
"append_to": email_account['append_to']
}
frappe.db.set_value('Email Account', email_account['name'], set_details)
def create_email_account(email_id, password, enable_outgoing, default_outgoing=0, append_to=None):
email_dict = {
"email_id": email_id,
"passsword": password,
"enable_outgoing":enable_outgoing ,
"default_outgoing":default_outgoing ,
"enable_incoming": 1,
"append_to":append_to,
"is_dummy_password": 1,
"smtp_server": "localhost"
}
email_account = frappe.new_doc('Email Account')
email_account.update(email_dict)
email_account.save()
def make_server(port, ssl, tls):
server = SMTPServer(
server = "smtp.gmail.com",
port = port,
use_ssl = ssl,
use_tls = tls
)
server.session
| 34.924051 | 139 | 0.757521 |
2af4a0f962f40710e849feb05b869999f4263c78 | 3,075 | py | Python | lib/python3.6/site-packages/conda/_vendor/auxlib/path.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | 2 | 2021-11-28T12:47:01.000Z | 2021-12-04T16:58:16.000Z | lib/python3.6/site-packages/conda/_vendor/auxlib/path.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | 2 | 2021-12-04T12:51:07.000Z | 2021-12-04T16:49:18.000Z | lib/python3.6/site-packages/conda/_vendor/auxlib/path.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from distutils.sysconfig import get_python_lib
from logging import getLogger
from os import chdir, getcwd
from os.path import abspath, dirname, exists, expanduser, expandvars, isdir, isfile, join, sep
try:
import pkg_resources
except ImportError:
pkg_resources = None
import sys
log = getLogger(__name__)
ROOT_PATH = abspath(sep)
def site_packages_paths():
if hasattr(sys, 'real_prefix'):
# in a virtualenv
log.debug('searching virtualenv')
return tuple(p for p in sys.path if p.endswith('site-packages'))
else:
# not in a virtualenv
log.debug('searching outside virtualenv') # pragma: no cover
        return (get_python_lib(), )  # pragma: no cover
class PackageFile(object):
def __init__(self, file_path, package_name):
self.file_path = file_path
self.package_name = package_name
def __enter__(self):
self.file_handle = open_package_file(self.file_path, self.package_name)
return self.file_handle
def __exit__(self, *args):
self.file_handle.close()
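# A short usage sketch (illustrative; 'myapp' and the relative path are made-up names):
# PackageFile resolves a file from disk or from package resources and is used as a
# context manager, e.g.
#
#     with PackageFile('data/defaults.yml', 'myapp') as fh:
#         raw = fh.read()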
class ChangePath(object):
def __init__(self, path):
self.dirpath = dirname(path) if isfile(path) else path
if not isdir(self.dirpath):
raise IOError('File or directory not found: {0}'.format(path))
def __enter__(self):
self.cwd = getcwd()
chdir(self.dirpath)
return self
def __exit__(self, *args):
chdir(self.cwd)
def open_package_file(file_path, package_name):
file_path = expand(file_path)
# look for file at relative path
if exists(file_path):
log.info("found real file {0}".format(file_path))
return open(file_path)
# look for file in package resources
if (package_name and pkg_resources is not None and
pkg_resources.resource_exists(package_name, file_path)):
log.info("found package resource file {0} for package {1}".format(file_path, package_name))
return pkg_resources.resource_stream(package_name, file_path)
# look for file in site-packages
package_path = find_file_in_site_packages(file_path, package_name)
if package_path:
return open(package_path) # pragma: no cover
msg = "file for module [{0}] cannot be found at path {1}".format(package_name, file_path)
log.error(msg)
raise IOError(msg)
def find_file_in_site_packages(file_path, package_name):
package_path = package_name.replace('.', '/')
for site_packages_path in site_packages_paths():
test_path = join(site_packages_path, package_path, file_path)
if exists(test_path):
log.info("found site-package file {0} for package {1}".format(file_path, package_name))
return test_path
else:
log.error("No file found at {0}.".format(test_path))
return None
def expand(path):
return abspath(expanduser(expandvars(path)))
def absdirname(path):
return abspath(expanduser(dirname(path)))
| 30.147059 | 99 | 0.685854 |
4e19762657bd7c55dc69fc95d9e901f355dd9770 | 2,654 | py | Python | test/test_searchuniversalabsolute_api.py | yumimobi/graylog.py | 3118f4a49c91c2cbbd660523b0ab99e56fbfd861 | [
"Apache-2.0"
] | 10 | 2016-09-27T08:13:22.000Z | 2018-09-04T13:15:42.000Z | test/test_searchuniversalabsolute_api.py | yumimobi/graylog.py | 3118f4a49c91c2cbbd660523b0ab99e56fbfd861 | [
"Apache-2.0"
] | 1 | 2019-08-28T16:16:09.000Z | 2019-08-28T16:16:09.000Z | test/test_searchuniversalabsolute_api.py | yumimobi/graylog.py | 3118f4a49c91c2cbbd660523b0ab99e56fbfd861 | [
"Apache-2.0"
] | 5 | 2016-11-03T07:45:18.000Z | 2021-08-19T14:21:49.000Z | # coding: utf-8
"""
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1.1+01d50e5
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import graylog
from graylog.rest import ApiException
from graylog.apis.searchuniversalabsolute_api import SearchuniversalabsoluteApi
class TestSearchuniversalabsoluteApi(unittest.TestCase):
""" SearchuniversalabsoluteApi unit test stubs """
def setUp(self):
self.api = graylog.apis.searchuniversalabsolute_api.SearchuniversalabsoluteApi()
def tearDown(self):
pass
def test_export_search_absolute_chunked(self):
"""
Test case for export_search_absolute_chunked
Export message search with absolute timerange.
"""
pass
def test_field_histogram_absolute(self):
"""
Test case for field_histogram_absolute
Field value histogram of a query using an absolute timerange.
"""
pass
def test_histogram_absolute(self):
"""
Test case for histogram_absolute
Datetime histogram of a query using an absolute timerange.
"""
pass
def test_search_absolute(self):
"""
Test case for search_absolute
Message search with absolute timerange.
"""
pass
def test_stats_absolute(self):
"""
Test case for stats_absolute
Field statistics for a query using an absolute timerange.
"""
pass
def test_terms_absolute(self):
"""
Test case for terms_absolute
Most common field terms of a query using an absolute timerange.
"""
pass
def test_terms_stats_absolute(self):
"""
Test case for terms_stats_absolute
Ordered field terms of a query computed on another field using an absolute timerange.
"""
pass
if __name__ == '__main__':
unittest.main()
| 25.76699 | 104 | 0.678598 |
a3cd7ad708ec564f8cf3e81a7edea7fa57751671 | 429 | py | Python | urllib-learn/encode-quote.py | pfcstyle/spider-learn | dc53a1de03ba27d5f785cb558571b40ee61f3052 | [
"Apache-2.0"
] | 1 | 2020-05-30T15:33:05.000Z | 2020-05-30T15:33:05.000Z | urllib-learn/encode-quote.py | pfcstyle/spider-learn | dc53a1de03ba27d5f785cb558571b40ee61f3052 | [
"Apache-2.0"
] | null | null | null | urllib-learn/encode-quote.py | pfcstyle/spider-learn | dc53a1de03ba27d5f785cb558571b40ee61f3052 | [
"Apache-2.0"
] | null | null | null | import urllib.request
import urllib.parse
url = 'https://movie.douban.com/'
data = {
'value': 'true',
}
# Encode the request data
data = urllib.parse.urlencode(data).encode('utf-8')
req = urllib.request.urlopen(url, data=data)
# Handling Chinese characters in a doubly percent-encoded URL
url = '%2523%25E7%25BC%2596%25E7%25A8%258B%2523'
# First decode
first = urllib.parse.unquote(url)
print(first)
# Output: '%23%E7%BC%96%E7%A8%8B%23'
# Second decode
second = urllib.parse.unquote(first)
print(second)
# Output: '#编程#' | 20.428571 | 51 | 0.689977 |
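# A companion sketch (illustrative, not in the original script): the doubly encoded
# string above is simply the result of percent-encoding twice, so quote/unquote are
# symmetric here.
#
#     once = urllib.parse.quote('#编程#')   # '%23%E7%BC%96%E7%A8%8B%23'
#     twice = urllib.parse.quote(once)      # '%2523%25E7%25BC%2596%25E7%25A8%258B%2523'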
2e5c938ecfdb67905a66b3fd3103096f06ffa52a | 2,789 | py | Python | pyro/poutine/plate_messenger.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | 1 | 2021-02-08T22:53:23.000Z | 2021-02-08T22:53:23.000Z | pyro/poutine/plate_messenger.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | null | null | null | pyro/poutine/plate_messenger.py | futurewarning/pyro | 005032f10099188fea86f63b6baa46a27867983f | [
"Apache-2.0"
] | 1 | 2021-04-11T21:37:25.000Z | 2021-04-11T21:37:25.000Z | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from contextlib import contextmanager
from .broadcast_messenger import BroadcastMessenger
from .messenger import block_messengers
from .subsample_messenger import SubsampleMessenger
class PlateMessenger(SubsampleMessenger):
"""
Swiss army knife of broadcasting amazingness:
combines shape inference, independence annotation, and subsampling
"""
def _process_message(self, msg):
super()._process_message(msg)
return BroadcastMessenger._pyro_sample(msg)
def __enter__(self):
super().__enter__()
if self._vectorized and self._indices is not None:
return self.indices
return None
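# A minimal usage sketch (assumes the usual ``pyro.plate`` front-end for this
# messenger and made-up site/variable names; not part of the original module):
#
#     with pyro.plate("data", 100, subsample_size=10) as idx:
#         # idx is the subsample index tensor returned by __enter__ above
#         pyro.sample("obs", dist.Normal(0., 1.), obs=data[idx])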
@contextmanager
def block_plate(name=None, dim=None, *, strict=True):
"""
EXPERIMENTAL Context manager to temporarily block a single enclosing plate.
This is useful for sampling auxiliary variables or lazily sampling global
variables that are needed in a plated context. For example the following
models are equivalent:
Example::
def model_1(data):
loc = pyro.sample("loc", dist.Normal(0, 1))
with pyro.plate("data", len(data)):
with block_plate("data"):
scale = pyro.sample("scale", dist.LogNormal(0, 1))
pyro.sample("x", dist.Normal(loc, scale))
def model_2(data):
loc = pyro.sample("loc", dist.Normal(0, 1))
scale = pyro.sample("scale", dist.LogNormal(0, 1))
with pyro.plate("data", len(data)):
pyro.sample("x", dist.Normal(loc, scale))
:param str name: Optional name of plate to match.
:param int dim: Optional dim of plate to match. Must be negative.
:param bool strict: Whether to error if no matching plate is found.
Defaults to True.
:raises: ValueError if no enclosing plate was found and ``strict=True``.
"""
if (name is not None) == (dim is not None):
raise ValueError("Exactly one of name,dim must be specified")
if name is not None:
assert isinstance(name, str)
if dim is not None:
assert isinstance(dim, int)
assert dim < 0
def predicate(messenger):
if not isinstance(messenger, PlateMessenger):
return False
if name is not None:
return messenger.name == name
if dim is not None:
return messenger.dim == dim
with block_messengers(predicate) as matches:
if strict and len(matches) != 1:
raise ValueError(f"block_plate matched {len(matches)} messengers. "
"Try either removing the block_plate or "
"setting strict=False.")
yield
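# A further sketch (illustrative; ``model_3`` is a made-up name): the enclosing plate
# can also be matched by dim instead of name, and strict=False turns the block into a
# no-op when no plate matches.
#
#     def model_3(data):
#         with pyro.plate("data", len(data), dim=-1):
#             with block_plate(dim=-1, strict=False):
#                 scale = pyro.sample("scale", dist.LogNormal(0, 1))
#             pyro.sample("x", dist.Normal(0, scale))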
| 35.303797 | 79 | 0.637863 |
37a9ed5cc0de554ff492d80f336382e0c97de93f | 3,321 | py | Python | nova/api/openstack/compute/schemas/v3/cells.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/api/openstack/compute/schemas/v3/cells.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z | nova/api/openstack/compute/schemas/v3/cells.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'cell': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'type': {
'type': 'string',
'enum': ['parent', 'child'],
},
# NOTE: In unparse_transport_url(), a url consists of the
# following parameters:
# "qpid://<username>:<password>@<rpc_host>:<rpc_port>/"
# or
# "rabiit://<username>:<password>@<rpc_host>:<rpc_port>/"
# Then the url is stored into transport_url of cells table
# which is defined with String(255).
'username': {
'type': 'string', 'maxLength': 255,
'pattern': '^[a-zA-Z0-9-_]*$'
},
'password': {
# Allow to specify any string for strong password.
'type': 'string', 'maxLength': 255,
},
'rpc_host': parameter_types.hostname_or_ip_address,
'rpc_port': parameter_types.tcp_udp_port,
'rpc_virtual_host': parameter_types.hostname_or_ip_address,
},
'required': ['name'],
'additionalProperties': False,
},
},
'required': ['cell'],
'additionalProperties': False,
}
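# A request body that validates against the schema above might look like this
# (illustrative values only; names, credentials and addresses are made up):
#
#     {
#         "cell": {
#             "name": "child-cell-1",
#             "type": "child",
#             "username": "cell_user",
#             "password": "secret",
#             "rpc_host": "10.0.0.5",
#             "rpc_port": 5672,
#             "rpc_virtual_host": "mq.example.org"
#         }
#     }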
update = {
'type': 'object',
'properties': {
'cell': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'type': {
'type': 'string',
'enum': ['parent', 'child'],
},
'username': {
'type': 'string', 'maxLength': 255,
'pattern': '^[a-zA-Z0-9-_]*$'
},
'password': {
'type': 'string', 'maxLength': 255,
},
'rpc_host': parameter_types.hostname_or_ip_address,
'rpc_port': parameter_types.tcp_udp_port,
'rpc_virtual_host': parameter_types.hostname_or_ip_address,
},
'additionalProperties': False,
},
},
'required': ['cell'],
'additionalProperties': False,
}
sync_instances = {
'type': 'object',
'properties': {
'project_id': parameter_types.project_id,
'deleted': parameter_types.boolean,
'updated_since': {
'type': 'string',
'format': 'date-time',
},
},
'additionalProperties': False,
}
| 33.21 | 78 | 0.502559 |
462327c8f588a5b263b1d2f59cc669c310460329 | 29,532 | py | Python | python/ccxt/okcoinusd.py | sleepingAnt/ccxt | 3c8d62f48726d217e06b252800ca36950208bc47 | [
"MIT"
] | 1 | 2018-07-06T08:14:13.000Z | 2018-07-06T08:14:13.000Z | python/ccxt/okcoinusd.py | sleepingAnt/ccxt | 3c8d62f48726d217e06b252800ca36950208bc47 | [
"MIT"
] | null | null | null | python/ccxt/okcoinusd.py | sleepingAnt/ccxt | 3c8d62f48726d217e06b252800ca36950208bc47 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class okcoinusd (Exchange):
def describe(self):
return self.deep_extend(super(okcoinusd, self).describe(), {
'id': 'okcoinusd',
'name': 'OKCoin USD',
'countries': ['CN', 'US'],
'version': 'v1',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'CORS': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'withdraw': True,
'futures': False,
},
'extension': '.do', # appended to endpoint URL
'timeframes': {
'1m': '1min',
'3m': '3min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': '1day',
'3d': '3day',
'1w': '1week',
},
'api': {
'web': {
'get': [
'spot/markets/currencies',
'spot/markets/products',
],
},
'public': {
'get': [
'depth',
'exchange_rate',
'future_depth',
'future_estimated_price',
'future_hold_amount',
'future_index',
'future_kline',
'future_price_limit',
'future_ticker',
'future_trades',
'kline',
'otcs',
'ticker',
'tickers',
'trades',
],
},
'private': {
'post': [
'account_records',
'batch_trade',
'borrow_money',
'borrow_order_info',
'borrows_info',
'cancel_borrow',
'cancel_order',
'cancel_otc_order',
'cancel_withdraw',
'funds_transfer',
'future_batch_trade',
'future_cancel',
'future_devolve',
'future_explosive',
'future_order_info',
'future_orders_info',
'future_position',
'future_position_4fix',
'future_trade',
'future_trades_history',
'future_userinfo',
'future_userinfo_4fix',
'lend_depth',
'order_fee',
'order_history',
'order_info',
'orders_info',
'otc_order_history',
'otc_order_info',
'repayment',
'submit_otc_order',
'trade',
'trade_history',
'trade_otc_order',
'wallet_info',
'withdraw',
'withdraw_info',
'unrepayments_info',
'userinfo',
],
},
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766791-89ffb502-5ee5-11e7-8a5b-c5950b68ac65.jpg',
'api': {
'web': 'https://www.okcoin.com/v2',
'public': 'https://www.okcoin.com/api',
'private': 'https://www.okcoin.com/api',
},
'www': 'https://www.okcoin.com',
'doc': [
'https://www.okcoin.com/rest_getStarted.html',
'https://www.npmjs.com/package/okcoin.com',
],
},
'fees': {
'trading': {
'taker': 0.002,
'maker': 0.002,
},
},
'exceptions': {
'1009': OrderNotFound, # for spot markets, cancelling closed order
'1051': OrderNotFound, # for spot markets, cancelling "just closed" order
'1019': OrderNotFound, # order closed?
'20015': OrderNotFound, # for future markets
'1013': InvalidOrder, # no contract type(PR-1101)
'1027': InvalidOrder, # createLimitBuyOrder(symbol, 0, 0): Incorrect parameter may exceeded limits
'1002': InsufficientFunds, # "The transaction amount exceed the balance"
'1050': InvalidOrder, # returned when trying to cancel an order that was filled or canceled previously
'10000': ExchangeError, # createLimitBuyOrder(symbol, None, None)
'10005': AuthenticationError, # bad apiKey
'10008': ExchangeError, # Illegal URL parameter
},
'options': {
'marketBuyPrice': False,
'defaultContractType': 'this_week', # next_week, quarter
'warnOnFetchOHLCVLimitArgument': True,
'fiats': ['USD', 'CNY'],
'futures': {
'BCH': True,
'BTC': True,
'BTG': True,
'EOS': True,
'ETC': True,
'ETH': True,
'LTC': True,
'NEO': True,
'QTUM': True,
'USDT': True,
'XUC': True,
},
},
})
def fetch_markets(self):
response = self.webGetSpotMarketsProducts()
markets = response['data']
result = []
for i in range(0, len(markets)):
id = markets[i]['symbol']
baseId, quoteId = id.split('_')
baseIdUppercase = baseId.upper()
quoteIdUppercase = quoteId.upper()
base = self.common_currency_code(baseIdUppercase)
quote = self.common_currency_code(quoteIdUppercase)
symbol = base + '/' + quote
precision = {
'amount': markets[i]['maxSizeDigit'],
'price': markets[i]['maxPriceDigit'],
}
lot = math.pow(10, -precision['amount'])
minAmount = markets[i]['minTradeSize']
minPrice = math.pow(10, -precision['price'])
active = (markets[i]['online'] != 0)
baseNumericId = markets[i]['baseCurrency']
quoteNumericId = markets[i]['quoteCurrency']
market = self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'baseNumericId': baseNumericId,
'quoteNumericId': quoteNumericId,
'info': markets[i],
'type': 'spot',
'spot': True,
'future': False,
'lot': lot,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': None,
},
'cost': {
'min': minAmount * minPrice,
'max': None,
},
},
})
result.append(market)
if (self.has['futures']) and(market['base'] in list(self.options['futures'].keys())):
fiats = self.options['fiats']
for j in range(0, len(fiats)):
fiat = fiats[j]
lowercaseFiat = fiat.lower()
result.append(self.extend(market, {
'quote': fiat,
'symbol': market['base'] + '/' + fiat,
'id': market['base'].lower() + '_' + lowercaseFiat,
'quoteId': lowercaseFiat,
'type': 'future',
'spot': False,
'future': True,
}))
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
if market['future']:
method += 'Future'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Depth'
orderbook = getattr(self, method)(self.extend(request, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = ticker['timestamp']
symbol = None
if market is None:
if 'symbol' in ticker:
marketId = ticker['symbol']
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if market['future']:
method += 'Future'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Ticker'
response = getattr(self, method)(self.extend(request, params))
ticker = self.safe_value(response, 'ticker')
if ticker is None:
raise ExchangeError(self.id + ' fetchTicker returned an empty response: ' + self.json(response))
timestamp = self.safe_integer(response, 'date')
if timestamp is not None:
timestamp *= 1000
ticker = self.extend(ticker, {'timestamp': timestamp})
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
return {
'info': trade,
'timestamp': trade['date_ms'],
'datetime': self.iso8601(trade['date_ms']),
'symbol': symbol,
'id': str(trade['tid']),
'order': None,
'type': None,
'side': trade['type'],
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'amount'),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if market['future']:
method += 'Future'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Trades'
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
return [
ohlcv[0], # timestamp
ohlcv[1], # Open
ohlcv[2], # High
ohlcv[3], # Low
ohlcv[4], # Close
# ohlcv[5], # quote volume
# ohlcv[6], # base volume
ohlcv[volumeIndex], # okex will return base volume in the 7th element for future markets
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
}
if market['future']:
method += 'Future'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Kline'
if limit is not None:
if self.options['warnOnFetchOHLCVLimitArgument']:
                raise ExchangeError(self.id + ' fetchOHLCV counts "limit" candles from current time backwards, therefore the "limit" argument for ' + self.id + ' is disabled. Set ' + self.id + '.options["warnOnFetchOHLCVLimitArgument"] = False to suppress this warning message.')
request['size'] = int(limit) # max is 1440 candles
if since is not None:
request['since'] = since
else:
request['since'] = self.milliseconds() - 86400000 # last 24 hours
response = getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserinfo()
balances = response['info']['funds']
result = {'info': response}
ids = list(self.currencies_by_id.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.currencies_by_id[id]['code']
account = self.account()
account['free'] = self.safe_float(balances['free'], id, 0.0)
account['used'] = self.safe_float(balances['freezed'], id, 0.0)
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
method = 'privatePost'
order = {
'symbol': market['id'],
'type': side,
}
if market['future']:
method += 'Future'
order = self.extend(order, {
                'contract_type': self.options['defaultContractType'],  # this_week, next_week, quarter
'match_price': 0, # match best counter party price? 0 or 1, ignores price if 1
'lever_rate': 10, # leverage rate value: 10 or 20(10 by default)
'price': price,
'amount': amount,
})
else:
if type == 'limit':
order['price'] = price
order['amount'] = amount
else:
order['type'] += '_market'
if side == 'buy':
if self.options['marketBuyPrice']:
if price is None:
# eslint-disable-next-line quotes
raise ExchangeError(self.id + " market buy orders require a price argument(the amount you want to spend or the cost of the order) when self.options['marketBuyPrice'] is True.")
order['price'] = price
else:
order['price'] = self.safe_float(params, 'cost')
if not order['price']:
# eslint-disable-next-line quotes
raise ExchangeError(self.id + " market buy orders require an additional cost parameter, cost = price * amount. If you want to pass the cost of the market order(the amount you want to spend) in the price argument(the default " + self.id + " behaviour), set self.options['marketBuyPrice'] = True. It will effectively suppress self warning exception as well.")
else:
order['amount'] = amount
params = self.omit(params, 'cost')
method += 'Trade'
response = getattr(self, method)(self.extend(order, params))
timestamp = self.milliseconds()
return {
'info': response,
'id': str(response['order_id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
}
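    # A usage sketch for the method above (illustrative values; ``exchange`` is an
    # instantiated okcoinusd client, not defined here):
    #
    #     exchange.create_order('BTC/USD', 'limit', 'buy', 0.01, 9500.0)
    #     # market buys take the total cost via params by default:
    #     exchange.create_order('BTC/USD', 'market', 'buy', None, None, {'cost': 100})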
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
method = 'privatePost'
if market['future']:
method += 'FutureCancel'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
else:
method += 'CancelOrder'
response = getattr(self, method)(self.extend(request, params))
return response
def parse_order_status(self, status):
if status == -1:
return 'canceled'
if status == 0:
return 'open'
if status == 1:
return 'open'
if status == 2:
return 'closed'
if status == 3:
return 'open'
if status == 4:
return 'canceled'
return status
def parse_order_side(self, side):
if side == 1:
return 'buy' # open long position
if side == 2:
return 'sell' # open short position
if side == 3:
return 'sell' # liquidate long position
if side == 4:
return 'buy' # liquidate short position
return side
def parse_order(self, order, market=None):
side = None
type = None
if 'type' in order:
if (order['type'] == 'buy') or (order['type'] == 'sell'):
side = order['type']
type = 'limit'
elif order['type'] == 'buy_market':
side = 'buy'
type = 'market'
elif order['type'] == 'sell_market':
side = 'sell'
type = 'market'
else:
side = self.parse_order_side(order['type'])
if ('contract_name' in list(order.keys())) or ('lever_rate' in list(order.keys())):
type = 'margin'
status = self.parse_order_status(order['status'])
symbol = None
if market is None:
if 'symbol' in order:
if order['symbol'] in self.markets_by_id:
market = self.markets_by_id[order['symbol']]
if market:
symbol = market['symbol']
timestamp = None
createDateField = self.get_create_date_field()
if createDateField in order:
timestamp = order[createDateField]
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
remaining = amount - filled
if type == 'market':
remaining = 0
average = self.safe_float(order, 'avg_price')
# https://github.com/ccxt/ccxt/issues/2452
average = self.safe_float(order, 'price_avg', average)
cost = average * filled
result = {
'info': order,
'id': str(order['order_id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': order['price'],
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
def get_create_date_field(self):
# needed for derived exchanges
# allcoin typo create_data instead of create_date
return 'create_date'
def get_orders_field(self):
# needed for derived exchanges
# allcoin typo order instead of orders(expected based on their API docs)
return 'orders'
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder requires a symbol parameter')
self.load_markets()
market = self.market(symbol)
method = 'privatePost'
request = {
'order_id': id,
'symbol': market['id'],
# 'status': 0, # 0 for unfilled orders, 1 for filled orders
# 'current_page': 1, # current page number
# 'page_length': 200, # number of orders returned per page, maximum 200
}
if market['future']:
method += 'Future'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'OrderInfo'
response = getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
numOrders = len(response[ordersField])
if numOrders > 0:
return self.parse_order(response[ordersField][0])
raise OrderNotFound(self.id + ' order ' + id + ' not found')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol parameter')
self.load_markets()
market = self.market(symbol)
method = 'privatePost'
request = {
'symbol': market['id'],
}
order_id_in_params = ('order_id' in list(params.keys()))
if market['future']:
method += 'FutureOrdersInfo'
            request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
if not order_id_in_params:
raise ExchangeError(self.id + ' fetchOrders() requires order_id param for futures market ' + symbol + '(a string of one or more order ids, comma-separated)')
else:
status = None
if 'type' in params:
status = params['type']
elif 'status' in params:
status = params['status']
else:
name = 'type' if order_id_in_params else 'status'
raise ExchangeError(self.id + ' fetchOrders() requires ' + name + ' param for spot market ' + symbol + '(0 - for unfilled orders, 1 - for filled/canceled orders)')
if order_id_in_params:
method += 'OrdersInfo'
request = self.extend(request, {
'type': status,
'order_id': params['order_id'],
})
else:
method += 'OrderHistory'
request = self.extend(request, {
'status': status,
'current_page': 1, # current page number
'page_length': 200, # number of orders returned per page, maximum 200
})
params = self.omit(params, ['type', 'status'])
response = getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
return self.parse_orders(response[ordersField], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
open = 0 # 0 for unfilled orders, 1 for filled orders
return self.fetch_orders(symbol, since, limit, self.extend({
'status': open,
}, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
closed = 1 # 0 for unfilled orders, 1 for filled orders
orders = self.fetch_orders(symbol, since, limit, self.extend({
'status': closed,
}, params))
return orders
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
# if amount < 0.01:
# raise ExchangeError(self.id + ' withdraw() requires amount > 0.01')
# for some reason they require to supply a pair of currencies for withdrawing one currency
currencyId = currency['id'] + '_usd'
request = {
'symbol': currencyId,
'withdraw_address': address,
'withdraw_amount': amount,
'target': 'address', # or 'okcn', 'okcom', 'okex'
}
query = params
if 'chargefee' in query:
request['chargefee'] = query['chargefee']
query = self.omit(query, 'chargefee')
else:
raise ExchangeError(self.id + ' withdraw() requires a `chargefee` parameter')
if self.password:
request['trade_pwd'] = self.password
elif 'password' in query:
request['trade_pwd'] = query['password']
query = self.omit(query, 'password')
elif 'trade_pwd' in query:
request['trade_pwd'] = query['trade_pwd']
query = self.omit(query, 'trade_pwd')
passwordInRequest = ('trade_pwd' in list(request.keys()))
if not passwordInRequest:
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.privatePostWithdraw(self.extend(request, query))
return {
'info': response,
'id': self.safe_string(response, 'withdraw_id'),
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api != 'web':
url += self.version + '/'
url += path
if api != 'web':
url += self.extension
if api == 'private':
self.check_required_credentials()
query = self.keysort(self.extend({
'api_key': self.apiKey,
}, params))
# secret key must be at the end of query
queryString = self.rawencode(query) + '&secret_key=' + self.secret
query['sign'] = self.hash(self.encode(queryString)).upper()
body = self.urlencode(query)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
else:
if params:
url += '?' + self.urlencode(params)
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body):
if len(body) < 2:
return # fallback to default error handler
if body[0] == '{':
response = json.loads(body)
if 'error_code' in response:
error = self.safe_string(response, 'error_code')
message = self.id + ' ' + self.json(response)
if error in self.exceptions:
ExceptionClass = self.exceptions[error]
raise ExceptionClass(message)
else:
raise ExchangeError(message)
if 'result' in response:
if not response['result']:
raise ExchangeError(self.id + ' ' + self.json(response))
| 40.733793 | 385 | 0.494582 |
e2b0db54519397e03a5085fe76fcc0e0b7ec9526 | 4,577 | py | Python | Text-Code/text-to-code/code/dataset.py | ywen666/CodeXGLUE | f75c9628d8a29daedd5953474fb38e24c30d7349 | [
"CC0-1.0",
"MIT"
] | 613 | 2020-09-28T08:49:25.000Z | 2022-03-31T10:57:34.000Z | Text-Code/text-to-code/code/dataset.py | ywen666/CodeXGLUE | f75c9628d8a29daedd5953474fb38e24c30d7349 | [
"CC0-1.0",
"MIT"
] | 95 | 2020-10-01T07:46:04.000Z | 2022-03-31T03:54:42.000Z | Text-Code/text-to-code/code/dataset.py | ywen666/CodeXGLUE | f75c9628d8a29daedd5953474fb38e24c30d7349 | [
"CC0-1.0",
"MIT"
] | 178 | 2020-09-29T05:15:29.000Z | 2022-03-31T07:14:22.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
BertConfig, BertForMaskedLM, BertTokenizer,
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
class concodeDataset(Dataset):
def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'):
if args.local_rank==-1:
local_rank=0
world_size=1
else:
local_rank=args.local_rank
world_size=torch.distributed.get_world_size()
self.block_size = block_size
self.mode = mode
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache:
if file_type == 'train':
logger.warning("Loading features from cached file %s", cached_file)
with open(cached_file, 'rb') as handle:
data = pickle.load(handle)
self.inputs = data['inputs']
self.token_labels = data['token_labels']
else:
self.inputs = []
self.token_labels = []
datafile = os.path.join(args.data_dir, f"{file_type}.json")
if file_type == 'train':
logger.warning("Creating features from dataset file at %s", datafile)
datas = open(datafile).readlines()
length = len(datas)
logger.info("Data size: %d"%(length))
for idx, x in enumerate(datas):
if idx % (length//10) == 0:
percent = idx / (length//10) * 10
logger.warning("Rank %d, load %d"%(local_rank, percent))
if idx % world_size != local_rank:
continue
x = json.loads(x)
code = tokenizer.encode(x["code"])
nl = tokenizer.encode(x["nl"])
input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer)
self.inputs.append(input_ids)
self.token_labels.append(input_labels)
if file_type == 'train':
logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
logger.warning("Saving features into cached file %s", cached_file)
if mode != 'test':
with open(cached_file, 'wb') as handle:
pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL)
def pad_and_get_mask(self, code, nl, tokenizer):
if self.mode == 'test':
code = []
while (len(code) + len(nl) + 2 > self.block_size):
if (len(code) > len(nl)):
code = code[:-1]
else:
nl = nl[:-1]
if self.mode == 'train':
inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id]
labels = [1] * len(nl) + [2] * (len(code)+1) + [0]
else:
inputs = nl + [tokenizer.bos_token_id]
labels = [1] * len(nl) + [2]
return inputs, labels
assert len(inputs) <= self.block_size
pad_len = self.block_size - len(inputs)
inputs += [tokenizer.pad_token_id] * pad_len
labels += [0] * pad_len
assert len(inputs) == len(labels)
return inputs, labels
def __len__(self):
return len(self.inputs)
def __getitem__(self, item):
return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item])
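# A minimal usage sketch (illustrative; ``args`` must carry data_dir, output_dir,
# local_rank and overwrite_cache exactly as the training script provides them):
#
#     dataset = concodeDataset(tokenizer, args, logger, file_type='train', block_size=512)
#     loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=8)
#     inputs, token_labels = next(iter(loader))  # labels: 1 = NL, 2 = code, 0 = padding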
| 39.119658 | 143 | 0.594494 |
0a4d2ec87c3aed4fef6e7805f8bfbe19f4d3fd0e | 949 | py | Python | parlai/tasks/fvqa/build.py | ysglh/ParlAI | e0f16e9168839be12f72d3431b9819cf3d51fe10 | [
"BSD-3-Clause"
] | 2 | 2017-09-30T23:23:44.000Z | 2021-07-08T17:12:58.000Z | parlai/tasks/fvqa/build.py | ysglh/ParlAI | e0f16e9168839be12f72d3431b9819cf3d51fe10 | [
"BSD-3-Clause"
] | 1 | 2018-03-08T20:44:39.000Z | 2018-03-08T23:49:29.000Z | parlai/tasks/fvqa/build.py | ysglh/ParlAI | e0f16e9168839be12f72d3431b9819cf3d51fe10 | [
"BSD-3-Clause"
] | 1 | 2018-03-08T20:42:57.000Z | 2018-03-08T20:42:57.000Z | # This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import parlai.core.build_data as build_data
import os
def build(opt):
dpath = os.path.join(opt['datapath'], 'FVQA')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
# An older version exists, so remove these outdated files.
if build_data.built(dpath):
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
build_data.download('https://dl.dropboxusercontent.com/s/iyz6l7jhbt6jb7q/new_dataset_release.zip', dpath, 'FVQA.zip')
build_data.untar(dpath, 'FVQA.zip')
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
| 37.96 | 125 | 0.695469 |
d1166ae341404c1881588a5cc84594c9e30b980e | 11,754 | py | Python | mlrun/runtimes/local.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/local.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/local.py | rpatil524/mlrun | bb2259a959f871d7a479834ddc55ad1470e6c2c0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util as imputil
import inspect
import json
import os
import socket
import sys
import traceback
from contextlib import redirect_stdout
from copy import copy
from io import StringIO
from os import environ, remove
from pathlib import Path
from subprocess import PIPE, Popen
from sys import executable
from tempfile import mktemp
from distributed import Client, as_completed
from nuclio import Event
import mlrun
from mlrun.lists import RunList
from ..execution import MLClientCtx
from ..model import RunObject
from ..utils import logger
from .base import BaseRuntime
from .kubejob import KubejobRuntime
from .remotesparkjob import RemoteSparkRuntime
from .utils import RunError, global_context, log_std
class ParallelRunner:
def _get_handler(self, handler):
return None, handler
def _get_dask_client(self, options):
if options.dask_cluster_uri:
function = mlrun.import_function(options.dask_cluster_uri)
return function.client, function.metadata.name
return Client(), None
def _parallel_run_many(self, generator, execution, runobj: RunObject) -> RunList:
results = RunList()
tasks = generator.generate(runobj)
handler = runobj.spec.handler
self._force_handler(handler)
if self.spec.pythonpath:
set_paths(self.spec.pythonpath)
_, handler = self._get_handler(handler)
client, function_name = self._get_dask_client(generator.options)
parallel_runs = generator.options.parallel_runs or 4
queued_runs = 0
num_errors = 0
def process_result(future):
nonlocal num_errors
resp, sout, serr = future.result()
runobj = RunObject.from_dict(resp)
try:
log_std(self._db_conn, runobj, sout, serr, skip=self.is_child)
resp = self._post_run(resp)
except RunError as err:
resp = self._post_run(resp, err=str(err))
num_errors += 1
results.append(resp)
if num_errors > generator.max_errors:
logger.error("max errors reached, stopping iterations!")
return True
run_results = resp["status"].get("results", {})
stop = generator.eval_stop_condition(run_results)
if stop:
logger.info(
f"reached early stop condition ({generator.options.stop_condition}), stopping iterations!"
)
return stop
completed_iter = as_completed([])
for task in tasks:
resp = client.submit(
remote_handler_wrapper, task.to_json(), handler, self.spec.workdir
)
completed_iter.add(resp)
queued_runs += 1
if queued_runs >= parallel_runs:
future = next(completed_iter)
early_stop = process_result(future)
queued_runs -= 1
if early_stop:
break
for future in completed_iter:
process_result(future)
client.close()
if function_name and generator.options.teardown_dask:
logger.info("tearing down the dask cluster..")
mlrun.get_run_db().delete_runtime_object("dask", function_name, force=True)
return results
def remote_handler_wrapper(task, handler, workdir=None):
if task and not isinstance(task, dict):
task = json.loads(task)
context = MLClientCtx.from_dict(task, autocommit=False, host=socket.gethostname(),)
runobj = RunObject.from_dict(task)
sout, serr = exec_from_params(handler, runobj, context, workdir)
return context.to_dict(), sout, serr
class HandlerRuntime(BaseRuntime, ParallelRunner):
kind = "handler"
def _run(self, runobj: RunObject, execution):
handler = runobj.spec.handler
self._force_handler(handler)
tmp = mktemp(".json")
environ["MLRUN_META_TMPFILE"] = tmp
if self.spec.pythonpath:
set_paths(self.spec.pythonpath)
context = MLClientCtx.from_dict(
runobj.to_dict(),
rundb=self.spec.rundb,
autocommit=False,
tmp=tmp,
host=socket.gethostname(),
)
global_context.set(context)
sout, serr = exec_from_params(handler, runobj, context, self.spec.workdir)
log_std(self._db_conn, runobj, sout, serr, show=False)
return context.to_dict()
class LocalRuntime(BaseRuntime, ParallelRunner):
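    # Runtime that executes a local command/script or an imported handler on this machine.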
kind = "local"
_is_remote = False
def to_job(self, image=""):
struct = self.to_dict()
obj = KubejobRuntime.from_dict(struct)
if image:
obj.spec.image = image
return obj
@property
def is_deployed(self):
return True
def _get_handler(self, handler):
return load_module(self.spec.command, handler)
def _run(self, runobj: RunObject, execution):
environ["MLRUN_EXEC_CONFIG"] = runobj.to_json()
tmp = mktemp(".json")
environ["MLRUN_META_TMPFILE"] = tmp
if self.spec.rundb:
environ["MLRUN_DBPATH"] = self.spec.rundb
handler = runobj.spec.handler
handler_str = handler or "main"
logger.debug(f"starting local run: {self.spec.command} # {handler_str}")
if (
runobj.metadata.labels["kind"] == RemoteSparkRuntime.kind
and environ["MLRUN_SPARK_CLIENT_IGZ_SPARK"] == "true"
):
from mlrun.runtimes.remotesparkjob import igz_spark_pre_hook
igz_spark_pre_hook()
if handler:
if self.spec.pythonpath:
set_paths(self.spec.pythonpath)
mod, fn = self._get_handler(handler)
context = MLClientCtx.from_dict(
runobj.to_dict(),
rundb=self.spec.rundb,
autocommit=False,
tmp=tmp,
host=socket.gethostname(),
)
mod.global_mlrun_context = context
global_context.set(context)
sout, serr = exec_from_params(fn, runobj, context, self.spec.workdir)
log_std(self._db_conn, runobj, sout, serr, skip=self.is_child, show=False)
return context.to_dict()
else:
if self.spec.mode == "pass":
cmd = [self.spec.command]
else:
cmd = [executable, "-u", self.spec.command]
env = None
if self.spec.pythonpath:
pypath = self.spec.pythonpath
if "PYTHONPATH" in environ:
pypath = f"{environ['PYTHONPATH']}:{pypath}"
env = {"PYTHONPATH": pypath}
if runobj.spec.verbose:
if not env:
env = {}
env["MLRUN_LOG_LEVEL"] = "DEBUG"
sout, serr = run_exec(cmd, self.spec.args, env=env, cwd=self.spec.workdir)
log_std(self._db_conn, runobj, sout, serr, skip=self.is_child, show=False)
try:
with open(tmp) as fp:
resp = fp.read()
remove(tmp)
if resp:
return json.loads(resp)
logger.error("empty context tmp file")
except FileNotFoundError:
logger.info("no context file found")
return runobj.to_dict()
def set_paths(pythonpath=""):
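    # Add each entry of the colon-separated path list to sys.path if not already present.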
paths = pythonpath.split(":")
if not paths:
return
for p in paths:
abspath = os.path.abspath(p)
if abspath not in sys.path:
sys.path.append(abspath)
def load_module(file_name, handler):
"""Load module from file name"""
path = Path(file_name)
mod_name = path.name
if path.suffix:
mod_name = mod_name[: -len(path.suffix)]
spec = imputil.spec_from_file_location(mod_name, file_name)
if spec is None:
raise RunError(f"cannot import from {file_name!r}")
mod = imputil.module_from_spec(spec)
spec.loader.exec_module(mod)
try:
fn = getattr(mod, handler) # Will raise if name not found
except AttributeError:
raise RunError(f"handler {handler} not found in {file_name}")
return mod, fn
def run_exec(cmd, args, env=None, cwd=None):
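    # Run the command as a subprocess, echoing stdout live while also capturing it;
    # stderr is returned only when the process exits with a non-zero code.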
if args:
cmd += args
out = ""
process = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env, cwd=cwd)
while True:
nextline = process.stdout.readline()
if not nextline and process.poll() is not None:
break
print(nextline.decode("utf-8"), end="")
sys.stdout.flush()
out += nextline.decode("utf-8")
code = process.poll()
err = process.stderr.read().decode("utf-8") if code != 0 else ""
return out, err
class _DupStdout(object):
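    # File-like object that mirrors every write to both the real stdout and an
    # in-memory buffer, so the captured output can be stored after execution.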
def __init__(self):
self.terminal = sys.stdout
self.buf = StringIO()
def write(self, message):
self.terminal.write(message)
self.buf.write(message)
def flush(self):
self.terminal.flush()
def exec_from_params(handler, runobj: RunObject, context: MLClientCtx, cwd=None):
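    # Call the handler with arguments resolved from the run spec, capture its stdout,
    # record state/results on the context, and return (captured output, error string).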
old_level = logger.level
if runobj.spec.verbose:
logger.set_logger_level("DEBUG")
args_list = get_func_arg(handler, runobj, context)
stdout = _DupStdout()
err = ""
val = None
old_dir = os.getcwd()
with redirect_stdout(stdout):
context.set_logger_stream(stdout)
try:
if cwd:
os.chdir(cwd)
val = handler(*args_list)
context.set_state("completed", commit=False)
except Exception as exc:
err = str(exc)
logger.error(traceback.format_exc())
context.set_state(error=err, commit=False)
logger.set_logger_level(old_level)
stdout.flush()
if cwd:
os.chdir(old_dir)
context.set_logger_stream(sys.stdout)
if val:
context.log_result("return", val)
context.commit()
logger.set_logger_level(old_level)
return stdout.buf.getvalue(), err
def get_func_arg(handler, runobj: RunObject, context: MLClientCtx):
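    # Build the positional argument list for the handler from the run parameters and
    # inputs, injecting `context` and `event` objects when the signature requests them.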
params = runobj.spec.parameters or {}
inputs = runobj.spec.inputs or {}
args_list = []
i = 0
args = inspect.signature(handler).parameters
if len(args) > 0 and list(args.keys())[0] == "context":
args_list.append(context)
i += 1
if len(args) > i + 1 and list(args.keys())[i] == "event":
event = Event(runobj.to_dict())
args_list.append(event)
i += 1
for key in list(args.keys())[i:]:
if args[key].name in params:
args_list.append(copy(params[key]))
elif args[key].name in inputs:
obj = context.get_input(key, inputs[key])
if type(args[key].default) is str or args[key].annotation == str:
args_list.append(obj.local())
else:
args_list.append(context.get_input(key, inputs[key]))
elif args[key].default is not inspect.Parameter.empty:
args_list.append(args[key].default)
else:
args_list.append(None)
return args_list
| 32.469613 | 110 | 0.611707 |
907a44142ba4f9781f1a23e140e44c128ae86f7f | 101 | py | Python | python/testData/inspections/PyStringFormatInspection/UnsupportedFormatSpecifierNewStyleFormatting.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyStringFormatInspection/UnsupportedFormatSpecifierNewStyleFormatting.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyStringFormatInspection/UnsupportedFormatSpecifierNewStyleFormatting.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | print(<warning descr="Unsupported format character 'q'">'{:+q}; {:+f}'</warning>.format(3.14, -3.14)) | 101 | 101 | 0.643564 |
a509432f0086fe9e36384eea1ee00b090d2cd462 | 444 | py | Python | __init__.py | airflow-plugins/trello_plugin | a2ce09b5763d99b38617ce60321918d887b64048 | [
"Apache-2.0"
] | 5 | 2018-01-16T18:29:39.000Z | 2019-01-31T02:09:38.000Z | __init__.py | airflow-plugins/trello_plugin | a2ce09b5763d99b38617ce60321918d887b64048 | [
"Apache-2.0"
] | null | null | null | __init__.py | airflow-plugins/trello_plugin | a2ce09b5763d99b38617ce60321918d887b64048 | [
"Apache-2.0"
] | 3 | 2018-06-26T20:37:31.000Z | 2021-03-24T11:32:36.000Z | from airflow.plugins_manager import AirflowPlugin
from trello_plugin.operators.trello_to_s3_operator import TrelloToS3Operator
from trello_plugin.hooks.trello_hook import TrelloHook
class trello_plugin(AirflowPlugin):
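    # Registers the Trello hook and operator with Airflow's plugin manager.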
name = "trello_plugin"
operators = [TrelloToS3Operator]
hooks = [TrelloHook]
# Leave in for explicitness
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
| 26.117647 | 76 | 0.740991 |
bf510617b403689e84275f660cd2a8894888e7a2 | 8,130 | py | Python | wofrysrw/storage_ring/light_sources/srw_gaussian_light_source.py | lucarebuffi/wofrysrw | 174bedd7f9fc9711af371743f1b13577dbf838bb | [
"MIT"
] | null | null | null | wofrysrw/storage_ring/light_sources/srw_gaussian_light_source.py | lucarebuffi/wofrysrw | 174bedd7f9fc9711af371743f1b13577dbf838bb | [
"MIT"
] | null | null | null | wofrysrw/storage_ring/light_sources/srw_gaussian_light_source.py | lucarebuffi/wofrysrw | 174bedd7f9fc9711af371743f1b13577dbf838bb | [
"MIT"
] | 2 | 2019-02-14T09:46:35.000Z | 2019-04-03T07:39:18.000Z | from wofrysrw.propagator.wavefront2D.srw_wavefront import WavefrontParameters, SRWWavefront
from wofrysrw.storage_ring.srw_light_source import SRWLightSource
from wofrysrw.storage_ring.srw_electron_beam import SRWElectronBeam
from oasys_srw.srwlib import *
'''
x = 0.0, #Transverse Coordinates of Gaussian Beam Center at Waist [m]
y = 0.0,
z = 0.0, #Longitudinal Coordinate of Waist [m]
xp = 0.0, #Average Angles of Gaussian Beam at Waist [rad]
yp = 0.0,
avgPhotEn = 12400, #5000 #Photon Energy [eV]
pulseEn = 0.001, #Energy per Pulse [J] - to be corrected
repRate = 1, #Rep. Rate [Hz] - to be corrected
polar = 1, #1- linear hoirizontal
sigX = 23e-06/2.35, #Horiz. RMS size at Waist [m]
sigY = 23e-06/2.35, #Vert. RMS size at Waist [m]
sigT = 10e-15, #Pulse duration [s] (not used?)
mx = 0, #Transverse Gauss-Hermite Mode Orders
my = 0
'''
class Polarization:
LINEAR_HORIZONTAL = 1
LINEAR_VERTICAL = 2
LINEAR_45_DEGREES = 3
LINEAR_135_DEGREES = 4
CIRCULAR_RIGHT = 5
CIRCULAR_LEFT = 6
@classmethod
def tuple(cls):
return ["Linear Horizontal",
"Linear Vertical",
"Linear 45\u00b0",
"Linear 135\u00b0",
"Circular Right",
"Circular Left"]
class SRWGaussianLightSource(SRWLightSource):
def __init__(self,
name="Undefined",
beam_center_at_waist_x = 0.0, #Transverse Coordinates of Gaussian Beam Center at Waist [m]
beam_center_at_waist_y = 0.0,
beam_center_at_waist_z = 0.0, #Longitudinal Coordinate of Waist [m]
average_angle_at_waist_x = 0.0, #Average Angles of Gaussian Beam at Waist [rad]
average_angle_at_waist_y = 0.0,
photon_energy = 12400,
energy_per_pulse = 0.001, #Energy per Pulse [J]
repetition_rate = 1, #[Hz]
polarization = Polarization.LINEAR_HORIZONTAL,
horizontal_sigma_at_waist = 1e-6,
vertical_sigma_at_waist = 1e-6,
pulse_duration = 10e-15, #[s]
transverse_gauss_hermite_mode_order_x = 0,
transverse_gauss_hermite_mode_order_y = 0
):
super().__init__(name,
electron_beam=None,
magnetic_structure=None)
self.beam_center_at_waist_x = beam_center_at_waist_x
self.beam_center_at_waist_y = beam_center_at_waist_y
self.beam_center_at_waist_z = beam_center_at_waist_z
self.average_angle_at_waist_x = average_angle_at_waist_x
self.average_angle_at_waist_y = average_angle_at_waist_y
self.photon_energy = photon_energy
self.energy_per_pulse = energy_per_pulse
self.repetition_rate = repetition_rate
self.polarization = polarization
self.horizontal_sigma_at_waist = horizontal_sigma_at_waist
self.vertical_sigma_at_waist = vertical_sigma_at_waist
self.pulse_duration = pulse_duration
self.transverse_gauss_hermite_mode_order_x = transverse_gauss_hermite_mode_order_x
self.transverse_gauss_hermite_mode_order_y = transverse_gauss_hermite_mode_order_y
# from Wofry Decorator
def get_wavefront(self, wavefront_parameters):
return self.get_SRW_Wavefront(source_wavefront_parameters=wavefront_parameters).toGenericWavefront()
def get_SRW_Wavefront(self, source_wavefront_parameters = WavefrontParameters()):
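        # Build the SRW Gaussian-beam description from the source settings and compute
        # the electric field on the requested wavefront mesh.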
self.__source_wavefront_parameters = source_wavefront_parameters
source_wavefront_parameters.photon_energy_min = self.photon_energy
source_wavefront_parameters.photon_energy_max = self.photon_energy
source_wavefront_parameters.photon_energy_points = 1
mesh = source_wavefront_parameters.to_SRWRadMesh()
GsnBm = SRWLGsnBm() #Gaussian Beam structure (just parameters)
GsnBm.x = self.beam_center_at_waist_x
GsnBm.y = self.beam_center_at_waist_y
GsnBm.z = self.beam_center_at_waist_z
GsnBm.xp = self.average_angle_at_waist_x
GsnBm.yp = self.average_angle_at_waist_y
GsnBm.avgPhotEn = self.photon_energy
GsnBm.pulseEn = self.energy_per_pulse
GsnBm.repRate = self.repetition_rate
GsnBm.polar = self.polarization
GsnBm.sigX = self.horizontal_sigma_at_waist
GsnBm.sigY = self.vertical_sigma_at_waist
GsnBm.sigT = self.pulse_duration
GsnBm.mx = self.transverse_gauss_hermite_mode_order_x
GsnBm.my = self.transverse_gauss_hermite_mode_order_y
wfr = SRWWavefront()
wfr.allocate(mesh.ne, mesh.nx, mesh.ny)
wfr.mesh = mesh
wfr.partBeam.partStatMom1.x = GsnBm.x
wfr.partBeam.partStatMom1.y = GsnBm.y
wfr.partBeam.partStatMom1.z = GsnBm.z
wfr.partBeam.partStatMom1.xp = GsnBm.xp
wfr.partBeam.partStatMom1.yp = GsnBm.yp
arPrecPar = [source_wavefront_parameters._wavefront_precision_parameters._sampling_factor_for_adjusting_nx_ny]
srwl.CalcElecFieldGaussian(wfr, GsnBm, arPrecPar)
return wfr
def get_source_wavefront_parameters(self):
return self.__source_wavefront_parameters
def to_python_code(self, data=None):
is_multi_electron = data
text_code = ""
source_wavefront_parameters = self.get_source_wavefront_parameters()
        if source_wavefront_parameters is not None:
text_code += source_wavefront_parameters.to_python_code()
text_code += "\n"
text_code += "wfr = SRWLWfr()" + "\n"
text_code += "wfr.allocate(mesh.ne, mesh.nx, mesh.ny)" + "\n"
text_code += "wfr.mesh = mesh" + "\n"
text_code += "\n"
text_code += "initial_mesh = deepcopy(wfr.mesh)" + "\n"
text_code += "\n"
text_code += "GsnBm = SRWLGsnBm()" + "\n"
text_code += "GsnBm.x = " + str(self.beam_center_at_waist_x) + "\n"
text_code += "GsnBm.y = " + str(self.beam_center_at_waist_y) + "\n"
text_code += "GsnBm.z = " + str(self.beam_center_at_waist_z) + "\n"
text_code += "GsnBm.xp = " + str(self.average_angle_at_waist_x) + "\n"
text_code += "GsnBm.yp = " + str(self.average_angle_at_waist_y) + "\n"
text_code += "GsnBm.avgPhotEn = " + str(self.photon_energy) + "\n"
text_code += "GsnBm.pulseEn = " + str(self.energy_per_pulse) + "\n"
text_code += "GsnBm.repRate = " + str(self.repetition_rate) + "\n"
text_code += "GsnBm.polar = " + str(self.polarization) + "\n"
text_code += "GsnBm.sigX = " + str(self.horizontal_sigma_at_waist) + "\n"
text_code += "GsnBm.sigY = " + str(self.vertical_sigma_at_waist) + "\n"
text_code += "GsnBm.sigT = " + str(self.pulse_duration) + "\n"
text_code += "GsnBm.mx = " + str(self.transverse_gauss_hermite_mode_order_x) + "\n"
text_code += "GsnBm.my = " + str(self.transverse_gauss_hermite_mode_order_y) + "\n"
text_code += "\n"
text_code += "wfr.partBeam.partStatMom1.x = GsnBm.x" + "\n"
text_code += "wfr.partBeam.partStatMom1.y = GsnBm.y" + "\n"
text_code += "wfr.partBeam.partStatMom1.z = GsnBm.z" + "\n"
text_code += "wfr.partBeam.partStatMom1.xp = GsnBm.xp" + "\n"
text_code += "wfr.partBeam.partStatMom1.yp = GsnBm.yp" + "\n"
text_code += "\n"
if not is_multi_electron:
text_code += "srwl.CalcElecFieldGaussian(wfr, GsnBm, [" + str(source_wavefront_parameters._wavefront_precision_parameters._sampling_factor_for_adjusting_nx_ny) + "])" + "\n"
return text_code
| 47.823529 | 189 | 0.621525 |
84904d61ce2b18d9f642368a7ff378e76331cfc1 | 2,764 | py | Python | build/merge_libs.py | TeamNuclear/external_chromium_org_third_party_webrtc | 5bd5c72d7c01872fea80698dac196ff9a01dfcba | [
"DOC",
"BSD-3-Clause"
] | null | null | null | build/merge_libs.py | TeamNuclear/external_chromium_org_third_party_webrtc | 5bd5c72d7c01872fea80698dac196ff9a01dfcba | [
"DOC",
"BSD-3-Clause"
] | 2 | 2019-01-16T13:52:47.000Z | 2019-02-03T11:34:44.000Z | build/merge_libs.py | TeamNuclear/external_chromium_org_third_party_webrtc | 5bd5c72d7c01872fea80698dac196ff9a01dfcba | [
"DOC",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# Searches for libraries or object files on the specified path and merges them
# them into a single library. Assumes ninja is used on all platforms.
import fnmatch
import os
import subprocess
import sys
IGNORE_PATTERNS = ['do_not_use', 'protoc']
def FindFiles(path, pattern):
"""Finds files matching |pattern| under |path|.
Returns a list of file paths matching |pattern|, by walking the directory tree
under |path|. Filenames containing the string 'do_not_use' or 'protoc' are
excluded.
Args:
path: The root path for the search.
pattern: A shell-style wildcard pattern to match filenames against.
(e.g. '*.a')
Returns:
A list of file paths, relative to the current working directory.
"""
files = []
for root, _, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pattern):
if filename not in IGNORE_PATTERNS:
# We use the relative path here to avoid "argument list too
# long" errors on Linux. Note: This doesn't always work, so
# we use the find command on Linux.
files.append(os.path.relpath(os.path.join(root, filename)))
return files
def main(argv):
if len(argv) != 3:
sys.stderr.write('Usage: ' + argv[0] + ' <search_path> <output_lib>\n')
return 1
search_path = os.path.normpath(argv[1])
output_lib = os.path.normpath(argv[2])
if not os.path.exists(search_path):
sys.stderr.write('search_path does not exist: %s\n' % search_path)
return 1
if os.path.isfile(output_lib):
os.remove(output_lib)
if sys.platform.startswith('linux'):
pattern = '*.o'
cmd = 'ar crs'
elif sys.platform == 'darwin':
pattern = '*.a'
cmd = 'libtool -static -v -o '
elif sys.platform == 'win32':
pattern = '*.lib'
cmd = 'lib /OUT:'
else:
sys.stderr.write('Platform not supported: %r\n\n' % sys.platform)
return 1
if sys.platform.startswith('linux'):
cmd = ' '.join(['find', search_path, '-name "' + pattern + '"' +
' -and -not -name ' +
' -and -not -name '.join(IGNORE_PATTERNS) +
' -exec', cmd, output_lib, '{} +'])
else:
cmd = ' '.join([cmd + output_lib] + FindFiles(search_path, pattern))
print cmd
subprocess.check_call(cmd, shell=True)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 31.409091 | 80 | 0.660637 |
b89ffd2e83e0f81f7a9b209b16ca471d3bc8535d | 4,043 | py | Python | tests/fetch/test_base.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | tests/fetch/test_base.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | tests/fetch/test_base.py | thesamesam/pkgcore | be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf | [
"BSD-3-Clause"
] | null | null | null | import os
from functools import partial
import pytest
from pkgcore.fetch import base, errors, fetchable
from snakeoil import data_source
from snakeoil.chksum import get_handlers
repeating_str = 'asdf'
data = repeating_str * 4000
handlers = get_handlers()
from snakeoil.mappings import LazyValDict
def _callback(chf):
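    # Compute the checksum of the shared test data for the requested checksum type.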
return handlers[chf](data_source.data_source(data))
chksums = LazyValDict(frozenset(handlers.keys()), _callback)
# get a non size based chksum
known_chksum = [x for x in handlers.keys() if x != "size"][0]
class TestFetcher:
@pytest.fixture(autouse=True)
def _setup(self, tmpdir):
self.fp = os.path.join(str(tmpdir), "test")
self.obj = fetchable(self.fp, chksums=chksums)
self.fetcher = base.fetcher()
def write_data(self, data=data):
with open(self.fp, "w") as f:
f.write(data)
def test__call__(self):
l = []
class c(base.fetcher):
def fetch(self, *a, **kw):
l.extend((a, kw))
o = c()
o.fetch(1, foon=True)
assert [(1,), {"foon": True}] == l
self.write_data()
assert self.fetcher._verify(self.fp, self.obj) == None
self.write_data("asdf")
with pytest.raises(errors.FetchError) as excinfo:
self.fetcher._verify(self.fp, self.obj)
assert excinfo.value.resumable
def test_verify_all_chksums(self):
self.write_data()
subhandlers = dict([list(handlers.items())[0]])
with pytest.raises(errors.RequiredChksumDataMissing):
self.fetcher._verify(self.fp, self.obj, handlers=subhandlers)
self.fetcher._verify(self.fp, self.obj)
assert None == self.fetcher._verify(
self.fp, self.obj, handlers=subhandlers, all_chksums=False)
def test_size_verification_first(self):
self.write_data()
chksum_data = dict(chksums.items())
l = []
def f(chf, fp):
l.append(chf)
return chksum_data[chf]
subhandlers = {"size": partial(f, 'size'), known_chksum:partial(f, known_chksum)}
# exact size verification
self.fetcher._verify(self.fp, self.obj, handlers=subhandlers, all_chksums=False)
assert ['size', known_chksum] == l
for x in (-100, 100):
while l:
l.pop(-1)
chksum_data["size"] = chksums["size"] + x
if x > 0:
with pytest.raises(errors.ChksumFailure) as excinfo:
self.fetcher._verify(
self.fp, self.obj, handlers=subhandlers, all_chksums=False)
assert excinfo.value.chksum == 'size'
else:
with pytest.raises(errors.FetchError) as excinfo:
self.fetcher._verify(
self.fp, self.obj, handlers=subhandlers, all_chksums=False)
assert excinfo.value.resumable
assert ['size'] == l
def test_normal(self):
self.write_data()
assert self.fetcher._verify(self.fp, self.obj) == None
self.write_data(data[:-1])
with pytest.raises(errors.FetchError) as excinfo:
self.fetcher._verify(self.fp, self.obj)
assert excinfo.value.resumable
        # verify it raises MissingDistfile for missing file paths.
os.unlink(self.fp)
with pytest.raises(errors.MissingDistfile) as excinfo:
self.fetcher._verify(self.fp, self.obj)
assert excinfo.value.resumable
self.write_data(data + "foon")
with pytest.raises(errors.ChksumFailure) as excinfo:
self.fetcher._verify(self.fp, self.obj)
assert excinfo.value.chksum == 'size'
        # verify they're run once, and only once
l = []
def f(chf, fp):
l.append(chf)
return chksums[chf]
alt_handlers = {chf: partial(f, chf) for chf in chksums}
assert None == self.fetcher._verify(self.fp, self.obj, handlers=alt_handlers)
assert sorted(l) == sorted(alt_handlers)
| 35.156522 | 89 | 0.606975 |
f7c4c1cd066beac316649ce9327bb6970311e6b3 | 39,487 | py | Python | python/tvm/relay/op/strategy/generic.py | ZihengJiang/tvm-hago | 6e71860aeb502de2366ff6c39f513f16dbaf0924 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2021-06-21T06:54:58.000Z | 2021-06-21T06:54:58.000Z | python/tvm/relay/op/strategy/generic.py | ZihengJiang/tvm-hago | 6e71860aeb502de2366ff6c39f513f16dbaf0924 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2021-09-01T15:21:49.000Z | 2021-09-01T15:21:49.000Z | python/tvm/relay/op/strategy/generic.py | ZihengJiang/tvm-hago | 6e71860aeb502de2366ff6c39f513f16dbaf0924 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 1 | 2021-07-03T08:09:32.000Z | 2021-07-03T08:09:32.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
# pylint: disable=invalid-name,unused-argument
import logging
import re
import topi
from topi.util import get_const_int, get_const_float, get_const_tuple, get_float_tuple
from .. import op as _op
from ....target import generic_func, override_native_generic_func
logger = logging.getLogger('strategy')
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def get_conv2d_in_channels(data_shape, data_layout):
"""Get conv2d input channels"""
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
# NCHW[8]c
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
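    """Whether the conv2d is depthwise (input channels == output channels == groups)."""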
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
@generic_func
def schedule_injective(attrs, outs, target):
"""Schedule injective ops"""
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
"""Schedule reduction ops"""
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
# concatenate
@generic_func
def schedule_concatenate(attrs, outs, target):
"""Schedule concatenate op"""
with target:
return topi.generic.schedule_injective(outs)
# pool
@generic_func
def schedule_pool(attrs, outs, target):
"""Schedule pooling ops"""
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
# pool_grad
@generic_func
def schedule_pool_grad(attrs, outs, target):
"""Schedule pooling gradient ops"""
with target:
return topi.generic.schedule_pool_grad(outs)
# adaptive pool
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
"""Schedule adaptive pooling ops"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
# softmax
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
"""softmax generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic")
return strategy
# log_softmax
@generic_func
def schedule_log_softmax(attrs, outs, target):
"""Schedule log_softmax op"""
with target:
return topi.generic.schedule_softmax(outs)
# lrn
@generic_func
def schedule_lrn(attrs, outs, target):
"""Schedule LRN op"""
with target:
return topi.generic.schedule_lrn(outs)
# bitpack
@generic_func
def schedule_bitpack(attrs, outs, target):
"""Schedule bitpack"""
with target:
return topi.generic.schedule_bitpack(outs)
# conv2d
def wrap_compute_conv2d(topi_compute, need_data_layout=False, need_out_layout=False,
has_groups=False):
"""Wrap conv2d topi compute"""
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
args = [inputs[0], inputs[1], strides, padding, dilation]
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_conv2d
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
"""conv2d generic strategy"""
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic")
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic")
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic")
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic")
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic")
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == 'NCHW':
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic")
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
# conv2d_NCHWc
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""conv2d_NCHWc generic strategy"""
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc_int8, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic")
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic")
return strategy
# depthwise_conv2d_NCHWc
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""depthwise_conv2d generic strategy"""
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_NCHWc, True, True),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic")
return strategy
# conv2d_winograd_without_weight_transform
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_winograd_without_weight_transform")
# conv2d_gemm_without_weight_transform
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv2d_gemm_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv2d_gemm_without_weight_transform")
# conv2d_winograd_weight_transform
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
# conv2d_winograd_nnpack_weight_transform
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
# conv2d_gemm_weight_transform
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
"""Schedule conv2d_gemm_weight_transform"""
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
# deformable_conv2d
def wrap_compute_deformable_conv2d(topi_compute):
"""wrap deformable_conv2d topi compute"""
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
assert attrs.data_layout == "NCHW"
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], inputs[2], strides, padding,
dilation, deformable_groups, groups, out_dtype)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
"""deformable_conv2d generic strategy"""
logger.warning("deformable_conv2d is not optimized for this platform.")
layout = attrs.data_layout
assert layout == "NCHW"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d.generic")
return strategy
# conv2d_transpose
def wrap_compute_conv2d_transpose(topi_compute):
"""wrap conv2d_transpose topi compute"""
def compute_conv2d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(
inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
"""conv2d_transpose generic strategy"""
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic")
return strategy
# conv3d_transpose
def wrap_compute_conv3d_transpose(topi_compute):
"""wrap conv3d_transpose topi compute"""
def compute_conv3d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv3d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
out = topi_compute(
inputs[0], inputs[1], strides, padding, out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi.nn.pad(out,
[0, 0, 0, 0, 0],
[0, 0, output_padding[0], output_padding[1], output_padding[2]])
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
"""conv3d_transpose generic strategy"""
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic")
return strategy
# conv3d
def wrap_compute_conv3d(topi_compute, need_layout=False):
"""wrap conv3d topi compute"""
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
raise ValueError("Dilation should be positive value")
if groups != 1:
raise ValueError("Not support arbitrary group number for conv3d")
if need_layout:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation,
layout, out_dtype)
else:
out = topi_compute(inputs[0], inputs[1], strides, padding, dilation,
out_dtype)
return [out]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic")
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic")
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
# conv3d_winograd_without_weight_transform
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transfrom_strategy(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transfrom generic strategy"""
raise ValueError("No generic implemenation for conv3d_winograd_without_weight_transform")
# conv3d_winograd_weight_transform
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv3d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
# conv1d
def wrap_compute_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation,
out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
"""conv1d generic strategy"""
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic")
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic")
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
# conv1d_transpose
def wrap_compute_conv1d_transpose(topi_compute):
"""wrap conv1d_transpose topi compute"""
def _compute_conv1d_tranpsoe(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "") else out_dtype)
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return _compute_conv1d_tranpsoe
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
"""conv1d_transpose generic strategy"""
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic")
return strategy
# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
"""Wrap dilation2d topi compute"""
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = (inputs[0].dtype if out_dtype in ("same", "")
else out_dtype)
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
"""dilation2d_strategy generic strategy"""
logger.warning("dilation2d_strategy is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic")
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic")
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
# dense
def wrap_compute_dense(topi_compute):
"""wrap dense topi compute"""
def _compute_dense(attrs, inputs, out_type):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
return [topi_compute(inputs[0], inputs[1], None, out_dtype)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
"""dense generic strategy"""
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic")
return strategy
# batch_matmul
def wrap_compute_batch_matmul(topi_compute):
"""wrap batch_matmul topi compute"""
def _compute_batch_matmul(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1])]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
"""batch_matmul generic strategy"""
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic")
return strategy
# sparse dense
def wrap_compute_sparse_dense(topi_compute):
"""wrap sparse dense topi compute"""
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
"""sparse dense generic strategy"""
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic")
return strategy
# sparse_transpose
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
"""schedule sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outs)
# argsort
def wrap_compute_argsort(topi_compute):
"""Wrap argsort topi compute"""
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
"""argsort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic")
return strategy
# topk
def wrap_compute_topk(topi_compute):
"""Wrap topk compute"""
def _compute_topk(attrs, inputs, out_type):
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
"""topk generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic")
return strategy
# multibox_prior
def wrap_compute_multibox_prior(topi_compute):
"""Wrap multibox_prior compute"""
def _compute_multibox_prior(attrs, inputs, _):
"""Compute definition of multibox_prior"""
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
"""multibox_prior generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic")
return strategy
# multibox_transform_loc
def wrap_compute_multibox_transform_loc(topi_compute):
"""Wrap multibox_transform_loc compute"""
def _compute_multibox_transform_loc(attrs, inputs, _):
"""Compute definition of multibox_detection"""
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(
inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
"""schedule multibox_transform_loc"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic")
return strategy
# get_valid_counts
def wrap_compute_get_valid_counts(topi_compute):
"""wrap get_valid_counts topi compute"""
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = get_const_float(attrs.score_threshold)
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
"""get_valid_counts generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic")
return strategy
# non-maximum suppression
def wrap_compute_nms(topi_compute):
"""wrap nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
if attrs.max_output_size is not None:
max_output_size = attrs.max_output_size
return_indices = bool(get_const_int(attrs.return_indices))
iou_threshold = get_const_float(attrs.iou_threshold)
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
if return_indices:
return topi_compute(inputs[0], inputs[1], inputs[2], max_output_size, iou_threshold,
force_suppress, top_k, coord_start, score_index, id_index,
return_indices, invalid_to_bottom)
return [topi_compute(inputs[0], inputs[1], inputs[2], max_output_size, iou_threshold,
force_suppress, top_k, coord_start, score_index, id_index,
return_indices, invalid_to_bottom)]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
"""nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic")
return strategy
# roi_align
def wrap_compute_roi_align(topi_compute):
"""wrap roi_align topi compute"""
def _compute_roi_align(attrs, inputs, out_type):
assert attrs.layout == "NCHW"
pooled_size = get_const_tuple(attrs.pooled_size)
return [topi_compute(inputs[0], inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio)]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
"""roi_align generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic")
return strategy
# roi_pool
@generic_func
def schedule_roi_pool(attrs, outs, target):
"""schedule roi_pool"""
with target:
return topi.generic.schedule_roi_pool(outs)
# proposal
def wrap_compute_proposal(topi_compute):
"""wrap proposal topi compute"""
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [topi_compute(inputs[0], inputs[1], inputs[2], scales, ratios,
feature_stride, threshold, rpn_pre_nms_top_n,
rpn_post_nms_top_n, rpn_min_size, iou_loss)]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
"""proposal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic")
return strategy
# argwhere
@generic_func
def schedule_argwhere(attrs, outs, target):
"""schedule argwhere"""
with target:
return topi.generic.schedule_argwhere(outs)
# scatter
@generic_func
def schedule_scatter(attrs, outs, target):
"""schedule scatter"""
with target:
return topi.generic.schedule_scatter(outs)
# scatter_add
@generic_func
def schedule_scatter_add(attrs, outs, target):
"""schedule scatter_add"""
with target:
return topi.generic.schedule_scatter_add(outs)
# bitserial_conv2d
def wrap_compute_bitserial_conv2d(topi_compute):
"""wrap bitserial_conv2d topi compute"""
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [topi_compute(inputs[0], inputs[1], strides, padding, activation_bits,
weight_bits, pack_dtype, out_dtype, unipolar)]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
"""bitserial_conv2d generic strategy"""
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic")
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic")
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
# bitserial_dense
def wrap_compute_bitserial_dense(topi_compute):
"""wrap bitserial_dense topi compute"""
def compute_bitserial_dense(attrs, inputs, out_type):
"""Compute definition of bitserial dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [topi_compute(inputs[0], inputs[1], data_bits, weight_bits,
pack_dtype, out_dtype, unipolar)]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
"""bitserial_dense generic strategy"""
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic")
return strategy
# correlation
def wrap_compute_correlation(topi_compute):
"""wrap correlation topi compute"""
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [topi_compute(inputs[0], inputs[1], kernel_size, max_displacement, stride1, stride2,
padding, is_multiply)]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
"""correlation generic strategy"""
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic")
return strategy
@generic_func
def schedule_simulated_quantize(attrs, outs, target):
return topi.generic.default.default_schedule(outs, auto_inline=False)
| 41.829449 | 99 | 0.700813 |
e80dd85cca51ec7619e25b31762d2b433514bf81 | 496 | py | Python | Ejercicio 3/ejercicio3.py | migueliiin/Grupo | e027fbd99399a7481da14c6f9345b8ef58e50b6f | [
"Apache-2.0"
] | null | null | null | Ejercicio 3/ejercicio3.py | migueliiin/Grupo | e027fbd99399a7481da14c6f9345b8ef58e50b6f | [
"Apache-2.0"
] | null | null | null | Ejercicio 3/ejercicio3.py | migueliiin/Grupo | e027fbd99399a7481da14c6f9345b8ef58e50b6f | [
"Apache-2.0"
] | null | null | null |
compra = float(input("compra:"))
if 100 <= compra < 500:
    # apply a 5% discount for purchases from 100 up to (but not including) 500
    compra = compra - compra * (1 / 20)
    print(compra)
elif compra > 500:
    # apply an 8% discount for purchases above 500
    compra = compra - compra * (2 / 25)
    print(compra)
else:
    # no discount applies
    print(compra)
print("pim") | 23.619048 | 48 | 0.489919 |
d502d6bd7429985499035aa8a413d206a517ed7f | 2,113 | py | Python | clickhouse_sqlalchemy/alembic/toimpl.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | clickhouse_sqlalchemy/alembic/toimpl.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | clickhouse_sqlalchemy/alembic/toimpl.py | Fozar/clickhouse-sqlalchemy | 88fd630856655cc470430b365dce7e85516abf62 | [
"MIT"
] | null | null | null | from alembic.operations import Operations
from sqlalchemy.sql.ddl import CreateColumn
from . import operations
@Operations.implementation_for(operations.CreateMatViewOp)
def create_mat_view(operations, operation):
impl = operations.impl
ddl_compiler = impl.dialect.ddl_compiler(impl.dialect, None)
text = 'CREATE MATERIALIZED VIEW '
if operation.kwargs.get('if_not_exists'):
text += 'IF NOT EXISTS '
text += operation.name
if operation.kwargs.get('on_cluster'):
text += ' ON CLUSTER ' + operation.kwargs['on_cluster']
text += ' (' + ', '.join(
ddl_compiler.process(CreateColumn(c)) for c in operation.columns
) + ') '
text += 'ENGINE = ' + operation.engine
if operation.kwargs.get('populate'):
text += ' POPULATE'
text += ' AS ' + operation.selectable
operations.execute(text)
@Operations.implementation_for(operations.CreateMatViewToTableOp)
def create_mat_view_to_table(operations, operation):
text = 'CREATE MATERIALIZED VIEW '
if operation.kwargs.get('if_not_exists'):
text += 'IF NOT EXISTS '
text += operation.name
if operation.kwargs.get('on_cluster'):
text += ' ON CLUSTER ' + operation.kwargs['on_cluster']
text += ' TO ' + operation.inner_name
if operation.kwargs.get('populate'):
text += ' POPULATE'
text += ' AS ' + operation.selectable
operations.execute(text)
@Operations.implementation_for(operations.AttachMatViewOp)
def attach_mat_view(operations, operation):
impl = operations.impl
ddl_compiler = impl.dialect.ddl_compiler(impl.dialect, None)
text = 'ATTACH MATERIALIZED VIEW '
if operation.kwargs.get('if_not_exists'):
text += 'IF NOT EXISTS '
text += operation.name + ' '
if operation.kwargs.get('on_cluster'):
text += ' ON CLUSTER ' + operation.kwargs['on_cluster']
text += ' (' + ', '.join(
ddl_compiler.process(CreateColumn(c)) for c in operation.columns
) + ') '
text += 'ENGINE = ' + operation.engine + ' AS ' + operation.selectable
operations.execute(text)
| 26.4125 | 74 | 0.665878 |
5f6141397f125896218271edd266c2d6aa087580 | 2,247 | py | Python | main.py | HellishMadonna/winter2020practice | db72558881ad53f66aaba138a4496188e7d7b5c0 | [
"MIT"
] | 51 | 2019-01-19T12:00:13.000Z | 2022-01-22T12:50:25.000Z | main.py | HellishMadonna/winter2020practice | db72558881ad53f66aaba138a4496188e7d7b5c0 | [
"MIT"
] | 2 | 2019-06-06T09:50:17.000Z | 2021-02-03T08:50:47.000Z | main.py | HellishMadonna/winter2020practice | db72558881ad53f66aaba138a4496188e7d7b5c0 | [
"MIT"
] | 14 | 2019-02-05T21:43:02.000Z | 2022-03-31T13:52:05.000Z | import serial, time
import datetime as dt
import numpy as np
import cv2
# function to get Emissivity from MCU
def get_emissivity():
ser.write(serial.to_bytes([0xA5,0x55,0x01,0xFB]))
read = ser.read(4)
return read[2]/100
# function to get temperatures from MCU (Celsius degrees x 100)
def get_temp_array(d):
# getting ambient temperature
T_a = (int(d[1540]) + int(d[1541])*256)/100
# getting raw array of pixels temperature
raw_data = d[4:1540]
T_array = np.frombuffer(raw_data, dtype=np.int16)
return T_a, T_array
# function to convert temperatures to pixels on image
def td_to_image(f):
norm = np.uint8((f/100 - Tmin)*255/(Tmax-Tmin))
norm.shape = (24,32)
return norm
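# Worked example of the conversion above (using the Tmin=20, Tmax=40 defaults
# set in the main cycle below): a raw reading of 3000, i.e. 30.00 deg C, maps to
# uint8((3000/100 - 20) * 255 / (40 - 20)) = 127, the midpoint of the colour map.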
########################### Main cycle #################################
# Color map range
Tmax = 40
Tmin = 20
print ('Configuring Serial port')
ser = serial.Serial ('/dev/serial0')
ser.baudrate = 115200
# set frequency of module to 4 Hz
ser.write(serial.to_bytes([0xA5,0x25,0x01,0xCB]))
time.sleep(0.1)
# Starting automatic data collection
ser.write(serial.to_bytes([0xA5,0x35,0x02,0xDC]))
t0 = time.time()
try:
while True:
# waiting for data frame
data = ser.read(1544)
# The data is ready, let's handle it!
Ta, temp_array = get_temp_array(data)
ta_img = td_to_image(temp_array)
# Image processing
img = cv2.applyColorMap(ta_img, cv2.COLORMAP_JET)
img = cv2.resize(img, (320,240), interpolation = cv2.INTER_CUBIC)
img = cv2.flip(img, 1)
text = 'Tmin = {:+.1f} Tmax = {:+.1f} FPS = {:.2f}'.format(temp_array.min()/100, temp_array.max()/100, 1/(time.time() - t0))
cv2.putText(img, text, (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 0), 1)
cv2.imshow('Output', img)
        # if 's' is pressed, save the current frame as a picture
key = cv2.waitKey(1) & 0xFF
if key == ord("s"):
fname = 'pic_' + dt.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.jpg'
cv2.imwrite(fname, img)
print('Saving image ', fname)
t0 = time.time()
except KeyboardInterrupt:
# to terminate the cycle
ser.write(serial.to_bytes([0xA5,0x35,0x01,0xDB]))
ser.close()
cv2.destroyAllWindows()
print(' Stopped')
# just in case
ser.close()
cv2.destroyAllWindows()
| 26.75 | 127 | 0.6413 |
d6249dcb61fb1ac8cf760dfb5009404c17f6bb80 | 113 | py | Python | Code/main.py | vinicioscunha/ValidarIP | dfcc3f0d844649284e900dd40ab2d613fc25034f | [
"MIT"
] | null | null | null | Code/main.py | vinicioscunha/ValidarIP | dfcc3f0d844649284e900dd40ab2d613fc25034f | [
"MIT"
] | null | null | null | Code/main.py | vinicioscunha/ValidarIP | dfcc3f0d844649284e900dd40ab2d613fc25034f | [
"MIT"
] | null | null | null | from endereco_IP import fn_VerifyIP
str_word = input("Enter with an IP address: ")
print(fn_VerifyIP(str_word))
| 22.6 | 46 | 0.787611 |
de55e8d7b28de3261528f6bbb60db223ba02076e | 270 | py | Python | project_name/__main__.py | paulovitorweb/flask-project-template | dfe9c08b75f84fea0f2330545b72df2ea8f36757 | [
"Unlicense"
] | 41 | 2021-08-17T13:48:51.000Z | 2022-03-17T19:57:54.000Z | project_name/__main__.py | paulovitorweb/flask-project-template | dfe9c08b75f84fea0f2330545b72df2ea8f36757 | [
"Unlicense"
] | 1 | 2021-11-02T17:18:08.000Z | 2021-11-02T17:18:08.000Z | project_name/__main__.py | paulovitorweb/flask-project-template | dfe9c08b75f84fea0f2330545b72df2ea8f36757 | [
"Unlicense"
] | 10 | 2021-09-11T15:36:42.000Z | 2022-03-28T21:53:11.000Z | import click
from flask.cli import FlaskGroup
from . import create_app_wsgi
@click.group(cls=FlaskGroup, create_app=create_app_wsgi)
def main():
"""Management script for the project_name application."""
if __name__ == "__main__": # pragma: no cover
main()
| 19.285714 | 61 | 0.737037 |
0c7dc4e728eca11bec37fa09c44d44bf03a3e2cd | 4,340 | py | Python | benchmarks/benchmarks/bench_shape_base.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 20,453 | 2015-01-02T09:00:47.000Z | 2022-03-31T23:35:56.000Z | benchmarks/benchmarks/bench_shape_base.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 14,862 | 2015-01-01T01:28:34.000Z | 2022-03-31T23:48:52.000Z | benchmarks/benchmarks/bench_shape_base.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 9,362 | 2015-01-01T15:49:43.000Z | 2022-03-31T21:26:51.000Z | from .common import Benchmark
import numpy as np
class Block(Benchmark):
params = [1, 10, 100]
param_names = ['size']
def setup(self, n):
self.a_2d = np.ones((2 * n, 2 * n))
self.b_1d = np.ones(2 * n)
self.b_2d = 2 * self.a_2d
self.a = np.ones(3 * n)
self.b = np.ones(3 * n)
self.one_2d = np.ones((1 * n, 3 * n))
self.two_2d = np.ones((1 * n, 3 * n))
self.three_2d = np.ones((1 * n, 6 * n))
self.four_1d = np.ones(6 * n)
self.five_0d = np.ones(1 * n)
self.six_1d = np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero_2d = np.full((2 * n, 6 * n), 0)
self.one = np.ones(3 * n)
self.two = 2 * np.ones((3, 3 * n))
self.three = 3 * np.ones(3 * n)
self.four = 4 * np.ones(3 * n)
self.five = 5 * np.ones(1 * n)
self.six = 6 * np.ones(5 * n)
# avoid np.zeros's lazy allocation that might cause
# page faults during benchmark
self.zero = np.full((2 * n, 6 * n), 0)
def time_block_simple_row_wise(self, n):
np.block([self.a_2d, self.b_2d])
def time_block_simple_column_wise(self, n):
np.block([[self.a_2d], [self.b_2d]])
def time_block_complicated(self, n):
np.block([[self.one_2d, self.two_2d],
[self.three_2d],
[self.four_1d],
[self.five_0d, self.six_1d],
[self.zero_2d]])
def time_nested(self, n):
np.block([
[
np.block([
[self.one],
[self.three],
[self.four]
]),
self.two
],
[self.five, self.six],
[self.zero]
])
def time_no_lists(self, n):
np.block(1)
np.block(np.eye(3 * n))
class Block2D(Benchmark):
params = [[(16, 16), (32, 32), (64, 64), (128, 128), (256, 256), (512, 512), (1024, 1024)],
['uint8', 'uint16', 'uint32', 'uint64'],
[(2, 2), (4, 4)]]
param_names = ['shape', 'dtype', 'n_chunks']
def setup(self, shape, dtype, n_chunks):
self.block_list = [
[np.full(shape=[s//n_chunk for s, n_chunk in zip(shape, n_chunks)],
fill_value=1, dtype=dtype) for _ in range(n_chunks[1])]
for _ in range(n_chunks[0])
]
def time_block2d(self, shape, dtype, n_chunks):
np.block(self.block_list)
class Block3D(Benchmark):
"""This benchmark concatenates an array of size ``(5n)^3``"""
# Having copy as a `mode` of the block3D
# allows us to directly compare the benchmark of block
# to that of a direct memory copy into new buffers with
# the ASV framework.
# block and copy will be plotted on the same graph
# as opposed to being displayed as separate benchmarks
params = [[1, 10, 100],
['block', 'copy']]
param_names = ['n', 'mode']
def setup(self, n, mode):
# Slow setup method: hence separated from the others above
self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
self.block = [
[
[self.a000, self.a001],
[self.a010, self.a011],
],
[
[self.a100, self.a101],
[self.a110, self.a111],
]
]
self.arr_list = [a
for two_d in self.block
for one_d in two_d
for a in one_d]
def time_3d(self, n, mode):
if mode == 'block':
np.block(self.block)
else: # mode == 'copy'
[arr.copy() for arr in self.arr_list]
# Retain old benchmark name for backward compat
time_3d.benchmark_name = "bench_shape_base.Block.time_3d"
| 31.678832 | 95 | 0.491244 |
7582859c724ed1887125fdbc750a4490c26674da | 355 | py | Python | login/setup.py | Yurzs/L2py | 49d4f35920e0ae1c13cb494f8ae6d9c4a037c583 | [
"MIT"
] | 7 | 2020-09-01T21:52:37.000Z | 2022-02-25T16:00:08.000Z | login/setup.py | Yurzs/L2py | 49d4f35920e0ae1c13cb494f8ae6d9c4a037c583 | [
"MIT"
] | 4 | 2021-09-10T22:15:09.000Z | 2022-03-25T22:17:43.000Z | login/setup.py | Yurzs/L2py | 49d4f35920e0ae1c13cb494f8ae6d9c4a037c583 | [
"MIT"
] | 9 | 2020-09-01T21:53:39.000Z | 2022-03-30T12:03:04.000Z | from setuptools import find_packages, setup
VERSION = "0.1"
def read_requirements():
req = []
with open("requirements.txt") as requirements:
for module in requirements.readlines():
req.append(module.strip())
return req
setup(
name="login",
packages=find_packages(),
install_requires=read_requirements(),
)
| 18.684211 | 50 | 0.664789 |
8a8377f20a1b5c9a732c35b7ba82b9ec008c96aa | 7,050 | py | Python | tools/tcpconnlat.py | figozhang/bcc | 0abd93e5f908a930f87007937f4028aecc13975b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-04-30T05:05:53.000Z | 2021-04-30T05:05:53.000Z | tools/tcpconnlat.py | Agares/bcc | 0abd93e5f908a930f87007937f4028aecc13975b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tools/tcpconnlat.py | Agares/bcc | 0abd93e5f908a930f87007937f4028aecc13975b | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2018-10-30T07:57:13.000Z | 2021-06-15T02:29:44.000Z | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpconnlat Trace TCP active connection latency (connect).
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpconnlat [-h] [-t] [-p PID]
#
# This uses dynamic tracing of kernel functions, and will need to be updated
# to match kernel changes.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 19-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
import ctypes as ct
# arguments
examples = """examples:
./tcpconnlat # trace all TCP connect()s
./tcpconnlat -t # include timestamps
./tcpconnlat -p 181 # only trace PID 181
"""
parser = argparse.ArgumentParser(
description="Trace TCP connects and show connection latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <bcc/proto.h>
struct info_t {
u64 ts;
u64 pid;
char task[TASK_COMM_LEN];
};
BPF_HASH(start, struct sock *, struct info_t);
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
// XXX: switch some to u32's when supported
u64 ts_us;
u64 pid;
u64 saddr;
u64 daddr;
u64 ip;
u64 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u64 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u64 dport;
u64 delta_us;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
int trace_connect(struct pt_regs *ctx, struct sock *sk)
{
u32 pid = bpf_get_current_pid_tgid();
FILTER
struct info_t info = {.pid = pid};
info.ts = bpf_ktime_get_ns();
bpf_get_current_comm(&info.task, sizeof(info.task));
start.update(&sk, &info);
return 0;
};
// See tcp_v4_do_rcv() and tcp_v6_do_rcv(). So TCP_ESTABLISHED and TCP_LISTEN
// are fast path and processed elsewhere, and leftovers are processed by
// tcp_rcv_state_process(). We can trace this for handshake completion.
// This should all be switched to static tracepoints when available.
int trace_tcp_rcv_state_process(struct pt_regs *ctx, struct sock *sk)
{
// will be in TCP_SYN_SENT for handshake
if (sk->__sk_common.skc_state != TCP_SYN_SENT)
return 0;
// check start and calculate delta
struct info_t *infop = start.lookup(&sk);
if (infop == 0) {
return 0; // missed entry or filtered
}
u64 ts = infop->ts;
u64 now = bpf_ktime_get_ns();
// pull in details
u16 family = 0, dport = 0;
struct sock *skp = NULL;
bpf_probe_read(&skp, sizeof(skp), &sk);
bpf_probe_read(&family, sizeof(family), &skp->__sk_common.skc_family);
bpf_probe_read(&dport, sizeof(dport), &skp->__sk_common.skc_dport);
// emit to appropriate data path
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = infop->pid, .ip = 4};
data4.ts_us = now / 1000;
bpf_probe_read(&data4.saddr, sizeof(u32),
&skp->__sk_common.skc_rcv_saddr);
bpf_probe_read(&data4.daddr, sizeof(u32),
&skp->__sk_common.skc_daddr);
data4.dport = ntohs(dport);
data4.delta_us = (now - ts) / 1000;
__builtin_memcpy(&data4.task, infop->task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else /* AF_INET6 */ {
struct ipv6_data_t data6 = {.pid = infop->pid, .ip = 6};
data6.ts_us = now / 1000;
bpf_probe_read(&data6.saddr, sizeof(data6.saddr),
&skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read(&data6.daddr, sizeof(data6.daddr),
&skp->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.dport = ntohs(dport);
data6.delta_us = (now - ts) / 1000;
__builtin_memcpy(&data6.task, infop->task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
start.delete(&sk);
return 0;
}
"""
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('FILTER',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('FILTER', '')
if debug:
print(bpf_text)
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect")
b.attach_kprobe(event="tcp_rcv_state_process",
fn_name="trace_tcp_rcv_state_process")
# event data
TASK_COMM_LEN = 16 # linux/sched.h
class Data_ipv4(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("pid", ct.c_ulonglong),
("saddr", ct.c_ulonglong),
("daddr", ct.c_ulonglong),
("ip", ct.c_ulonglong),
("dport", ct.c_ulonglong),
("delta_us", ct.c_ulonglong),
("task", ct.c_char * TASK_COMM_LEN)
]
class Data_ipv6(ct.Structure):
_fields_ = [
("ts_us", ct.c_ulonglong),
("pid", ct.c_ulonglong),
("saddr", (ct.c_ulonglong * 2)),
("daddr", (ct.c_ulonglong * 2)),
("ip", ct.c_ulonglong),
("dport", ct.c_ulonglong),
("delta_us", ct.c_ulonglong),
("task", ct.c_char * TASK_COMM_LEN)
]
# process event
start_ts = 0
def print_ipv4_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv4)).contents
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
print("%-6d %-12.12s %-2d %-16s %-16s %-5d %.2f" % (event.pid, event.task,
event.ip, inet_ntop(AF_INET, pack("I", event.saddr)),
inet_ntop(AF_INET, pack("I", event.daddr)), event.dport,
float(event.delta_us) / 1000))
def print_ipv6_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data_ipv6)).contents
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
print("%-6d %-12.12s %-2d %-16s %-16s %-5d %.2f" % (event.pid, event.task,
event.ip, inet_ntop(AF_INET6, event.saddr),
inet_ntop(AF_INET6, event.daddr), event.dport,
float(event.delta_us) / 1000))
# header
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
print("%-6s %-12s %-2s %-16s %-16s %-5s %s" % ("PID", "COMM", "IP", "SADDR",
"DADDR", "DPORT", "LAT(ms)"))
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
b.kprobe_poll()
| 30.652174 | 78 | 0.642837 |
09fd29039a704affdf58bf8bf3f74687f3c01eb6 | 2,392 | py | Python | Tests/varLib/builder_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Tests/varLib/builder_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Tests/varLib/builder_test.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | from __future__ import print_function, division, absolute_import
from fontTools.varLib.builder import buildVarData
import pytest
@pytest.mark.parametrize("region_indices, items, expected_num_shorts", [
([], [], 0),
([0], [[1]], 0),
([0], [[128]], 1),
([0, 1, 2], [[128, 1, 2], [3, -129, 5], [6, 7, 8]], 2),
([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, -129]], 3),
], ids=[
"0_regions_0_deltas",
"1_region_1_uint8",
"1_region_1_short",
"3_regions_2_shorts_ordered",
"3_regions_2_shorts_unordered",
])
def test_buildVarData_no_optimize(region_indices, items, expected_num_shorts):
data = buildVarData(region_indices, items, optimize=False)
assert data.ItemCount == len(items)
assert data.NumShorts == expected_num_shorts
assert data.VarRegionCount == len(region_indices)
assert data.VarRegionIndex == region_indices
assert data.Item == items
@pytest.mark.parametrize([
"region_indices", "items", "expected_num_shorts",
"expected_regions", "expected_items"
], [
([0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 0,
[0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]]),
([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, 8]], 1,
[1, 0, 2], [[128, 0, 2], [4, 3, 5], [7, 6, 8]]),
([0, 1, 2], [[0, 1, 128], [3, 4, 5], [6, -129, 8]], 2,
[1, 2, 0], [[1, 128, 0], [4, 5, 3], [-129, 8, 6]]),
([0, 1, 2], [[128, 1, -129], [3, 4, 5], [6, 7, 8]], 2,
[0, 2, 1], [[128, -129, 1], [3, 5, 4], [6, 8, 7]]),
([0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]], 3,
[0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]]),
([0, 1, 2], [[0, 128, 2], [0, 4, 5], [0, 7, 8]], 1,
[1, 2], [[128, 2], [4, 5], [7, 8]]),
], ids=[
"0/3_shorts_no_reorder",
"1/3_shorts_reorder",
"2/3_shorts_reorder",
"2/3_shorts_same_row_reorder",
"3/3_shorts_no_reorder",
"1/3_shorts_1/3_zeroes",
])
def test_buildVarData_optimize(
region_indices, items, expected_num_shorts, expected_regions,
expected_items):
data = buildVarData(region_indices, items, optimize=True)
assert data.ItemCount == len(items)
assert data.NumShorts == expected_num_shorts
assert data.VarRegionCount == len(expected_regions)
assert data.VarRegionIndex == expected_regions
assert data.Item == expected_items
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
| 35.176471 | 78 | 0.571488 |
0f98da06c4756c9e11af6ffa4b778e4dc354db6b | 4,272 | py | Python | scraper/get_info/query_info.py | NicolasAbroad/wnscraper | 87d5aa8e3a26aa0846a289d378848e1eb1d13304 | [
"Apache-2.0"
] | null | null | null | scraper/get_info/query_info.py | NicolasAbroad/wnscraper | 87d5aa8e3a26aa0846a289d378848e1eb1d13304 | [
"Apache-2.0"
] | null | null | null | scraper/get_info/query_info.py | NicolasAbroad/wnscraper | 87d5aa8e3a26aa0846a289d378848e1eb1d13304 | [
"Apache-2.0"
] | null | null | null | # query_info.py - queries data from website
import requests
import bs4
from . import parse_info
def get_page_html(url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0'}
res = requests.get(url, headers=headers)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.content, "html.parser")
return soup
# Used to get the series' name and author
def get_string(soup, html_id):
for name in soup.find_all(html_id[0], class_=html_id[1]):
return name.text
def get_index(soup, html_index):
index = soup.find(html_index[0], class_=html_index[1])
return index
def get_index_children(index):
index_children = index.children
return index_children
def volumes_exist(index_children, vol_and_chap):
for index_child in index_children:
child = str(index_child)
if vol_and_chap[0] in child:
return True
return False
# Use if volumes exist
def scrape_chapter_info_by_volume(index_children, name_id, target_info_id):
chapter_info_dict = {}
volume = ''
volume_id = name_id[0]
chap_id = name_id[1]
for index_child in index_children:
index_child_str = str(index_child)
if volume_id in index_child_str:
volume = index_child.string
volume = parse_info.format_volume_name_to_html(volume)
chapter_info_dict.setdefault(volume, []) # index_child_str = volume name
elif chap_id in index_child_str:
ch_name = index_child.find(target_info_id[0])
chapter_info_dict[volume] += [(ch_name.text, ch_name.attrs[target_info_id[1]])]
return chapter_info_dict
"""
def scrape_chapter_info_by_volume(index_children, name_id, target_info_id):
combined_dict = {}
chapter_info_dict = {}
volume_names = {}
volume = ''
volume_nb = 1
volume_id = name_id[0]
chap_id = name_id[1]
for index_child in index_children:
index_child_str = str(index_child)
if volume_id in index_child_str:
# Add volume name as key to chapter info dict
volume_name = index_child.string
volume_name = parse_info.format_volume_name_to_html(volume_name)
chapter_info_dict.setdefault(volume_name, []) # index_child_str = volume name
# Add volume number as key to volume names // Assign volume name to key
volume_nb_formatted = '{0:02d}'.format(volume_nb)
volume_names[volume_nb_formatted] = volume_name
elif chap_id in index_child_str:
ch_name = index_child.find(target_info_id[0])
chapter_info_dict[volume] += [(ch_name.text, ch_name.attrs[target_info_id[1]])]
combined_dict['info'] = chapter_info_dict
combined_dict['names'] = volume_names
return combined_dict
"""
# Use if volumes don't exist
def get_dict_key(series_name):
chapter_info_dict = {}
chapter_info_dict.setdefault(series_name, [])
return chapter_info_dict
# Use if volumes don't exist
def scrape_all_chapter_info(chapter_info_dict, index_children, name_id, series_name, target_info_id):
def inner(chapter_info_dict, index_children, name_id, series_name, target_info_id):
local_chapter_info_dict = chapter_info_dict
volume = series_name
for index_child in index_children:
index_child_str = str(index_child)
if name_id[1] in index_child_str:
ch_name = index_child.find(target_info_id[0])
local_chapter_info_dict[volume] += [(ch_name.text, ch_name.attrs[target_info_id[1]])]
return local_chapter_info_dict
return inner(chapter_info_dict, index_children, name_id, series_name, target_info_id)
def volume_info(source, source_info, toc_html):
    index = get_index(toc_html, source_info['html_index'])
    index_children = get_index_children(index)
    if volumes_exist(index_children, source_info['vol_and_chap']):
        volume_info = scrape_chapter_info_by_volume(
            index_children, source_info['vol_and_chap'], source_info['chap_name_url'])
    else:
        # NOTE: series_name must be defined in the enclosing/module scope for this
        # branch; it is used as the single dictionary key when no volumes exist.
        volume_info = get_dict_key(series_name)
        volume_info = scrape_all_chapter_info(volume_info, index_children,
                                              source_info['vol_and_chap'],
                                              series_name, source_info['chap_name_url'])
    return volume_info
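# Illustrative (hypothetical) shape of the `source_info` mapping consumed by
# volume_info(); the keys match the lookups above, but the selector values are
# made up and depend on the site being scraped:
#
# source_info = {
#     'html_index': ('div', 'index_box'),             # (tag, class) of the table of contents
#     'vol_and_chap': ('chapter_title', 'subtitle'),  # substrings marking volume / chapter rows
#     'chap_name_url': ('a', 'href'),                 # tag holding the chapter name and its URL attribute
# }
# toc_html = get_page_html('https://example.com/series/12345/')
# volumes = volume_info(None, source_info, toc_html)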
| 35.6 | 146 | 0.702481 |
9197d779c07bee16121d3f0db4f009223772b823 | 6,356 | py | Python | opennode/cli/helpers.py | opennode/opennode-tui | 1f92da5690a42ad71b1030c72db4b5a1158ab052 | [
"Apache-1.1"
] | 4 | 2015-05-21T20:21:26.000Z | 2020-12-18T06:50:06.000Z | opennode/cli/helpers.py | opennode/opennode-tui | 1f92da5690a42ad71b1030c72db4b5a1158ab052 | [
"Apache-1.1"
] | null | null | null | opennode/cli/helpers.py | opennode/opennode-tui | 1f92da5690a42ad71b1030c72db4b5a1158ab052 | [
"Apache-1.1"
] | 1 | 2016-02-08T00:43:44.000Z | 2016-02-08T00:43:44.000Z | import types
import re
from opennode.cli import actions
from snack import Textbox, Entry, Button, Listbox, Grid, Scale, Form
from snack import ButtonBar, TextboxReflowed, CheckboxTree, GridFormHelp
from snack import ButtonChoiceWindow, ListboxChoiceWindow
class DownloadMonitor():
def __init__(self, screen, title, item_count=0):
self.screen = screen
self.title = title
self.current_item = 0
self.item_count = item_count
g = Grid(1, 2)
self.fnm_label = Textbox(40, 2, 'Downloading...', 0, 0)
self.scale = Scale(40, 100)
self.scale.set(0)
g.setField(self.fnm_label, 0, 1)
g.setField(self.scale, 0, 0)
self.screen.gridWrappedWindow(g, title)
self.f = Form()
self.f.add(self.scale)
self.f.add(self.fnm_label)
def update_url(self, fnm):
self.current_item = self.current_item + 1
self.fnm_label.setText("(%s/%s): %s" % (self.current_item,
self.item_count, fnm))
def download_hook(self, count, blockSize, totalSize):
donep = int(min(100, float(blockSize * count) / totalSize * 100))
self.scale.set(donep)
self.f.draw()
self.screen.refresh()
def create_select_checkbox(screen, title, text, items, buttons=(('Cancel', 'cancel', 'F12'), 'Ok'),
width=40, scroll=0, height=-1, help=None):
"""Helper class for displaying a windows with a checkbox list.
On exit, list of selected items is returned"""
if (height == -1):
height = len(items)
if len(items) > height:
scroll = 1
bb = ButtonBar(screen, buttons)
t = TextboxReflowed(width, text)
cb = CheckboxTree(height, scroll=scroll)
count = 0
for count, item in enumerate(items):
if isinstance(item, types.TupleType):
(text, key, selected) = item
else:
text = item
key = count
selected = 0
cb.append(text, key, selected)
g = GridFormHelp(screen, title, help, 1, 3)
g.add(t, 0, 0)
g.add(cb, 0, 1, padding=(0, 1, 0, 1))
g.add(bb, 0, 2, growx=1)
rc = g.runOnce()
return (bb.buttonPressed(rc), cb.getSelection())
## XXX: refactor into a forms.GenericTemplateEditForm derivative
def display_create_template(screen, title, vm_type, templates, help=None):
"""Helper function for displaying a form for creating a new VM template"""
label_base = Textbox(40, 2,
'Select %s VM to be used as a basis\n(only stopped VMs are allowed)' %
vm_type, 0, 0)
base_tmpl = Listbox(7, 1, 0, 30, 1)
for vm in templates.keys():
base_tmpl.append(templates[vm], vm)
label_newname = Textbox(40, 2, 'Name of the template to be created', 0, 0)
spacer1 = Textbox(1, 1, "", 0, 0)
spacer2 = Textbox(1, 1, "", 0, 0)
entry_newname = Entry(30, 'template_name')
bb = ButtonBar(screen, ('Create new template', ('Back to menu', 'back')))
form = GridFormHelp(screen, title, help, 1, 7)
form.add(label_base, 0, 0)
form.add(base_tmpl, 0, 1)
form.add(spacer1, 0, 2)
form.add(label_newname, 0, 3)
form.add(entry_newname, 0, 4)
form.add(spacer2, 0, 5)
form.add(bb, 0, 6)
form_result = form.runOnce()
tmpl_name = entry_newname.value()
# remove whitespaces from the template name
tmpl_name = re.sub(r'\s', '', tmpl_name)
return (bb.buttonPressed(form_result), str(base_tmpl.current()), tmpl_name)
def display_selection(screen, title, list_of_items, subtitle, default=None,
buttons=[('Back', 'back', 'F12'), 'Ok']):
"""Display a list of items, return selected one or None, if nothing was selected"""
#if len(list_of_items) == 1:
# shortcut if there's only one item for choosing
# return list_of_items[0]
if len(list_of_items) > 0:
if not isinstance(list_of_items[0], types.TupleType):
# if we have a list of strings, we'd prefer to get these strings as the selection result
list_of_items = zip(list_of_items, list_of_items)
height = 10
scroll = 1 if len(list_of_items) > height else 0
action, selection = ListboxChoiceWindow(screen, title, subtitle, list_of_items,
buttons, scroll=scroll, height=height, default=default)
if buttons == [('Back', 'back', 'F12'), 'Ok'] or buttons == [('Menu', 'back', 'F12'), 'Ok']:
if action != 'back':
return selection
else:
return (action, selection) # customized buttons
else:
ButtonChoiceWindow(screen, title, 'Sorry, there are no items to choose from.', ['Back'])
def display_checkbox_selection(screen, title, list_of_items, subtitle):
if len(list_of_items) > 0:
action, selection = create_select_checkbox(screen, title, subtitle,
list_of_items,
['Ok', 'Back'],
height=10)
if action != 'back':
return selection
else:
ButtonChoiceWindow(screen, title,
'Sorry, there are no items to choose from',
['Back'])
def display_vm_type_select(screen, title):
"""Display selection menu for the template type"""
types = [actions.vm.backend_hname(t) for t in actions.vm.backends()]
return display_selection(screen, title, types, 'Select a VM type to use:',
buttons=[('Menu', 'back', 'F12'), 'Ok'])
def display_yesno(screen, title, question_text="Yes / No", width=50, height=2):
"""Display yes/no dialog. Return True on yes and False on no."""
g = GridFormHelp(screen, title, help, 1, 2)
bb = ButtonBar(screen, (('No', 'no', 'F12'), 'Yes'))
g.add(Textbox(width, height, question_text, 0, 0), 0, 0, padding=(0, 1, 0, 1))
g.add(bb, 0, 1)
rc = g.runOnce()
return bb.buttonPressed(rc) == 'yes'
def display_info(screen, title, info_text="Close me, please.", width=50, height=2):
"""Display information message on information screen"""
g = GridFormHelp(screen, title, help, 1, 2)
g.add(Textbox(width, height, info_text, 0, 0), 0, 0, padding=(0, 1, 0, 1))
g.add(Button("OK"), 0, 1)
g.runOnce()
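# Minimal usage sketch (assumes a newt/snack terminal is available; the menu
# items below are illustrative, not real VM types from `actions`):
if __name__ == '__main__':
    from snack import SnackScreen
    demo_screen = SnackScreen()
    try:
        choice = display_selection(demo_screen, 'Demo', ['item a', 'item b'],
                                   'Select an item:')
        display_info(demo_screen, 'Demo', 'You picked: %s' % choice)
    finally:
        demo_screen.finish()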
| 38.756098 | 100 | 0.599748 |
d881de5972206db4d6ec919980ab6201de2d2d6c | 308 | py | Python | day04/day4_b.py | AdamKinnell/AdventOfCode2017 | 28f506a39245198489d3da9535b190e6f98e92d1 | [
"MIT"
] | null | null | null | day04/day4_b.py | AdamKinnell/AdventOfCode2017 | 28f506a39245198489d3da9535b190e6f98e92d1 | [
"MIT"
] | null | null | null | day04/day4_b.py | AdamKinnell/AdventOfCode2017 | 28f506a39245198489d3da9535b190e6f98e92d1 | [
"MIT"
] | null | null | null | def is_valid(phrase):
words = phrase.split()
word_letters = list(map("".join, map(sorted, words)))
return len(set(word_letters)) == len(words)
passphrases = open("day4.txt").readlines()
valid_phrases = list(filter(is_valid, passphrases))
print('Valid Passphrases:', len(valid_phrases)) # = 231
| 30.8 | 57 | 0.698052 |
f13ba21550ab70cad014e6b74fe05ec25854a165 | 5,243 | py | Python | bootcamp/questions/models.py | mohammed00101/newSignet | ddde897f3d01a6d91cf679f37d1eedf7b19944e9 | [
"MIT"
] | null | null | null | bootcamp/questions/models.py | mohammed00101/newSignet | ddde897f3d01a6d91cf679f37d1eedf7b19944e9 | [
"MIT"
] | null | null | null | bootcamp/questions/models.py | mohammed00101/newSignet | ddde897f3d01a6d91cf679f37d1eedf7b19944e9 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
import markdown
from bootcamp.activities.models import Activity
from bootcamp import settings
@python_2_unicode_compatible
class Question(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
title = models.CharField(max_length=255)
description = models.TextField(max_length=2000)
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(auto_now_add=True)
favorites = models.IntegerField(default=0)
has_accepted_answer = models.BooleanField(default=False)
class Meta:
verbose_name = 'Question'
verbose_name_plural = 'Questions'
ordering = ('-update_date',)
def __str__(self):
return self.title
@staticmethod
def get_unanswered():
return Question.objects.filter(has_accepted_answer=False)
@staticmethod
def get_answered():
return Question.objects.filter(has_accepted_answer=True)
def get_answers(self):
return Answer.objects.filter(question=self)
def get_answers_count(self):
return Answer.objects.filter(question=self).count()
def get_accepted_answer(self):
return Answer.objects.get(question=self, is_accepted=True)
def get_description_as_markdown(self):
return markdown.markdown(self.description, safe_mode='escape')
def get_description_preview(self):
if len(self.description) > 255:
return '{0}...'.format(self.description[:255])
else:
return self.description
def get_description_preview_as_markdown(self):
return markdown.markdown(self.get_description_preview(),
safe_mode='escape')
def calculate_favorites(self):
favorites = Activity.objects.filter(activity_type=Activity.FAVORITE,
question=self.pk).count()
self.favorites = favorites
self.save()
return self.favorites
def get_favoriters(self):
favorites = Activity.objects.filter(activity_type=Activity.FAVORITE,
question=self.pk)
favoriters = []
for favorite in favorites:
favoriters.append(favorite.user)
return favoriters
def create_tags(self, tags):
tags = tags.strip()
tag_list = tags.split(' ')
for tag in tag_list:
t, created = Tag.objects.get_or_create(tag=tag.lower(),
question=self)
def get_tags(self):
return Tag.objects.filter(question=self)
@python_2_unicode_compatible
class Answer(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
question = models.ForeignKey(Question)
description = models.TextField(max_length=2000)
create_date = models.DateTimeField(auto_now_add=True)
update_date = models.DateTimeField(null=True, blank=True)
votes = models.IntegerField(default=0)
is_accepted = models.BooleanField(default=False)
class Meta:
verbose_name = 'Answer'
verbose_name_plural = 'Answers'
ordering = ('-is_accepted', '-votes', 'create_date',)
def __str__(self):
return self.description
def accept(self):
answers = Answer.objects.filter(question=self.question)
for answer in answers:
answer.is_accepted = False
answer.save()
self.is_accepted = True
self.save()
self.question.has_accepted_answer = True
self.question.save()
def calculate_votes(self):
up_votes = Activity.objects.filter(activity_type=Activity.UP_VOTE,
answer=self.pk).count()
down_votes = Activity.objects.filter(activity_type=Activity.DOWN_VOTE,
answer=self.pk).count()
self.votes = up_votes - down_votes
self.save()
return self.votes
def get_up_voters(self):
votes = Activity.objects.filter(activity_type=Activity.UP_VOTE,
answer=self.pk)
voters = []
for vote in votes:
voters.append(vote.user)
return voters
def get_down_voters(self):
votes = Activity.objects.filter(activity_type=Activity.DOWN_VOTE,
answer=self.pk)
voters = []
for vote in votes:
voters.append(vote.user)
return voters
def get_description_as_markdown(self):
return markdown.markdown(self.description, safe_mode='escape')
@python_2_unicode_compatible
class Tag(models.Model):
tag = models.CharField(max_length=50)
question = models.ForeignKey(Question)
class Meta:
verbose_name = 'Tag'
verbose_name_plural = 'Tags'
unique_together = (('tag', 'question'),)
index_together = [['tag', 'question'], ]
def __str__(self):
return self.tag
| 33.825806 | 79 | 0.621019 |
a066d2de7da1b02271485e4bb1bf44086f0788c8 | 964 | py | Python | vqe_playground/utils/fonts.py | ulitoo/vqe-playground | 6a11f14e0f6a022688a4f58ea0ff68625022ed7c | [
"Apache-2.0"
] | 19 | 2019-04-12T11:11:39.000Z | 2021-09-16T18:17:59.000Z | vqe_playground/utils/fonts.py | ulitoo/vqe-playground | 6a11f14e0f6a022688a4f58ea0ff68625022ed7c | [
"Apache-2.0"
] | 8 | 2019-04-18T11:12:50.000Z | 2020-12-18T20:33:37.000Z | vqe_playground/utils/fonts.py | ulitoo/vqe-playground | 6a11f14e0f6a022688a4f58ea0ff68625022ed7c | [
"Apache-2.0"
] | 11 | 2019-04-18T10:06:57.000Z | 2021-02-03T11:28:51.000Z | #!/usr/bin/env python
#
# Copyright 2019 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pygame
pygame.font.init()
ARIAL_48 = pygame.font.SysFont('Arial', 44)
ARIAL_36 = pygame.font.SysFont('Arial', 30)
ARIAL_30 = pygame.font.SysFont('Arial', 26)
ARIAL_24 = pygame.font.SysFont('Arial', 20)
ARIAL_22 = pygame.font.SysFont('Arial', 18)
ARIAL_20 = pygame.font.SysFont('Arial', 16)
ARIAL_16 = pygame.font.SysFont('Arial', 12)
| 35.703704 | 74 | 0.746888 |
bee05767e938a2497add74b5ea7986a56aff1c88 | 10,412 | py | Python | app.py | Roshanmahes/Adaptive-Schedule | 9995cf24ea6bf4279ed681c6b0a9dc6a55b634a3 | [
"MIT"
] | null | null | null | app.py | Roshanmahes/Adaptive-Schedule | 9995cf24ea6bf4279ed681c6b0a9dc6a55b634a3 | [
"MIT"
] | null | null | null | app.py | Roshanmahes/Adaptive-Schedule | 9995cf24ea6bf4279ed681c6b0a9dc6a55b634a3 | [
"MIT"
] | 2 | 2021-06-03T10:32:49.000Z | 2022-03-12T03:55:12.000Z | # imports
from logging import PlaceHolder
import dash
import dash_table as dt
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from markdown_helper import markdown_popup
import numpy as np
import pandas as pd
from adaptive_scheduling import Transient_IA
import plotly.graph_objs as go
# import plotly.io as pio
# pio.templates.default = 'plotly_white'
# initial table & figure
df = pd.DataFrame({r'Client (\(i\))': [''],
r'Interarrival time (\(x_i\))': ['Computing appointment schedule...'],
r'Arrival time (\(t_i\))': ['']})
df = df.to_dict('records')
no_fig = {
'layout': {
'xaxis': {'visible': False},
'yaxis': {'visible': False},
'paper_bgcolor': 'rgba(0,0,0,0)',
'plot_bgcolor': 'rgba(0,0,0,0)'
}
}
columns = [{'name': [f'Appointment Schedule', k], 'id': k} for k in df[0].keys()]
# main app
# app = dash.Dash(__name__, external_scripts=['https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/MathJax.js?config=TeX-MML-AM_CHTML'])
app = dash.Dash(__name__, external_scripts=['https://cdn.jsdelivr.net/npm/mathjax@2.7.9/MathJax.js?config=TeX-MML-AM_CHTML'])
# app = dash.Dash(__name__, external_scripts=['https://cdn.jsdelivr.net/npm/mathjax@3.2.0/es5/node-main.min.js?config=TeX-MML-AM_CHTML'])
app.title = 'Adaptive Schedule'
server = app.server
def app_layout():
app_layout = html.Div(id='main',children=[
dcc.Interval(id='interval-updating-graphs', interval=1000, n_intervals=0),
html.Div(id='top-bar'),
html.Div(
className='container',
children=[
html.Div(
id='left-side-column',
className='eight columns',
children=[
html.H4('Adaptive Schedule'),
html.P(
['This webapp solves the minimization problem' +
r'$$\min_{t_1,\dots,t_n}\omega \sum_{i=1}^{n}\mathbb{E}I_i + (1 - \omega)\sum_{i=1}^{n}\mathbb{E}W_i,$$' +
r'where \(I_i\) and \(W_i\) are the idle time and waiting time associated to client \(i\), respectively. ' +
r'The sequence of arrival epochs \(t_1,\dots,t_n\) is called the schedule. ' +
r'By entering the state information \((k, u)\), ' +
'this application can be used to generate adaptive schedules. ',
'Click ', html.A('here', id='learn-more-button', n_clicks=0), ' to learn more.']
),
html.P('Please fill in the parameters below.'),
html.Table(
id='my_table',
children=
# Header
[html.Tr([html.Td(''), html.Th('Parameter'), html.Th('Value'), html.Th('Range'), html.Th('Explanation')])] +
# Body
# [html.Tr([html.Td('test', style={'text-align': 'center'})])] +
[html.Tr([html.Th('Schedule Characteristics'),
html.Td(r'\(\omega\)'),
dcc.Input(id='omega', min=0, max=1, type='number', placeholder="e.g. '0.5'"),
html.Td(r'\((0,1)\)'),
html.Td('idle : waiting time')])] +
[html.Tr([html.Td(''),
html.Td(r'\(n\)'),
dcc.Input(id='n', min=1, max=20, step=1, type='number', placeholder="e.g. '4'"),
html.Td(r'\(\mathbb{N}_{\leq 20}\)'),
html.Td('#clients to serve')])] +
[html.Tr([html.Th('Patient Characteristics'),
html.Td(r'\(\mathbb{E}B_i \)'),
html.Div(dcc.Input(id='mean', type='text', placeholder="e.g. '1' or '(1,1,1,1)'")), ### TODO: eval, list
html.Td(r'\([0,\infty)^n\)'),
html.Td('mean(s)')])] +
[html.Tr([html.Td(''),
html.Td(r'\(\mathbb{S}(B_i)\)'),
html.Div(dcc.Input(id='SCV', type='text', placeholder="e.g. '(0.8,1.1,0.9,1.0)'")), ### TODO: eval, list
html.Td(r'\([0.2,2]^n\)'),
html.Td('SCV(s)')])] +
[html.Tr([html.Th('State Information'),
html.Td(r'\(k\)'),
dcc.Input(id='wis', min=0, max=5, step=1, type='number', placeholder="optional, e.g. '2'"), ### TODO: wis should be k!!!
html.Td(r'\(\mathbb{N}_{\leq 5}\)'), ### TODO: optional -> empty == 0
html.Td('#clients in system')])] +
[html.Tr([html.Td(''),
html.Td(r'\(u\)'),
dcc.Input(id='u', min=0, type='number', placeholder="optional, e.g. '0.33'"), ### TODO: optional -> empty == 0
html.Td(r'\([0,\infty)\)'),
html.Td('elapsed service time')])] +
[html.Tr([html.Th('Optional Constraints'),
html.Td(r'\(k\)'),
dcc.Input(id='wis2', min=0, max=5, step=1, type='number', placeholder="optional, e.g. '2'"), ### TODO: wis should be k!!!
html.Td(r'\([0,\infty)\times \dots\times [0,\infty)\)'), ### TODO: optional -> empty == 0
html.Td('fixed arrivals')])] +
[html.Tr([html.Td(''),
html.Td(r'\(u\)'),
dcc.Input(id='u2', min=0, type='number', placeholder="optional, e.g. '0.33'"), ### TODO: optional -> empty == 0
html.Td(r'\([0,\infty)\)'),
html.Td('first arrival moment')])], style={'width': '100%'}
),
html.Button(id='submit-button', n_clicks=0, children='Compute Appointment Schedule', style={'font-style': 'italic'}),
]
),
html.Div(
id='right-side-column',
className='dynamic schedule',
children=[
html.Div(
dt.DataTable(
id='schedule_df',
columns=columns,
data=df,
merge_duplicate_headers=True,
style_header={'textAlign': 'center', 'backgroundColor': '#f9f9f9', 'fontWeight': 'bold'},
style_cell={'textAlign': 'center'},
style_data_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': '#f9f9f9'
},
{
'if': {'state': 'selected'},
'backgroundColor': '#dce9f9',
'border': '1px solid #242582',
}
],
),
),
html.Div([
dcc.Graph(
id='graph_df',
figure = no_fig,
config={'displayModeBar': False},
)], className='graphic'),
],
),
],
),
markdown_popup(),
])
return app_layout
# learn more popup
@app.callback(
Output('markdown', 'style'),
[Input('learn-more-button', 'n_clicks'), Input('markdown_close', 'n_clicks')],
)
def update_click_output(button_click, close_click):
ctx = dash.callback_context
prop_id = ""
if ctx.triggered:
prop_id = ctx.triggered[0]['prop_id'].split(".")[0]
if prop_id == 'learn-more-button':
return {'display': 'block'}
else:
return {'display': 'none'}
# schedule & graph
@app.callback(
[Output('schedule_df', 'columns'), Output('schedule_df', 'data'), Output('graph_df', 'figure')],
[Input('submit-button', 'n_clicks')],
[State('mean', 'value'), State('SCV', 'value'), State('omega', 'value'),
State('n', 'value'), State('wis', 'value'), State('u', 'value')],
)
def updateTable(n_clicks, mean, SCV, omega, n, wis, u):
mean = eval(mean)
SCV = eval(SCV)
N = n + wis
tol = None if N < 15 else 1e-4
u = u / mean
if not u and not wis:
N = N - 1
x, y = Transient_IA(SCV, u, omega, N, [], wis, tol)
x = np.pad(x, (1,0))
else:
x, y = Transient_IA(SCV, u, omega, N, [], wis, tol)
x = x * mean
df = pd.DataFrame({r'Client (\(i\))': list(np.arange(1,len(x)+1)),
r'Interarrival time (\(x_i\))': [f'{np.round(i,4):.4f}' for i in x],
r'Arrival time (\(t_i\))': [f'{np.round(i,4):.4f}' for i in np.cumsum(x)]})
figure = go.Figure(data=[go.Scatter(x=df.iloc[:,0], y=x, marker={'color': '#242582'})],
layout=go.Layout(
title=go.layout.Title(text=r'$\text{Optimal interarrival times } (x_i)$', x=0.5, xanchor='center'), # Plotly 4
# title=r'$\text{Optimal interarrival times } (x_i)$', # Plotly 2
xaxis={'title': r'$\text{Client } (i)$', 'tick0': 1, 'dtick': 1, 'range': [0.7,len(x) + 0.3]},
yaxis={'title': r'$\text{Interarrival time } (x_i)$'},
paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)'))
columns = [{'name': [f'Appointment Schedule (Cost: {y * mean:.4f})', k], 'id': k} for k in df.columns]
return columns, df.to_dict('records'), figure
app.layout = app_layout
if __name__ == '__main__':
app.run_server()
| 47.327273 | 153 | 0.441126 |
82a2727a34d0bd4125c878228ae371f8b88abf5a | 1,277 | py | Python | Server.py | Yuwin24/Music-Sharing-App-Stage-3 | 00667d53a4ec7dd9285c993b8533744942cddd36 | [
"MIT"
] | null | null | null | Server.py | Yuwin24/Music-Sharing-App-Stage-3 | 00667d53a4ec7dd9285c993b8533744942cddd36 | [
"MIT"
] | null | null | null | Server.py | Yuwin24/Music-Sharing-App-Stage-3 | 00667d53a4ec7dd9285c993b8533744942cddd36 | [
"MIT"
] | null | null | null | import socket
from threading import Thread
import time
import os
IP_ADDRESS = '127.0.0.1'
PORT = 8050
SERVER = None
BUFFER_SIZE = 4096
clients = {}
is_dir_exists = os.path.isdir('shared_files')
print(is_dir_exists)
if(not is_dir_exists):
os.makedirs('shared_files')
def acceptConnections():
global SERVER
global clients
while True:
client, addr = SERVER.accept()
client_name = client.recv(4096).decode().lower()
clients[client_name] = {
"client": client,
"address": addr,
"connected_with": "",
"file_name": "",
"file_size": 4096
}
print(f"Connection established with {client_name} : {addr}")
thread = Thread(target=handleClient, args=(client, client_name,))
thread.start()
def setup():
print("\n\t\t\t\t\t\tIP MESSENGER\n")
global PORT
global IP_ADDRESS
global SERVER
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.bind((IP_ADDRESS, PORT))
SERVER.listen(100)
print("\t\t\t\tSERVER IS WAITING FOR INCOMMING CONNECTIONS...")
print("\n")
acceptConnections()
setup_thread = Thread(target=setup)
setup_thread.start()
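# NOTE: acceptConnections() starts a thread whose target is `handleClient`, but
# that function is not included in this file's content. The stub below is a
# hypothetical placeholder (not the original implementation) so the module can
# run; in a real fix it should be defined before setup() is started.
def handleClient(client, client_name):
    try:
        while True:
            data = client.recv(BUFFER_SIZE)
            if not data:
                break
            # Placeholder behaviour: echo the received bytes back to the sender.
            client.sendall(data)
    finally:
        client.close()
        clients.pop(client_name, None)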
| 21.644068 | 74 | 0.606891 |
3aec13f6c94bc913399d51f764bec22f50bfc9e2 | 1,295 | py | Python | model_map.py | bobenxia/Centripetal-SGD | e33350464504085a641a2a969d3c06e3d5cc4765 | [
"Apache-2.0"
] | 767 | 2019-10-08T01:32:47.000Z | 2022-03-27T14:39:44.000Z | model_map.py | ShawnDing1994/ACNet | 9586a269d7065805aafb8f1d69d425e84cec55f1 | [
"MIT"
] | 48 | 2019-11-04T12:05:15.000Z | 2021-11-28T06:50:30.000Z | model_map.py | ShawnDing1994/ACNet | 9586a269d7065805aafb8f1d69d425e84cec55f1 | [
"MIT"
] | 140 | 2019-10-29T07:49:24.000Z | 2022-03-27T13:01:22.000Z | from base_model.mobilenetv1 import *
from base_model.stagewise_resnet import *
from base_model.vgg import *
from base_model.lenet5 import create_lenet5bn
from base_model.wrn import create_wrnc16plain
from base_model.resnet import create_ResNet18, create_ResNet34
from base_model.cfqk import create_CFQKBNC
IMAGENET_STANDARD_MODEL_MAP = {
'sres50': create_SResNet50,
'smi1': create_MobileV1Imagenet,
'sres18': create_ResNet18,
'sres34': create_ResNet34
}
CIFAR10_MODEL_MAP = {
'src56':create_SRC56,
'src110':create_SRC110,
'vc':create_vc,
'wrnc16plain':create_wrnc16plain,
'cfqkbnc':create_CFQKBNC
}
MNIST_MODEL_MAP = {
'lenet5bn': create_lenet5bn,
}
DATASET_TO_MODEL_MAP = {
'imagenet_standard': IMAGENET_STANDARD_MODEL_MAP,
'cifar10': CIFAR10_MODEL_MAP,
'mnist': MNIST_MODEL_MAP
}
# return the model creation function
def get_model_fn(dataset_name, model_name):
# print(DATASET_TO_MODEL_MAP[dataset_name.replace('_blank', '_standard')].keys())
return DATASET_TO_MODEL_MAP[dataset_name.replace('_blank', '_standard')][model_name]
def get_dataset_name_by_model_name(model_name):
for dataset_name, model_map in DATASET_TO_MODEL_MAP.items():
if model_name in model_map:
return dataset_name
return None | 29.431818 | 88 | 0.763707 |
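# Example lookups using the keys defined in the maps above:
# get_dataset_name_by_model_name('src56') -> 'cifar10'
# get_model_fn('cifar10', 'src56') -> create_SRC56 (the model creation function)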
62e32402fe64571029a5eb94e652a101e505e9c8 | 4,430 | py | Python | electrum_grs/contacts.py | Groestlcoin/electrum-grs | 80f00b8d3511bf40be71fc8c0ffe179ab1d57dd5 | [
"MIT"
] | 9 | 2016-09-15T10:10:36.000Z | 2019-08-01T09:08:30.000Z | electrum_grs/contacts.py | Groestlcoin/electrum-grs | 80f00b8d3511bf40be71fc8c0ffe179ab1d57dd5 | [
"MIT"
] | 5 | 2017-10-28T08:46:00.000Z | 2019-03-15T00:59:01.000Z | electrum_grs/contacts.py | Groestlcoin/electrum-grs | 80f00b8d3511bf40be71fc8c0ffe179ab1d57dd5 | [
"MIT"
] | 5 | 2017-08-24T03:44:27.000Z | 2019-03-03T20:42:25.000Z | # Electrum - Lightweight Bitcoin Client
# Copyright (c) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import dns
from dns.exception import DNSException
from . import bitcoin
from . import dnssec
from .util import read_json_file, write_json_file, to_string
from .logging import Logger
class Contacts(dict, Logger):
def __init__(self, db):
Logger.__init__(self)
self.db = db
d = self.db.get('contacts', {})
try:
self.update(d)
except:
return
# backward compatibility
for k, v in self.items():
_type, n = v
if _type == 'address' and bitcoin.is_address(n):
self.pop(k)
self[n] = ('address', k)
def save(self):
self.db.put('contacts', dict(self))
def import_file(self, path):
data = read_json_file(path)
data = self._validate(data)
self.update(data)
self.save()
def export_file(self, path):
write_json_file(path, self)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.save()
def pop(self, key):
if key in self.keys():
res = dict.pop(self, key)
self.save()
return res
def resolve(self, k):
if bitcoin.is_address(k):
return {
'address': k,
'type': 'address'
}
if k in self.keys():
_type, addr = self[k]
if _type == 'address':
return {
'address': addr,
'type': 'contact'
}
out = self.resolve_openalias(k)
if out:
address, name, validated = out
return {
'address': address,
'name': name,
'type': 'openalias',
'validated': validated
}
raise Exception("Invalid Groestlcoin address or alias", k)
def resolve_openalias(self, url):
# support email-style addresses, per the OA standard
url = url.replace('@', '.')
try:
records, validated = dnssec.query(url, dns.rdatatype.TXT)
except DNSException as e:
self.logger.info(f'Error resolving openalias: {repr(e)}')
return None
prefix = 'grs'
for record in records:
string = to_string(record.strings[0], 'utf8')
if string.startswith('oa1:' + prefix):
address = self.find_regex(string, r'recipient_address=([A-Za-z0-9]+)')
name = self.find_regex(string, r'recipient_name=([^;]+)')
if not name:
name = address
if not address:
continue
return address, name, validated
def find_regex(self, haystack, needle):
regex = re.compile(needle)
try:
return regex.search(haystack).groups()[0]
except AttributeError:
return None
def _validate(self, data):
for k, v in list(data.items()):
if k == 'contacts':
return self._validate(v)
if not bitcoin.is_address(k):
data.pop(k)
else:
_type, _ = v
if _type != 'address':
data.pop(k)
return data
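# Minimal usage sketch. The in-memory stand-in for the wallet DB below is an
# assumption for illustration; Electrum passes its real database object, which
# only needs `get` and `put` here. The address string is a placeholder.
#
# class _FakeDB(dict):
#     def put(self, key, value):
#         self[key] = value
#
# contacts = Contacts(_FakeDB())
# contacts['Donations'] = ('address', '<a valid Groestlcoin address>')
# contacts.resolve('Donations')  # -> {'address': '<...>', 'type': 'contact'}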
| 32.814815 | 86 | 0.573815 |
570f64394a4169a0658d6eec246415a71a43f3df | 4,215 | py | Python | test/test_static_mount.py | afxcn/unit | a336928e1027af92d0c9bb2ccb369a3f9b53abae | [
"Apache-2.0"
] | null | null | null | test/test_static_mount.py | afxcn/unit | a336928e1027af92d0c9bb2ccb369a3f9b53abae | [
"Apache-2.0"
] | null | null | null | test/test_static_mount.py | afxcn/unit | a336928e1027af92d0c9bb2ccb369a3f9b53abae | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
from pathlib import Path
import pytest
from unit.applications.proto import TestApplicationProto
class TestStaticMount(TestApplicationProto):
prerequisites = {'features': ['chroot']}
@pytest.fixture(autouse=True)
def setup_method_fixture(self, is_su, temp_dir):
if not is_su:
pytest.skip('requires root')
os.makedirs(temp_dir + '/assets/dir/mount')
os.makedirs(temp_dir + '/assets/dir/dir')
os.makedirs(temp_dir + '/assets/mount')
Path(temp_dir + '/assets/index.html').write_text('index')
Path(temp_dir + '/assets/dir/dir/file').write_text('file')
Path(temp_dir + '/assets/mount/index.html').write_text('mount')
try:
process = subprocess.Popen(
[
"mount",
"--bind",
temp_dir + "/assets/mount",
temp_dir + "/assets/dir/mount",
],
stderr=subprocess.STDOUT,
)
process.communicate()
except KeyboardInterrupt:
raise
except:
pytest.fail('Can\'t run mount process.')
self._load_conf(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [{"action": {"share": temp_dir + "/assets/dir"}}],
}
)
yield
try:
process = subprocess.Popen(
["umount", "--lazy", temp_dir + "/assets/dir/mount"],
stderr=subprocess.STDOUT,
)
process.communicate()
except KeyboardInterrupt:
raise
except:
pytest.fail('Can\'t run umount process.')
def test_static_mount(self, temp_dir, skip_alert):
skip_alert(r'opening.*failed')
resp = self.get(url='/mount/')
assert resp['status'] == 200
assert resp['body'] == 'mount'
assert 'success' in self.conf(
{"share": temp_dir + "/assets/dir", "traverse_mounts": False},
'routes/0/action',
), 'configure mount disable'
assert self.get(url='/mount/')['status'] == 403
assert 'success' in self.conf(
{"share": temp_dir + "/assets/dir", "traverse_mounts": True},
'routes/0/action',
), 'configure mount enable'
resp = self.get(url='/mount/')
assert resp['status'] == 200
assert resp['body'] == 'mount'
def test_static_mount_two_blocks(self, temp_dir, skip_alert):
skip_alert(r'opening.*failed')
os.symlink(temp_dir + '/assets/dir', temp_dir + '/assets/link')
assert 'success' in self.conf(
[
{
"match": {"method": "HEAD"},
"action": {
"share": temp_dir + "/assets/dir",
"traverse_mounts": False,
},
},
{
"match": {"method": "GET"},
"action": {
"share": temp_dir + "/assets/dir",
"traverse_mounts": True,
},
},
],
'routes',
), 'configure two options'
assert self.get(url='/mount/')['status'] == 200, 'block enabled'
assert self.head(url='/mount/')['status'] == 403, 'block disabled'
def test_static_mount_chroot(self, temp_dir, skip_alert):
skip_alert(r'opening.*failed')
assert 'success' in self.conf(
{
"share": temp_dir + "/assets/dir",
"chroot": temp_dir + "/assets",
},
'routes/0/action',
), 'configure chroot mount default'
assert self.get(url='/mount/')['status'] == 200, 'chroot'
assert 'success' in self.conf(
{
"share": temp_dir + "/assets/dir",
"chroot": temp_dir + "/assets",
"traverse_mounts": False,
},
'routes/0/action',
), 'configure chroot mount disable'
assert self.get(url='/mount/')['status'] == 403, 'chroot mount'
| 29.893617 | 76 | 0.491815 |