repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M ⌀)
---|---|---|---|---|
nazeehshoura/crawler | refs/heads/master | env/lib/python2.7/site-packages/django/core/management/sql.py | 49 | from __future__ import unicode_literals
import codecs
import os
import re
import warnings
from django.apps import apps
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models, router
from django.utils.deprecation import RemovedInDjango19Warning
def check_for_migrations(app_config, connection):
# Inner import, otherwise tests import it too early, as it needs settings
from django.db.migrations.loader import MigrationLoader
loader = MigrationLoader(connection)
if app_config.label in loader.migrated_apps:
raise CommandError("App '%s' has migrations. Only the sqlmigrate and sqlflush commands can be used when an app has migrations." % app_config.label)
def sql_create(app_config, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
check_for_migrations(app_config, connection)
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = list(app_config.get_models(include_auto_created=True))
final_output = []
tables = connection.introspection.table_names()
known_models = set(model for model in connection.introspection.installed_models(tables) if model not in app_models)
pending_references = {}
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app_config, style, connection, close_connection=True):
"Returns a list of the DROP TABLE SQL statements for the given app."
check_for_migrations(app_config, connection)
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except Exception:
cursor = None
try:
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = router.get_migratable_models(app_config, connection.alias, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
finally:
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor and close_connection:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
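# A minimal usage sketch (not part of the original module): the concrete statements
# returned depend on the backend, e.g. TRUNCATE-style statements on PostgreSQL and
# DELETE FROM statements on SQLite. Assumes no_style() from django.core.management.color.
#
#     from django.core.management.color import no_style
#     from django.db import connection
#     for stmt in sql_flush(no_style(), connection, only_django=True):
#         print(stmt)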
def sql_custom(app_config, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
check_for_migrations(app_config, connection)
output = []
app_models = router.get_migratable_models(app_config, connection.alias)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app_config, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
check_for_migrations(app_config, connection)
output = []
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_destroy_indexes(app_config, style, connection):
"Returns a list of the DROP INDEX SQL statements for all models in the given app."
check_for_migrations(app_config, connection)
output = []
for model in router.get_migratable_models(app_config, connection.alias, include_auto_created=True):
output.extend(connection.creation.sql_destroy_indexes_for_model(model, style))
return output
def sql_all(app_config, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
check_for_migrations(app_config, connection)
return sql_create(app_config, style, connection) + sql_custom(app_config, style, connection) + sql_indexes(app_config, style, connection)
def _split_statements(content):
# Private API only called from code that emits a RemovedInDjango19Warning.
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
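# For illustration, given the comment-stripping regex and the trailing-";" handling above:
#
#     _split_statements("SELECT 1; -- trailing comment\nSELECT 2;\n")
#     == ["SELECT 1;", "SELECT 2;"]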
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dirs = []
app_dir = apps.get_app_config(model._meta.app_label).path
app_dirs.append(os.path.normpath(os.path.join(app_dir, 'sql')))
# Deprecated location -- remove in Django 1.9
old_app_dir = os.path.normpath(os.path.join(app_dir, 'models/sql'))
if os.path.exists(old_app_dir):
warnings.warn("Custom SQL location '<app_label>/models/sql' is "
"deprecated, use '<app_label>/sql' instead.",
RemovedInDjango19Warning)
app_dirs.append(old_app_dir)
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = []
for app_dir in app_dirs:
sql_files.append(os.path.join(app_dir, "%s.%s.sql" % (opts.model_name, backend_name)))
sql_files.append(os.path.join(app_dir, "%s.sql" % opts.model_name))
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'r', encoding=settings.FILE_CHARSET) as fp:
output.extend(connection.ops.prepare_sql_script(fp.read(), _allow_fallback=True))
return output
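# For example, with ENGINE 'django.db.backends.postgresql_psycopg2' and a model
# class Book in an app whose directory is library/, the files searched are
# library/sql/book.postgresql_psycopg2.sql and library/sql/book.sql, plus the
# same names under the deprecated library/models/sql/ location.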
def emit_pre_migrate_signal(create_models, verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
# For backwards-compatibility -- remove in Django 1.9.
models.signals.pre_syncdb.send(
sender=app_config.models_module,
app=app_config.models_module,
create_models=create_models,
verbosity=verbosity,
interactive=interactive,
db=db)
def emit_post_migrate_signal(created_models, verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
# For backwards-compatibility -- remove in Django 1.9.
models.signals.post_syncdb.send(
sender=app_config.models_module,
app=app_config.models_module,
created_models=created_models,
verbosity=verbosity,
interactive=interactive,
db=db)
|
wasade/picrust | refs/heads/master | tests/test_predict_metagenomes.py | 1 | #!/usr/bin/env python
# File created on 22 Feb 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011-2013, The PICRUSt Project"
__credits__ = ["Greg Caporaso","Jesse Zaneveld"]
__license__ = "GPL"
__version__ = "1.0.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
from numpy import array
from cogent.util.unit_test import TestCase, main
from biom.parse import parse_biom_table_str, get_axis_indices,\
direct_slice_data
from biom.table import DenseTable
from picrust.predict_metagenomes import predict_metagenomes,\
calc_nsti,get_overlapping_ids,\
extract_otu_and_genome_data,transfer_sample_metadata,\
transfer_observation_metadata,transfer_metadata,\
load_subset_from_biom_str,yield_subset_biom_str,\
predict_metagenome_variances,variance_of_sum,variance_of_product,\
sum_rows_with_variance
class PredictMetagenomeTests(TestCase):
""" """
def setUp(self):
#Datasets for metagenome prediction
self.otu_table1 = parse_biom_table_str(otu_table1)
self.otu_table1_with_metadata = parse_biom_table_str(otu_table1_with_metadata)
self.genome_table1 = parse_biom_table_str(genome_table1)
self.genome_table1_with_metadata = parse_biom_table_str(genome_table1_with_metadata)
self.genome_table2 = parse_biom_table_str(genome_table2)
self.predicted_metagenome_table1 = parse_biom_table_str(predicted_metagenome_table1)
self.predicted_metagenome_table1_with_metadata = parse_biom_table_str(predicted_metagenome_table1_with_metadata)
#Datasets for variance estimation during metagenome prediction
self.zero_variance_table1 = parse_biom_table_str(zero_variance_table1)
self.variance_table1_var_by_otu = parse_biom_table_str(variance_table1_var_by_otu)
self.variance_table1_var_by_gene = parse_biom_table_str(variance_table1_var_by_gene)
self.variance_table1_one_gene_one_otu = parse_biom_table_str(variance_table1_one_gene_one_otu)
self.predicted_metagenome_table1_zero_variance = parse_biom_table_str(predicted_metagenome_table1_zero_variance)
self.predicted_metagenome_variance_table1_one_gene_one_otu =\
parse_biom_table_str(predicted_metagenome_variance_table1_one_gene_one_otu)
self.predicted_metagenome_table1_one_gene_one = parse_biom_table_str(predicted_metagenome_table1)
#Datasets for testing confidence intervals
self.predicted_metagenome_table1_one_gene_one_otu_upper_CI =\
parse_biom_table_str(predicted_metagenome_table1_one_gene_one_otu_upper_CI)
self.predicted_metagenome_table1_one_gene_one_otu_lower_CI =\
parse_biom_table_str(predicted_metagenome_table1_one_gene_one_otu_lower_CI)
def test_predict_metagenomes(self):
""" predict_metagenomes functions as expected with valid input """
actual = predict_metagenomes(self.otu_table1,self.genome_table1)
self.assertEqual(actual.delimitedSelf(),self.predicted_metagenome_table1.delimitedSelf())
def test_predict_metagenomes_value_error(self):
""" predict_metagenomes raises ValueError when no overlapping otu ids """
self.assertRaises(ValueError,predict_metagenomes,self.otu_table1,self.genome_table2)
def test_predict_metagenome_variances_returns_zero_variance_from_zero_variance(self):
""" predict_metagenomes outputs correct results given zero variance input"""
curr_otu_table = self.otu_table1
curr_genome_table = self.genome_table1
curr_variance_table = self.zero_variance_table1
curr_exp_metagenome_table = self.predicted_metagenome_table1
curr_exp_metagenome_variance_table = self.predicted_metagenome_table1_zero_variance
obs_prediction,obs_variances,obs_lower_CI_95,obs_upper_CI_95 =\
predict_metagenome_variances(curr_otu_table,curr_genome_table,gene_variances=curr_variance_table)
#Test that the prediction itself is as expected
self.assertEqual(obs_prediction.delimitedSelf(),curr_exp_metagenome_table.delimitedSelf())
#Test that the variance prediction is all zeros, as expected
self.assertEqual(obs_variances.delimitedSelf(),curr_exp_metagenome_variance_table.delimitedSelf())
#Test that with zero variance, the upper and lower CIs are equal to the expected value (i.e. the prediction)
self.assertEqual(obs_lower_CI_95.delimitedSelf(),curr_exp_metagenome_table.delimitedSelf())
self.assertEqual(obs_upper_CI_95.delimitedSelf(),curr_exp_metagenome_table.delimitedSelf())
def test_predict_metagenome_variances_propagates_variance_in_gene_categories(self):
""" predict_metagenomes correctly propagates the rank order of gene family variance"""
curr_otu_table = self.otu_table1
curr_genome_table = self.genome_table1
curr_variance_table = self.variance_table1_var_by_gene
curr_exp_metagenome_table = self.predicted_metagenome_table1
obs_prediction,obs_variances,obs_lower_CI_95,obs_upper_CI_95 =\
predict_metagenome_variances(curr_otu_table,curr_genome_table,gene_variances=curr_variance_table)
#Check that the metagenome prediction hasn't changed
self.assertEqual(obs_prediction.delimitedSelf(),curr_exp_metagenome_table.delimitedSelf())
def test_predict_metagenome_variances_propagates_variance(self):
""" predict_metagenomes correctly propagates differences in gene family variance as expected in a simple example"""
curr_otu_table = self.otu_table1
curr_genome_table = self.genome_table1
curr_variance_table = self.variance_table1_one_gene_one_otu
curr_exp_metagenome_table = self.predicted_metagenome_table1
curr_exp_metagenome_variance_table = self.predicted_metagenome_variance_table1_one_gene_one_otu
curr_exp_upper_CI_95 = self.predicted_metagenome_table1_one_gene_one_otu_upper_CI
curr_exp_lower_CI_95 = self.predicted_metagenome_table1_one_gene_one_otu_lower_CI
obs_prediction,obs_variances,obs_lower_CI_95,obs_upper_CI_95 =\
predict_metagenome_variances(curr_otu_table,curr_genome_table,gene_variances=curr_variance_table)
self.assertEqual(obs_prediction.delimitedSelf(),curr_exp_metagenome_table.delimitedSelf())
#Expect no variance in f1 or f2 in any sample, and no variance in OTU 1 or 3.
#OTU 2 occurs in all samples except sample 3, so all samples except 3 should
#have variance. The exact values follow from the variance of scaled random variables or
#the sum of random variables.
self.assertEqual(obs_variances,self.predicted_metagenome_variance_table1_one_gene_one_otu)
#Check CIs against hand calculated CIs
self.assertEqual(obs_upper_CI_95.delimitedSelf(),curr_exp_upper_CI_95.delimitedSelf())
self.assertEqual(obs_lower_CI_95.delimitedSelf(),curr_exp_lower_CI_95.delimitedSelf())
def test_predict_metagenomes_keeps_observation_metadata(self):
"""predict_metagenomes preserves Observation metadata in genome and otu table"""
actual = predict_metagenomes(self.otu_table1_with_metadata,self.genome_table1_with_metadata)
exp = self.predicted_metagenome_table1_with_metadata
#NOTE: the expected data is mapped to dicts below because otherwise the memory
#location of the lambda function associated with the defaultdict
#causes (artifactual) inequality of results
actual_md = map(dict,sorted([md for md in actual.ObservationMetadata]))
exp_md = map(dict,sorted([md for md in exp.ObservationMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_predict_metagenomes_keeps_sample_metadata(self):
"""predict_metagenomes preserves Sample metadata in genome and otu table"""
#NOTE: could be consolidated with "_keeps_observation_metadata" above
actual = predict_metagenomes(self.otu_table1_with_metadata,\
self.genome_table1_with_metadata,verbose=False)
exp = self.predicted_metagenome_table1_with_metadata
#Need to map to dicts, otherwise the memory location of the lambda function
#associated with the defaultdict causes (artifactual) inequality of results
actual_md = map(dict,sorted([md for md in actual.SampleMetadata]))
exp_md = map(dict,sorted([md for md in exp.SampleMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_transfer_metadata_moves_sample_metadata_between_biom_tables(self):
"""transfer_metadata moves sample metadata values between BIOM format tables"""
t1 = self.otu_table1
exp = self.otu_table1_with_metadata
actual = transfer_metadata(self.otu_table1_with_metadata,self.otu_table1,\
"SampleMetadata","SampleMetadata",verbose=False)
actual_md = map(dict,sorted([md for md in actual.SampleMetadata]))
exp_md = map(dict,sorted([md for md in exp.SampleMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_transfer_metadata_moves_observation_metadata_between_biom_tables(self):
"""transfer_metadata moves observation metadata values between BIOM format tables"""
t1 = self.genome_table1
exp = self.genome_table1_with_metadata
actual = transfer_metadata(self.genome_table1_with_metadata,\
self.genome_table1,"ObservationMetadata","ObservationMetadata",verbose=False)
actual_md = map(dict,sorted([md for md in actual.ObservationMetadata]))
exp_md = map(dict,sorted([md for md in exp.ObservationMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_transfer_sample_metadata_moves_sample_metadata_between_biom_tables(self):
"""transfer_sample_metadata moves sample metadata values between BIOM format tables"""
t1 = self.otu_table1
exp = self.otu_table1_with_metadata
actual = transfer_sample_metadata(self.otu_table1_with_metadata,\
self.otu_table1,"SampleMetadata",verbose=False)
actual_md = map(dict,sorted([md for md in actual.SampleMetadata]))
exp_md = map(dict,sorted([md for md in exp.SampleMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_transfer_observation_metadata_moves_observation_metadata_between_biom_tables(self):
"""transfer_sample_metadata moves sample metadata values between BIOM format tables"""
t1 = self.genome_table1
exp = self.genome_table1_with_metadata
actual = transfer_observation_metadata(self.genome_table1_with_metadata,\
self.genome_table1,"ObservationMetadata",verbose=False)
actual_md = map(dict,sorted([md for md in actual.ObservationMetadata]))
exp_md = map(dict,sorted([md for md in exp.ObservationMetadata]))
for i,md in enumerate(actual_md):
self.assertEqualItems(md,exp_md[i])
for i,md in enumerate(exp_md):
self.assertEqualItems(md,actual_md[i])
def test_load_subset_from_biom_str_loads_a_subset_of_observations(self):
"""load_subset_from_biom_str loads a subset of observations from a valid BIOM format JSON string"""
biom_str = otu_table1_with_metadata
ids_to_load = ['GG_OTU_1','GG_OTU_2']
axis = 'observations'
#NOTE: this will currently fail due to a known bug in the BIOM direct_parse_key;
#however, as soon as that is updated, this should pass
#two_taxon_table = load_subset_from_biom_str(biom_str,ids_to_load,axis)
#self.assertEqualItems(two_taxon_table.ObservationIds,ids_to_load)
#Test that loading all ids is identical to just loading the table
#exp = parse_biom(biom_str)
#obs = load_subset_from_biom_str(biom_str,ids_to_load=['GG_OTU_1','GG_OTU_2','GG_OTU_3'],axis=axis)
#self.assertEqual(obs,exp)
def test_variance_of_sum_functions_as_expected_with_valid_input(self):
"""variance_of_sum functions as expected given two variances"""
#Example drawn from:http://onlinestatbook.com/chapter4/variance_sum_law2.html
var1 = 10000
var2 = 11000
r=0.50
# expected variance = 10,000 + 11,000 + 2(0.5)*sqrt(10,000)*sqrt(11,000)=31488
expected_var1 = 31488.088481701518
observed_var = variance_of_sum(var1,var2,r,sign_of_varB=1)
self.assertFloatEqual(expected_var1,observed_var)
expected_var2 = 10511.911518298484
observed_var = variance_of_sum(var1,var2,r,sign_of_varB=-1)
self.assertFloatEqual(expected_var2,observed_var)
#Test that this works for vector input
var1=array([10000,10000,0])
var2=array([11000,11000,0])
observed_var = variance_of_sum(var1,var2,r)
expected_var = array([expected_var1,expected_var1,0.0])
self.assertFloatEqual(observed_var,expected_var)
def test_sum_rows_with_variance(self):
"""sum_rows_with_variance sums the rows of a numpy array while accounting for variance"""
data_array = array([[0,0],[0,1.0]])
variance_array = array([[1.0,0],[0,1000.0]])
exp_data_array = array([0.0,1.0])
exp_variance_array = array([1.0,1000.0])
obs_data_array,obs_variance_array =\
sum_rows_with_variance(data_array,variance_array)
self.assertFloatEqual(obs_data_array,exp_data_array)
self.assertFloatEqual(obs_variance_array,exp_variance_array)
def test_variance_of_product_functions_as_expected_with_valid_input(self):
"""variance_of_product functions as expected given two values and two variances"""
varA = 100.0
A = 1.0
varB = 1000.0
B = 10.0
r=0.5
#Expected (calc'd by hand) = (100/1)**2 + (1000/10)**2 + 2*0.5*sqrt(100)*sqrt(1000)/10.0
#Equivalently = 10000 + 10000 + sqrt(1000) = 20000 + sqrt(1000) = 20000 + 31.622776601683793
expected = 20031.622776601683793
observed = variance_of_product(A,B,varA,varB,r=0.5)
self.assertFloatEqual(observed,expected)
#Test that this works for vector input
Av = array([A]*10)
Bv = array([B]*10)
varAv=array([varA]*10)
varBv=array([varB]*10)
rv = array([r]*10)
result = variance_of_product(Av,Bv,varAv,varBv,rv)
self.assertFloatEqual(result,array([expected]*10))
def test_yield_subset_biom_str_yields_string_pieces_from_valid_input(self):
"""yield_subset_biom_str yields components of a biom string containing only a subset of ids, given a valid biom str"""
biom_str = otu_table1_with_metadata
ids_to_load = ['GG_OTU_1','GG_OTU_2']
axis = 'observations'
idxs, new_axis_md = get_axis_indices(biom_str,ids_to_load, axis)
new_data = direct_slice_data(biom_str,idxs, axis)
#NOTE: this will currently fail due to a known bug in the BIOM direct_parse_key;
#however, as soon as that is updated, this should pass
obs = [part for part in yield_subset_biom_str(biom_str,new_data,new_axis_md,axis)]
exp = ['{', '"id": "GG_OTU_1"', ',',\
'"format": "Biological Observation Matrix v0.9"', ',',\
'"format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html"', ',',\
'"type": "OTU table"', ',',\
# '"generated_by": "QIIME 1.4.0-dev, svn revision 2753', ',',\
'"generated_by": "QIIME 1.4.0-dev', ',',\
'"date": "2012-02-22T20:50:05.024661"', ',',\
'"matrix_type": "sparse"', ',',\
'"matrix_element_type": "float"', ',',\
'"data": [[0,0,1.0],[0,1,2.0],[0,2,3.0],[0,3,5.0],[1,0,5.0],[1,1,1.0],[1,3,2.0]], "shape": [2, 4]',',',\
'"rows": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}]', ',',\
'"columns": [{"id": "Sample1", "metadata": {"pH":7.0}}, {"id": "Sample2", "metadata": {"pH":8.0}}, {"id": "Sample3", "metadata": {"pH":7.0}}, {"id": "Sample4", "metadata": null}]', '}']
#For now, be aware that commas in generated_by
#strings won't parse correctly
for i,piece in enumerate(exp):
self.assertEqual(obs[i],piece)
#TODO: when BIOM direct_parse_key is fixed this should pass
#self.assertEqual(obs,exp)
otu_table1 = """{"rows": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 1.0], [0, 1, 2.0], [0, 2, 3.0], [0, 3, 5.0], [1, 0, 5.0], [1, 1, 1.0], [1, 3, 2.0], [2, 2, 1.0], [2, 3, 4.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:50:05.024661", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
otu_table1_with_metadata = """{"rows": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 1.0], [0, 1, 2.0], [0, 2, 3.0], [0, 3, 5.0], [1, 0, 5.0], [1, 1, 1.0], [1, 3, 2.0], [2, 2, 1.0], [2, 3, 4.0]], "columns": [{"id": "Sample1", "metadata": {"pH":7.0}}, {"id": "Sample2", "metadata": {"pH":8.0}}, {"id": "Sample3", "metadata": {"pH":7.0}}, {"id": "Sample4", "metadata": null}],"generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:50:05.024661", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
genome_table1 = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 1.0], [0, 1, 2.0], [0, 2, 3.0], [1, 1, 1.0], [2, 2, 1.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
zero_variance_table1 = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format":
"Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 0.0], [1, 1, 0.0], [2, 2, 0.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
variance_table1 = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 1.0], [1, 1, 10.0], [2, 2, 100.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
variance_table1_var_by_gene = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 0.0], [1, 1, 1.0],[2, 1, 10.0],[1, 2, 1.0], [2, 2, 10.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
variance_table1_var_by_otu = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 10.0], [0, 2, 100.0], [1, 1, 10.0],[2, 1, 10.0],[1, 2, 100.0], [2, 2, 100.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
variance_table1_one_gene_one_otu = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 0.0], [1, 1, 0.0],[2, 1, 0.0],[1, 2, 0.0], [2, 2, 1000.0]], "columns": [{"id": "GG_OTU_1", "metadata": null}, {"id": "GG_OTU_3", "metadata": null}, {"id": "GG_OTU_2", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
genome_table1_with_metadata = """{"rows": [{"id": "f1", "metadata": {"KEGG_description":"ko00100 Steroid biosynthesis"}}, {"id": "f2", "metadata": {"KEGG_description":"ko00195 Photosynthesis"}}, {"id": "f3", "metadata": {"KEGG_description":"ko00232 Caffeine metabolism"}}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 1.0], [0, 1, 2.0], [0, 2, 3.0], [1, 1, 1.0], [2, 2, 1.0]], "columns": [{"id": "GG_OTU_1", "metadata": {"confidence": 0.665,"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "GG_OTU_3", "metadata": {"confidence": 1.0,"taxonomy": ["Root", "k__Bacteria", "p__Firmicutes", "c__Clostridia", "o__Clostridiales", "f__Lachnospiraceae"]}}, {"id": "GG_OTU_2", "metadata":{"confidence": 0.98,"taxonomy": ["Root", "k__Bacteria"]}}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
genome_table2 = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 1.0], [0, 1, 2.0], [0, 2, 3.0], [1, 1, 1.0], [2, 2, 1.0]], "columns": [{"id": "GG_OTU_21", "metadata": null}, {"id": "GG_OTU_23", "metadata": null}, {"id": "GG_OTU_22", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 3], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T20:49:58.258296", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_table1 = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 16.0], [0, 1, 5.0], [0, 2, 5.0], [0, 3, 19.0], [1, 2, 1.0], [1, 3, 4.0], [2, 0, 5.0], [2, 1, 1.0], [2, 3, 2.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_table1_one_gene_one_otu_upper_CI = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 16.0], [0, 1, 5.0], [0, 2, 5.0], [0, 3, 19.0], [1, 2, 1.0], [1, 3, 4.0], [2, 0, 315.0], [2, 1, 63.0], [2, 3, 126.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_table1_one_gene_one_otu_lower_CI = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 16.0], [0, 1, 5.0], [0, 2, 5.0], [0, 3, 19.0], [1, 2, 1.0], [1, 3, 4.0], [2, 0, 0.0], [2, 1, 0.0], [2, 3, 0.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_variance_table1_one_gene_one_otu = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 0.0], [0, 3, 0.0], [1, 2, 0.0], [1, 3, 0.0], [2, 0, 25000.0], [2, 1, 1000.0], [2, 3, 4000.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_table1_zero_variance = """{"rows": [{"id": "f1", "metadata": null}, {"id": "f2", "metadata": null}, {"id": "f3", "metadata": null}], "format": "Biological Observation Matrix v0.9", "data": [[0, 0, 0.0], [0, 1, 0.0], [0, 2, 0.0], [0, 3, 0.0], [1, 2, 0.0], [1, 3, 0.0], [2, 0, 0.0], [2, 1, 0.0], [2, 3, 0.0]], "columns": [{"id": "Sample1", "metadata": null}, {"id": "Sample2", "metadata": null}, {"id": "Sample3", "metadata": null}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
predicted_metagenome_table1_with_metadata = """{"rows": [{"id": "f1", "metadata": {"KEGG_description":"ko00100 Steroid biosynthesis"}}, {"id": "f2", "metadata": {"KEGG_description":"ko00195 Photosynthesis"}}, {"id": "f3", "metadata": {"KEGG_description":"ko00232 Caffeine metabolism"}}], "format": "Biological Observation Matrix v0.9","data": [[0, 0, 16.0], [0, 1, 5.0], [0, 2, 5.0], [0, 3, 19.0], [1, 2, 1.0], [1, 3, 4.0], [2, 0, 5.0], [2, 1, 1.0], [2, 3, 2.0]], "columns": [{"id": "Sample1", "metadata": {"pH":7.0}}, {"id": "Sample2", "metadata": {"pH":8.0}}, {"id": "Sample3", "metadata": {"pH":7.0}}, {"id": "Sample4", "metadata": null}], "generated_by": "QIIME 1.4.0-dev, svn revision 2753", "matrix_type": "sparse", "shape": [3, 4], "format_url": "http://www.qiime.org/svn_documentation/documentation/biom_format.html", "date": "2012-02-22T16:01:30.837052", "type": "OTU table", "id": null, "matrix_element_type": "float"}"""
if __name__ == "__main__":
main()
|
LumaPictures/rez | refs/heads/pull_requests | src/rez/tests/data/solver/packages/bahish/2/package.py | 8 | name = "bahish"
version = "2"
requires = ["pybah-5"]
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
fangcode/shadowsocks | refs/heads/master | shadowsocks/crypto/util.py | 1032 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
# ctypes.util.find_library just returns the first result it finds,
# but we want to try them all,
# because on Windows, users may have both 32-bit and 64-bit versions installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
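# For example, find_library_nt('libeay32') can return paths to both a 32-bit and
# a 64-bit libeay32.dll when each lives in a different PATH directory.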
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
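# A sketch of how run_cipher is typically driven by the cipher self-tests in this
# package; the OpenSSLCrypto class and its (cipher_name, key, iv, op) signature are
# assumed here and are not defined in this module:
#
#     from shadowsocks.crypto.openssl import OpenSSLCrypto
#     cipher = OpenSSLCrypto(b'aes-256-cfb', b'k' * 32, b'i' * 16, 1)
#     decipher = OpenSSLCrypto(b'aes-256-cfb', b'k' * 32, b'i' * 16, 0)
#     run_cipher(cipher, decipher)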
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
|
slurdge/shadowsocks | refs/heads/master | shadowsocks/shell.py | 652 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None) \
and not config.get('manager_address'):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if config.get('server_port', None) and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s',
e.message)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', None)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
--forbidden-ip IPLIST comma-separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
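# For example (string values are converted to UTF-8 bytes, recursively through
# nested lists and dicts; non-string values and keys are left as-is):
#
#     parse_json_in_str('{"server": "example.com", "timeout": 300}')
#     == {'server': b'example.com', 'timeout': 300}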
|
lablup/sorna-client | refs/heads/master | tests/test_load.py | 1 | '''
A standalone script to generate some load against the public API server.
It assumes that you have already configured the access key and secret key
as environment variables.
'''
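# For example, before running (the exact variable names are an assumption and
# depend on the client version):
#
#     export SORNA_ACCESS_KEY=<your access key>
#     export SORNA_SECRET_KEY=<your secret key>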
import logging
import multiprocessing
from statistics import mean, median, stdev
import time
import pytest
from ai.backend.client.compat import token_hex
from ai.backend.client.kernel import Kernel
log = logging.getLogger('ai.backend.client.test.load')
sample_code = '''
import os
print('ls:', os.listdir('.'))
with open('test.txt', 'w') as f:
f.write('hello world')
'''
sample_code_julia = '''
println("wow")
'''
def print_stat(msg, times_taken):
print('{}: mean {:.2f} secs, median {:.2f} secs, stdev {:.2f}'.format(
msg, mean(times_taken), median(times_taken), stdev(times_taken)
))
def run_create_kernel(_idx):
begin = time.monotonic()
try:
k = Kernel.get_or_create('python3')
ret = k.kernel_id
except:
log.exception('run_create_kernel')
ret = None
finally:
end = time.monotonic()
t = end - begin
return t, ret
def create_kernels(concurrency, parallel=False):
kernel_ids = []
times_taken = []
if parallel:
pool = multiprocessing.Pool(concurrency)
results = pool.map(run_create_kernel, range(concurrency))
for t, kid in results:
times_taken.append(t)
kernel_ids.append(kid)
else:
for _idx in range(concurrency):
t, kid = run_create_kernel(_idx)
times_taken.append(t)
kernel_ids.append(kid)
print_stat('create_kernel', times_taken)
return kernel_ids
def run_execute_code(kid):
if kid is not None:
begin = time.monotonic()
console = []
run_id = token_hex(8)
while True:
result = Kernel(kid).execute(run_id, sample_code)
console.extend(result['console'])
if result['status'] == 'finished':
break
stdout = ''.join(rec[1] for rec in console if rec[0] == 'stdout')
end = time.monotonic()
print(stdout)
return end - begin
return None
def execute_codes(kernel_ids, parallel=False):
times_taken = []
if parallel:
pool = multiprocessing.Pool(len(kernel_ids))
results = pool.map(run_execute_code, kernel_ids)
for t in results:
if t is not None:
times_taken.append(t)
else:
for kid in kernel_ids:
t = run_execute_code(kid)
if t is not None:
times_taken.append(t)
print_stat('execute_code', times_taken)
def run_restart_kernel(kid):
# The 2nd param is currently ignored.
if kid is not None:
begin = time.monotonic()
Kernel(kid).restart()
end = time.monotonic()
return end - begin
return None
def restart_kernels(kernel_ids, parallel=False):
times_taken = []
if parallel:
pool = multiprocessing.Pool(len(kernel_ids))
results = pool.map(run_restart_kernel, kernel_ids)
for t in results:
if t is not None:
times_taken.append(t)
else:
for kid in kernel_ids:
t = run_restart_kernel(kid)
if t is not None:
times_taken.append(t)
print_stat('restart_kernel', times_taken)
def run_destroy_kernel(kid):
if kid is not None:
begin = time.monotonic()
Kernel(kid).destroy()
end = time.monotonic()
return end - begin
return None
def destroy_kernels(kernel_ids, parallel=False):
times_taken = []
if parallel:
pool = multiprocessing.Pool(len(kernel_ids))
results = pool.map(run_destroy_kernel, kernel_ids)
for t in results:
if t is not None:
times_taken.append(t)
else:
for kid in kernel_ids:
t = run_destroy_kernel(kid)
if t is not None:
times_taken.append(t)
print_stat('destroy_kernel', times_taken)
@pytest.mark.integration
@pytest.mark.parametrize('concurrency,parallel,restart', [
(5, False, False),
(5, True, False),
(5, False, True),
(5, True, True),
])
def test_high_load_requests(capsys, defconfig, concurrency, parallel, restart):
'''
Tests creation and use of multiple concurrent kernels in various ways.
NOTE: This test may fail if your system has too few cores for the given
concurrency. The exact number of cores required is determined by the
Python3 kernel's resource requirements (CPU slots).
NOTE: This test may occasionally fail if destroying the Docker containers in
the manager takes too long, because the occupied resources are only restored
after container destruction, while the destroy API returns after stopping
the containers but before they are actually destroyed.
We have inserted a small delay to work around this.
Running this test with different parameter sets without any delay between
them would cause "503 Service Unavailable" errors, as it would quickly
saturate the resource limit of the developer's PC.
'''
# Show stdout for timing statistics
with capsys.disabled():
print('waiting for previous asynchronous kernel destruction for 5 secs...')
time.sleep(5)
kids = create_kernels(concurrency, parallel)
execute_codes(kids, parallel)
if restart:
restart_kernels(kids, parallel)
execute_codes(kids, parallel)
destroy_kernels(kids, parallel)
|
kerkeslager/don | refs/heads/master | test_tags.py | 1 | import collections
import unittest
from ton import tags
class AutoTagTests(unittest.TestCase):
def test_autotags_void(self):
self.assertEqual(
tags.autotag(None),
tags.TaggedObject(tag = tags.VOID, value = None),
)
def test_autotags_true(self):
self.assertEqual(
tags.autotag(True),
tags.TaggedObject(tag = tags.TRUE, value = True),
)
def test_autotags_false(self):
self.assertEqual(
tags.autotag(False),
tags.TaggedObject(tag = tags.FALSE, value = False),
)
def test_autotags_int_defaults_to_INT32(self):
self.assertEqual(
tags.autotag(127),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = 127),
)
self.assertEqual(
tags.autotag(-128),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = -128),
)
self.assertEqual(
tags.autotag(128),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = 128),
)
self.assertEqual(
tags.autotag(-129),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = -129),
)
self.assertEqual(
tags.autotag(-32768),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = -32768),
)
self.assertEqual(
tags.autotag(32767),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = 32767),
)
self.assertEqual(
tags.autotag(-32769),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = -32769),
)
self.assertEqual(
tags.autotag(32768),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = 32768),
)
self.assertEqual(
tags.autotag(-2147483648),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = -2147483648),
)
self.assertEqual(
tags.autotag(2147483647),
tags.TaggedObject(tag = tags.DEFAULT_INTEGER_ENCODING, value = 2147483647),
)
self.assertEqual(
tags.autotag(-2147483649),
tags.TaggedObject(tag = tags.INT64, value = -2147483649),
)
self.assertEqual(
tags.autotag(2147483648),
tags.TaggedObject(tag = tags.INT64, value = 2147483648),
)
self.assertEqual(
tags.autotag(-9223372036854775808),
tags.TaggedObject(tag = tags.INT64, value = -9223372036854775808),
)
self.assertEqual(
tags.autotag(9223372036854775807),
tags.TaggedObject(tag = tags.INT64, value = 9223372036854775807),
)
with self.assertRaises(tags.TooWideError):
tags.autotag(9223372036854775808)
with self.assertRaises(tags.TooWideError):
tags.autotag(-9223372036854775809)
def test_autotags_int_to_smallest_possible_type_when_preferred_type_is_smallest(self):
self.assertEqual(
tags.autotag(127, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT8, value = 127),
)
self.assertEqual(
tags.autotag(-128, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT8, value = -128),
)
self.assertEqual(
tags.autotag(128, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT16, value = 128),
)
self.assertEqual(
tags.autotag(-129, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT16, value = -129),
)
self.assertEqual(
tags.autotag(-32768, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT16, value = -32768),
)
self.assertEqual(
tags.autotag(32767, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT16, value = 32767),
)
self.assertEqual(
tags.autotag(-32769, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT32, value = -32769),
)
self.assertEqual(
tags.autotag(32768, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT32, value = 32768),
)
self.assertEqual(
tags.autotag(-2147483648, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT32, value = -2147483648),
)
self.assertEqual(
tags.autotag(2147483647, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT32, value = 2147483647),
)
self.assertEqual(
tags.autotag(-2147483649, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT64, value = -2147483649),
)
self.assertEqual(
tags.autotag(2147483648, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT64, value = 2147483648),
)
self.assertEqual(
tags.autotag(-9223372036854775808, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT64, value = -9223372036854775808),
)
self.assertEqual(
tags.autotag(9223372036854775807, preferred_integer_tag=tags.SMALLEST),
tags.TaggedObject(tag = tags.INT64, value = 9223372036854775807),
)
with self.assertRaises(tags.TooWideError):
tags.autotag(9223372036854775808, preferred_integer_tag=tags.SMALLEST)
with self.assertRaises(tags.TooWideError):
tags.autotag(-9223372036854775809, preferred_integer_tag=tags.SMALLEST)
def test_tags_integer_to_preferred_integer_tag(self):
self.assertEqual(
tags.autotag(42, preferred_integer_tag = tags.INT8),
tags.TaggedObject(tag = tags.INT8, value = 42),
)
self.assertEqual(
tags.autotag(42, preferred_integer_tag = tags.INT16),
tags.TaggedObject(tag = tags.INT16, value = 42),
)
self.assertEqual(
tags.autotag(42, preferred_integer_tag = tags.INT32),
tags.TaggedObject(tag = tags.INT32, value = 42),
)
self.assertEqual(
tags.autotag(42, preferred_integer_tag = tags.INT64),
tags.TaggedObject(tag = tags.INT64, value = 42),
)
def test_tags_string_to_utf8_by_default(self):
self.assertEqual(
tags.autotag('Hello, world'),
tags.TaggedObject(tag = tags.DEFAULT_STRING_ENCODING, value = 'Hello, world'),
)
def test_tags_string_to_preferred_string_encoding(self):
self.assertEqual(
tags.autotag('Hello, world', preferred_string_tag=tags.UTF8),
tags.TaggedObject(tag = tags.UTF8, value = 'Hello, world'),
)
self.assertEqual(
tags.autotag('Hello, world', preferred_string_tag=tags.UTF16),
tags.TaggedObject(tag = tags.UTF16, value = 'Hello, world'),
)
self.assertEqual(
tags.autotag('Hello, world', preferred_string_tag=tags.UTF32),
tags.TaggedObject(tag = tags.UTF32, value = 'Hello, world'),
)
def test_tags_bytes(self):
self.assertEqual(
tags.autotag(b'\xde\xad\xbe\xef'),
tags.TaggedObject(tag = tags.BINARY, value = b'\xde\xad\xbe\xef'),
)
def test_tags_list(self):
self.assertEqual(
tags.autotag([1,2,3]),
tags.TaggedObject(
tag = tags.LIST,
value = [
tags.TaggedObject(tag = tags.INT32, value = 1),
tags.TaggedObject(tag = tags.INT32, value = 2),
tags.TaggedObject(tag = tags.INT32, value = 3),
],
),
)
def test_tags_dictionary(self):
self.assertEqual(
tags.autotag(collections.OrderedDict([
('foo', 1),
('bar', True),
])),
tags.TaggedObject(
tag = tags.DICTIONARY,
value = collections.OrderedDict([
(
tags.TaggedObject(tag = tags.UTF8, value = 'foo'),
tags.TaggedObject(tag = tags.INT32, value = 1),
),
(
tags.TaggedObject(tag = tags.UTF8, value = 'bar'),
tags.TaggedObject(tag = tags.TRUE, value = True),
),
]),
),
)
unittest.main()
|
jmcorgan/gnuradio | refs/heads/master | gr-vocoder/python/vocoder/qa_ulaw_vocoder.py | 57 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_ulaw_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
data = (8,24,40,56,72,88,104,120,132,148,164,180,
196,212,228,244,260,276,292,308,324,340)
src = blocks.vector_source_s(data)
enc = vocoder.ulaw_encode_sb()
dec = vocoder.ulaw_decode_bs()
snk = blocks.vector_sink_s()
self.tb.connect(src, enc, dec, snk)
self.tb.run()
actual_result = snk.data()
self.assertEqual(data, actual_result)
if __name__ == '__main__':
gr_unittest.run(test_ulaw_vocoder, "test_ulaw_vocoder.xml")
|
mr3bn/DAT210x | refs/heads/master | Module2/module2Notes.py | 1 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 04 14:30:05 2017
@author: Mark
"""
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
# some import examples
#df = pd.read_sql_table('my_table', engine, columns = ['ColA', 'ColB'])
#df = pd.read_excel('my_dataset.xlsx', 'Sheet1', na_values=['NA'])
#df = pd.read_json('my_dataset.json', orient='columns')
#df = pd.read_csv('my_dataset.csv')
#
## writing is easy, too
#df.to_sql('table', engine)
#df.to_excel('dataset.xlsx')
#df.to_json('dataset.json')
#df.to_csv('dataset.csv')
# none of those will work, so let's do one that will:
df = pd.read_csv('Datasets/direct_marketing.csv')
# basic summary stuff included in pandas...
print df.head(5)
print df.tail(5)
print df.describe()
print df.columns
#################### COLUMN INDEXING ####################
# one way to get a single column out of data frame is to access it by name...
# this syntax will return a SERIES object of one column, size=nrow:
rec = df.recency
rec = df['recency']
# doubling up on the brackets returns a DATA FRAME of the same column, size=nrowx1
# intuitively, the interior set of brackets is a list in itself,
# so this application can actually handle more than one column:
rec = df[['recency']]
rec = df[['recency', 'history']]
# the df.loc method provides a marginally faster way to access a column by name...
# same series of size=nrow, just using a different method:
rec = df.loc[:, 'recency']
# and this application will again return a data frame (nrowx1)
rec = df.loc[:, ['recency']]
# same story, can slice to > 1 column:
rec = df.loc[:, ['recency', 'history']]
# df.loc also works with boolean masks, but won't bother with that right now
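# a quick sketch of the boolean-mask form of .loc mentioned above: the mask
# selects rows, the list selects columns (same df as loaded earlier)
rec = df.loc[df.recency < 7, ['recency', 'history']]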
# the df.iloc method uses numbered indexes instead of names
rec = df.iloc[:, 0]
rec = df.iloc[:, [0]]
# IMPORTANT: for the slice implementation of .iloc, note that the end of the
# range is NOT inclusive.
rec = df.iloc[:, 0:1] # returns the same as df.iloc[:, [0]]
# df.ix is sort of a hybrid of .loc and .iloc
rec = df.ix[:, 0]
rec = df.ix[:, 'recency']
rec = df.ix[:, 0:1]
#################### ROW INDEXING ####################
# one easy way to subset rows is with a boolean operation...
df.recency < 7 # returns a series of booleans, which we can use as a mask:
df[df.recency < 7]
# this methodology can work with multiple boolean tests:
df[(df.recency < 7) & (df.newbie == 0)]
# it's also possible to write to a dataframe through a slice:
# df[df.recency < 7] = -100 will set EVERY column to -100 in all rows
# where recency is < 7. a better approach is to do this for one
# column at a time, to account for data frame nonhomogeneity
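# a minimal sketch of the column-at-a-time approach described above, writing
# through .loc so only the 'recency' column is touched:
df.loc[df.recency < 7, 'recency'] = -100
|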
wolverineav/neutron | refs/heads/master | neutron/tests/unit/agent/test_rpc.py | 9 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_context import context as oslo_context
import oslo_messaging
from neutron.agent import rpc
from neutron.tests import base
class AgentRPCPluginApi(base.BaseTestCase):
def _test_rpc_call(self, method):
agent = rpc.PluginApi('fake_topic')
ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
expect_val = 'foo'
with mock.patch.object(agent.client, 'call') as mock_call,\
mock.patch.object(agent.client, 'prepare') as mock_prepare:
mock_prepare.return_value = agent.client
mock_call.return_value = expect_val
func_obj = getattr(agent, method)
if method == 'tunnel_sync':
actual_val = func_obj(ctxt, 'fake_tunnel_ip')
else:
actual_val = func_obj(ctxt, 'fake_device', 'fake_agent_id')
self.assertEqual(actual_val, expect_val)
def test_get_device_details(self):
self._test_rpc_call('get_device_details')
def test_get_devices_details_list(self):
self._test_rpc_call('get_devices_details_list')
def test_devices_details_list_unsupported(self):
agent = rpc.PluginApi('fake_topic')
ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
expect_val_get_device_details = 'foo'
expect_val = [expect_val_get_device_details]
with mock.patch.object(agent.client, 'call') as mock_call, \
mock.patch.object(agent.client, 'prepare') as mock_prepare:
mock_prepare.return_value = agent.client
mock_call.side_effect = [oslo_messaging.UnsupportedVersion('1.2'),
expect_val_get_device_details]
func_obj = getattr(agent, 'get_devices_details_list')
actual_val = func_obj(ctxt, ['fake_device'], 'fake_agent_id')
self.assertEqual(actual_val, expect_val)
def test_update_device_down(self):
self._test_rpc_call('update_device_down')
def test_tunnel_sync(self):
self._test_rpc_call('tunnel_sync')
class AgentPluginReportState(base.BaseTestCase):
def test_plugin_report_state_use_call(self):
topic = 'test'
reportStateAPI = rpc.PluginReportStateAPI(topic)
expected_agent_state = {'agent': 'test'}
with mock.patch.object(reportStateAPI.client, 'call') as mock_call, \
mock.patch.object(reportStateAPI.client, 'cast'), \
mock.patch.object(reportStateAPI.client, 'prepare'
) as mock_prepare:
mock_prepare.return_value = reportStateAPI.client
ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
reportStateAPI.report_state(ctxt, expected_agent_state,
use_call=True)
self.assertEqual(mock_call.call_args[0][0], ctxt)
self.assertEqual(mock_call.call_args[0][1], 'report_state')
self.assertEqual(mock_call.call_args[1]['agent_state'],
{'agent_state': expected_agent_state})
self.assertIsInstance(mock_call.call_args[1]['time'], str)
def test_plugin_report_state_cast(self):
topic = 'test'
reportStateAPI = rpc.PluginReportStateAPI(topic)
expected_agent_state = {'agent': 'test'}
with mock.patch.object(reportStateAPI.client, 'call'), \
mock.patch.object(reportStateAPI.client, 'cast'
) as mock_cast, \
mock.patch.object(reportStateAPI.client, 'prepare'
) as mock_prepare:
mock_prepare.return_value = reportStateAPI.client
ctxt = oslo_context.RequestContext('fake_user', 'fake_project')
reportStateAPI.report_state(ctxt, expected_agent_state)
self.assertEqual(mock_cast.call_args[0][0], ctxt)
self.assertEqual(mock_cast.call_args[0][1], 'report_state')
self.assertEqual(mock_cast.call_args[1]['agent_state'],
{'agent_state': expected_agent_state})
self.assertIsInstance(mock_cast.call_args[1]['time'], str)
def test_plugin_report_state_microsecond_is_0(self):
topic = 'test'
expected_time = datetime.datetime(2015, 7, 27, 15, 33, 30, 0)
expected_time_str = '2015-07-27T15:33:30.000000'
expected_agent_state = {'agent': 'test'}
with mock.patch('neutron.agent.rpc.datetime') as mock_datetime:
reportStateAPI = rpc.PluginReportStateAPI(topic)
mock_datetime.utcnow.return_value = expected_time
with mock.patch.object(reportStateAPI.client, 'call'), \
mock.patch.object(reportStateAPI.client, 'cast'
) as mock_cast, \
mock.patch.object(reportStateAPI.client, 'prepare'
) as mock_prepare:
mock_prepare.return_value = reportStateAPI.client
ctxt = oslo_context.RequestContext('fake_user',
'fake_project')
reportStateAPI.report_state(ctxt, expected_agent_state)
self.assertEqual(expected_time_str,
mock_cast.call_args[1]['time'])
class AgentRPCMethods(base.BaseTestCase):
def _test_create_consumers(
self, endpoints, method, expected, topics, listen):
call_to_patch = 'neutron.common.rpc.create_connection'
with mock.patch(call_to_patch) as create_connection:
rpc.create_consumers(
endpoints, method, topics, start_listening=listen)
create_connection.assert_has_calls(expected)
def test_create_consumers_start_listening(self):
endpoints = [mock.Mock()]
expected = [
mock.call(),
mock.call().create_consumer('foo-topic-op', endpoints,
fanout=True),
mock.call().consume_in_threads()
]
method = 'foo'
topics = [('topic', 'op')]
self._test_create_consumers(
endpoints, method, expected, topics, True)
def test_create_consumers_do_not_listen(self):
endpoints = [mock.Mock()]
expected = [
mock.call(),
mock.call().create_consumer('foo-topic-op', endpoints,
fanout=True),
]
method = 'foo'
topics = [('topic', 'op')]
self._test_create_consumers(
endpoints, method, expected, topics, False)
def test_create_consumers_with_node_name(self):
endpoints = [mock.Mock()]
expected = [
mock.call(),
mock.call().create_consumer('foo-topic-op', endpoints,
fanout=True),
mock.call().create_consumer('foo-topic-op.node1', endpoints,
fanout=False),
mock.call().consume_in_threads()
]
call_to_patch = 'neutron.common.rpc.create_connection'
with mock.patch(call_to_patch) as create_connection:
rpc.create_consumers(endpoints, 'foo', [('topic', 'op', 'node1')])
create_connection.assert_has_calls(expected)
|
un33k/robotframework | refs/heads/master | src/robot/variables/finders.py | 11 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
try:
from java.lang.System import (getProperty as get_java_property,
getProperties as get_java_properties)
except ImportError:
get_java_property = lambda name: None
get_java_properties = lambda: {}
from robot.errors import DataError, VariableError
from robot.utils import (get_env_var, get_env_vars, get_error_message,
is_dict_like, is_list_like, normalize, DotDict,
NormalizedDict)
from .isvar import validate_var
from .notfound import variable_not_found
class VariableFinder(object):
def __init__(self, variable_store):
self._finders = (StoredFinder(variable_store),
NumberFinder(),
EmptyFinder(),
EnvironmentFinder(),
ExtendedFinder(self))
self._store = variable_store
def find(self, name):
validate_var(name, '$@&%')
identifier = name[0]
for finder in self._finders:
if identifier in finder.identifiers:
try:
value = finder.find(name)
except (KeyError, ValueError):
continue
return self._validate_value(value, identifier, name)
variable_not_found(name, self._store.data)
def _validate_value(self, value, identifier, name):
if identifier == '@':
if not is_list_like(value):
raise VariableError("Value of variable '%s' is not list or "
"list-like." % name)
return list(value)
if identifier == '&':
if not is_dict_like(value):
raise VariableError("Value of variable '%s' is not dictionary "
"or dictionary-like." % name)
return DotDict(value)
return value
class StoredFinder(object):
identifiers = '$@&'
def __init__(self, store):
self._store = store
def find(self, name):
return self._store[name[2:-1]]
class NumberFinder(object):
identifiers = '$'
def find(self, name):
number = normalize(name)[2:-1]
try:
return self._get_int(number)
except ValueError:
return float(number)
def _get_int(self, number):
bases = {'0b': 2, '0o': 8, '0x': 16}
if number.startswith(tuple(bases)):
return int(number[2:], bases[number[:2]])
return int(number)
class EmptyFinder(object):
identifiers = '$@&'
find = NormalizedDict({'${EMPTY}': '', '@{EMPTY}': (), '&{EMPTY}': {}},
ignore='_').__getitem__
class ExtendedFinder(object):
identifiers = '$@&'
_match_extended = re.compile(r'''
(.+?) # base name (group 1)
([^\s\w].+) # extended part (group 2)
''', re.UNICODE|re.VERBOSE).match
def __init__(self, finder):
self._find_variable = finder.find
def find(self, name):
match = self._match_extended(name[2:-1])
if match is None:
raise ValueError
base_name, extended = match.groups()
try:
variable = self._find_variable('${%s}' % base_name)
except DataError as err:
raise VariableError("Resolving variable '%s' failed: %s"
% (name, unicode(err)))
try:
return eval('_BASE_VAR_' + extended, {'_BASE_VAR_': variable})
except:
raise VariableError("Resolving variable '%s' failed: %s"
% (name, get_error_message()))
class EnvironmentFinder(object):
identifiers = '%'
def find(self, name):
for getter in get_env_var, get_java_property:
value = getter(name[2:-1])
if value is not None:
return value
variable_not_found(name, self._get_candidates(),
"Environment variable '%s' not found." % name)
def _get_candidates(self):
candidates = dict(get_java_properties())
candidates.update(get_env_vars())
return candidates
|
talk-to/PjSip-Repo | refs/heads/master | tests/pjsua/scripts-sendto/201_ice_mismatch_1.py | 42 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=ice-ufrag:1234
a=ice-pwd:5678
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
a=candidate:XX 1 UDP 1 1.1.1.1 2222 typ host
"""
args = "--null-audio --use-ice --auto-answer 200 --max-calls 1"
include = ["a=ice-mismatch"]
exclude = []
sendto_cfg = sip.SendtoCfg( "caller sends mismatched offer for comp 1",
pjsua_args=args, sdp=sdp, resp_code=200,
resp_inc=include, resp_exc=exclude)
|
chaostrigger/rl-library | refs/heads/master | system/common/libs/mwclient/simplejson/tests/test_attacks.py | 4 | def test_script_close_attack():
import simplejson
res = simplejson.dumps('</script>')
assert '</script>' not in res
res = simplejson.dumps(simplejson.loads('"</script>"'))
assert '</script>' not in res
|
Rodolfoarv/Mars-Explorer-AI-with-Python- | refs/heads/master | env/lib/python2.7/site-packages/pip/commands/hash.py | 514 | from __future__ import absolute_import
import hashlib
import logging
import sys
from pip.basecommand import Command
from pip.status_codes import ERROR
from pip.utils import read_chunks
from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
name = 'hash'
usage = '%prog [options] <file> ...'
summary = 'Compute hashes of package archives.'
def __init__(self, *args, **kw):
super(HashCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of %s' %
', '.join(STRONG_HASHES))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
logger.info('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
|
weka511/bioinformatics | refs/heads/master | PAR3.py | 1 | # Copyright (C) 2017 Greenweaves Software Pty Ltd
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
def par3(n,A):
prefix=[]
suffix=[]
mid=[]
for a in A:
if a<A[0]:
prefix.append(a)
elif a==A[0]:
mid.append(a)
else:
suffix.append(a)
return prefix + mid + suffix
if __name__=='__main__':
with open('c:/Users/Weka/Downloads/rosalind_par3.txt') as f:
n=0
A=[]
i=0
for line in f:
text=line.strip()
if i==0:
n=int(text)
elif i==1:
A=[int(t) for t in text.split(' ')]
i+=1
print (' '.join([str(r) for r in par3(n,A)])) |
kouaw/CouchPotatoServer | refs/heads/develop | libs/chardet/hebrewprober.py | 2928 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
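# A rough sketch of the wiring described above, as performed by the group
# prober (class and model names follow the sibling modules; treat them as
# approximate):
#   hebrew_prober = HebrewProber()
#   logical_prober = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#   visual_prober = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)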
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
mistercrunch/panoramix | refs/heads/master | superset/connectors/sqla/__init__.py | 7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from . import models, views
|
proliming/zulip | refs/heads/master | zerver/management/commands/expunge_logs.py | 116 | from __future__ import absolute_import
import os
import sys
import datetime
import tempfile
import traceback
import ujson
from django.core.management.base import BaseCommand
from zerver.retention_policy import should_expunge_from_log
now = datetime.datetime.now()
def copy_retained_messages(infile, outfile):
"""Copy messages from infile to outfile which should be retained
according to policy."""
for ln in infile:
msg = ujson.loads(ln)
if not should_expunge_from_log(msg, now):
outfile.write(ln)
def expunge(filename):
"""Expunge entries from the named log file, in place."""
# We don't use the 'with' statement for tmpfile because we need to
# either move it or delete it, depending on success or failure.
#
# We create it in the same directory as infile for two reasons:
#
# - It makes it more likely we will notice leftover temp files
#
# - It ensures that they are on the same filesystem, so we can
# use atomic os.rename().
#
tmpfile = tempfile.NamedTemporaryFile(
mode = 'wb',
dir = os.path.dirname(filename),
delete = False)
try:
try:
with open(filename, 'rb') as infile:
copy_retained_messages(infile, tmpfile)
finally:
tmpfile.close()
os.rename(tmpfile.name, filename)
except:
os.unlink(tmpfile.name)
raise
class Command(BaseCommand):
help = ('Expunge old entries from one or more log files, '
+ 'according to the retention policy.')
def add_arguments(self, parser):
parser.add_argument('log_files', metavar='<log file>', type=str, nargs='*',
help='file to expunge entries from')
def handle(self, *args, **options):
if len(options['log_files']) == 0:
print >>sys.stderr, 'WARNING: No log files specified; doing nothing.'
for infile in options['log_files']:
try:
expunge(infile)
except KeyboardInterrupt:
raise
except:
print >>sys.stderr, 'WARNING: Could not expunge from', infile
traceback.print_exc()
|
bloomark/python-bitcoinlib | refs/heads/master | bitcoin/wallet.py | 11 | # Copyright (C) 2012-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Wallet-related functionality
Includes things like representing addresses and converting them to/from
scriptPubKeys; currently there is no actual wallet support implemented.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
_bord = ord
if sys.version > '3':
_bord = lambda x: x
import bitcoin
import bitcoin.base58
import bitcoin.core
import bitcoin.core.key
import bitcoin.core.script as script
class CBitcoinAddressError(bitcoin.base58.Base58Error):
"""Raised when an invalid Bitcoin address is encountered"""
class CBitcoinAddress(bitcoin.base58.CBase58Data):
"""A Bitcoin address"""
@classmethod
def from_bytes(cls, data, nVersion):
self = super(CBitcoinAddress, cls).from_bytes(data, nVersion)
if nVersion == bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']:
self.__class__ = P2SHBitcoinAddress
elif nVersion == bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']:
self.__class__ = P2PKHBitcoinAddress
else:
raise CBitcoinAddressError('Version %d not a recognized Bitcoin Address' % nVersion)
return self
@classmethod
def from_scriptPubKey(cls, scriptPubKey):
"""Convert a scriptPubKey to a CBitcoinAddress
Returns a CBitcoinAddress subclass, either P2SHBitcoinAddress or
P2PKHBitcoinAddress. If the scriptPubKey is not recognized
CBitcoinAddressError will be raised.
"""
try:
return P2SHBitcoinAddress.from_scriptPubKey(scriptPubKey)
except CBitcoinAddressError:
pass
try:
return P2PKHBitcoinAddress.from_scriptPubKey(scriptPubKey)
except CBitcoinAddressError:
pass
raise CBitcoinAddressError('scriptPubKey not a valid address')
def to_scriptPubKey(self):
"""Convert an address to a scriptPubKey"""
raise NotImplementedError
class P2SHBitcoinAddress(CBitcoinAddress):
@classmethod
def from_bytes(cls, data, nVersion=None):
if nVersion is None:
nVersion = bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']
elif nVersion != bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']:
raise ValueError('nVersion incorrect for P2SH address: got %d; expected %d' % \
(nVersion, bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']))
return super(P2SHBitcoinAddress, cls).from_bytes(data, nVersion)
@classmethod
def from_redeemScript(cls, redeemScript):
"""Convert a redeemScript to a P2SH address
Convenience function: equivalent to P2SHBitcoinAddress.from_scriptPubKey(redeemScript.to_p2sh_scriptPubKey())
"""
return cls.from_scriptPubKey(redeemScript.to_p2sh_scriptPubKey())
@classmethod
def from_scriptPubKey(cls, scriptPubKey):
"""Convert a scriptPubKey to a P2SH address
Raises CBitcoinAddressError if the scriptPubKey isn't of the correct
form.
"""
if scriptPubKey.is_p2sh():
return cls.from_bytes(scriptPubKey[2:22], bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR'])
else:
raise CBitcoinAddressError('not a P2SH scriptPubKey')
def to_scriptPubKey(self):
"""Convert an address to a scriptPubKey"""
assert self.nVersion == bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']
return script.CScript([script.OP_HASH160, self, script.OP_EQUAL])
class P2PKHBitcoinAddress(CBitcoinAddress):
@classmethod
def from_bytes(cls, data, nVersion=None):
if nVersion is None:
nVersion = bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']
elif nVersion != bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']:
raise ValueError('nVersion incorrect for P2PKH address: got %d; expected %d' % \
(nVersion, bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']))
return super(P2PKHBitcoinAddress, cls).from_bytes(data, nVersion)
@classmethod
def from_pubkey(cls, pubkey, accept_invalid=False):
"""Create a P2PKH bitcoin address from a pubkey
Raises CBitcoinAddressError if pubkey is invalid, unless accept_invalid
is True.
The pubkey must be a bytes instance; CECKey instances are not accepted.
"""
if not isinstance(pubkey, bytes):
raise TypeError('pubkey must be bytes instance; got %r' % pubkey.__class__)
if not accept_invalid:
if not isinstance(pubkey, bitcoin.core.key.CPubKey):
pubkey = bitcoin.core.key.CPubKey(pubkey)
if not pubkey.is_fullyvalid:
raise CBitcoinAddressError('invalid pubkey')
pubkey_hash = bitcoin.core.Hash160(pubkey)
return P2PKHBitcoinAddress.from_bytes(pubkey_hash)
@classmethod
def from_scriptPubKey(cls, scriptPubKey, accept_non_canonical_pushdata=True, accept_bare_checksig=True):
"""Convert a scriptPubKey to a P2PKH address
Raises CBitcoinAddressError if the scriptPubKey isn't of the correct
form.
accept_non_canonical_pushdata - Allow non-canonical pushes (default True)
accept_bare_checksig - Treat bare-checksig as P2PKH scriptPubKeys (default True)
"""
if accept_non_canonical_pushdata:
# Canonicalize script pushes
scriptPubKey = script.CScript(scriptPubKey) # in case it's not a CScript instance yet
try:
scriptPubKey = script.CScript(tuple(scriptPubKey)) # canonicalize
except bitcoin.core.script.CScriptInvalidError:
raise CBitcoinAddressError('not a P2PKH scriptPubKey: script is invalid')
if (len(scriptPubKey) == 25
and _bord(scriptPubKey[0]) == script.OP_DUP
and _bord(scriptPubKey[1]) == script.OP_HASH160
and _bord(scriptPubKey[2]) == 0x14
and _bord(scriptPubKey[23]) == script.OP_EQUALVERIFY
and _bord(scriptPubKey[24]) == script.OP_CHECKSIG):
return cls.from_bytes(scriptPubKey[3:23], bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR'])
elif accept_bare_checksig:
pubkey = None
# We can operate on the raw bytes directly because we've
# canonicalized everything above.
if (len(scriptPubKey) == 35 # compressed
and _bord(scriptPubKey[0]) == 0x21
and _bord(scriptPubKey[34]) == script.OP_CHECKSIG):
pubkey = scriptPubKey[1:34]
elif (len(scriptPubKey) == 67 # uncompressed
and _bord(scriptPubKey[0]) == 0x41
and _bord(scriptPubKey[66]) == script.OP_CHECKSIG):
pubkey = scriptPubKey[1:65]
if pubkey is not None:
return cls.from_pubkey(pubkey, accept_invalid=True)
raise CBitcoinAddressError('not a P2PKH scriptPubKey')
def to_scriptPubKey(self):
"""Convert an address to a scriptPubKey"""
assert self.nVersion == bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']
return script.CScript([script.OP_DUP, script.OP_HASH160, self, script.OP_EQUALVERIFY, script.OP_CHECKSIG])
class CKey(object):
"""An encapsulated private key
Attributes:
pub - The corresponding CPubKey for this private key
is_compressed - True if compressed
"""
def __init__(self, secret, compressed=True):
self._cec_key = bitcoin.core.key.CECKey()
self._cec_key.set_secretbytes(secret)
self._cec_key.set_compressed(compressed)
self.pub = bitcoin.core.key.CPubKey(self._cec_key.get_pubkey(), self._cec_key)
@property
def is_compressed(self):
return self.pub.is_compressed
def sign(self, hash):
return self._cec_key.sign(hash)
class CBitcoinSecretError(bitcoin.base58.Base58Error):
pass
class CBitcoinSecret(bitcoin.base58.CBase58Data, CKey):
"""A base58-encoded secret key"""
@classmethod
def from_secret_bytes(cls, secret, compressed=True):
"""Create a secret key from a 32-byte secret"""
self = cls.from_bytes(secret + (b'\x01' if compressed else b''),
bitcoin.params.BASE58_PREFIXES['SECRET_KEY'])
self.__init__(None)
return self
def __init__(self, s):
if self.nVersion != bitcoin.params.BASE58_PREFIXES['SECRET_KEY']:
raise CBitcoinSecretError('Not a base58-encoded secret key: got nVersion=%d; expected nVersion=%d' % \
(self.nVersion, bitcoin.params.BASE58_PREFIXES['SECRET_KEY']))
CKey.__init__(self, self[0:32], len(self) > 32 and _bord(self[32]) == 1)
__all__ = (
'CBitcoinAddressError',
'CBitcoinAddress',
'P2SHBitcoinAddress',
'P2PKHBitcoinAddress',
'CKey',
'CBitcoinSecretError',
'CBitcoinSecret',
)
|
twister/twister.github.io | refs/heads/master | demo/testsuite-python/test_pexpect_ssh.py | 1 |
# version: 2.001
import time
import pexpect
#
# <title>Test pExpect SSH</title>
# <description>This test is connecting to an SSH server, using pExpect.</description>
#
def test():
testName = 'test_pexpect_ssh.py'
logMsg('logTest', "\nTestCase:%s starting\n" % testName)
error_code = "PASS"
print '=== Connecting to SSH ==='
child = pexpect.spawn('ssh user@localhost')
child.expect('.+assword:', timeout=10)
child.sendline("password")
print child.before[:-4]
time.sleep(1)
child.expect('user@localhost:', timeout=5)
child.sendline("cd twister")
print child.before[:-4]
print child.after
time.sleep(1)
child.expect('user@localhost:', timeout=5)
child.sendline("ls -la")
print child.before[:-4]
print child.after
time.sleep(1)
child.expect('user@localhost:', timeout=5)
child.sendline("exit")
print child.before[:-4]
print child.after
time.sleep(1)
logMsg('logTest', "TestCase:%s %s\n" % (testName, error_code))
# This return is used by the framework!
return error_code
#
# Must have one of the statuses:
# 'pass', 'fail', 'skipped', 'aborted', 'not executed', 'timeout', 'invalid'
_RESULT = test()
|
Nihn/Diamond-1 | refs/heads/master | src/collectors/ups/ups.py | 68 | # coding=utf-8
"""
This class collects data from NUT, a UPS interface for linux.
#### Dependencies
* nut/upsc to be installed, configured and running.
"""
import diamond.collector
import os
import subprocess
from diamond.collector import str_to_bool
class UPSCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(UPSCollector, self).get_default_config_help()
config_help.update({
'ups_name': 'The name of the ups to collect data for',
'bin': 'The path to the upsc binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default collector settings.
"""
config = super(UPSCollector, self).get_default_config()
config.update({
'path': 'ups',
'ups_name': 'cyberpower',
'bin': '/bin/upsc',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
if not os.access(self.config['bin'], os.X_OK):
self.log.error("%s is not executable", self.config['bin'])
return False
command = [self.config['bin'], self.config['ups_name']]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
for ln in p.strip().splitlines():
datapoint = ln.split(": ")
try:
val = float(datapoint[1])
except:
continue
if len(datapoint[0].split(".")) == 2:
# If the metric name is the same as the subfolder
# double it so it's visible.
name = ".".join([datapoint[0], datapoint[0].split(".")[1]])
else:
name = datapoint[0]
self.publish(name, val)
|
jehine-MSFT/azure-storage-python | refs/heads/master | azure/storage/queue/queueservice.py | 1 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureConflictHttpError,
AzureHttpError,
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
)
from .._error import (
_dont_fail_not_exist,
_dont_fail_on_exist,
_validate_not_none,
_ERROR_CONFLICT,
_ERROR_STORAGE_MISSING_INFO,
)
from .._serialization import (
_get_request_body,
)
from .._common_conversion import (
_int_to_str,
_to_str,
)
from .._http import (
HTTPRequest,
)
from ..models import (
Services,
ListGenerator,
)
from .models import (
QueueMessageFormat,
)
from .._auth import (
_StorageSASAuthentication,
_StorageSharedKeyAuthentication,
)
from .._connection import _ServiceParameters
from .._serialization import (
_convert_signed_identifiers_to_xml,
_convert_service_properties_to_xml,
)
from .._deserialization import (
_convert_xml_to_service_properties,
_convert_xml_to_signed_identifiers,
)
from ._serialization import (
_convert_queue_message_xml,
_get_path,
)
from ._deserialization import (
_convert_xml_to_queues,
_convert_xml_to_queue_messages,
_parse_queue_message_from_headers,
_parse_metadata_and_message_count,
)
from ..sharedaccesssignature import (
SharedAccessSignature,
)
from ..storageclient import StorageClient
_HTTP_RESPONSE_NO_CONTENT = 204
class QueueService(StorageClient):
'''
This is the main class managing queue resources.
The Queue service stores messages. A queue can contain an unlimited number of
messages, each of which can be up to 64KB in size. Messages are generally added
to the end of the queue and retrieved from the front of the queue, although
first in, first out (FIFO) behavior is not guaranteed.
:ivar function(data) encode_function:
A function used to encode queue messages. Takes as
a parameter the data passed to the put_message API and returns the encoded
message. Defaults to take text and xml encode, but bytes and other
encodings can be used. For example, base64 may be preferable for developing
across multiple Azure Storage libraries in different languages. See the
:class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and
no encoding methods as well as binary equivalents.
:ivar function(data) decode_function:
A function used to encode decode messages. Takes as
a parameter the data returned by the get_messages and peek_messages APIs and
returns the decoded message. Defaults to return text and xml decode, but
bytes and other decodings can be used. For example, base64 may be preferable
for developing across multiple Azure Storage libraries in different languages.
See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64
and no decoding methods as well as binary equivalents.
'''
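    # Usage sketch (account name and key are placeholders): the default
    # encode/decode pair can be swapped after construction, e.g. for base64
    # interoperability with other Azure Storage SDKs:
    #   queue_service = QueueService(account_name='myaccount', account_key='mykey')
    #   queue_service.encode_function = QueueMessageFormat.binary_base64encode
    #   queue_service.decode_function = QueueMessageFormat.binary_base64decode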
def __init__(self, account_name=None, account_key=None, sas_token=None,
is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
request_session=None, connection_string=None):
'''
:param str account_name:
The storage account name. This is used to authenticate requests
signed with an account key and to construct the storage endpoint. It
is required unless a connection string is given.
:param str account_key:
The storage account key. This is used for shared key authentication.
:param str sas_token:
A shared access signature token to use to authenticate requests
instead of the account key. If account key and sas token are both
specified, account key will be used to sign.
:param bool is_emulated:
Whether to use the emulator. Defaults to False. If specified, will
override all other parameters besides connection string and request
session.
:param str protocol:
The protocol to use for requests. Defaults to https.
:param str endpoint_suffix:
The host base component of the url, minus the account name. Defaults
to Azure (core.windows.net). Override this to use the China cloud
(core.chinacloudapi.cn).
:param requests.Session request_session:
The session object to use for http requests.
:param str connection_string:
If specified, this will override all other parameters besides
request session. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
'''
service_params = _ServiceParameters.get_service_parameters(
'queue',
account_name=account_name,
account_key=account_key,
sas_token=sas_token,
is_emulated=is_emulated,
protocol=protocol,
endpoint_suffix=endpoint_suffix,
request_session=request_session,
connection_string=connection_string)
super(QueueService, self).__init__(service_params)
if self.account_key:
self.authentication = _StorageSharedKeyAuthentication(
self.account_name,
self.account_key,
)
elif self.sas_token:
self.authentication = _StorageSASAuthentication(self.sas_token)
else:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
self.encode_function = QueueMessageFormat.text_xmlencode
self.decode_function = QueueMessageFormat.text_xmldecode
def generate_account_shared_access_signature(self, resource_types, permission,
expiry, start=None, ip=None, protocol=None):
'''
Generates a shared access signature for the queue service.
Use the returned signature with the sas_token parameter of QueueService.
:param ResourceTypes resource_types:
Specifies the resource types that are accessible with the account SAS.
:param AccountPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_account(Services.QUEUE, resource_types, permission,
expiry, start=start, ip=ip, protocol=protocol)
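    # A usage sketch (not from the original source; 'service' stands for a QueueService
    # instance, and ResourceTypes/AccountPermissions are assumed to be importable from
    # azure.storage.models in this SDK version):
    #
    #   from datetime import datetime, timedelta
    #   from azure.storage.models import ResourceTypes, AccountPermissions
    #   token = service.generate_account_shared_access_signature(
    #       ResourceTypes.OBJECT, AccountPermissions.READ,
    #       expiry=datetime.utcnow() + timedelta(hours=1))
    #   read_only_service = QueueService(account_name=service.account_name, sas_token=token)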
def generate_queue_shared_access_signature(self, queue_name,
permission=None,
expiry=None,
start=None,
id=None,
ip=None, protocol=None,):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
:param str queue_name:
The name of the queue to create a SAS token for.
:param QueuePermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
            set_queue_acl.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:return: A Shared Access Signature (sas) token.
:rtype: str
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_queue(
queue_name,
permission=permission,
expiry=expiry,
start=start,
id=id,
ip=ip,
protocol=protocol,
)
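    # A usage sketch (QueuePermissions import path assumed): scope a token to a single
    # queue so that the holder can only read messages from it.
    #
    #   from azure.storage.queue.models import QueuePermissions
    #   token = service.generate_queue_shared_access_signature(
    #       'taskqueue', QueuePermissions.READ,
    #       expiry=datetime.utcnow() + timedelta(hours=1))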
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The queue service properties.
:rtype: :class:`~azure.storage.models.ServiceProperties`
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('restype', 'service'),
('comp', 'properties'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_service_properties(response.body)
def set_queue_service_properties(self, logging=None, hour_metrics=None,
minute_metrics=None, cors=None, timeout=None):
'''
Sets the properties of a storage account's Queue service, including
        Azure Storage Analytics. If an element (e.g. Logging) is left as None, the
existing settings on the service for that functionality are preserved.
For more information on Azure Storage Analytics, see
https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.
:param Logging logging:
The logging settings provide request logs.
:param Metrics hour_metrics:
The hour metrics settings provide a summary of request
statistics grouped by API in hourly aggregates for blobs.
:param Metrics minute_metrics:
The minute metrics settings provide request statistics
for each minute for blobs.
:param cors:
You can include up to five CorsRule elements in the
list. If an empty list is specified, all CORS rules will be deleted,
and CORS will be disabled for the service. For detailed information
about CORS rules and evaluation logic, see
https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
:type cors: list of :class:`~azure.storage.models.CorsRule`
:param int timeout:
The server timeout, expressed in seconds.
'''
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('restype', 'service'),
('comp', 'properties'),
('timeout', _int_to_str(timeout)),
]
request.body = _get_request_body(
_convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
self._perform_request(request)
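    # A usage sketch (Logging/RetentionPolicy import path and constructor signature
    # assumed): because unset elements are preserved, only logging settings change here.
    #
    #   from azure.storage.models import Logging, RetentionPolicy
    #   logging = Logging(delete=True, read=True, write=True,
    #                     retention_policy=RetentionPolicy(enabled=True, days=5))
    #   service.set_queue_service_properties(logging=logging)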
def list_queues(self, prefix=None, num_results=None, include_metadata=False,
marker=None, timeout=None):
'''
Returns a generator to list the queues. The generator will lazily follow
the continuation tokens returned by the service and stop when all queues
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
queues, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
:param int num_results:
The maximum number of queues to return.
:param bool include_metadata:
            Specifies that queue metadata be returned in the response.
:param str marker:
An opaque continuation token. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
'''
include = 'metadata' if include_metadata else None
kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include,
'marker': marker, 'timeout': timeout}
resp = self._list_queues(**kwargs)
return ListGenerator(resp, self._list_queues, (), kwargs)
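    # A usage sketch: the generator follows continuation tokens on its own, so a plain
    # for-loop visits every queue whose name starts with the prefix.
    #
    #   for queue in service.list_queues(prefix='task', include_metadata=True):
    #       print(queue.name, queue.metadata)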
def _list_queues(self, prefix=None, marker=None, max_results=None,
include=None, timeout=None):
'''
Returns a list of queues under the specified account. Makes a single list
request to the service. Used internally by the list_queues method.
:param str prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
:param str marker:
A token which identifies the portion of the query to be
returned with the next query operation. The operation returns a
next_marker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
:param int max_results:
The maximum number of queues to return. A single list request may
return up to 1000 queues and potentially a continuation token which
            should be followed to get additional results.
:param str include:
            Include this parameter to specify that the queue's
metadata be returned as part of the response body.
:param int timeout:
The server timeout, expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path()
request.query = [
('comp', 'list'),
('prefix', _to_str(prefix)),
('marker', _to_str(marker)),
('maxresults', _int_to_str(max_results)),
('include', _to_str(include)),
('timeout', _int_to_str(timeout))
]
response = self._perform_request(request)
return _convert_xml_to_queues(response)
def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None):
'''
Creates a queue under the given account.
:param str queue_name:
The name of the queue to create. A queue name must be from 3 through
63 characters long and may only contain lowercase letters, numbers,
and the dash (-) character. The first and last letters in the queue
must be alphanumeric. The dash (-) character cannot be the first or
last character. Consecutive dash characters are not permitted in the
queue name.
:param metadata:
A dict containing name-value pairs to associate with the queue as
metadata. Note that metadata names preserve the case with which they
were created, but are case-insensitive when set or read.
:type metadata: a dict mapping str to str
:param bool fail_on_exist:
Specifies whether to throw an exception if the queue already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the queue was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [('timeout', _int_to_str(timeout))]
request.headers = [('x-ms-meta-name-values', metadata)]
if not fail_on_exist:
try:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
return False
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
raise AzureConflictHttpError(
_ERROR_CONFLICT.format(response.message), response.status)
return True
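    # A usage sketch: with the default fail_on_exist=False the call is effectively
    # idempotent, and the boolean result says whether a new queue was actually created.
    #
    #   created = service.create_queue('taskqueue', metadata={'owner': 'worker-pool'})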
def delete_queue(self, queue_name, fail_not_exist=False, timeout=None):
'''
Deletes the specified queue and any messages it contains.
When a queue is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The queue is later removed from
the Queue service during garbage collection.
Note that deleting a queue is likely to take at least 40 seconds to complete.
        If an operation is attempted against the queue while it is being deleted,
an :class:`AzureConflictHttpError` will be thrown.
:param str queue_name:
The name of the queue to delete.
:param bool fail_not_exist:
Specifies whether to throw an exception if the queue doesn't exist.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the queue was deleted. If fail_not_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [('timeout', _int_to_str(timeout))]
if not fail_not_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue_metadata(self, queue_name, timeout=None):
'''
Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-value pairs.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A dictionary representing the queue metadata with an
approximate_message_count int property on the dict estimating the
number of messages in the queue.
:rtype: a dict mapping str to str
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'metadata'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _parse_metadata_and_message_count(response)
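    # A usage sketch: the returned dict also exposes approximate_message_count, which
    # makes this call a cheap way to monitor queue depth.
    #
    #   metadata = service.get_queue_metadata('taskqueue')
    #   print(metadata.approximate_message_count, metadata.get('owner'))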
def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
'''
Sets user-defined metadata on the specified queue. Metadata is
associated with the queue as name-value pairs.
:param str queue_name:
The name of an existing queue.
:param dict metadata:
A dict containing name-value pairs to associate with the
queue as metadata.
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'metadata'),
('timeout', _int_to_str(timeout)),
]
request.headers = [('x-ms-meta-name-values', metadata)]
self._perform_request(request)
def exists(self, queue_name, timeout=None):
'''
Returns a boolean indicating whether the queue exists.
:param str queue_name:
The name of queue to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the queue exists.
:rtype: bool
'''
try:
self.get_queue_metadata(queue_name, timeout=timeout)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
def get_queue_acl(self, queue_name, timeout=None):
'''
Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
:param str queue_name:
The name of an existing queue.
:param int timeout:
The server timeout, expressed in seconds.
:return: A dictionary of access policies associated with the queue.
:rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
response = self._perform_request(request)
return _convert_xml_to_signed_identifiers(response.body)
def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None):
'''
Sets stored access policies for the queue that may be used with Shared
Access Signatures.
When you set permissions for a queue, the existing permissions are replaced.
To update the queue’s permissions, call :func:`~get_queue_acl` to fetch
all access policies associated with the queue, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a queue, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str queue_name:
The name of an existing queue.
:param signed_identifiers:
A dictionary of access policies to associate with the queue. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(queue_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
request.body = _get_request_body(
_convert_signed_identifiers_to_xml(signed_identifiers))
self._perform_request(request)
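    # A usage sketch (AccessPolicy/QueuePermissions import paths assumed): register a
    # stored access policy that queue SAS tokens can then reference by id.
    #
    #   from azure.storage.models import AccessPolicy
    #   from azure.storage.queue.models import QueuePermissions
    #   policy = AccessPolicy(permission=QueuePermissions.PROCESS,
    #                         expiry=datetime.utcnow() + timedelta(days=7))
    #   service.set_queue_acl('taskqueue', {'worker-policy': policy})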
def put_message(self, queue_name, content, visibility_timeout=None,
time_to_live=None, timeout=None):
'''
Adds a new message to the back of the message queue.
The visibility timeout specifies the time that the message will be
invisible. After the timeout expires, the message will become visible.
If a visibility timeout is not specified, the default value of 0 is used.
The message time-to-live specifies how long a message will remain in the
queue. The message will be deleted from the queue when the time-to-live
period expires.
:param str queue_name:
The name of the queue to put the message into.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str. The encoded message can be up to
64KB in size.
:param int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:param int time_to_live:
Specifies the time-to-live interval for the message, in
seconds. The maximum time-to-live allowed is 7 days. If this
parameter is omitted, the default time-to-live is 7 days.
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('content', content)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
request.query = [
('visibilitytimeout', _to_str(visibility_timeout)),
('messagettl', _to_str(time_to_live)),
('timeout', _int_to_str(timeout))
]
request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function))
self._perform_request(request)
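    # A usage sketch: with the default text_xmlencode encode_function the content is
    # unicode text; the message becomes visible immediately and expires after an hour.
    #
    #   service.put_message('taskqueue', u'process-order-42', time_to_live=3600)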
def get_messages(self, queue_name, num_messages=None,
visibility_timeout=None, timeout=None):
'''
Retrieves one or more messages from the front of the queue.
When a message is retrieved from the queue, the response includes the message
content and a pop_receipt value, which is required to delete the message.
The message is not automatically deleted from the queue, but after it has
been retrieved, it is not visible to other clients for the time interval
specified by the visibility_timeout parameter.
:param str queue_name:
The name of the queue to get messages from.
:param int num_messages:
A nonzero integer value that specifies the number of
messages to retrieve from the queue, up to a maximum of 32. If
fewer are visible, the visible messages are returned. By default,
a single message is retrieved from the queue with this operation.
:param int visibility_timeout:
Specifies the new visibility timeout value, in seconds, relative
to server time. The new value must be larger than or equal to 1
second, and cannot be larger than 7 days. The visibility timeout of
a message can be set to a value later than the expiry time.
:param int timeout:
The server timeout, expressed in seconds.
:return: A list of :class:`~azure.storage.queue.models.QueueMessage` objects.
:rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
request.query = [
('numofmessages', _to_str(num_messages)),
('visibilitytimeout', _to_str(visibility_timeout)),
('timeout', _int_to_str(timeout))
]
response = self._perform_request(request)
return _convert_xml_to_queue_messages(response, self.decode_function)
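    # A usage sketch of the usual receive/process/delete loop; the id and pop_receipt
    # attributes of each returned QueueMessage are exactly what delete_message needs
    # (handle() below is a hypothetical callback).
    #
    #   for message in service.get_messages('taskqueue', num_messages=16,
    #                                       visibility_timeout=30):
    #       handle(message.content)
    #       service.delete_message('taskqueue', message.id, message.pop_receipt)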
def peek_messages(self, queue_name, num_messages=None, timeout=None):
'''
Retrieves one or more messages from the front of the queue, but does
not alter the visibility of the message.
Only messages that are visible may be retrieved. When a message is retrieved
for the first time with a call to get_messages, its dequeue_count property
is set to 1. If it is not deleted and is subsequently retrieved again, the
dequeue_count property is incremented. The client may use this value to
determine how many times a message has been retrieved. Note that a call
to peek_messages does not increment the value of DequeueCount, but returns
this value for the client to read.
:param str queue_name:
The name of the queue to peek messages from.
:param int num_messages:
A nonzero integer value that specifies the number of
messages to peek from the queue, up to a maximum of 32. By default,
a single message is peeked from the queue with this operation.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that
time_next_visible and pop_receipt will not be populated as peek does
not pop the message and can only retrieve already visible messages.
:rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
request.query = [
('peekonly', 'true'),
('numofmessages', _to_str(num_messages)),
('timeout', _int_to_str(timeout))]
response = self._perform_request(request)
return _convert_xml_to_queue_messages(response, self.decode_function)
def delete_message(self, queue_name, message_id, pop_receipt, timeout=None):
'''
Deletes the specified message.
Normally after a client retrieves a message with the get_messages operation,
the client is expected to process and delete the message. To delete the
message, you must have two items of data: id and pop_receipt. The
id is returned from the previous get_messages operation. The
pop_receipt is returned from the most recent :func:`~get_messages` or
:func:`~update_message` operation. In order for the delete_message operation
to succeed, the pop_receipt specified on the request must match the
pop_receipt returned from the :func:`~get_messages` or :func:`~update_message`
operation.
:param str queue_name:
The name of the queue from which to delete the message.
:param str message_id:
The message id identifying the message to delete.
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~get_messages` or :func:`~update_message`.
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('pop_receipt', pop_receipt)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = _get_path(queue_name, True, message_id)
request.query = [
('popreceipt', _to_str(pop_receipt)),
('timeout', _int_to_str(timeout))]
self._perform_request(request)
def clear_messages(self, queue_name, timeout=None):
'''
Deletes all messages from the specified queue.
:param str queue_name:
The name of the queue whose messages to clear.
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = _get_path(queue_name, True)
request.query = [('timeout', _int_to_str(timeout))]
self._perform_request(request)
def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout,
content=None, timeout=None):
'''
Updates the visibility timeout of a message. You can also use this
operation to update the contents of a message.
This operation can be used to continually extend the invisibility of a
queue message. This functionality can be useful if you want a worker role
to “lease” a queue message. For example, if a worker role calls get_messages
and recognizes that it needs more time to process a message, it can
continually extend the message’s invisibility until it is processed. If
the worker role were to fail during processing, eventually the message
would become visible again and another worker role could process it.
:param str queue_name:
The name of the queue containing the message to update.
:param str message_id:
The message id identifying the message to update.
:param str pop_receipt:
A valid pop receipt value returned from an earlier call
to the :func:`~get_messages` or :func:`~update_message` operation.
:param int visibility_timeout:
Specifies the new visibility timeout value, in seconds,
relative to server time. The new value must be larger than or equal
to 0, and cannot be larger than 7 days. The visibility timeout of a
message cannot be set to a value later than the expiry time. A
message can be updated until it has been deleted or has expired.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that
only time_next_visible and pop_receipt will be populated.
:rtype: list of :class:`~azure.storage.queue.models.QueueMessage`
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message_id', message_id)
_validate_not_none('pop_receipt', pop_receipt)
_validate_not_none('visibility_timeout', visibility_timeout)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(queue_name, True, message_id)
request.query = [
('popreceipt', _to_str(pop_receipt)),
('visibilitytimeout', _int_to_str(visibility_timeout)),
('timeout', _int_to_str(timeout))
]
if content is not None:
request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function))
response = self._perform_request(request)
return _parse_queue_message_from_headers(response)
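    # A usage sketch of the "lease renewal" pattern described above: keep pushing the
    # visibility window out while processing, always using the pop_receipt returned by
    # the most recent call for the next renewal.
    #
    #   messages = service.get_messages('taskqueue', visibility_timeout=30)
    #   msg = messages[0]
    #   renewed = service.update_message('taskqueue', msg.id, msg.pop_receipt, 30)
    #   # ...keep renewing with the pop_receipt from 'renewed' while work continues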
|
Accelerite/cinder | refs/heads/master | tools/colorizer.py | 19 | #!/usr/bin/env python
# Copyright (c) 2013, Nebula, Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Display a subunit stream through a colorized unittest test runner."""
import heapq
import subunit
import sys
import unittest
import testtools
class _AnsiColorizer(object):
"""ANSI colorizer that wraps a stream object.
    A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""Check if platform is supported.
A class method that returns True if the current platform supports
coloring terminal output using this method.
Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except Exception:
# guess false in case of error
return False
supported = classmethod(supported)
def write(self, text, color):
"""Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
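    # A quick sketch of the escape sequence produced: since _colors['green'] is 32,
    #   _AnsiColorizer(sys.stdout).write('OK', 'green')
    # emits '\x1b[32;1mOK\x1b[0m' -- bold green text followed by an attribute reset.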
class _Win32Colorizer(object):
"""See _AnsiColorizer docstring."""
def __init__(self, stream):
import win32console
red, green, blue, bold = (win32console.FOREGROUND_RED,
win32console.FOREGROUND_GREEN,
win32console.FOREGROUND_BLUE,
win32console.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
def supported(cls, stream=sys.stdout):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
except ImportError:
return False
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
return True
supported = classmethod(supported)
def write(self, text, color):
color = self._colors[color]
self.screenBuffer.SetConsoleTextAttribute(color)
self.stream.write(text)
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""See _AnsiColorizer docstring."""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
def get_elapsed_time_color(elapsed_time):
if elapsed_time > 1.0:
return 'red'
elif elapsed_time > 0.25:
return 'yellow'
else:
return 'green'
class NovaTestResult(testtools.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(NovaTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.num_slow_tests = 10
self.slow_tests = [] # this is a fixed-sized heap
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
self.start_time = None
self.last_time = {}
self.results = {}
self.last_written = None
def _writeElapsedTime(self, elapsed):
color = get_elapsed_time_color(elapsed)
self.colorizer.write(" %.2f" % elapsed, color)
def _addResult(self, test, *args):
try:
name = test.id()
except AttributeError:
name = 'Unknown.unknown'
test_class, test_name = name.rsplit('.', 1)
elapsed = (self._now() - self.start_time).total_seconds()
item = (elapsed, test_class, test_name)
if len(self.slow_tests) >= self.num_slow_tests:
heapq.heappushpop(self.slow_tests, item)
else:
heapq.heappush(self.slow_tests, item)
self.results.setdefault(test_class, [])
self.results[test_class].append((test_name, elapsed) + args)
self.last_time[test_class] = self._now()
self.writeTests()
def _writeResult(self, test_name, elapsed, long_result, color,
short_result, success):
if self.showAll:
self.stream.write(' %s' % str(test_name).ljust(66))
self.colorizer.write(long_result, color)
if success:
self._writeElapsedTime(elapsed)
self.stream.writeln()
else:
self.colorizer.write(short_result, color)
def addSuccess(self, test):
super(NovaTestResult, self).addSuccess(test)
self._addResult(test, 'OK', 'green', '.', True)
def addFailure(self, test, err):
if test.id() == 'process-returncode':
return
super(NovaTestResult, self).addFailure(test, err)
self._addResult(test, 'FAIL', 'red', 'F', False)
def addError(self, test, err):
        super(NovaTestResult, self).addError(test, err)
self._addResult(test, 'ERROR', 'red', 'E', False)
def addSkip(self, test, reason=None, details=None):
super(NovaTestResult, self).addSkip(test, reason, details)
self._addResult(test, 'SKIP', 'blue', 'S', True)
def startTest(self, test):
self.start_time = self._now()
super(NovaTestResult, self).startTest(test)
def writeTestCase(self, cls):
if not self.results.get(cls):
return
if cls != self.last_written:
self.colorizer.write(cls, 'white')
self.stream.writeln()
for result in self.results[cls]:
self._writeResult(*result)
del self.results[cls]
self.stream.flush()
self.last_written = cls
def writeTests(self):
time = self.last_time.get(self.last_written, self._now())
if not self.last_written or (self._now() - time).total_seconds() > 2.0:
diff = 3.0
while diff > 2.0:
classes = self.results.keys()
oldest = min(classes, key=lambda x: self.last_time[x])
diff = (self._now() - self.last_time[oldest]).total_seconds()
self.writeTestCase(oldest)
else:
self.writeTestCase(self.last_written)
def done(self):
self.stopTestRun()
def stopTestRun(self):
for cls in list(self.results.iterkeys()):
self.writeTestCase(cls)
self.stream.writeln()
self.writeSlowTests()
def writeSlowTests(self):
# Pare out 'fast' tests
slow_tests = [item for item in self.slow_tests
if get_elapsed_time_color(item[0]) != 'green']
if slow_tests:
slow_total_time = sum(item[0] for item in slow_tests)
slow = ("Slowest %i tests took %.2f secs:"
% (len(slow_tests), slow_total_time))
self.colorizer.write(slow, 'yellow')
self.stream.writeln()
last_cls = None
# sort by name
for elapsed, cls, name in sorted(slow_tests,
key=lambda x: x[1] + x[2]):
if cls != last_cls:
self.colorizer.write(cls, 'white')
self.stream.writeln()
last_cls = cls
self.stream.write(' %s' % str(name).ljust(68))
self._writeElapsedTime(elapsed)
self.stream.writeln()
def printErrors(self):
if self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavor, errors):
for test, err in errors:
self.colorizer.write("=" * 70, 'red')
self.stream.writeln()
self.colorizer.write(flavor, 'red')
self.stream.writeln(": %s" % test.id())
self.colorizer.write("-" * 70, 'red')
self.stream.writeln()
self.stream.writeln("%s" % err)
test = subunit.ProtocolTestCase(sys.stdin, passthrough=None)
if sys.version_info[0:2] <= (2, 6):
runner = unittest.TextTestRunner(verbosity=2)
else:
runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult)
if runner.run(test).wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)
|
synologix/enigma2 | refs/heads/master | lib/python/Components/HTMLComponent.py | 66 | # some helper classes first:
class HTMLComponent:
def __init__(self):
pass
def produceHTML(self):
return ""
|
mjhea0/feedzilla | refs/heads/master | feedzilla/tests.py | 2 | # -*- coding: utf-8 -*-
# Copyright: 2011, Grigoriy Petukhov
# Author: Grigoriy Petukhov (http://lorien.name)
# License: BSD
from datetime import datetime
import os.path
from django.test import TestCase
from feedzilla.util.parse import guess_date, parse_feed
ROOT = os.path.dirname(os.path.realpath(__file__))
DATA_DIR = os.path.join(ROOT, 'test_data')
class ParserTestCase(TestCase):
def test_guess_datetime(self):
def create_feed(custom_language):
class FeedMockup(object):
"Emulates feedparser.parse() object"
class Feed(object):
language = custom_language
def __getitem__(self, key):
return getattr(self, key)
feed = Feed()
return FeedMockup()
date_string = 'Птн, 18 Мар 2011 02:47:00 +0300'
feed = create_feed('ru')
guessed = guess_date([date_string], feed)
self.assertEqual(guessed, datetime(2011, 3, 18, 2, 47, 0))
# set language to en, this should fail
feed = create_feed('en')
guessed = guess_date([date_string], feed)
self.assertEqual(guessed, None)
data = open(os.path.join(DATA_DIR, 'feed_with_rudate')).read()
resp = parse_feed(source_data=data)
# Птн, 18 Мар 2011 01:04:00 +0300
self.assertEqual(resp['entries'][0]['created'], datetime(2011, 3, 18, 2, 47, 0))
|
wenjiezhang2013/My-Ghost-blog | refs/heads/master | node_modules/sqlite3/deps/extract.py | 775 | import sys
import tarfile
import os
tarball = os.path.abspath(sys.argv[1])
dirname = os.path.abspath(sys.argv[2])
tfile = tarfile.open(tarball, 'r:gz')
tfile.extractall(dirname)
sys.exit(0)
|
youknowone/hangul-romanize | refs/heads/master | hangul_romanize/core.py | 1 | # -*- coding: utf-8 -*-
try:
unicode(0)
except NameError:
# py3
unicode = str
unichr = chr
class Syllable(object):
"""Hangul syllable interface"""
MIN = ord(u'가')
MAX = ord(u'힣')
def __init__(self, char=None, code=None):
if char is None and code is None:
raise TypeError('__init__ takes char or code as a keyword argument (not given)')
if char is not None and code is not None:
raise TypeError('__init__ takes char or code as a keyword argument (both given)')
if char:
code = ord(char)
if not self.MIN <= code <= self.MAX:
raise TypeError('__init__ expected Hangul syllable but {0} not in [{1}..{2}]'.format(code, self.MIN, self.MAX))
self.code = code
@property
def index(self):
return self.code - self.MIN
@property
def initial(self):
return self.index // 588
@property
def vowel(self):
return (self.index // 28) % 21
@property
def final(self):
return self.index % 28
@property
def char(self):
return unichr(self.code)
def __unicode__(self):
return self.char
def __repr__(self):
return u'''<Syllable({}({}),{}({}),{}({}),{}({}))>'''.format(
self.code, self.char, self.initial, u'', self.vowel, u'', self.final, u'')
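    # A worked example: for u'한' (code point 0xD55C == 54620) the index is
    # 54620 - 44032 == 10588 == 588*18 + 28*0 + 4, so the decomposition is
    # initial 18 (ㅎ), vowel 0 (ㅏ), final 4 (ㄴ).
    #
    #   s = Syllable(u'한')
    #   assert (s.initial, s.vowel, s.final) == (18, 0, 4)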
class Transliter(object):
"""General transliting interface"""
def __init__(self, rule):
self.rule = rule
def translit(self, text):
"""Translit text to romanized text
:param text: Unicode string or unicode character iterator
"""
result = []
pre = None, None
now = None, None
for c in text:
try:
post = c, Syllable(c)
except TypeError:
post = c, None
if now[0] is not None:
out = self.rule(now, pre=pre, post=post)
if out is not None:
result.append(out)
pre = now
now = post
if now is not None:
out = self.rule(now, pre=pre, post=(None, None))
if out is not None:
result.append(out)
return u''.join(result)
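# A usage sketch: a rule is just a callable taking (now, pre=..., post=...); the package
# is assumed to ship one (e.g. hangul_romanize.rule.academic) implementing Revised
# Romanization.
#
#   from hangul_romanize.rule import academic
#   transliter = Transliter(academic)
#   print(transliter.translit(u'안녕하세요'))   # roughly 'annyeonghaseyo'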
|
alephu5/Soundbyte | refs/heads/master | environment/lib/python3.3/site-packages/nose/selector.py | 5 | """
Test Selection
--------------
Test selection is handled by a Selector. The test loader calls the
appropriate selector method for each object it encounters that it
thinks may be a test.
"""
import logging
import os
import unittest
from nose.config import Config
from nose.util import split_test_name, src, getfilename, getpackage, ispackage
log = logging.getLogger(__name__)
__all__ = ['Selector', 'defaultSelector', 'TestAddress']
# for efficiency and easier mocking
op_join = os.path.join
op_basename = os.path.basename
op_exists = os.path.exists
op_splitext = os.path.splitext
op_isabs = os.path.isabs
op_abspath = os.path.abspath
class Selector(object):
"""Core test selector. Examines test candidates and determines whether,
given the specified configuration, the test candidate should be selected
as a test.
"""
def __init__(self, config):
if config is None:
config = Config()
self.configure(config)
def configure(self, config):
self.config = config
self.exclude = config.exclude
self.ignoreFiles = config.ignoreFiles
self.include = config.include
self.plugins = config.plugins
self.match = config.testMatch
def matches(self, name):
"""Does the name match my requirements?
To match, a name must match config.testMatch OR config.include
and it must not match config.exclude
"""
return ((self.match.search(name)
or (self.include and
[_f for _f in [inc.search(name) for inc in self.include] if _f]))
and ((not self.exclude)
or not [_f for _f in [exc.search(name) for exc in self.exclude] if _f]
))
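    # A rough illustration (default nose configuration assumed): a name such as
    # 'test_widgets' satisfies testMatch and is therefore wanted, 'helpers' is wanted
    # only if an --include pattern matches it, and anything matching an --exclude
    # pattern is rejected regardless of the other two checks.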
def wantClass(self, cls):
"""Is the class a wanted test class?
A class must be a unittest.TestCase subclass, or match test name
requirements. Classes that start with _ are always excluded.
"""
declared = getattr(cls, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = (not cls.__name__.startswith('_')
and (issubclass(cls, unittest.TestCase)
or self.matches(cls.__name__)))
plug_wants = self.plugins.wantClass(cls)
if plug_wants is not None:
log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
wanted = plug_wants
log.debug("wantClass %s? %s", cls, wanted)
return wanted
def wantDirectory(self, dirname):
"""Is the directory a wanted test directory?
All package directories match, so long as they do not match exclude.
All other directories must match test requirements.
"""
tail = op_basename(dirname)
if ispackage(dirname):
wanted = (not self.exclude
or not [_f for _f in [exc.search(tail) for exc in self.exclude] if _f])
else:
wanted = (self.matches(tail)
or (self.config.srcDirs
and tail in self.config.srcDirs))
plug_wants = self.plugins.wantDirectory(dirname)
if plug_wants is not None:
log.debug("Plugin setting selection of %s to %s",
dirname, plug_wants)
wanted = plug_wants
log.debug("wantDirectory %s? %s", dirname, wanted)
return wanted
def wantFile(self, file):
"""Is the file a wanted test file?
The file must be a python source file and match testMatch or
include, and not match exclude. Files that match ignore are *never*
wanted, regardless of plugin, testMatch, include or exclude settings.
"""
# never, ever load files that match anything in ignore
# (.* _* and *setup*.py by default)
base = op_basename(file)
ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
if ignore_this.search(base) ]
if ignore_matches:
log.debug('%s matches ignoreFiles pattern; skipped',
base)
return False
if not self.config.includeExe and os.access(file, os.X_OK):
log.info('%s is executable; skipped', file)
return False
dummy, ext = op_splitext(base)
pysrc = ext == '.py'
wanted = pysrc and self.matches(base)
plug_wants = self.plugins.wantFile(file)
if plug_wants is not None:
log.debug("plugin setting want %s to %s", file, plug_wants)
wanted = plug_wants
log.debug("wantFile %s? %s", file, wanted)
return wanted
def wantFunction(self, function):
"""Is the function a test function?
"""
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
return False
declared = getattr(function, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = not funcname.startswith('_') and self.matches(funcname)
plug_wants = self.plugins.wantFunction(function)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantFunction %s? %s", function, wanted)
return wanted
def wantMethod(self, method):
"""Is the method a test method?
"""
try:
method_name = method.__name__
except AttributeError:
# not a method
return False
if method_name.startswith('_'):
# never collect 'private' methods
return False
declared = getattr(method, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(method_name)
plug_wants = self.plugins.wantMethod(method)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantMethod %s? %s", method, wanted)
return wanted
def wantModule(self, module):
"""Is the module a test module?
The tail of the module name must match test requirements. One exception:
we always want __main__.
"""
declared = getattr(module, '__test__', None)
if declared is not None:
wanted = declared
else:
wanted = self.matches(module.__name__.split('.')[-1]) \
or module.__name__ == '__main__'
plug_wants = self.plugins.wantModule(module)
if plug_wants is not None:
wanted = plug_wants
log.debug("wantModule %s? %s", module, wanted)
return wanted
defaultSelector = Selector
class TestAddress(object):
"""A test address represents a user's request to run a particular
test. The user may specify a filename or module (or neither),
and/or a callable (a class, function, or method). The naming
format for test addresses is:
filename_or_module:callable
Filenames that are not absolute will be made absolute relative to
the working dir.
The filename or module part will be considered a module name if it
doesn't look like a file, that is, if it doesn't exist on the file
system and it doesn't contain any directory separators and it
doesn't end in .py.
Callables may be a class name, function name, method name, or
class.method specification.
"""
def __init__(self, name, workingDir=None):
if workingDir is None:
workingDir = os.getcwd()
self.name = name
self.workingDir = workingDir
self.filename, self.module, self.call = split_test_name(name)
log.debug('Test name %s resolved to file %s, module %s, call %s',
name, self.filename, self.module, self.call)
if self.filename is None:
if self.module is not None:
self.filename = getfilename(self.module, self.workingDir)
if self.filename:
self.filename = src(self.filename)
if not op_isabs(self.filename):
self.filename = op_abspath(op_join(workingDir,
self.filename))
if self.module is None:
self.module = getpackage(self.filename)
log.debug(
'Final resolution of test name %s: file %s module %s call %s',
name, self.filename, self.module, self.call)
def totuple(self):
return (self.filename, self.module, self.call)
def __str__(self):
return self.name
def __repr__(self):
return "%s: (%s, %s, %s)" % (self.name, self.filename,
self.module, self.call)
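# A usage sketch of the address format described in the class docstring; the path and
# test names here are purely hypothetical.
#
#   addr = TestAddress('tests/test_math.py:TestAdd.test_ints', workingDir='/project')
#   addr.totuple()  # -> (absolute filename, containing module name, 'TestAdd.test_ints')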
|
gustavoanatoly/hbase | refs/heads/master | dev-support/findHangingTests.py | 3 | #!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
# To disable 'invalid constant name' warnings.
"""
# Script to find hanging test from Jenkins build output
# usage: ./findHangingTests.py <url of Jenkins build console>
"""
import re
import sys
import requests
# If any of these strings appear in the console output, it's a build one should probably ignore
# for analyzing failed/hanging tests.
BAD_RUN_STRINGS = [
"Slave went offline during the build", # Machine went down, can't do anything about it.
"The forked VM terminated without properly saying goodbye", # JVM crashed.
]
def get_bad_tests(console_url):
"""
Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if successfully gets
the build information.
If there is error getting console text or if there are blacklisted strings in console text,
then returns None.
"""
response = requests.get(console_url)
if response.status_code != 200:
print "Error getting consoleText. Response = {} {}".format(
response.status_code, response.reason)
return
# All tests: All testcases which were run.
# Hanging test: A testcase which started but never finished.
# Failed test: Testcase which encountered any kind of failure. It can be failing atomic tests,
# timed out tests, etc
# Timeout test: A Testcase which encountered timeout. Naturally, all timeout tests will be
# included in failed tests.
all_tests_set = set()
hanging_tests_set = set()
failed_tests_set = set()
timeout_tests_set = set()
for line in response.content.splitlines():
result1 = re.findall("Running org.apache.hadoop.hbase.(.*)", line)
if len(result1) == 1:
test_case = result1[0]
if test_case in all_tests_set:
print ("ERROR! Multiple tests with same name '{}'. Might get wrong results "
"for this test.".format(test_case))
else:
hanging_tests_set.add(test_case)
all_tests_set.add(test_case)
result2 = re.findall("Tests run:.*?- in org.apache.hadoop.hbase.(.*)", line)
if len(result2) == 1:
test_case = result2[0]
if "FAILURE!" in line:
failed_tests_set.add(test_case)
if test_case not in hanging_tests_set:
print ("ERROR! No test '{}' found in hanging_tests. Might get wrong results "
"for this test.".format(test_case))
else:
hanging_tests_set.remove(test_case)
result3 = re.match("^\\s+(\\w*).*\\sTestTimedOut", line)
if result3:
test_case = result3.group(1)
timeout_tests_set.add(test_case)
for bad_string in BAD_RUN_STRINGS:
if re.match(".*" + bad_string + ".*", line):
print "Bad string found in build:\n > {}".format(line)
return
print "Result > total tests: {:4} failed : {:4} timedout : {:4} hanging : {:4}".format(
len(all_tests_set), len(failed_tests_set), len(timeout_tests_set), len(hanging_tests_set))
return [all_tests_set, failed_tests_set, timeout_tests_set, hanging_tests_set]
if __name__ == "__main__":
if len(sys.argv) != 2:
print "ERROR : Provide the jenkins job console URL as the only argument."
sys.exit(1)
print "Fetching {}".format(sys.argv[1])
result = get_bad_tests(sys.argv[1])
if not result:
sys.exit(1)
[all_tests, failed_tests, timedout_tests, hanging_tests] = result
print "Found {} hanging tests:".format(len(hanging_tests))
for test in hanging_tests:
print test
print "\n"
print "Found {} failed tests of which {} timed out:".format(
len(failed_tests), len(timedout_tests))
for test in failed_tests:
print "{0} {1}".format(test, ("(Timed Out)" if test in timedout_tests else ""))
print ("\nA test may have had 0 or more atomic test failures before it timed out. So a "
"'Timed Out' test may have other errors too.")
|
phracek/rebase-helper | refs/heads/master | rebasehelper/spec_hooks/paths_to_rpm_macros.py | 1 | # -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <phracek@redhat.com>
# Tomas Hozza <thozza@redhat.com>
import six
from rebasehelper.specfile import BaseSpecHook
from rebasehelper.helpers.macro_helper import MacroHelper
class PathsToRPMMacrosHook(BaseSpecHook):
"""SpecHook for replacing paths to files with RPM macros."""
@classmethod
def run(cls, spec_file, rebase_spec_file, **kwargs):
macros = [m for m in rebase_spec_file.macros if m['name'] in MacroHelper.MACROS_WHITELIST]
macros = MacroHelper.expand_macros(macros)
# ensure maximal greediness
macros.sort(key=lambda k: len(k['value']), reverse=True)
for sec_name, sec_content in six.iteritems(rebase_spec_file.spec_content.sections):
if sec_name.startswith('%files'):
for index, line in enumerate(sec_content):
new_path = MacroHelper.substitute_path_with_macros(line, macros)
rebase_spec_file.spec_content.sections[sec_name][index] = new_path
rebase_spec_file.save()
|
garnertb/geonode | refs/heads/master | geonode/people/utils.py | 33 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth import get_user_model
from geonode import GeoNodeException
def get_default_user():
"""Create a default user
"""
superusers = get_user_model().objects.filter(
is_superuser=True).order_by('id')
if superusers.count() > 0:
# Return the first created superuser
return superusers[0]
else:
raise GeoNodeException('You must have an admin account configured '
'before importing data. '
'Try: django-admin.py createsuperuser')
def get_valid_user(user=None):
"""Gets the default user or creates it if it does not exist
"""
if user is None:
theuser = get_default_user()
elif isinstance(user, basestring):
theuser = get_user_model().objects.get(username=user)
elif user == user.get_anonymous():
raise GeoNodeException('The user uploading files must not '
'be anonymous')
else:
theuser = user
# FIXME: Pass a user in the unit tests that is not yet saved ;)
assert isinstance(theuser, get_user_model())
return theuser
def format_address(street=None, zipcode=None, city=None, area=None, country=None):
if country is not None and country == "USA":
address = ""
if city and area:
if street:
address += street+", "
address += city+", "+area
if zipcode:
address += " "+zipcode
elif (not city) and area:
if street:
address += street+", "
address += area
if zipcode:
address += " "+zipcode
elif city and (not area):
if street:
address += street+", "
address += city
if zipcode:
address += " "+zipcode
else:
if street:
address += ", "+street
if zipcode:
address += " "+zipcode
if address:
address += ", United States"
else:
address += "United States"
return address
else:
address = []
if street:
address.append(street)
if zipcode:
address.append(zipcode)
if city:
address.append(city)
if area:
address.append(area)
address.append(country)
return " ".join(address)
|
tsdmgz/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_config.py | 27 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_config
version_added: "2.4"
author: "QijunPan (@CloudEngine-Ansible)"
short_description: Manage Huawei CloudEngine configuration sections.
description:
- Huawei CloudEngine configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with CloudEngine configuration sections in
a deterministic way. This module works with CLI transports.
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device current-configuration. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the current-configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(current-configuration) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
type: bool
default: false
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current current-configuration to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current-configuration for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the current-configuration
is collected from the device. When the value is set to true,
        the command used to collect the current-configuration is appended with
the all keyword. When the value is set to false, the command
is issued without the all keyword.
required: false
type: bool
default: false
save:
description:
- The C(save) argument instructs the module to save the
current-configuration to saved-configuration. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
type: bool
default: false
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
- name: CloudEngine config test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure top level configuration and save it"
ce_config:
lines: sysname {{ inventory_hostname }}
save: yes
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
- rule 50 permit source 5.5.5.5 32
parents: acl 2000
before: undo acl 2000
match: exact
provider: "{{ cli }}"
- name: "Configure acl configuration and save it"
ce_config:
lines:
- rule 10 permit source 1.1.1.1 32
- rule 20 permit source 2.2.2.2 32
- rule 30 permit source 3.3.3.3 32
- rule 40 permit source 4.4.4.4 32
parents: acl 2000
before: undo acl 2000
replace: block
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/ce_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.ce import get_config, load_config, run_commands
from ansible.module_utils.ce import ce_argument_spec
from ansible.module_utils.ce import check_args as ce_check_args
def check_args(module, warnings):
ce_check_args(module, warnings)
def get_running_config(module):
contents = module.params['config']
if not contents:
flags = []
if module.params['defaults']:
flags.append('include-default')
contents = get_config(module, flags=flags)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none':
config = get_running_config(module)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
)
argument_spec.update(ce_argument_spec)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
if any((module.params['src'], module.params['lines'])):
run(module, result)
if module.params['save']:
if not module.check_mode:
run_commands(module, ['save'])
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
robobrobro/ballin-octo-shame | refs/heads/master | lib/Python-3.4.3/Lib/keyword.py | 162 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
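# Binding frozenset.__contains__ gives an O(1) membership test, e.g. (sketch):
#   iskeyword('pass')   -> True
#   iskeyword('passed') -> False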
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# load the output skeleton from the target, taking care to preserve its
# newline convention.
with open(optfile, newline='') as fp:
format = fp.readlines()
nl = format[0][len(format[0].strip()):] if format else '\n'
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "'," + nl)
lines.sort()
# insert the lines of keywords into the skeleton
try:
start = format.index("#--start keywords--" + nl) + 1
end = format.index("#--end keywords--" + nl)
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
with open(optfile, 'w', newline='') as fp:
fp.writelines(format)
if __name__ == "__main__":
main()
|
adore-hrzz/nao-sound-classification | refs/heads/master | scripts/cross_validation.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
import sys
import itertools
from sklearn import svm, datasets
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes, data_type, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""This function prints and plots the confusion matrix. Normalization can be applied by setting 'normalize=True'."""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm,2)
print('Normalized confusion matrix for {} dataset'.format(data_type))
else:
print('Confusion matrix for {} dataset, without normalization'.format(data_type))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def crossValidation(directory, csvfile):
"""The function calculates running average for each confusion matrix element by going through all matrices. The program can be called with 1 or 3 arguments from command line. The first argument is 'test' or 'train' which indicates which matrices to calculate moving average for and print them. The second and third arguments are the row and column numbers of matrix which you want to plot. Function then plots the value of running average after every iteration to see the convergence of cross-validation. Example: cross_validation.py train 1 1"""
curAvg=np.zeros((5,5))
n=0.0
if len(sys.argv)==4:
plt.figure()
plt.ion()
for i in range(len(os.listdir(directory))):
csv.register_dialect('commadot', delimiter=';')
f = open(csvfile+str(i+1)+'.csv', 'rb')
reader = csv.reader(f, dialect='commadot')
#generate confusion matrix of type numpy.array
vocalization_labels=reader.next()[1::]
matrix_list = []
for rows in reader:
matrix_list.append([ int(x) for x in rows[1::] ])
matrix_list = np.asarray(matrix_list)
f.close()
#calculating moving average for every confusion matrix element
curAvg = curAvg + (matrix_list - curAvg)/(n+1.0)
n += 1
if len(sys.argv)==4:
            plt.scatter(i, curAvg[int(sys.argv[2])][int(sys.argv[3])])
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(curAvg, classes=vocalization_labels, data_type=sys.argv[1], title='')
plt.show()
if len(sys.argv)==4:
while True:
plt.pause(0.05)
'''Main function'''
if __name__ == '__main__':
if sys.argv[1]=='train':
csvfile='./Resources/Train/results_train'
directory='./Resources/Train'
elif sys.argv[1]=='test':
csvfile='./Resources/Test/results_test'
directory='./Resources/Test'
crossValidation(directory, csvfile)
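# Note on the update used above (illustrative): curAvg + (x - curAvg)/(n+1) is
# the incremental form of the arithmetic mean, so for a matrix element that
# takes the values 2, 4 and 9 the running value moves 0 -> 2 -> 3 -> 5, and
# 5 is exactly (2 + 4 + 9) / 3.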
|
kidsfm/cms | refs/heads/master | videos/apps.py | 6 | from django.apps import AppConfig
class VideosConfig(AppConfig):
name = 'videos'
|
rossant/podoc | refs/heads/master | setup.py | 2 | # -*- coding: utf-8 -*-
# flake8: noqa
"""Installation script."""
#-------------------------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------------------------
import os
import os.path as op
import re
from setuptools import setup
#-------------------------------------------------------------------------------------------------
# Setup
#-------------------------------------------------------------------------------------------------
def _package_tree(pkgroot):
path = op.dirname(__file__)
subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
for i in os.walk(op.join(path, pkgroot))
if '__init__.py' in i[2]]
return subdirs
curdir = op.dirname(op.realpath(__file__))
readme = open(op.join(curdir, 'README.md')).read()
# Find version number from `__init__.py` without executing it.
filename = op.join(curdir, 'podoc/__init__.py')
with open(filename, 'r') as f:
version = re.search(r"__version__ = '([^']+)'", f.read()).group(1)
setup(
name='podoc',
version=version,
license="BSD",
description='Markup document conversion',
long_description=readme,
author='Cyrille Rossant',
author_email='cyrille.rossant at gmail.com',
url='https://github.com/podoc/podoc/',
packages=_package_tree('podoc'),
package_dir={'podoc': 'podoc'},
package_data={
},
entry_points={
'console_scripts': [
'podoc=podoc.cli:podoc',
],
},
include_package_data=True,
keywords='podoc,pandoc,markup,markdown,conversion',
classifiers=[
'Development Status :: 1 - Planning',
'Framework :: IPython',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Topic :: Text Processing :: Markup',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
40223110/2015cd_midterm- | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_weakrefset.py | 766 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
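# A minimal sketch of the weak-reference behaviour implemented above (the
# Token class is only a stand-in, since plain object() instances cannot be
# weakly referenced): an entry disappears once its referent loses its last
# strong reference and the _remove callback fires.
def _weakset_sketch():
    class Token:
        pass
    s = WeakSet()
    t = Token()
    s.add(t)
    before = len(s)       # 1 while `t` is still strongly referenced
    del t                 # referent collected; the dead entry is discarded
    return before, len(s)  # typically (1, 0) on CPython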
|
xaviercobain88/framework-python | refs/heads/master | openerp/addons/document_page/wizard/document_page_show_diff.py | 59 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import base64
class showdiff(osv.osv_memory):
""" Disp[ay Difference for History """
_name = 'wizard.document.page.history.show_diff'
def get_diff(self, cr, uid, context=None):
if context is None:
context = {}
history = self.pool.get('document.page.history')
ids = context.get('active_ids', [])
diff = ""
if len(ids) == 2:
if ids[0] > ids[1]:
diff = history.getDiff(cr, uid, ids[1], ids[0])
else:
diff = history.getDiff(cr, uid, ids[0], ids[1])
elif len(ids) == 1:
old = history.browse(cr, uid, ids[0])
nids = history.search(cr, uid, [('page_id', '=', old.page_id.id)])
nids.sort()
diff = history.getDiff(cr, uid, ids[0], nids[-1])
else:
            raise osv.except_osv(_('Warning!'), _('You need to select at least one and at most two history revisions!'))
return diff
_columns = {
'diff': fields.text('Diff', readonly=True),
}
_defaults = {
'diff': get_diff
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sarakha63/persomov | refs/heads/master | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/discovery.py | 18 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
class DiscoveryIE(InfoExtractor):
_VALID_URL = r'http://www\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
_TEST = {
'url': 'http://www.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
'md5': 'e12614f9ee303a6ccef415cb0793eba2',
'info_dict': {
'id': '614784',
'ext': 'mp4',
'title': 'MythBusters: Mission Impossible Outtakes',
'description': ('Watch Jamie Hyneman and Adam Savage practice being'
' each other -- to the point of confusing Jamie\'s dog -- and '
'don\'t miss Adam moon-walking as Jamie ... behind Jamie\'s'
' back.'),
'duration': 156,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_list_json = self._search_regex(r'var videoListJSON = ({.*?});',
webpage, 'video list', flags=re.DOTALL)
video_list = json.loads(video_list_json)
info = video_list['clips'][0]
formats = []
for f in info['mp4']:
formats.append(
{'url': f['src'], 'ext': 'mp4', 'tbr': int(f['bitrate'][:-1])})
return {
'id': info['contentId'],
'title': video_list['name'],
'formats': formats,
'description': info['videoCaption'],
'thumbnail': info.get('videoStillURL') or info.get('thumbnailURL'),
'duration': info['duration'],
}
|
ormnv/os_final_project | refs/heads/master | django/middleware/transaction.py | 143 | from django.db import transaction
class TransactionMiddleware(object):
"""
Transaction middleware. If this is enabled, each view function will be run
    with commit_on_success behavior activated - that way a save() doesn't do a direct
commit, the commit is done when a successful response is created. If an
exception happens, the database is rolled back.
"""
def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
transaction.managed(True)
def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management"""
if transaction.is_dirty():
# This rollback might fail because of network failure for example.
# If rollback isn't possible it is impossible to clean the
# connection's state. So leave the connection in dirty state and
# let request_finished signal deal with cleaning the connection.
transaction.rollback()
transaction.leave_transaction_management()
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if transaction.is_managed():
if transaction.is_dirty():
# Note: it is possible that the commit fails. If the reason is
# closed connection or some similar reason, then there is
# little hope to proceed nicely. However, in some cases (
                # deferred foreign key checks for example) it is still possible
# to rollback().
try:
transaction.commit()
except Exception:
# If the rollback fails, the transaction state will be
# messed up. It doesn't matter, the connection will be set
# to clean state after the request finishes. And, we can't
# clean the state here properly even if we wanted to, the
# connection is in transaction but we can't rollback...
transaction.rollback()
transaction.leave_transaction_management()
raise
transaction.leave_transaction_management()
return response
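# Enabling the middleware is a settings change (illustrative snippet, adjust to
# the project's own settings module):
#
#   MIDDLEWARE_CLASSES = (
#       ...,
#       'django.middleware.transaction.TransactionMiddleware',
#   )
#
# after which each request is wrapped in the enter/commit/rollback cycle above.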
|
brendandburns/tensorflow | refs/heads/master | tensorflow/models/rnn/ptb/ptb_word_lm.py | 3 | """Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
To compile on CPU:
bazel build -c opt tensorflow/models/rnn/ptb:ptb_word_lm
To compile on GPU:
bazel build -c opt tensorflow --config=cuda \
tensorflow/models/rnn/ptb:ptb_word_lm
To run:
./bazel-bin/.../ptb_word_lm \
--data_path=/tmp/simple-examples/data/ --alsologtostderr
"""
from __future__ import print_function
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.ptb import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
FLAGS = flags.FLAGS
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.split(
1, num_steps, tf.nn.embedding_lookup(embedding, self._input_data))
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
if is_training and config.keep_prob < 1:
inputs = [tf.nn.dropout(input_, config.keep_prob) for input_ in inputs]
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
states = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step, input_ in enumerate(inputs):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(input_, state)
outputs.append(cell_output)
states.append(state)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
logits = tf.nn.xw_plus_b(output,
tf.get_variable("softmax_w", [size, vocab_size]),
tf.get_variable("softmax_b", [vocab_size]))
loss = seq2seq.sequence_loss_by_example([logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps])],
vocab_size)
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = states[-1]
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) / m.batch_size) - 1) / m.num_steps
start_time = time.time()
costs = 0.0
iters = 0
state = m.initial_state.eval()
for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: state})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size / 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(unused_args):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config)
mtest = PTBModel(is_training=False, config=eval_config)
tf.initialize_all_variables().run()
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, train_data, m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
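# Worked example of the schedule in main() for the "small" config above
# (illustrative): lr = learning_rate * lr_decay ** max(i - max_epoch, 0.0),
# so with learning_rate=1.0, lr_decay=0.5 and max_epoch=4 the rate stays at
# 1.0 for epochs 1-5, then halves each epoch: 0.5 at epoch 6, 0.25 at epoch 7.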
|
rbramwell/pulp | refs/heads/master | nodes/extensions/admin/pulp_node/__init__.py | 56 | # Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) |
xkcd1253/SocialNetworkforTwo | refs/heads/master | flask/lib/python2.7/site-packages/wtforms/ext/csrf/session.py | 68 | """
A provided CSRF implementation which puts CSRF data in a session.
This can be used fairly comfortably with many `request.session` type
objects, including the Werkzeug/Flask session store, Django sessions, and
potentially other similar objects which use a dict-like API for storing
session keys.
The basic concept is a randomly generated value is stored in the user's
session, and an hmac-sha1 of it (along with an optional expiration time,
for extra security) is used as the value of the csrf_token. If this token
validates with the hmac of the random value + expiration time, and the
expiration time is not passed, the CSRF validation will pass.
"""
from __future__ import unicode_literals
import hmac
import os
from hashlib import sha1
from datetime import datetime, timedelta
from ...validators import ValidationError
from .form import SecureForm
__all__ = ('SessionSecureForm', )
class SessionSecureForm(SecureForm):
TIME_FORMAT = '%Y%m%d%H%M%S'
TIME_LIMIT = timedelta(minutes=30)
SECRET_KEY = None
def generate_csrf_token(self, csrf_context):
if self.SECRET_KEY is None:
raise Exception('must set SECRET_KEY in a subclass of this form for it to work')
if csrf_context is None:
raise TypeError('Must provide a session-like object as csrf context')
session = getattr(csrf_context, 'session', csrf_context)
if 'csrf' not in session:
session['csrf'] = sha1(os.urandom(64)).hexdigest()
self.csrf_token.csrf_key = session['csrf']
if self.TIME_LIMIT:
expires = (datetime.now() + self.TIME_LIMIT).strftime(self.TIME_FORMAT)
csrf_build = '%s%s' % (session['csrf'], expires)
else:
expires = ''
csrf_build = session['csrf']
hmac_csrf = hmac.new(self.SECRET_KEY, csrf_build.encode('utf8'), digestmod=sha1)
return '%s##%s' % (expires, hmac_csrf.hexdigest())
def validate_csrf_token(self, field):
if not field.data or '##' not in field.data:
raise ValidationError(field.gettext('CSRF token missing'))
expires, hmac_csrf = field.data.split('##')
check_val = (field.csrf_key + expires).encode('utf8')
hmac_compare = hmac.new(self.SECRET_KEY, check_val, digestmod=sha1)
if hmac_compare.hexdigest() != hmac_csrf:
raise ValidationError(field.gettext('CSRF failed'))
if self.TIME_LIMIT:
now_formatted = datetime.now().strftime(self.TIME_FORMAT)
if now_formatted > expires:
raise ValidationError(field.gettext('CSRF token expired'))
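# A minimal usage sketch, assuming the surrounding application supplies a
# dict-like session; the secret key and time limit below are placeholders.
class _ExampleSecureForm(SessionSecureForm):
    SECRET_KEY = b'replace-with-a-long-random-secret'
    TIME_LIMIT = timedelta(minutes=20)
# A form built as _ExampleSecureForm(csrf_context=session) stores a random
# value in session['csrf'] and renders csrf_token as "<expiry>##<hmac-sha1>",
# which validate_csrf_token() later recomputes and compares.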
|
adsorensen/girder | refs/heads/master | girder/api/v1/group.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, filtermodel
from girder.api import access
from girder.constants import AccessType, SettingKey
from girder.models.model_base import AccessException
from girder.utility import mail_utils
from girder.utility.model_importer import ModelImporter
class Group(Resource):
"""API Endpoint for groups."""
def __init__(self):
super(Group, self).__init__()
self.resourceName = 'group'
self.route('DELETE', (':id',), self.deleteGroup)
self.route('DELETE', (':id', 'member'), self.removeFromGroup)
self.route('DELETE', (':id', 'moderator'), self.demote)
self.route('DELETE', (':id', 'admin'), self.demote)
self.route('GET', (), self.find)
self.route('GET', (':id',), self.getGroup)
self.route('GET', (':id', 'access'), self.getGroupAccess)
self.route('GET', (':id', 'invitation'), self.getGroupInvitations)
self.route('GET', (':id', 'member'), self.listMembers)
self.route('POST', (), self.createGroup)
self.route('POST', (':id', 'invitation'), self.inviteToGroup)
self.route('POST', (':id', 'member'), self.joinGroup)
self.route('POST', (':id', 'moderator'), self.promoteToModerator)
self.route('POST', (':id', 'admin'), self.promoteToAdmin)
self.route('PUT', (':id',), self.updateGroup)
@access.public
@filtermodel(model='group')
@autoDescribeRoute(
Description('Search for groups or list all groups.')
.param('text', 'Pass this to perform a full-text search for groups.', required=False)
.param('exact', 'If true, only return exact name matches. This is '
'case sensitive.', required=False, dataType='boolean', default=False)
.pagingParams(defaultSort='name')
.errorResponse()
)
def find(self, text, exact, limit, offset, sort):
user = self.getCurrentUser()
if text is not None:
if exact:
groupList = self.model('group').find(
{'name': text}, offset=offset, limit=limit, sort=sort)
else:
groupList = self.model('group').textSearch(
text, user=user, offset=offset, limit=limit, sort=sort)
else:
groupList = self.model('group').list(
user=user, offset=offset, limit=limit, sort=sort)
return list(groupList)
@access.user
@filtermodel(model='group')
@autoDescribeRoute(
Description('Create a new group.')
.responseClass('Group')
.notes('Must be logged in.')
.param('name', 'Unique name for the group.', strip=True)
.param('description', 'Description of the group.', required=False,
default='', strip=True)
.param('public', 'Whether the group should be publicly visible.',
required=False, dataType='boolean', default=False)
.errorResponse()
.errorResponse('Write access was denied on the parent', 403)
)
def createGroup(self, name, description, public):
return self.model('group').createGroup(
name=name, creator=self.getCurrentUser(), description=description, public=public)
@access.public
@filtermodel(model='group')
@autoDescribeRoute(
Description('Get a group by ID.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the group.', 403)
)
def getGroup(self, group):
# Add in the current setting for adding to groups
group['_addToGroupPolicy'] = self.model('setting').get(SettingKey.ADD_TO_GROUP_POLICY)
return group
@access.public
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Get the access control list for a group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the group.', 403)
)
def getGroupAccess(self, group):
groupModel = self.model('group')
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.public
@filtermodel(model='user')
@autoDescribeRoute(
Description('Show outstanding invitations for a group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.READ)
.pagingParams(defaultSort='lastName')
.errorResponse()
.errorResponse('Read access was denied for the group.', 403)
)
def getGroupInvitations(self, group, limit, offset, sort):
return list(self.model('group').getInvites(group, limit, offset, sort))
@access.user
@filtermodel(model='group')
@autoDescribeRoute(
Description('Update a group by ID.')
.modelParam('id', model='group', level=AccessType.WRITE)
.param('name', 'The name to set on the group.', required=False, strip=True)
.param('description', 'Description for the group.', required=False, strip=True)
.param('public', 'Whether the group should be publicly visible', dataType='boolean',
required=False)
.param('addAllowed', 'Can admins or moderators directly add members '
'to this group? Only system administrators are allowed to '
'set this field', required=False,
enum=['default', 'no', 'yesmod', 'yesadmin'])
.errorResponse()
.errorResponse('Write access was denied for the group.', 403)
)
def updateGroup(self, group, name, description, public, addAllowed):
if public is not None:
self.model('group').setPublic(group, public)
if name is not None:
group['name'] = name
if description is not None:
group['description'] = description
if addAllowed is not None:
self.requireAdmin(self.getCurrentUser())
group['addAllowed'] = addAllowed
return self.model('group').updateGroup(group)
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Request to join a group, or accept an invitation to join.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.READ)
.errorResponse('ID was invalid.')
.errorResponse('You were not invited to this group, or do not have '
'read access to it.', 403)
)
def joinGroup(self, group):
groupModel = self.model('group')
group = groupModel.joinGroup(group, self.getCurrentUser())
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.public
@filtermodel(model='user')
@autoDescribeRoute(
Description('List members of a group.')
.modelParam('id', model='group', level=AccessType.READ)
.pagingParams(defaultSort='lastName')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the group.', 403)
)
def listMembers(self, group, limit, offset, sort):
return list(self.model('group').listMembers(group, offset=offset, limit=limit, sort=sort))
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description("Invite a user to join a group, or accept a user's request to join.")
.responseClass('Group')
.notes('The "force" option to this endpoint is only available to '
'administrators and can be used to bypass the invitation process'
' and instead add the user directly to the group.')
.modelParam('id', model='group', level=AccessType.WRITE)
.modelParam('userId', 'The ID of the user to invite or accept.',
destName='userToInvite', level=AccessType.READ, paramType='form')
.param('level', 'The access level the user will be given when they accept the invitation.',
required=False, dataType='integer', default=AccessType.READ)
.param('quiet', 'If you do not want this action to send an email to '
'the target user, set this to true.', dataType='boolean',
required=False, default=False)
.param('force', 'Add user directly rather than sending an invitation '
'(admin-only option).', dataType='boolean', required=False, default=False)
.errorResponse()
.errorResponse('Write access was denied for the group.', 403)
)
def inviteToGroup(self, group, userToInvite, level, quiet, force):
groupModel = self.model('group')
user = self.getCurrentUser()
if force:
if not user['admin']:
mustBeAdmin = True
addPolicy = self.model('setting').get(SettingKey.ADD_TO_GROUP_POLICY)
addGroup = group.get('addAllowed', 'default')
if addGroup not in ['no', 'yesadmin', 'yesmod']:
addGroup = addPolicy
if (groupModel.hasAccess(
group, user, AccessType.ADMIN) and
('mod' in addPolicy or 'admin' in addPolicy) and
addGroup.startswith('yes')):
mustBeAdmin = False
elif (groupModel.hasAccess(
group, user, AccessType.WRITE) and
'mod' in addPolicy and
addGroup == 'yesmod'):
mustBeAdmin = False
if mustBeAdmin:
self.requireAdmin(user)
groupModel.addUser(group, userToInvite, level=level)
else:
# Can only invite into access levels that you yourself have
groupModel.requireAccess(group, user, level)
groupModel.inviteUser(group, userToInvite, level)
if not quiet:
html = mail_utils.renderTemplate('groupInvite.mako', {
'userToInvite': userToInvite,
'user': user,
'group': group
})
mail_utils.sendEmail(
to=userToInvite['email'], text=html,
subject="%s: You've been invited to a group"
% ModelImporter.model('setting').get(SettingKey.BRAND_NAME)
)
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.user
@filtermodel(model='group', addFields={'access'})
@autoDescribeRoute(
Description('Promote a member to be a moderator of the group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to promote.',
level=AccessType.READ, paramType='formData')
.errorResponse('ID was invalid.')
.errorResponse("You don't have permission to promote users.", 403)
)
def promoteToModerator(self, group, user):
return self._promote(group, user, AccessType.WRITE)
@access.user
@filtermodel(model='group', addFields={'access'})
@autoDescribeRoute(
Description('Promote a member to be an administrator of the group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to promote.',
level=AccessType.READ, paramType='formData')
.errorResponse('ID was invalid.')
.errorResponse("You don't have permission to promote users.", 403)
)
def promoteToAdmin(self, group, user):
return self._promote(group, user, AccessType.ADMIN)
def _promote(self, group, user, level):
"""
Promote a user to moderator or administrator.
:param group: The group to promote within.
:param user: The user to promote.
:param level: Either WRITE or ADMIN, for moderator or administrator.
:type level: AccessType
:returns: The updated group document.
"""
if not group['_id'] in user.get('groups', []):
raise AccessException('That user is not a group member.')
group = self.model('group').setUserAccess(group, user, level=level, save=True)
group['access'] = self.model('group').getFullAccessList(group)
return group
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Demote a user to a normal group member.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to demote.',
level=AccessType.READ, paramType='formData')
.errorResponse()
.errorResponse("You don't have permission to demote users.", 403)
)
def demote(self, group, user):
groupModel = self.model('group')
group = groupModel.setUserAccess(group, user, level=AccessType.READ, save=True)
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Remove a user from a group, or uninvite them.')
.responseClass('Group')
.notes('If the specified user is not yet a member of the group, this '
'will delete any outstanding invitation or membership request for '
'the user. Passing no userId parameter will assume that the '
'current user is removing themself.')
.modelParam('id', model='group', level=AccessType.READ)
.modelParam('userId', 'The ID of the user to remove. If not passed, will '
'remove yourself from the group.', required=False,
level=AccessType.READ, destName='userToRemove', paramType='formData')
.errorResponse()
.errorResponse("You don't have permission to remove that user.", 403)
)
def removeFromGroup(self, group, userToRemove):
user = self.getCurrentUser()
groupModel = self.model('group')
if userToRemove is None:
# Assume user is removing themself from the group
userToRemove = user
# If removing someone else, you must have at least as high an
# access level as they do, and you must have at least write access
# to remove any user other than yourself.
if user['_id'] != userToRemove['_id']:
if groupModel.hasAccess(group, userToRemove, AccessType.ADMIN):
groupModel.requireAccess(group, user, AccessType.ADMIN)
else:
groupModel.requireAccess(group, user, AccessType.WRITE)
group = groupModel.removeUser(group, userToRemove)
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.user
@autoDescribeRoute(
Description('Delete a group by ID.')
.modelParam('id', model='group', level=AccessType.ADMIN)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the group.', 403)
)
def deleteGroup(self, group):
self.model('group').remove(group)
return {'message': 'Deleted the group %s.' % group['name']}
|
JanHendrikDolling/configvalidator | refs/heads/master | test/test_context_data.py | 1 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
import testutils
from configvalidator import ConfigValidator, ParserException
from configvalidator.tools.basics import Validator
from configvalidator import AttributeDict
from configvalidator import remove_data, add_data
class MyTestCase(unittest.TestCase):
@staticmethod
def validate(inst, data):
if "VAL" not in inst.data:
raise Exception("need VAL")
return inst.data["VAL"]
@classmethod
def setUpClass(cls):
type("DEMO_CLASS", (Validator,), {"validate": MyTestCase.validate})
def setUp(self):
self.config_dict = {
"SectionB": {
"option_B2": {
"default": "INPUT",
"validator": "DEMO_CLASS",
},
},
}
from configvalidator.tools.basics import GLOBAL_DATA
for key in list(GLOBAL_DATA.keys()):
remove_data(key)
@classmethod
def tearDownClass(cls):
from configvalidator.tools.basics import DATA_VALIDATOR
del DATA_VALIDATOR["DEMO_CLASS"]
def test_global(self):
add_data("VAL", "foo")
cp = testutils.get_cp()
res = cp.parse(self.config_dict)
self.assertTrue(isinstance(res, AttributeDict))
self.assertEqual(1, len(res))
self.assertTrue(isinstance(res.SectionB, AttributeDict))
self.assertEqual(1, len(res.SectionB))
self.assertEqual("foo", res.SectionB.option_B2)
remove_data("VAL")
with self.assertRaises(ParserException) as e:
cp.parse(self.config_dict)
self.assertEqual("error validating [SectionB]option_B2: need VAL", str(e.exception))
def test_not_data(self):
cp = testutils.get_cp()
with self.assertRaises(ParserException) as e:
cp.parse(self.config_dict)
self.assertEqual("error validating [SectionB]option_B2: need VAL", str(e.exception))
def test_local_data(self):
cp = testutils.get_cp()
cp.add_data("VAL", 123)
res = cp.parse(self.config_dict)
self.assertTrue(isinstance(res, AttributeDict))
self.assertEqual(1, len(res))
self.assertTrue(isinstance(res.SectionB, AttributeDict))
self.assertEqual(1, len(res.SectionB))
self.assertEqual(123, res.SectionB.option_B2)
cp.remove_data("VAL")
with self.assertRaises(ParserException) as e:
cp.parse(self.config_dict)
self.assertEqual("error validating [SectionB]option_B2: need VAL", str(e.exception))
if __name__ == '__main__':
unittest.main()
|
andykimpe/chromium-test-npapi | refs/heads/master | tools/metrics/histograms/validate_format.py | 63 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies that the histograms XML file is well-formatted."""
import extract_histograms
import os.path
def main():
# This will raise an exception if the file is not well-formatted.
xml_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'histograms.xml')
histograms = extract_histograms.ExtractHistograms(xml_file)
if __name__ == '__main__':
main()
|
anielsen001/scipy | refs/heads/master | benchmarks/benchmarks/cluster.py | 36 | import numpy as np
from .common import Benchmark
try:
from scipy.cluster.hierarchy import linkage
except ImportError:
pass
class HierarchyLinkage(Benchmark):
params = ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']
param_names = ['method']
def __init__(self):
rnd = np.random.RandomState(0)
self.X = rnd.randn(2000, 2)
def time_linkage(self, method):
linkage(self.X, method=method)
|
fragaria/suds | refs/heads/master | suds/transport/options.py | 201 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for transport options.
"""
from suds.transport import *
from suds.properties import *
class Options(Skin):
"""
Options:
        - B{proxy} - An http proxy to be specified on requests.
            The proxy is defined as {protocol:proxy,}
                - type: I{dict}
                - I{str} B{http} - The I{http} protocol proxy URL.
                - I{str} B{https} - The I{https} protocol proxy URL.
                - default: {}
        - B{timeout} - Set the url open timeout (seconds).
                - type: I{float}
                - default: 90
        - B{headers} - Extra HTTP headers.
                - type: I{dict}
                - default: {}
- B{username} - The username used for http authentication.
- type: I{str}
- default: None
- B{password} - The password used for http authentication.
- type: I{str}
- default: None
"""
def __init__(self, **kwargs):
domain = __name__
definitions = [
Definition('proxy', dict, {}),
Definition('timeout', (int,float), 90),
Definition('headers', dict, {}),
Definition('username', basestring, None),
Definition('password', basestring, None),
]
Skin.__init__(self, domain, definitions, kwargs) |
echodaemon/Empire | refs/heads/master | lib/modules/powershell/management/mailraider/disable_security.py | 10 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Disable-SecuritySettings',
'Author': ['@xorrior'],
'Description': ("This function checks for the ObjectModelGuard, PromptOOMSend, and AdminSecurityMode registry keys for Outlook security. This function must be "
"run in an administrative context in order to set the values for the registry keys."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/xorrior/EmailRaider',
'http://www.xorrior.com/phishing-on-the-inside/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'AdminUser' : {
'Description' : 'Optional AdminUser credentials to use for registry changes.',
'Required' : False,
'Value' : ''
},
'AdminPassword' : {
'Description' : 'Optional AdminPassword credentials to use for registry changes.',
'Required' : False,
'Value' : ''
},
'Version' : {
'Description' : 'The version of Microsoft Outlook.',
'Required' : True,
'Value' : ''
},
'Reset' : {
'Description' : 'Switch. Reset security settings to default values.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
reset = self.options['Reset']['Value']
# read in the common MailRaider.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/management/MailRaider.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode + "\n"
scriptEnd = ""
if reset.lower() == "true":
# if the flag is set to restore the security settings
scriptEnd += "Reset-SecuritySettings "
else:
scriptEnd += "Disable-SecuritySettings "
for option,values in self.options.iteritems():
if option.lower() != "agent" and option.lower() != "reset":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
scriptEnd += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
|
DistributedSystemsGroup/sysadmin-dsp | refs/heads/master | openstackdns/novaqueue.py | 1 | import sys
import os
import pika
import json
import logging
import cPickle
# attach to our global logger
logger = logging.getLogger("novadns")
def load_cache(file):
if os.path.exists(file):
return cPickle.load(open(file, "rb"))
else:
return {}
def write_cache(file, cache):
cPickle.dump(cache, open(file, "wb"), -1)
class novaQueue:
def __init__(self, config, pdns):
self.config = config
self.pdns = pdns
self.broker = "amqp://" + self.config.get('AMQP', 'amqpuser')
self.broker += ":" + self.config.get("AMQP", "amqppass")
self.broker += "@" + self.config.get('AMQP', 'amqphost')
self.broker += ':' + self.config.get('AMQP', 'amqpport') + "/%2F"
self.broker = pika.URLParameters(self.broker)
self.connection = pika.BlockingConnection(self.broker)
self.channel = self.connection.channel()
self.channel.queue_declare("notifications.info")
self.channel.basic_consume(self.get, queue="notifications.info", no_ack=True)
self.channel.start_consuming()
def get(self, ch, method, properties, body):
message = json.loads(body)
try:
if 'oslo.message' in message:
message = json.loads(message['oslo.message'])
# pull out the messages that we care about
if message['event_type']:
if message['event_type'] == 'compute.instance.create.end':
instance = message['payload']
name = instance['hostname']
# tenant = message['_context_project_name']
# name = name + "-" + tenant
if not "fixed_ips" in instance:
logger.warning("VM %s does not have an IP address, skipping" % name)
return
logger.info("Queue Add Message: %s %s %s" % (instance['instance_id'], name.lower(), instance['fixed_ips'][0]['address']))
self.pdns.update(name.lower(), instance['fixed_ips'][0]['address'], instance['instance_id'])
cache = load_cache(self.config.get('CACHE', 'cache_file'))
cache[instance['instance_id']] = (name.lower(), instance['fixed_ips'][0]['address'])
write_cache(self.config.get('CACHE', 'cache_file'), cache)
elif message['event_type'] == 'compute.instance.delete.end':
instance = message['payload']
logger.info("Queue Remove Message: %s" % (instance['instance_id']))
cache = load_cache(self.config.get('CACHE', 'cache_file'))
if instance['instance_id'] in cache:
name = cache[instance['instance_id']][0]
ip = cache[instance['instance_id']][1]
self.pdns.remove(name.lower(), ip, instance['instance_id'])
del cache[instance['instance_id']]
write_cache(self.config.get('CACHE', 'cache_file'), cache)
except:
logger.exception("Exception handling message")
logger.debug(message)
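A small sketch of the pickle-cache round trip used by the helpers above (illustrative only; the cache path is made up):
# Editorial sketch, not part of the original file.
cache = load_cache('/tmp/novadns.cache')        # {} on the first run
cache['instance-id-1'] = ('vm01', '10.0.0.5')   # (hostname, fixed IP), as stored above
write_cache('/tmp/novadns.cache', cache)
assert load_cache('/tmp/novadns.cache')['instance-id-1'] == ('vm01', '10.0.0.5')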
|
SlimRoms/android_external_chromium_org | refs/heads/lp5.0 | chrome/common/extensions/docs/server2/datastore_models.py | 86 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cPickle
import traceback
from appengine_wrappers import db
# A collection of the data store models used throughout the server.
# These values are global within datastore.
class PersistentObjectStoreItem(db.Model):
pickled_value = db.BlobProperty()
@classmethod
def CreateKey(cls, namespace, key):
path = '%s/%s' % (namespace, key)
try:
return db.Key.from_path(cls.__name__, path)
except Exception:
# Probably AppEngine's BadValueError for the name being too long, but
# it's not documented which errors can actually be thrown here, so catch
# 'em all.
raise ValueError(
'Exception thrown when trying to create db.Key from path %s: %s' % (
path, traceback.format_exc()))
@classmethod
def CreateItem(cls, namespace, key, value):
return PersistentObjectStoreItem(key=cls.CreateKey(namespace, key),
pickled_value=cPickle.dumps(value))
def GetValue(self):
return cPickle.loads(self.pickled_value)
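A rough usage sketch for PersistentObjectStoreItem (illustrative only; it assumes an App Engine datastore environment such as the SDK testbed):
# Editorial sketch, not part of the original file.
item = PersistentObjectStoreItem.CreateItem('my-namespace', 'some-key', {'a': 1})
item.put()   # persist the pickled value
key = PersistentObjectStoreItem.CreateKey('my-namespace', 'some-key')
print(PersistentObjectStoreItem.get(key).GetValue())   # {'a': 1}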
|
ihsanudin/odoo | refs/heads/8.0 | addons/survey/wizard/__init__.py | 385 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
|
ujenmr/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_wireless_controller_utm_profile.py | 19 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_utm_profile
short_description: Configure UTM (Unified Threat Management) profile in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify wireless_controller feature and utm_profile category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
wireless_controller_utm_profile:
description:
- Configure UTM (Unified Threat Management) profile.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
antivirus-profile:
description:
- AntiVirus profile name. Source antivirus.profile.name.
application-list:
description:
- Application control list name. Source application.list.name.
comment:
description:
- Comment.
ips-sensor:
description:
- IPS sensor name. Source ips.sensor.name.
name:
description:
- UTM profile name.
required: true
scan-botnet-connections:
description:
- Block or monitor connections to Botnet servers or disable Botnet scanning.
choices:
- disable
- block
- monitor
utm-log:
description:
- Enable/disable UTM logging.
choices:
- enable
- disable
webfilter-profile:
description:
- WebFilter profile name. Source webfilter.profile.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure UTM (Unified Threat Management) profile.
fortios_wireless_controller_utm_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wireless_controller_utm_profile:
state: "present"
antivirus-profile: "<your_own_value> (source antivirus.profile.name)"
application-list: "<your_own_value> (source application.list.name)"
comment: "Comment."
ips-sensor: "<your_own_value> (source ips.sensor.name)"
name: "default_name_7"
scan-botnet-connections: "disable"
utm-log: "enable"
webfilter-profile: "<your_own_value> (source webfilter.profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_wireless_controller_utm_profile_data(json):
option_list = ['antivirus-profile', 'application-list', 'comment',
'ips-sensor', 'name', 'scan-botnet-connections',
'utm-log', 'webfilter-profile']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def wireless_controller_utm_profile(data, fos):
vdom = data['vdom']
wireless_controller_utm_profile_data = data['wireless_controller_utm_profile']
flattened_data = flatten_multilists_attributes(wireless_controller_utm_profile_data)
filtered_data = filter_wireless_controller_utm_profile_data(flattened_data)
if wireless_controller_utm_profile_data['state'] == "present":
return fos.set('wireless-controller',
'utm-profile',
data=filtered_data,
vdom=vdom)
elif wireless_controller_utm_profile_data['state'] == "absent":
return fos.delete('wireless-controller',
'utm-profile',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_wireless_controller(data, fos):
login(data, fos)
if data['wireless_controller_utm_profile']:
resp = wireless_controller_utm_profile(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"wireless_controller_utm_profile": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"antivirus-profile": {"required": False, "type": "str"},
"application-list": {"required": False, "type": "str"},
"comment": {"required": False, "type": "str"},
"ips-sensor": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"scan-botnet-connections": {"required": False, "type": "str",
"choices": ["disable", "block", "monitor"]},
"utm-log": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"webfilter-profile": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
crmccreary/openerp_server | refs/heads/master | openerp/addons/web_process/__openerp__.py | 2 | {
"name" : "Process",
"version": "2.0",
"description":
"""
OpenERP Web process view.
""",
"depends" : ["web"],
"js": [
"static/src/js/process.js"
],
"css": [
"static/src/css/process.css"
],
'qweb': [
"static/src/xml/*.xml"
],
'active': True
}
|
2014c2g19/2014c2g19 | refs/heads/master | exts/w2/static/Brython2.0.0-20140209-164925/Lib/sre_parse.py | 111 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def __iter__(self):
return iter(self.data)
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns an integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and not self.istext:
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if not self.istext:
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise ValueError
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise ValueError
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if isinstance(sep, str):
makechar = chr
else:
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin-1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
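A quick sketch of what the parser above produces (illustrative; the exact printed form varies between Python versions):
# Editorial sketch, not part of the original module.
import sre_parse
p = sre_parse.parse(r'a(b|c)+')
print(p)            # e.g. [(LITERAL, 97), (MAX_REPEAT, (1, MAXREPEAT, [(SUBPATTERN, ...)]))]
print(p.getwidth()) # (2, <large upper bound clamped to sys.maxsize>)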
|
bolkedebruin/airflow | refs/heads/master | airflow/contrib/hooks/sqoop_hook.py | 1 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.sqoop.hooks.sqoop`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.sqoop.hooks.sqoop`.",
DeprecationWarning, stacklevel=2
)
|
partofthething/home-assistant | refs/heads/dev | tests/components/advantage_air/test_cover.py | 9 | """Test the Advantage Air Cover Platform."""
from json import loads
from homeassistant.components.advantage_air.const import (
ADVANTAGE_AIR_STATE_CLOSE,
ADVANTAGE_AIR_STATE_OPEN,
)
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_DAMPER,
DOMAIN as COVER_DOMAIN,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OPEN
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_cover_async_setup_entry(hass, aioclient_mock):
"""Test climate setup without sensors."""
aioclient_mock.get(
TEST_SYSTEM_URL,
text=TEST_SYSTEM_DATA,
)
aioclient_mock.get(
TEST_SET_URL,
text=TEST_SET_RESPONSE,
)
await add_mock_config(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
assert len(aioclient_mock.mock_calls) == 1
# Test Cover Zone Entity
entity_id = "cover.zone_open_without_sensor"
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OPEN
assert state.attributes.get("device_class") == DEVICE_CLASS_DAMPER
assert state.attributes.get("current_position") == 100
entry = registry.async_get(entity_id)
assert entry
assert entry.unique_id == "uniqueid-ac2-z01"
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 3
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_CLOSE
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: [entity_id]},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 5
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_OPEN
assert data["ac2"]["zones"]["z01"]["value"] == 100
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: [entity_id], ATTR_POSITION: 50},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 7
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac2"]["zones"]["z01"]["value"] == 50
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_SET_COVER_POSITION,
{ATTR_ENTITY_ID: [entity_id], ATTR_POSITION: 0},
blocking=True,
)
assert len(aioclient_mock.mock_calls) == 9
assert aioclient_mock.mock_calls[-2][0] == "GET"
assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_CLOSE
assert aioclient_mock.mock_calls[-1][0] == "GET"
assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
|
sammerry/ansible | refs/heads/devel | test/units/plugins/filter/__init__.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
harry-7/addons-server | refs/heads/master | src/olympia/constants/base.py | 1 | import re
from collections import namedtuple
from django.utils.translation import ugettext_lazy as _
# Add-on and File statuses.
STATUS_NULL = 0 # No review type chosen yet, add-on is incomplete.
STATUS_AWAITING_REVIEW = 1 # File waiting for review.
STATUS_PENDING = 2 # Personas (lightweight themes) waiting for review.
STATUS_NOMINATED = 3 # Waiting for review.
STATUS_PUBLIC = 4 # Approved.
STATUS_DISABLED = 5 # Rejected (single files) or disabled by Mozilla (addons).
_STATUS_LISTED = 6 # Deprecated. See bug 616242
_STATUS_BETA = 7 # Deprecated, see addons-server/issues/7163
_STATUS_LITE = 8 # Deprecated, preliminary reviewed.
_STATUS_LITE_AND_NOMINATED = 9 # Deprecated, prelim & waiting for full review.
STATUS_DELETED = 11 # Add-on has been deleted.
STATUS_REJECTED = 12 # This applies only to rejected personas.
STATUS_REVIEW_PENDING = 14 # personas, needing further action.
STATUS_CHOICES_ADDON = {
STATUS_NULL: _(u'Incomplete'),
STATUS_NOMINATED: _(u'Awaiting Review'),
STATUS_PUBLIC: _(u'Approved'),
STATUS_DISABLED: _(u'Disabled by Mozilla'),
STATUS_DELETED: _(u'Deleted'),
}
STATUS_CHOICES_PERSONA = {
STATUS_NULL: STATUS_CHOICES_ADDON[STATUS_NULL],
STATUS_PENDING: _(u'Pending approval'),
STATUS_PUBLIC: STATUS_CHOICES_ADDON[STATUS_PUBLIC],
STATUS_DISABLED: STATUS_CHOICES_ADDON[STATUS_DISABLED],
STATUS_DELETED: STATUS_CHOICES_ADDON[STATUS_DELETED],
STATUS_REJECTED: _(u'Rejected'),
# Approved, but the developer decides when to make it public.
STATUS_REVIEW_PENDING: _(u'Flagged for further review'),
}
STATUS_CHOICES_FILE = {
STATUS_AWAITING_REVIEW: _(u'Awaiting Review'),
STATUS_PUBLIC: _(u'Approved'),
STATUS_DISABLED: _(u'Disabled by Mozilla'),
}
# We need to expose nice values that aren't localisable.
STATUS_CHOICES_API = {
STATUS_NULL: 'incomplete',
STATUS_AWAITING_REVIEW: 'unreviewed',
STATUS_PENDING: 'pending',
STATUS_NOMINATED: 'nominated',
STATUS_PUBLIC: 'public',
STATUS_DISABLED: 'disabled',
STATUS_DELETED: 'deleted',
STATUS_REJECTED: 'rejected',
STATUS_REVIEW_PENDING: 'review-pending',
}
STATUS_CHOICES_API_LOOKUP = {
'incomplete': STATUS_NULL,
'unreviewed': STATUS_AWAITING_REVIEW,
'pending': STATUS_PENDING,
'nominated': STATUS_NOMINATED,
'public': STATUS_PUBLIC,
'disabled': STATUS_DISABLED,
'deleted': STATUS_DELETED,
'rejected': STATUS_REJECTED,
'review-pending': STATUS_REVIEW_PENDING,
}
REVIEWED_STATUSES = (STATUS_PUBLIC,)
UNREVIEWED_ADDON_STATUSES = (STATUS_NOMINATED,)
UNREVIEWED_FILE_STATUSES = (STATUS_AWAITING_REVIEW, STATUS_PENDING)
VALID_ADDON_STATUSES = (STATUS_NOMINATED, STATUS_PUBLIC)
VALID_FILE_STATUSES = (STATUS_AWAITING_REVIEW, STATUS_PUBLIC)
# Version channels
RELEASE_CHANNEL_UNLISTED = 1
RELEASE_CHANNEL_LISTED = 2
RELEASE_CHANNEL_CHOICES = (
(RELEASE_CHANNEL_UNLISTED, _(u'Unlisted')),
(RELEASE_CHANNEL_LISTED, _(u'Listed')),
)
CHANNEL_CHOICES_API = {
RELEASE_CHANNEL_UNLISTED: 'unlisted',
RELEASE_CHANNEL_LISTED: 'listed',
}
CHANNEL_CHOICES_LOOKUP = {
'unlisted': RELEASE_CHANNEL_UNLISTED,
'listed': RELEASE_CHANNEL_LISTED,
}
# Add-on author roles.
AUTHOR_ROLE_DEV = 4
AUTHOR_ROLE_OWNER = 5
AUTHOR_CHOICES = (
(AUTHOR_ROLE_OWNER, _(u'Owner')),
(AUTHOR_ROLE_DEV, _(u'Developer')),
)
# Addon types
ADDON_ANY = 0
ADDON_EXTENSION = 1
ADDON_THEME = 2
ADDON_DICT = 3
ADDON_SEARCH = 4
ADDON_LPAPP = 5
ADDON_LPADDON = 6
ADDON_PLUGIN = 7
ADDON_API = 8 # not actually a type but used to identify extensions + themes
ADDON_PERSONA = 9
ADDON_STATICTHEME = 10
# Addon type groupings.
GROUP_TYPE_ADDON = [ADDON_EXTENSION, ADDON_DICT, ADDON_SEARCH, ADDON_LPAPP,
ADDON_LPADDON, ADDON_PLUGIN, ADDON_API]
GROUP_TYPE_THEME = [ADDON_THEME, ADDON_PERSONA, ADDON_STATICTHEME]
# Singular
ADDON_TYPE = {
ADDON_EXTENSION: _(u'Extension'),
ADDON_THEME: _(u'Complete Theme'),
ADDON_DICT: _(u'Dictionary'),
ADDON_SEARCH: _(u'Search Engine'),
ADDON_LPAPP: _(u'Language Pack (Application)'),
ADDON_LPADDON: _(u'Language Pack (Add-on)'),
ADDON_PLUGIN: _(u'Plugin'),
ADDON_PERSONA: _(u'Theme'),
ADDON_STATICTHEME: _(u'Theme (Static)'),
}
# Plural
ADDON_TYPES = {
ADDON_EXTENSION: _(u'Extensions'),
ADDON_THEME: _(u'Complete Themes'),
ADDON_DICT: _(u'Dictionaries'),
ADDON_SEARCH: _(u'Search Tools'),
ADDON_LPAPP: _(u'Language Packs (Application)'),
ADDON_LPADDON: _(u'Language Packs (Add-on)'),
ADDON_PLUGIN: _(u'Plugins'),
ADDON_PERSONA: _(u'Themes'),
ADDON_STATICTHEME: _(u'Themes (Static)'),
}
# Searchable Add-on Types
ADDON_SEARCH_TYPES = [
ADDON_ANY,
ADDON_EXTENSION,
ADDON_THEME,
ADDON_DICT,
ADDON_SEARCH,
ADDON_LPAPP,
ADDON_PERSONA,
ADDON_STATICTHEME,
]
# Icons
ADDON_ICONS = {
ADDON_ANY: 'default-addon.png',
ADDON_THEME: 'default-theme.png',
ADDON_STATICTHEME: 'default-theme.png',
}
# We use these slugs in browse page urls.
ADDON_SLUGS = {
ADDON_EXTENSION: 'extensions',
ADDON_THEME: 'themes',
ADDON_DICT: 'language-tools',
ADDON_LPAPP: 'language-tools',
ADDON_PERSONA: 'personas',
ADDON_SEARCH: 'search-tools',
ADDON_STATICTHEME: 'static-themes',
}
# These are used in the update API.
ADDON_SLUGS_UPDATE = {
ADDON_EXTENSION: 'extension',
ADDON_THEME: 'theme',
ADDON_DICT: 'extension',
ADDON_SEARCH: 'search',
ADDON_LPAPP: 'item',
ADDON_LPADDON: 'extension',
ADDON_PERSONA: 'background-theme',
ADDON_PLUGIN: 'plugin',
ADDON_STATICTHEME: 'static-theme',
}
# A slug to ID map for the search API. Included are all ADDON_TYPES that are
# found in ADDON_SEARCH_TYPES.
ADDON_SEARCH_SLUGS = {
'any': ADDON_ANY,
'extension': ADDON_EXTENSION,
'theme': ADDON_THEME,
'dictionary': ADDON_DICT,
'search': ADDON_SEARCH,
'language': ADDON_LPAPP,
'persona': ADDON_PERSONA,
'statictheme': ADDON_STATICTHEME,
}
ADDON_TYPE_CHOICES_API = {
ADDON_EXTENSION: 'extension',
ADDON_THEME: 'theme',
ADDON_DICT: 'dictionary',
ADDON_SEARCH: 'search',
ADDON_LPAPP: 'language',
ADDON_PERSONA: 'persona',
ADDON_STATICTHEME: 'statictheme',
}
# Edit addon information
MAX_TAGS = 20
MIN_TAG_LENGTH = 2
MAX_CATEGORIES = 2
VALID_CONTRIBUTION_DOMAINS = (
'donate.mozilla.org',
'liberapay.com',
'micropayment.de',
'opencollective.com',
'patreon.com',
'paypal.com',
'paypal.me'
)
# Icon upload sizes
ADDON_ICON_SIZES = [32, 48, 64, 128, 256, 512]
# Preview upload sizes [thumb, full]
ADDON_PREVIEW_SIZES = [(200, 150), (700, 525)]
THEME_PREVIEW_SIZE = namedtuple('SizeTuple', 'width height')(680, 100)
THEME_ACCENTCOLOR_DEFAULT = 'rgba(229,230,232,1)'
# Persona image sizes [preview, full]
PERSONA_IMAGE_SIZES = {
'header': [(680, 100), (3000, 200)],
'footer': [None, (3000, 100)],
'icon': [None, (32, 32)],
}
# Accepted image MIME-types
IMG_TYPES = ('image/png', 'image/jpeg')
VIDEO_TYPES = ('video/webm',)
# The string concatenating all accepted image MIME-types with '|'
SUPPORTED_IMAGE_TYPES = '|'.join(IMG_TYPES)
# These types don't maintain app compatibility in the db. Instead, we look at
# APP.types and APP_TYPE_SUPPORT to figure out where they are compatible.
NO_COMPAT = (ADDON_SEARCH, ADDON_DICT, ADDON_PERSONA)
HAS_COMPAT = {t: t not in NO_COMPAT for t in ADDON_TYPES}
# Personas
PERSONAS_ADDON_ID = 10900 # Add-on ID of the Personas Plus Add-on
PERSONAS_FIREFOX_MIN = '3.6' # First Firefox version to support Personas
PERSONAS_THUNDERBIRD_MIN = '3.1' # Ditto for Thunderbird
# Collections.
COLLECTION_NORMAL = 0
COLLECTION_SYNCHRONIZED = 1
COLLECTION_FEATURED = 2
COLLECTION_RECOMMENDED = 3
COLLECTION_FAVORITES = 4
COLLECTION_MOBILE = 5
COLLECTION_ANONYMOUS = 6
COLLECTIONS_NO_CONTRIB = (COLLECTION_SYNCHRONIZED, COLLECTION_FAVORITES)
COLLECTION_SPECIAL_SLUGS = {
COLLECTION_MOBILE: 'mobile',
COLLECTION_FAVORITES: 'favorites',
}
COLLECTION_CHOICES = {
COLLECTION_NORMAL: 'Normal',
COLLECTION_SYNCHRONIZED: 'Synchronized',
COLLECTION_FEATURED: 'Featured',
COLLECTION_RECOMMENDED: 'Generated Recommendations',
COLLECTION_FAVORITES: 'Favorites',
COLLECTION_MOBILE: 'Mobile',
COLLECTION_ANONYMOUS: 'Anonymous',
}
COLLECTION_SEARCH_CHOICES = [
COLLECTION_NORMAL,
COLLECTION_FEATURED,
COLLECTION_RECOMMENDED,
COLLECTION_MOBILE,
COLLECTION_ANONYMOUS,
]
# Validation.
# A skeleton set of passing validation results.
# TODO: Move to validator, generate dynamically via ErrorBundle instance.
VALIDATOR_SKELETON_RESULTS = {
"errors": 0,
"warnings": 0,
"notices": 0,
"success": True,
"compatibility_summary": {"notices": 0, "errors": 0, "warnings": 0},
"metadata": {"requires_chrome": False, "listed": True},
"messages": [],
"message_tree": {},
"detected_type": "extension",
"ending_tier": 5,
}
# A skeleton set of validation results for a system error.
VALIDATOR_SKELETON_EXCEPTION = {
"errors": 1,
"warnings": 0,
"notices": 0,
"success": True,
"compatibility_summary": {"notices": 0, "errors": 0, "warnings": 0},
"metadata": {"requires_chrome": False, "listed": True},
"messages": [
{"id": ["validator", "unexpected_exception"],
"message": "Sorry, we couldn't load your add-on.",
"description": [
"Validation was unable to complete successfully due to an "
"unexpected error.",
"The error has been logged, but please consider filing an issue "
"report here: http://bit.ly/1POrYYU"],
"type": "error",
"tier": 1,
"for_appversions": None,
"uid": "35432f419340461897aa8362398339c4"}
],
"message_tree": {},
"detected_type": "extension",
"ending_tier": 5,
}
VALIDATOR_SKELETON_EXCEPTION_WEBEXT = {
"errors": 1,
"warnings": 0,
"notices": 0,
"success": True,
"compatibility_summary": {"notices": 0, "errors": 0, "warnings": 0},
"metadata": {
"requires_chrome": False,
"listed": True,
"is_webextension": True
},
"messages": [
{"id": ["validator", "unexpected_exception"],
"message": "Sorry, we couldn't load your WebExtension.",
"description": [
"Validation was unable to complete successfully due to an "
"unexpected error.",
"Check https://developer.mozilla.org/en-US/Add-ons/WebExtensions "
"to ensure your webextension is valid or file a bug at "
"http://bit.ly/1POrYYU"],
"type": "error",
"tier": 1,
"for_appversions": None,
"uid": "35432f419340461897aa8362398339c4"}
],
"message_tree": {},
"detected_type": "extension",
"ending_tier": 5,
}
VERSION_SEARCH = re.compile(r'\.(\d+)$')
# Types of SiteEvent
SITE_EVENT_OTHER = 1
SITE_EVENT_EXCEPTION = 2
SITE_EVENT_RELEASE = 3
SITE_EVENT_CHANGE = 4
SITE_EVENT_CHOICES = {
SITE_EVENT_OTHER: _('Other'),
SITE_EVENT_EXCEPTION: _('Exception'),
SITE_EVENT_RELEASE: _('Release'),
SITE_EVENT_CHANGE: _('Change'),
}
# For use in urls.
ADDON_ID = r"""(?P<addon_id>[^/<>"']+)"""
ADDON_UUID = r'(?P<uuid>[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12})'
# Default strict_min_version and strict_max_version for WebExtensions
DEFAULT_WEBEXT_MIN_VERSION = '42.0'
DEFAULT_WEBEXT_MAX_VERSION = '*'
# Android only started to support WebExtensions with version 48
DEFAULT_WEBEXT_MIN_VERSION_ANDROID = '48.0'
# The default version of Firefox that supports WebExtensions without an id
DEFAULT_WEBEXT_MIN_VERSION_NO_ID = '48.0'
# The version of Firefox that first supported static themes. Not Android yet.
DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX = '53.0'
E10S_UNKNOWN = 0
E10S_COMPATIBLE = 1
E10S_COMPATIBLE_WEBEXTENSION = 2
E10S_INCOMPATIBLE = 3
E10S_COMPATIBILITY_CHOICES = (
(E10S_UNKNOWN, _('Unknown')),
# We don't need to show developers the actual, more granular state, only
# that it's compatible or not.
(E10S_COMPATIBLE_WEBEXTENSION, _('Compatible')),
(E10S_COMPATIBLE, _('Compatible')),
(E10S_INCOMPATIBLE, _('Incompatible')),
)
E10S_COMPATIBILITY_CHOICES_API = {
E10S_UNKNOWN: 'unknown',
E10S_COMPATIBLE_WEBEXTENSION: 'compatible-webextension',
E10S_COMPATIBLE: 'compatible',
E10S_INCOMPATIBLE: 'incompatible',
}
ADDON_GUID_PATTERN = re.compile(
# Match {uuid} or something@host.tld ("something" being optional)
# guids. Copied from mozilla-central XPIProvider.jsm.
r'^(\{[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\}'
r'|[a-z0-9-\._]*\@[a-z0-9-\._]+)$', re.IGNORECASE)
SYSTEM_ADDON_GUIDS = (
u'@mozilla.org', u'@shield.mozilla.org', u'@pioneer.mozilla.org')
MOZILLA_TRADEMARK_SYMBOLS = (
'mozilla', 'firefox')
ALLOWED_TRADEMARK_SUBMITTING_EMAILS = (
'@mozilla.com', '@mozilla.org')
DISCO_API_ALLOWED_PARAMETERS = (
'telemetry-client-id', 'lang', 'platform', 'branch', 'study', 'edition')
# If you add/remove any sources, update the docs: /api/download_sources.html
# Note there are some additional sources here for historical/backwards compat.
DOWNLOAD_SOURCES_FULL = (
'addondetail', 'addon-detail-version', 'api', 'category', 'collection',
'creatured', 'developers', 'discovery-dependencies', 'discovery-upsell',
'discovery-video', 'email', 'find-replacement', 'fxcustomization',
'fxfirstrun', 'fxwhatsnew', 'homepagebrowse', 'homepagepromo',
'installservice', 'mostshared', 'oftenusedwith', 'prerelease-banner',
'recommended', 'rockyourfirefox', 'search', 'sharingapi',
'similarcollections', 'ss', 'userprofile', 'version-history',
'co-hc-sidebar', 'co-dp-sidebar',
'cb-hc-featured', 'cb-dl-featured', 'cb-hc-toprated', 'cb-dl-toprated',
'cb-hc-mostpopular', 'cb-dl-mostpopular', 'cb-hc-recentlyadded',
'cb-dl-recentlyadded',
'hp-btn-promo', 'hp-dl-promo', 'hp-hc-featured', 'hp-dl-featured',
'hp-hc-upandcoming', 'hp-dl-upandcoming', 'hp-hc-mostpopular',
'hp-dl-mostpopular', 'hp-contest-winners',
'dp-hc-oftenusedwith', 'dp-dl-oftenusedwith', 'dp-hc-othersby',
'dp-dl-othersby', 'dp-btn-primary', 'dp-btn-version', 'dp-btn-devchannel',
'dp-hc-dependencies', 'dp-dl-dependencies', 'dp-hc-upsell', 'dp-dl-upsell',
)
DOWNLOAD_SOURCES_PREFIX = (
'external-', 'mozcom-', 'discovery-', 'cb-btn-', 'cb-dl-')
|
jdramani/servo | refs/heads/master | tests/wpt/web-platform-tests/html/infrastructure/urls/resolving-urls/query-encoding/resources/manifest.py | 253 | def main(request, response):
id = request.GET['id']
mode = request.GET['mode']
fallback_url = ""
if mode == "FALLBACK":
fallback_url = "fallback-namespace/"
manifest = u"""CACHE MANIFEST
%s:
%s stash.py?q=\u00E5&id=%s&action=put
""" % (mode, fallback_url, id)
return [("Content-Type", "text/cache-manifest; charset=%s" % request.GET['encoding'])], manifest.encode('utf-8') # charset should be ignored for cache manifests
|
dharmabumstead/ansible | refs/heads/devel | lib/ansible/modules/network/routing/net_static_route.py | 65 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on network appliances (routers, switches et al.)
description:
- This module provides declarative management of static
IP routes on network appliances (routers, switches et. al.).
options:
prefix:
description:
- Network prefix of the static route.
required: true
mask:
description:
- Network prefix mask of the static route.
required: true
next_hop:
description:
- Next hop IP of the static route.
required: true
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
purge:
description:
- Purge static routes not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
net_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
net_static_route:
aggregate:
- { prefix: 192.168.2.0, mask: 255.255.255.0, next_hop: 10.0.0.1 }
- { prefix: 192.168.3.0, mask: 255.255.255.0, next_hop: 10.0.2.1 }
- name: Remove static route collections
net_static_route:
aggregate:
- { prefix: 172.24.1.0/24, next_hop: 192.168.42.64 }
- { prefix: 172.24.3.0/24, next_hop: 192.168.42.64 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0/24 10.0.0.1
"""
|
GdZ/scriptfile | refs/heads/master | software/googleAppEngine/lib/django_1_4/tests/regressiontests/i18n/forms.py | 95 | from __future__ import absolute_import
from django import forms
from django.forms.extras import SelectDateWidget
from .models import Company
class I18nForm(forms.Form):
decimal_field = forms.DecimalField(localize=True)
float_field = forms.FloatField(localize=True)
date_field = forms.DateField(localize=True)
datetime_field = forms.DateTimeField(localize=True)
time_field = forms.TimeField(localize=True)
integer_field = forms.IntegerField(localize=True)
class SelectDateForm(forms.Form):
date_field = forms.DateField(widget=SelectDateWidget)
class CompanyForm(forms.ModelForm):
cents_paid = forms.DecimalField(max_digits=4, decimal_places=2, localize=True)
products_delivered = forms.IntegerField(localize=True)
date_added = forms.DateTimeField(localize=True)
class Meta:
model = Company
|
Tinysymphony/shadowsocks | refs/heads/master | tests/graceful_cli.py | 977 | #!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
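# Hold the connection open for a while so the server's graceful-shutdown
# behaviour can be observed before the client disconnects (intent inferred
# from the test name; the 30 s figure is the original author's choice).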
time.sleep(30)
s.close()
|
nnethercote/servo | refs/heads/master | components/script/dom/bindings/codegen/parser/tests/test_constructor_no_interface_object.py | 4 | def WebIDLTest(parser, harness):
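# Note (editorial): [Constructor] hangs the constructor off the interface object,
# so combining it with [NoInterfaceObject] must be rejected in either order;
# [NamedConstructor] is allowed because it introduces its own separate global name.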
threw = False
try:
parser.parse("""
[Constructor, NoInterfaceObject]
interface TestConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
[NoInterfaceObject, Constructor]
interface TestConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
parser.parse("""
[NoInterfaceObject, NamedConstructor=FooBar]
interface TestNamedConstructorNoInterfaceObject {
};
""")
# Test HTMLConstructor and NoInterfaceObject
parser = parser.reset()
threw = False
try:
parser.parse("""
[NoInterfaceObject, HTMLConstructor]
interface TestHTMLConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor, NoInterfaceObject]
interface TestHTMLConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
|
maxcountryman/cryptotrade | refs/heads/master | cryptotrade/public_api.py | 1 | class MtGoxPublic(object):
'''Public API methods for mtgox.com; provides various metrics about the
market status.
'''
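# Editorial note: this class (and TradeHillPublic below) is a mixin; it assumes the
# composing client supplies self.url_base and self.api_request(), neither of which
# is defined in this module.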
def _set_url(self, path='data/'):
url = self.url_base + path
return url
def ticker(self):
return self.api_request(self._set_url() + 'ticker.php')
def depth(self):
return self.api_request(self._set_url() + 'getDepth.php')
def trades(self):
return self.api_request(self._set_url() + 'getTrades.php')
class TradeHillPublic(object):
'''Public API methods for tradehill.com; provides various metrics about the
market status.
'''
def ticker(self):
return self.api_request(self.url_base + 'Ticker')
def trades(self):
return self.api_request(self.url_base + 'Trades')
def orderbook(self):
return self.api_request(self.url_base + 'Orderbook')
|
czpython/django-cms | refs/heads/develop | cms/tests/test_placeholder_operation_signals.py | 4 | # -*- coding: utf-8 -*-
from cms.api import add_plugin
from cms.models import Page, Placeholder, UserSettings
from cms.operations import (
ADD_PLUGIN,
ADD_PLUGINS_FROM_PLACEHOLDER,
CLEAR_PLACEHOLDER,
CHANGE_PLUGIN,
DELETE_PLUGIN,
CUT_PLUGIN,
MOVE_PLUGIN,
PASTE_PLUGIN,
PASTE_PLACEHOLDER,
)
from cms.signals import pre_placeholder_operation, post_placeholder_operation
from cms.test_utils.testcases import CMSTestCase
from cms.utils.compat.tests import UnittestCompatMixin
from cms.test_utils.util.context_managers import signal_tester
class PagePlaceholderTestCase(CMSTestCase, UnittestCompatMixin):
def _add_plugin(self, placeholder=None, plugin_type='LinkPlugin', language='en'):
placeholder = placeholder or self._placeholder_1
plugin_data = {
'LinkPlugin': {'name': 'A Link', 'external_link': 'https://www.django-cms.org'},
'PlaceholderPlugin': {},
}
plugin = add_plugin(
placeholder,
plugin_type,
language,
**plugin_data[plugin_type]
)
return plugin
def _get_add_plugin_uri(self, language='en'):
uri = self.get_add_plugin_uri(
placeholder=self._placeholder_1,
plugin_type='LinkPlugin',
language=language,
)
return uri
def setUp(self):
self._admin_user = self.get_superuser()
self._cms_page = self.create_homepage(
"home",
"nav_playground.html",
"en",
created_by=self._admin_user,
published=True,
)
self._placeholder_1 = self._cms_page.placeholders.get(slot='body')
self._placeholder_2 = self._cms_page.placeholders.get(slot='right-column')
def test_pre_add_plugin(self):
with signal_tester(pre_placeholder_operation) as env:
endpoint = self._get_add_plugin_uri()
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], ADD_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._placeholder_1)
self.assertEqual(call_kwargs['plugin'].name, data['name'])
self.assertEqual(call_kwargs['plugin'].external_link, data['external_link'])
def test_post_add_plugin(self):
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
endpoint = self._get_add_plugin_uri()
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], ADD_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._placeholder_1)
self.assertTrue(post_call_kwargs['plugin'].pk)
self.assertEqual(post_call_kwargs['plugin'].name, data['name'])
self.assertEqual(post_call_kwargs['plugin'].external_link, data['external_link'])
def test_pre_edit_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation) as env:
data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], CHANGE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._placeholder_1)
self.assertEqual(call_kwargs['old_plugin'].name, 'A Link')
self.assertEqual(call_kwargs['old_plugin'].external_link, data['external_link'])
self.assertEqual(call_kwargs['new_plugin'].name, data['name'])
self.assertEqual(call_kwargs['new_plugin'].external_link, data['external_link'])
def test_post_edit_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CHANGE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._placeholder_1)
self.assertEqual(post_call_kwargs['old_plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['old_plugin'].external_link, data['external_link'])
self.assertEqual(post_call_kwargs['new_plugin'].name, data['name'])
self.assertEqual(post_call_kwargs['new_plugin'].external_link, data['external_link'])
def test_pre_delete_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], DELETE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._placeholder_1)
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_post_delete_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], DELETE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._placeholder_1)
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_pre_move_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
source_placeholder = plugin.placeholder
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': self._placeholder_2.pk,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], MOVE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, source_placeholder)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(call_kwargs['source_parent_id'], plugin.parent_id)
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], self._placeholder_2)
self.assertEqual(call_kwargs['target_parent_id'], None)
def test_post_move_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
source_placeholder = plugin.placeholder
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': self._placeholder_2.pk,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], MOVE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, self._placeholder_2)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(post_call_kwargs['source_parent_id'], plugin.parent_id)
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], self._placeholder_2)
self.assertEqual(post_call_kwargs['target_parent_id'], None)
def test_pre_cut_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': user_settings.clipboard_id,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], CUT_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, self._placeholder_1)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['clipboard'], user_settings.clipboard)
self.assertEqual(call_kwargs['clipboard_language'], 'en')
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], self._placeholder_1)
self.assertEqual(call_kwargs['source_parent_id'], plugin.parent_id)
def test_post_cut_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': user_settings.clipboard_id,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CUT_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, user_settings.clipboard)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['clipboard'], user_settings.clipboard)
self.assertEqual(post_call_kwargs['clipboard_language'], 'en')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], self._placeholder_1)
self.assertEqual(post_call_kwargs['source_parent_id'], plugin.parent_id)
def test_pre_paste_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin(placeholder=user_settings.clipboard)
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'placeholder_id': self._placeholder_1.pk,
'move_a_copy': 'true',
'target_language': 'en',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], PASTE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, user_settings.clipboard)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], self._placeholder_1)
self.assertEqual(call_kwargs['target_parent_id'], None)
def test_post_paste_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin(placeholder=user_settings.clipboard)
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'placeholder_id': self._placeholder_1.pk,
'target_language': 'en',
'move_a_copy': 'true',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], PASTE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, self._placeholder_1)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], self._placeholder_1)
self.assertEqual(post_call_kwargs['target_parent_id'], None)
def test_pre_paste_placeholder(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
placeholder_plugin = self._add_plugin(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin(ref_placeholder)
endpoint = self.get_move_plugin_uri(placeholder_plugin)
data = {
'plugin_id': placeholder_plugin.pk,
'placeholder_id': self._placeholder_1.pk,
'move_a_copy': 'true',
'target_language': 'en',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
plugin = call_kwargs['plugins'][0].get_bound_plugin()
self.assertEqual(call_kwargs['operation'], PASTE_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(plugin.name, 'A Link')
self.assertEqual(plugin.placeholder, ref_placeholder)
self.assertEqual(plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], self._placeholder_1)
def test_post_paste_placeholder(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
placeholder_plugin = self._add_plugin(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin(ref_placeholder)
endpoint = self.get_move_plugin_uri(placeholder_plugin)
data = {
'plugin_id': placeholder_plugin.pk,
'placeholder_id': self._placeholder_1.pk,
'target_language': 'en',
'move_a_copy': 'true',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
plugin = post_call_kwargs['plugins'][0].get_bound_plugin()
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], PASTE_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(plugin.name, 'A Link')
self.assertEqual(plugin.placeholder, self._placeholder_1)
self.assertEqual(plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], self._placeholder_1)
def test_pre_add_plugins_from_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
source_placeholder = plugin.placeholder
data = {
'source_language': 'en',
'source_placeholder_id': self._placeholder_1.pk,
'target_language': 'de',
'target_placeholder_id': self._placeholder_2.pk,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
en_plugin = call_kwargs['plugins'][0].get_bound_plugin()
self.assertEqual(call_kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(plugin, en_plugin)
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(call_kwargs['target_language'], 'de')
self.assertEqual(call_kwargs['target_placeholder'], self._placeholder_2)
def test_post_add_plugins_from_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
source_placeholder = plugin.placeholder
data = {
'source_language': 'en',
'source_placeholder_id': self._placeholder_1.pk,
'target_language': 'de',
'target_placeholder_id': self._placeholder_2.pk,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
new_plugin = post_call_kwargs['plugins'][0].get_bound_plugin()
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertNotEqual(plugin, new_plugin)
self.assertEqual(new_plugin.name, 'A Link')
self.assertEqual(new_plugin.placeholder, self._placeholder_2)
self.assertEqual(new_plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(post_call_kwargs['target_language'], 'de')
self.assertEqual(post_call_kwargs['target_placeholder'], self._placeholder_2)
def test_pre_clear_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_clear_placeholder_url(self._placeholder_1)
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
del_plugin = call_kwargs['plugins'][0]
self.assertEqual(call_kwargs['operation'], CLEAR_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(del_plugin.pk, plugin.pk)
self.assertEqual(call_kwargs['placeholder'], self._placeholder_1)
def test_post_clear_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_clear_placeholder_url(self._placeholder_1)
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
del_plugin = post_call_kwargs['plugins'][0]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CLEAR_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(del_plugin.pk, plugin.pk)
self.assertEqual(post_call_kwargs['placeholder'], self._placeholder_1)
class AppPlaceholderTestCase(PagePlaceholderTestCase):
def setUp(self):
self._admin_user = self.get_superuser()
self._cms_page = self.create_homepage(
"home",
"nav_playground.html",
"en",
created_by=self._admin_user,
published=True,
)
self._placeholder_1 = self._cms_page.placeholders.get(slot='body')
self._placeholder_2 = self._cms_page.placeholders.get(slot='right-column')
|
gloaec/trifle | refs/heads/master | src/trifle/raft/client.py | 1 | from __future__ import print_function
import uuid
import msgpack
import trifle.raft.tcp as tcp
class NoConnection(Exception):
pass
class RaftClient(object):
def __init__(self, server):
self.tcp = tcp.TCP(0, 'client')
self.tcp.start()
self.msgs = {}
self.tcp.connect(server)
if not self.tcp.u2c:
# wait up to 0.5 seconds for the connection to come up
self.tcp.recv(0.5)
if not self.tcp.u2c:
raise NoConnection
self.leader = next(iter(self.tcp.u2c.keys()))
def _send(self, rpc, msgid):
self.tcp.send(rpc, self.leader)
msgids = self.poll(0.5)
if not msgids or msgid not in msgids:
return # XXX put real recovery logic here
msg = self.msgs[msgid][0]
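# A 'cr_rdr' (client redirect) reply means the node we contacted is not the
# leader: switch to the advertised leader, reconnect, and replay the same RPC.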
if msg['type'] == 'cr_rdr':
self.leader = msg['leader']
print("redirected to %s! %s" % (self.leader, msg['addr']))
self.tcp.connect(msg['addr'])
del self.msgs[msgid]
return self._send(rpc, msgid)
def poll(self, timeout=0):
ans = self.tcp.recv(timeout)
if not ans:
return
msgids = set()
for _, msgs in ans:
for msg in msgs:
msg = msgpack.unpackb(msg, use_list=False)
msgid = msg['id']
msgids.add(msgid)
ums = self.msgs.get(msgid, [])
ums.append(msg)
self.msgs[msgid] = ums
return msgids
def send(self, data):
msgid = uuid.uuid4().hex
rpc = self.cq_rpc(data, msgid)
self._send(rpc, msgid)
return msgid
def update_hosts(self, config):
msgid = uuid.uuid4().hex
rpc = self.pu_rpc(config, msgid)
self._send(rpc, msgid)
return msgid
def cq_rpc(self, data, msgid):
# client query rpc
rpc = {
'type': 'cq',
'id': msgid,
'data': data
}
return msgpack.packb(rpc)
def pu_rpc(self, config, msgid):
# protocol update rpc
rpc = {
'type': 'pu',
'id': msgid,
'config': config
}
return msgpack.packb(rpc)
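# Minimal usage sketch (editorial, not part of the original module); the server
# address and payload below are assumptions for illustration only:
#   client = RaftClient(('127.0.0.1', 4000))
#   msgid = client.send({'op': 'set', 'key': 'foo', 'value': 'bar'})
#   if client.poll(0.5) and msgid in client.msgs:
#       print(client.msgs[msgid])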
|
StephenWeber/ansible | refs/heads/devel | lib/ansible/modules/network/eos/eos_command.py | 6 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
DOCUMENTATION = """
---
module: eos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on an Arista EOS device
description:
- Sends an arbitrary set of commands to an EOS node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: eos
options:
commands:
description:
- The commands to send to the remote EOS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module does not return until the condition is satisfied or
the number of I(retries) has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditional, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- name: run show version on remote devices
eos_command:
commands: show version
- name: run show version and check to see if output contains Arista
eos_command:
commands: show version
wait_for: result[0] contains Arista
- name: run multiple commands on remote nodes
eos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
eos_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Arista
- result[1] contains Loopback0
- name: run commands and specify the output format
eos_command:
commands:
- command: show version
output: json
"""
RETURN = """
failed_conditions:
description: the conditionals that failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import string_types
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.eos import run_commands
from ansible.module_utils.eos import eos_argument_spec, check_args
VALID_KEYS = ['command', 'output', 'prompt', 'response']
def to_lines(stdout):
lines = list()
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
lines.append(item)
return lines
def parse_commands(module, warnings):
spec = dict(
command=dict(key=True),
output=dict(),
prompt=dict(),
answer=dict()
)
transform = ComplexList(spec, module)
commands = transform(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'Only show commands are supported when using check_mode, not '
'executing %s' % item['command']
)
return commands
def to_cli(obj):
cmd = obj['command']
if obj.get('output') == 'json':
cmd += ' | json'
return cmd
def main():
"""entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(eos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
if warnings:
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError:
exc = get_exception()
module.fail_json(msg=str(exc))
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
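# Re-run the commands on every retry and evaluate the wait_for conditionals
# against the fresh output: each satisfied conditional is dropped (with
# match=any a single success clears them all); anything still pending when the
# retries run out is reported as a failed condition below.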
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': to_lines(responses)
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ACS-Community/ACS | refs/heads/master | LGPL/CommonSoftware/acspy/test/acspytestImpl/PyTestNestedModule.py | 4 | import acspytest__POA
from Acspy.Servants.ContainerServices import ContainerServices
from Acspy.Servants.ComponentLifecycle import ComponentLifecycle
from Acspy.Servants.ACSComponent import ACSComponent
from CORBA import TRUE, FALSE
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) Associated Universities Inc., 2002
# (c) European Southern Observatory, 2002
# Copyright by ESO (in the framework of the ALMA collaboration)
# and Cosylab 2002, All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
#------------------------------------------------------------------------------
'''
Module designed to test the full functionality of the Python Container. Since
Python is not a compiled language, it's vital that everything be tested.
'''
#------------------------------------------------------------------------------
class PyTestNestedModule(acspytest__POA.nested.PyTestNestedModule,
ACSComponent, #Base IDL interface
ContainerServices, #Developer niceties
ComponentLifecycle): #HLA stuff
def __init__(self):
ACSComponent.__init__(self)
ContainerServices.__init__(self)
return
'''
Component designed to test the functionality of the Python container.
'''
def test(self):
'''
Python implementation of IDL method.
'''
print "Testing method from component in a nested module."
return
#------------------------------------------------------------------------------
if __name__ == "__main__":
print "Creating an object"
g = PyTestNestedModule()
g.test()
print "Done..."
|
mylog00/flink | refs/heads/master | flink-libraries/flink-streaming-python/src/test/python/org/apache/flink/streaming/python/api/test_map.py | 4 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from utils.pygeneratorbase import PyGeneratorBase
from org.apache.flink.api.common.functions import MapFunction, FlatMapFunction, ReduceFunction
from org.apache.flink.api.java.functions import KeySelector
from org.apache.flink.streaming.api.windowing.time.Time import milliseconds
class Generator(PyGeneratorBase):
def __init__(self, num_iters):
super(Generator, self).__init__(num_iters)
def do(self, ctx):
ctx.collect(222)
class DummyTupple(MapFunction):
def map(self, value):
return (value, value)
class MinusOne(MapFunction):
def map(self, value):
return value[0] - 1
class Tokenizer(FlatMapFunction):
def flatMap(self, value, collector):
collector.collect((1, value))
class Sum(ReduceFunction):
def reduce(self, input1, input2):
count1, val1 = input1
count2, val2 = input2
return (count1 + count2, val1)
class Selector(KeySelector):
def getKey(self, input):
return input[1]
class Main:
def run(self, flink):
env = flink.get_execution_environment()
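# Data flow (editorial summary): 3 -> (3, 3) -> 2 -> (1, 2), keyed by the value 2,
# with counts summed per 5 ms window, yielding (5, 2) when all elements land in
# the same window.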
env.from_collection([3] * 5) \
.map(DummyTupple()) \
.map(MinusOne()) \
.flat_map(Tokenizer()) \
.key_by(Selector()) \
.time_window(milliseconds(5)) \
.reduce(Sum()) \
.output()
env.execute()
def main(flink):
Main().run(flink)
|
nick41496/Beatnik | refs/heads/master | manage.py | 1 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "beatnik.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
marcopompili/django-emarcs-utils | refs/heads/master | emarcs/utils/loremipsum.py | 1 | '''
Created on 26/dic/2013
@author: Marco Pompili
'''
def heading():
return 'heading'
def long_paragraph():
return 'Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante. Donec eu libero sit amet quam egestas semper. Aenean ultricies mi vitae est. Mauris placerat eleifend leo. Quisque sit amet est et sapien ullamcorper pharetra. Vestibulum erat wisi, condimentum sed, commodo vitae, ornare sit amet, wisi. Aenean fermentum, elit eget tincidunt condimentum, eros ipsum rutrum orci, sagittis tempus lacus enim ac dui. Donec non enim in turpis pulvinar facilisis. Ut felis. Praesent dapibus, neque id cursus faucibus, tortor neque egestas augue, eu vulputate magna eros eu erat. Aliquam erat volutpat. Nam dui mi, tincidunt quis, accumsan porttitor, facilisis luctus, metus'
def medium_paragraph():
return 'Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Vestibulum tortor quam, feugiat vitae, ultricies eget, tempor sit amet, ante.'
def sentence():
return 'Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.'
def city():
return 'Roma'
def province():
return 'Roma'
def road():
return 'via Garibaldi n.1'
def zipcode():
return '00001'
def site():
return 'http://www.mysite.com'
def email():
return 'myemail@provider.com'
def phone():
return '987/123456789'
|
ldoktor/autotest | refs/heads/master | client/virt/tests/watchdog.py | 2 | import logging, time, shutil
from autotest.client.shared import error
from autotest.client.virt import virt_utils
def run_watchdog(test, params, env):
"""
Configure watchdog, crash the guest and check if watchdog_action occurs.
@param test: kvm test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
relogin_timeout = int(params.get("relogin_timeout", 240))
watchdog_enable_cmd = "chkconfig watchdog on"
watchdog_start_cmd = "service watchdog start"
def watchdog_action_reset():
"""
Trigger a crash dump through sysrq-trigger
Ensure watchdog_action (reset) occurs.
"""
session = vm.wait_for_login(timeout=timeout)
logging.info("Triggering crash on vm")
crash_cmd = "echo c > /proc/sysrq-trigger"
session.sendline(crash_cmd)
if not virt_utils.wait_for(lambda: not session.is_responsive(),
240, 0, 1):
raise error.TestFail("Could not trigger crash")
logging.info("Waiting for kernel watchdog_action to take place")
session = vm.wait_for_login(timeout=relogin_timeout)
error.context("Enabling watchdog service")
session.cmd(watchdog_enable_cmd)
error.context("Starting watchdog service")
session.cmd(watchdog_start_cmd, timeout=320)
watchdog_action_reset()
# Close established session
session.close()
|
UCHIC/h2outility | refs/heads/master | src/GuiComponents/__init__.py | 1 |
__all__ = ['HydroShareAccountDialog', 'VisualH20MainWindow', 'UIController']
|
ppries/tensorflow | refs/heads/master | tensorflow/contrib/labeled_tensor/python/ops/nn_test.py | 11 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import test_util
class NNTests(test_util.Base):
def setUp(self):
super(NNTests, self).setUp()
self.axes = ['x']
self.original_lt = core.LabeledTensor([0.0, 0.5, 1.0], self.axes)
self.other_lt = 1 - self.original_lt
def test_unary_ops(self):
ops = [
('relu', tf.nn.relu, nn.relu),
('relu6', tf.nn.relu6, nn.relu6),
('crelu', tf.nn.crelu, nn.crelu),
('elu', tf.nn.elu, nn.elu),
('softplus', tf.nn.softplus, nn.softplus),
('l2_loss', tf.nn.l2_loss, nn.l2_loss),
('softmax', tf.nn.softmax, nn.softmax),
('log_softmax', tf.nn.log_softmax, nn.log_softmax),
]
for op_name, tf_op, lt_op in ops:
golden_tensor = tf_op(self.original_lt.tensor)
golden_lt = core.LabeledTensor(golden_tensor, self.axes)
actual_lt = lt_op(self.original_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
def test_binary_ops(self):
ops = [
('sigmoid_cross_entropy_with_logits',
tf.nn.sigmoid_cross_entropy_with_logits,
nn.sigmoid_cross_entropy_with_logits),
('softmax_cross_entropy_with_logits',
tf.nn.softmax_cross_entropy_with_logits,
nn.softmax_cross_entropy_with_logits),
('sparse_softmax_cross_entropy_with_logits',
tf.nn.sparse_softmax_cross_entropy_with_logits,
nn.sparse_softmax_cross_entropy_with_logits),
]
for op_name, tf_op, lt_op in ops:
golden_tensor = tf_op(self.original_lt.tensor, self.other_lt.tensor)
golden_lt = core.LabeledTensor(golden_tensor, self.axes)
actual_lt = lt_op(self.original_lt, self.other_lt)
self.assertIn(op_name, actual_lt.name)
self.assertLabeledTensorsEqual(golden_lt, actual_lt)
|
xuxiao19910803/edx | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_videos.py | 83 | #-*- coding: utf-8 -*-
"""
Unit tests for video-related REST APIs.
"""
# pylint: disable=attribute-defined-outside-init
import csv
import json
import dateutil.parser
import re
from StringIO import StringIO
from django.conf import settings
from django.test.utils import override_settings
from mock import Mock, patch
from edxval.api import create_profile, create_video, get_video_info
from contentstore.models import VideoUploadConfig
from contentstore.views.videos import KEY_EXPIRATION_IN_SECONDS, StatusDisplayStrings
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from xmodule.modulestore.tests.factories import CourseFactory
class VideoUploadTestMixin(object):
"""
Test cases for the video upload feature
"""
def get_url_for_course_key(self, course_key):
"""Return video handler URL for the given course"""
return reverse_course_url(self.VIEW_NAME, course_key)
def setUp(self):
super(VideoUploadTestMixin, self).setUp()
self.url = self.get_url_for_course_key(self.course.id)
self.test_token = "test_token"
self.course.video_upload_pipeline = {
"course_video_upload_token": self.test_token,
}
self.save_course()
self.profiles = ["profile1", "profile2"]
self.previous_uploads = [
{
"edx_video_id": "test1",
"client_video_id": "test1.mp4",
"duration": 42.0,
"status": "upload",
"courses": [unicode(self.course.id)],
"encoded_videos": [],
},
{
"edx_video_id": "test2",
"client_video_id": "test2.mp4",
"duration": 128.0,
"status": "file_complete",
"courses": [unicode(self.course.id)],
"encoded_videos": [
{
"profile": "profile1",
"url": "http://example.com/profile1/test2.mp4",
"file_size": 1600,
"bitrate": 100,
},
{
"profile": "profile2",
"url": "http://example.com/profile2/test2.mov",
"file_size": 16000,
"bitrate": 1000,
},
],
},
{
"edx_video_id": "non-ascii",
"client_video_id": u"nón-ascii-näme.mp4",
"duration": 256.0,
"status": "transcode_active",
"courses": [unicode(self.course.id)],
"encoded_videos": [
{
"profile": "profile1",
"url": u"http://example.com/profile1/nón-ascii-näme.mp4",
"file_size": 3200,
"bitrate": 100,
},
]
},
]
# Ensure every status string is tested
self.previous_uploads += [
{
"edx_video_id": "status_test_{}".format(status),
"client_video_id": "status_test.mp4",
"duration": 3.14,
"status": status,
"courses": [unicode(self.course.id)],
"encoded_videos": [],
}
for status in (
StatusDisplayStrings._STATUS_MAP.keys() + # pylint:disable=protected-access
["non_existent_status"]
)
]
for profile in self.profiles:
create_profile(profile)
for video in self.previous_uploads:
create_video(video)
def _get_previous_upload(self, edx_video_id):
"""Returns the previous upload with the given video id."""
return next(
video
for video in self.previous_uploads
if video["edx_video_id"] == edx_video_id
)
def test_anon_user(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
def test_put(self):
response = self.client.put(self.url)
self.assertEqual(response.status_code, 405)
def test_invalid_course_key(self):
response = self.client.get(
self.get_url_for_course_key("Non/Existent/Course")
)
self.assertEqual(response.status_code, 404)
def test_non_staff_user(self):
client, __ = self.create_non_staff_authed_user_client()
response = client.get(self.url)
self.assertEqual(response.status_code, 403)
def test_video_pipeline_not_enabled(self):
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] = False
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_video_pipeline_not_configured(self):
settings.VIDEO_UPLOAD_PIPELINE = None
self.assertEqual(self.client.get(self.url).status_code, 404)
def test_course_not_configured(self):
self.course.video_upload_pipeline = {}
self.save_course()
self.assertEqual(self.client.get(self.url).status_code, 404)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideosHandlerTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the main video upload endpoint"""
VIEW_NAME = "videos_handler"
def test_get_json(self):
response = self.client.get_json(self.url)
self.assertEqual(response.status_code, 200)
response_videos = json.loads(response.content)["videos"]
self.assertEqual(len(response_videos), len(self.previous_uploads))
for i, response_video in enumerate(response_videos):
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(
set(response_video.keys()),
set(["edx_video_id", "client_video_id", "created", "duration", "status"])
)
dateutil.parser.parse(response_video["created"])
for field in ["edx_video_id", "client_video_id", "duration"]:
self.assertEqual(response_video[field], original_video[field])
self.assertEqual(
response_video["status"],
StatusDisplayStrings.get(original_video["status"])
)
def test_get_html(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertRegexpMatches(response["Content-Type"], "^text/html(;.*)?$")
# Crude check for presence of data in returned HTML
for video in self.previous_uploads:
self.assertIn(video["edx_video_id"], response.content)
def test_post_non_json(self):
response = self.client.post(self.url, {"files": []})
self.assertEqual(response.status_code, 400)
def test_post_malformed_json(self):
response = self.client.post(self.url, "{", content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_post_invalid_json(self):
def assert_bad(content):
"""Make request with content and assert that response is 400"""
response = self.client.post(
self.url,
json.dumps(content),
content_type="application/json"
)
self.assertEqual(response.status_code, 400)
# Top level missing files key
assert_bad({})
# Entry missing file_name
assert_bad({"files": [{"content_type": "video/mp4"}]})
# Entry missing content_type
assert_bad({"files": [{"file_name": "test.mp4"}]})
@override_settings(AWS_ACCESS_KEY_ID="test_key_id", AWS_SECRET_ACCESS_KEY="test_secret")
@patch("boto.s3.key.Key")
@patch("boto.s3.connection.S3Connection")
def test_post_success(self, mock_conn, mock_key):
files = [
{
"file_name": "first.mp4",
"content_type": "video/mp4",
},
{
"file_name": "second.webm",
"content_type": "video/webm",
},
{
"file_name": "third.mov",
"content_type": "video/quicktime",
},
{
"file_name": "fourth.mp4",
"content_type": "video/mp4",
},
]
bucket = Mock()
mock_conn.return_value = Mock(get_bucket=Mock(return_value=bucket))
mock_key_instances = [
Mock(
generate_url=Mock(
return_value="http://example.com/url_{}".format(file_info["file_name"])
)
)
for file_info in files
]
# If extra calls are made, return a dummy
mock_key.side_effect = mock_key_instances + [Mock()]
response = self.client.post(
self.url,
json.dumps({"files": files}),
content_type="application/json"
)
self.assertEqual(response.status_code, 200)
response_obj = json.loads(response.content)
mock_conn.assert_called_once_with(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
self.assertEqual(len(response_obj["files"]), len(files))
self.assertEqual(mock_key.call_count, len(files))
for i, file_info in enumerate(files):
# Ensure Key was set up correctly and extract id
key_call_args, __ = mock_key.call_args_list[i]
self.assertEqual(key_call_args[0], bucket)
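# The S3 key should be "<ROOT_PATH>/<uuid4>"; the regex below captures the
# UUID portion, which doubles as the edx_video_id checked against VAL further down.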
path_match = re.match(
(
settings.VIDEO_UPLOAD_PIPELINE["ROOT_PATH"] +
"/([a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12})$"
),
key_call_args[1]
)
self.assertIsNotNone(path_match)
video_id = path_match.group(1)
mock_key_instance = mock_key_instances[i]
mock_key_instance.set_metadata.assert_any_call(
"course_video_upload_token",
self.test_token
)
mock_key_instance.set_metadata.assert_any_call(
"client_video_id",
file_info["file_name"]
)
mock_key_instance.set_metadata.assert_any_call("course_key", unicode(self.course.id))
mock_key_instance.generate_url.assert_called_once_with(
KEY_EXPIRATION_IN_SECONDS,
"PUT",
headers={"Content-Type": file_info["content_type"]}
)
# Ensure VAL was updated
val_info = get_video_info(video_id)
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["client_video_id"], file_info["file_name"])
self.assertEqual(val_info["status"], "upload")
self.assertEqual(val_info["duration"], 0)
self.assertEqual(val_info["courses"], [unicode(self.course.id)])
# Ensure response is correct
response_file = response_obj["files"][i]
self.assertEqual(response_file["file_name"], file_info["file_name"])
self.assertEqual(response_file["upload_url"], mock_key_instance.generate_url())
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_VIDEO_UPLOAD_PIPELINE": True})
@override_settings(VIDEO_UPLOAD_PIPELINE={"BUCKET": "test_bucket", "ROOT_PATH": "test_root"})
class VideoUrlsCsvTestCase(VideoUploadTestMixin, CourseTestCase):
"""Test cases for the CSV download endpoint for video uploads"""
VIEW_NAME = "video_encodings_download"
def setUp(self):
super(VideoUrlsCsvTestCase, self).setUp()
VideoUploadConfig(profile_whitelist="profile1").save()
def _check_csv_response(self, expected_profiles):
"""
Check that the response is a valid CSV response containing rows
corresponding to previous_uploads and including the expected profiles.
"""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename={course}_video_urls.csv".format(course=self.course.id.course)
)
response_reader = StringIO(response.content)
reader = csv.DictReader(response_reader, dialect=csv.excel)
self.assertEqual(
reader.fieldnames,
(
["Name", "Duration", "Date Added", "Video ID", "Status"] +
["{} URL".format(profile) for profile in expected_profiles]
)
)
rows = list(reader)
self.assertEqual(len(rows), len(self.previous_uploads))
for i, row in enumerate(rows):
response_video = {
key.decode("utf-8"): value.decode("utf-8") for key, value in row.items()
}
# Videos should be returned by creation date descending
original_video = self.previous_uploads[-(i + 1)]
self.assertEqual(response_video["Name"], original_video["client_video_id"])
self.assertEqual(response_video["Duration"], str(original_video["duration"]))
dateutil.parser.parse(response_video["Date Added"])
self.assertEqual(response_video["Video ID"], original_video["edx_video_id"])
self.assertEqual(response_video["Status"], StatusDisplayStrings.get(original_video["status"]))
for profile in expected_profiles:
response_profile_url = response_video["{} URL".format(profile)]
original_encoded_for_profile = next(
(
original_encoded
for original_encoded in original_video["encoded_videos"]
if original_encoded["profile"] == profile
),
None
)
if original_encoded_for_profile:
self.assertEqual(response_profile_url, original_encoded_for_profile["url"])
else:
self.assertEqual(response_profile_url, "")
def test_basic(self):
self._check_csv_response(["profile1"])
def test_profile_whitelist(self):
VideoUploadConfig(profile_whitelist="profile1,profile2").save()
self._check_csv_response(["profile1", "profile2"])
def test_non_ascii_course(self):
course = CourseFactory.create(
number=u"nón-äscii",
video_upload_pipeline={
"course_video_upload_token": self.test_token,
}
)
response = self.client.get(self.get_url_for_course_key(course.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response["Content-Disposition"],
"attachment; filename=video_urls.csv; filename*=utf-8''n%C3%B3n-%C3%A4scii_video_urls.csv"
)
|
rouge8/pip | refs/heads/develop | tests/data/packages/LocalExtras-0.0.2/setup.py | 6 | import os
from setuptools import find_packages, setup
def path_to_url(path):
"""
Convert a path to URI. The path will be made absolute and
will not have quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join(filepath)
if drive:
return 'file:///' + drive + url
return 'file://' + url
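# Sketch of the behaviour of path_to_url above (the concrete paths are
# illustrative, not part of this package):
#   >>> path_to_url('simple')           # on POSIX, assuming cwd is /tmp/pkgs
#   'file:///tmp/pkgs/simple'
#   >>> path_to_url(r'C:\pkgs\simple')  # on Windows the drive letter is kept
#   'file:///C:/pkgs/simple'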
setup(
name='LocalExtras',
version='0.0.2',
packages=find_packages(),
install_requires=['simple==1.0'],
extras_require={'bar': ['simple==2.0'], 'baz': ['singlemodule']}
)
|
EraYaN/CouchPotatoServer | refs/heads/master | couchpotato/core/media/movie/providers/torrent/hdbits.py | 81 | from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.hdbits import Base
from couchpotato.core.media.movie.providers.base import MovieProvider
log = CPLog(__name__)
autoload = 'HDBits'
class HDBits(MovieProvider, Base):
pass
|
mvo5/snapcraft | refs/heads/master | tests/fixture_setup/os_release.py | 3 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from textwrap import dedent
from unittest import mock
import fixtures
from snapcraft.internal import os_release
class FakeOsRelease(fixtures.Fixture):
def __init__(
self,
id: str = "ubuntu",
version_id: str = "16.04",
version_codename: str = None,
) -> None:
self._id = id
self._version_id = version_id
self._version_codename = version_codename
def _setUp(self):
super()._setUp()
with open("os-release", "w") as release_file:
print(
dedent(
"""\
NAME="Ubuntu"
VERSION="16.04.3 LTS (Xenial Xerus)"
ID_LIKE=debian
PRETTY_NAME="Ubuntu 16.04.3 LTS"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
UBUNTU_CODENAME=xenial"""
),
file=release_file,
)
if self._id is not None:
print("ID={}".format(self._id), file=release_file)
if self._version_id is not None:
print('VERSION_ID="{}"'.format(self._version_id), file=release_file)
if self._version_codename is not None:
print(
"VERSION_CODENAME={}".format(self._version_codename),
file=release_file,
)
release = os_release.OsRelease(os_release_file="os-release")
def _create_os_release(*args, **kwargs):
return release
patcher = mock.patch(
"snapcraft.internal.os_release.OsRelease", wraps=_create_os_release
)
patcher.start()
self.addCleanup(patcher.stop)
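# Rough usage sketch for the fixture above (the calling test class is an
# assumption for illustration, not part of snapcraft's real test suite):
#   self.useFixture(FakeOsRelease(id="fedora", version_id="29"))
# after which any code that constructs snapcraft.internal.os_release.OsRelease
# receives the release object built from the fake "os-release" file written in
# _setUp, because the class is patched to return that single instance.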
|
nacl-webkit/chrome_deps | refs/heads/master | tools/generate_shim_headers/generate_shim_headers.py | 3 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Generates shim headers that mirror the directory structure of bundled headers,
but just forward to the system ones.
This allows seamless compilation against system headers with no changes
to our source code.
"""
import optparse
import os.path
import sys
SHIM_TEMPLATE = """
#if defined(OFFICIAL_BUILD)
#error shim headers must not be used in official builds!
#endif
"""
def GeneratorMain(argv):
parser = optparse.OptionParser()
parser.add_option('--headers-root', action='append')
parser.add_option('--define', action='append')
parser.add_option('--output-directory')
parser.add_option('--use-include-next', action='store_true')
parser.add_option('--outputs', action='store_true')
parser.add_option('--generate', action='store_true')
options, args = parser.parse_args(argv)
if not options.headers_root:
parser.error('Missing --headers-root parameter.')
if not options.output_directory:
parser.error('Missing --output-directory parameter.')
if not args:
parser.error('Missing arguments - header file names.')
source_tree_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
for root in options.headers_root:
target_directory = os.path.join(
options.output_directory,
os.path.relpath(root, source_tree_root))
if options.generate and not os.path.exists(target_directory):
os.makedirs(target_directory)
for header_spec in args:
if ';' in header_spec:
(header_filename,
include_before,
include_after) = header_spec.split(';', 2)
else:
header_filename = header_spec
include_before = ''
include_after = ''
if options.outputs:
yield os.path.join(target_directory, header_filename)
if options.generate:
with open(os.path.join(target_directory, header_filename), 'w') as f:
f.write(SHIM_TEMPLATE)
if options.define:
for define in options.define:
key, value = define.split('=', 1)
# This non-standard push_macro extension is supported
# by compilers we support (GCC, clang).
f.write('#pragma push_macro("%s")\n' % key)
f.write('#undef %s\n' % key)
f.write('#define %s %s\n' % (key, value))
if include_before:
for header in include_before.split(':'):
f.write('#include %s\n' % header)
if options.use_include_next:
f.write('#include_next <%s>\n' % header_filename)
else:
f.write('#include <%s>\n' % header_filename)
if include_after:
for header in include_after.split(':'):
f.write('#include %s\n' % header)
if options.define:
for define in options.define:
key, value = define.split('=', 1)
# This non-standard pop_macro extension is supported
# by compilers we support (GCC, clang).
f.write('#pragma pop_macro("%s")\n' % key)
def DoMain(argv):
return '\n'.join(GeneratorMain(argv))
if __name__ == '__main__':
DoMain(sys.argv[1:])
|
Maethorin/concept2 | refs/heads/master | migrations/versions/9f0fa98ec79d_.py | 1 | """empty message
Revision ID: 9f0fa98ec79d
Revises: 5e873c9ec136
Create Date: 2016-03-08 00:15:10.934364
"""
# revision identifiers, used by Alembic.
revision = '9f0fa98ec79d'
down_revision = '5e873c9ec136'
from alembic import op
import app
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('eventos', sa.Column('resumo', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('eventos', 'resumo')
### end Alembic commands ###
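# Applying or reverting this revision is done from the command line; assuming
# plain Alembic is used (a Flask-Migrate wrapper only changes the entry point):
#   alembic upgrade 9f0fa98ec79d    # adds eventos.resumo
#   alembic downgrade 5e873c9ec136  # drops it again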
|
AOSPU/external_chromium_org_tools_gyp | refs/heads/android-5.0/py3 | test/relative/gyptest-default.py | 336 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default', formats=['msvs'])
# Run from down in foo.
test.run_gyp('a.gyp', chdir='foo/a')
sln = test.workpath('foo/a/a.sln')
sln_data = open(sln, 'rb').read()
vcproj = sln_data.count('b.vcproj')
vcxproj = sln_data.count('b.vcxproj')
if (vcproj, vcxproj) not in [(1, 0), (0, 1)]:
test.fail_test()
test.pass_test()
|
annayqho/TheCannon | refs/heads/master | code/lamost/mass_age/apokasc_validate.py | 1 | import numpy as np
import pyfits
from mass_age_functions import asteroseismic_mass
from mass_age_functions import calc_mass_2
# load data
ref_id = np.load("ref_id.npz")['arr_0']
ref_label = np.load("ref_label.npz")['arr_0']
teff = ref_label[:,0]
mh = ref_label[:,2]
logg =ref_label[:,1]
cm = ref_label[:,3]
nm = ref_label[:,4]
a = pyfits.open("apokasc_lamost_overlap.fits")
data = a[1].data
a.close()
apokasc_ids = data['2MASS_ID']
apokasc_ids = np.array([val.strip() for val in apokasc_ids])
nu_max = data['OCT_NU_MAX']
delta_nu = data['OCT_DELTA_NU']
marie_vals = np.load("marie_vals.npz")
marie_ids = marie_vals['arr_0']
marie_masses = marie_vals['arr_1']
keep = np.logical_and(nu_max > -900, delta_nu > -900)
apokasc_ids = apokasc_ids[keep]
nu_max = nu_max[keep]
delta_nu = delta_nu[keep]
# find corresponding 2mass IDs to the LAMOST IDs
direc = "/home/annaho/aida41040/annaho/TheCannon/examples"
apogee_key = np.loadtxt("%s/apogee_sorted_by_ra.txt" %direc, dtype=str)
lamost_key = np.loadtxt("%s/lamost_sorted_by_ra.txt" %direc, dtype=str)
ref_id_2mass = []
for ii,val in enumerate(ref_id):
ind = np.where(lamost_key==val)[0][0]
twomass = apogee_key[ind][19:37]
ref_id_2mass.append(twomass)
ref_id_2mass = np.array(ref_id_2mass)
# find the overlap between LAMOST and APOKASC
#overlap_id = np.intersect1d(ref_id_2mass, apokasc_ids) #1426 objects
overlap_id = np.intersect1d(
ref_id_2mass, np.intersect1d(marie_ids, apokasc_ids))
# for each ID in ref ID, calculate mass using the astroseismic scaling relation
inds_astr = np.array([np.where(apokasc_ids==val)[0][0] for val in overlap_id])
inds_ref = np.array([np.where(ref_id_2mass==val)[0][0] for val in overlap_id])
m_astr = asteroseismic_mass(
nu_max[inds_astr], delta_nu[inds_astr], teff[inds_ref])
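# The helper above presumably implements the standard asteroseismic scaling
# relation (its body lives in mass_age_functions, which is not shown here):
#   M / M_sun ~ (nu_max / nu_max_sun)**3
#               * (delta_nu / delta_nu_sun)**(-4)
#               * (Teff / Teff_sun)**1.5
# with solar reference values of roughly nu_max_sun ~ 3090 uHz,
# delta_nu_sun ~ 135 uHz and Teff_sun ~ 5777 K.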
# for each ID in ref ID, calculate mass using Marie's formula
m_marie = calc_mass_2(
mh[inds_ref], cm[inds_ref], nm[inds_ref], teff[inds_ref], logg[inds_ref])
|
CubicERP/odoo | refs/heads/8.1 | addons/sale_stock/sale_stock.py | 10 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_default_warehouse(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def _get_shipped(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
group = sale.procurement_group_id
if group:
res[sale.id] = all([proc.state in ['cancel', 'done'] for proc in group.procurement_ids])
else:
res[sale.id] = False
return res
def _get_orders(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id:
res.add(move.procurement_id.sale_line_id.order_id.id)
return list(res)
def _get_orders_procurements(self, cr, uid, ids, context=None):
res = set()
for proc in self.pool.get('procurement.order').browse(cr, uid, ids, context=context):
if proc.state =='done' and proc.sale_line_id:
res.add(proc.sale_line_id.order_id.id)
return list(res)
def _get_picking_ids(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
if not sale.procurement_group_id:
res[sale.id] = []
continue
res[sale.id] = self.pool.get('stock.picking').search(cr, uid, [('group_id', '=', sale.procurement_group_id.id)], context=context)
return res
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
vals = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
location_id = order.partner_shipping_id.property_stock_customer.id
vals['location_id'] = location_id
routes = line.route_id and [(4, line.route_id.id)] or []
vals['route_ids'] = routes
vals['warehouse_id'] = order.warehouse_id and order.warehouse_id.id or False
vals['partner_dest_id'] = order.partner_shipping_id.id
return vals
_columns = {
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'shipped': fields.function(_get_shipped, string='Delivered', type='boolean', store={
'procurement.order': (_get_orders_procurements, ['state'], 10)
}),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking associated to this sale'),
}
_defaults = {
'warehouse_id': _get_default_warehouse,
'picking_policy': 'direct',
'order_policy': 'manual',
}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
val = {}
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
if warehouse.company_id:
val['company_id'] = warehouse.company_id.id
return {'value': val}
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing delivery orders
of given sales order ids. It can either be a in a list or in a form
view, if there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree_all')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids]
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
move_obj = self.pool.get("stock.move")
res = super(sale_order,self).action_invoice_create(cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
for picking in order.picking_ids:
move_obj.write(cr, uid, [x.id for x in picking.move_lines], {'invoice_state': 'invoiced'}, context=context)
return res
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.write(cr, uid, [o.id], {'order_policy': 'manual'}, context=context)
return res
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = super(sale_order, self)._get_date_planned(cr, uid, order, line, start_date, context=context)
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
res = super(sale_order, self)._prepare_procurement_group(cr, uid, order, context=None)
res.update({'move_type': order.picking_policy})
return res
def action_ship_end(self, cr, uid, ids, context=None):
super(sale_order, self).action_ship_end(cr, uid, ids, context=context)
for order in self.browse(cr, uid, ids, context=context):
val = {'shipped': True}
if order.state == 'shipping_except':
val['state'] = 'progress'
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
res = self.write(cr, uid, [order.id], val)
return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class product_product(osv.osv):
_inherit = 'product.product'
def need_procurement(self, cr, uid, ids, context=None):
#when sale/product is installed alone, there is no need to create procurements, but with sale_stock
#we must create a procurement for each product that is not a service.
for product in self.browse(cr, uid, ids, context=context):
if product.type != 'service':
return True
return super(product_product, self).need_procurement(cr, uid, ids, context=context)
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
try:
res[line.id] = int((line.product_uom_qty+line.product_packaging.qty-0.0001) / line.product_packaging.qty)
except:
res[line.id] = 1
return res
_columns = {
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
'route_id': fields.many2one('stock.location.route', 'Route', domain=[('sale_selectable', '=', True)]),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
}
_defaults = {
'product_packaging': False,
}
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning'].get('message', '') or ''
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging_ids:
packaging = result['product_packaging'] = False
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
if qty and (q and not (qty % q) == 0):
ean = pack.ean or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"EAN: %s Quantity: %s Type of ul: %s") % \
(qty, ean, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
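    # Worked example of the divisibility check above (numbers are illustrative):
    # with pack.qty == 12 in the product's default UoM, q is 12; an ordered qty
    # of 30 gives 30 % 12 != 0, so the "Picking Information" warning with the
    # proposed packaging quantities is returned, while qty == 36 passes silently.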
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
warehouse_obj = self.pool['stock.warehouse']
warning = {}
#UoM False due to hack which makes sure uom changes price, ... in product_id_change
res = self.product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=False, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
# set product uom in context to get virtual stock in current uom
if 'product_uom' in res.get('value', {}):
# use the uom changed by super call
context = dict(context, uom=res['value']['product_uom'])
elif uom:
# fallback on selected
context = dict(context, uom=uom)
#update of result obtained in super function
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value'].update({'product_tmpl_id': product_obj.product_tmpl_id.id, 'delay': (product_obj.sale_delay or 0.0)})
# Calling product_packaging_change function after updating UoM
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
if product_obj.type == 'product':
#determine if the product is MTO or not (for a further check)
isMto = False
if warehouse_id:
warehouse = warehouse_obj.browse(cr, uid, warehouse_id, context=context)
for product_route in product_obj.route_ids:
if warehouse.mto_pull_id and warehouse.mto_pull_id.route_id and warehouse.mto_pull_id.route_id.id == product_route.id:
isMto = True
break
else:
try:
mto_route_id = warehouse_obj._get_mto_route(cr, uid, context=context)
except:
# if route MTO not found in ir_model_data, we treat the product as in MTS
mto_route_id = False
if mto_route_id:
for product_route in product_obj.route_ids:
if product_route.id == mto_route_id:
isMto = True
break
#check if product is available, and if not: raise a warning, but do this only for products that aren't processed in MTO
if not isMto:
uom_record = False
if uom:
uom_record = product_uom_obj.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom_record.category_id.id:
uom_record = False
if not uom_record:
uom_record = product_obj.uom_id
compare_qty = float_compare(product_obj.virtual_available, qty, precision_rounding=uom_record.rounding)
if compare_qty == -1:
warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\nThe real stock is %.2f %s. (without reservations)') % \
(qty, uom_record.name,
max(0,product_obj.virtual_available), uom_record.name,
max(0,product_obj.qty_available), uom_record.name)
warning_msgs += _("Not enough stock ! : ") + warn_msg + "\n\n"
#update of warning messages
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
res.update({'warning': warning})
return res
class stock_move(osv.osv):
_inherit = 'stock.move'
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
self.pool.get('sale.order.line').write(cr, uid, [sale_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('sale.order').write(cr, uid, [sale_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
sale_line_ids = sale_line_obj.search(cr, uid, [('order_id', '=', move.procurement_id.sale_line_id.order_id.id), ('invoiced', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if sale_line_ids:
created_lines = sale_line_obj.invoice_line_create(cr, uid, sale_line_ids, context=context)
invoice_line_obj.write(cr, uid, created_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.order_policy == 'picking':
sale_order = move.procurement_id.sale_line_id.order_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
elif move.picking_id.sale_id:
# In case of extra move, it is better to use the same data as the original moves
sale_order = move.picking_id.sale_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in sale_line.tax_id])]
res['account_analytic_id'] = sale_line.order_id.project_id and sale_line.order_id.project_id.id or False
res['discount'] = sale_line.discount
if move.product_id.id != sale_line.product_id.id:
res['price_unit'] = self.pool['product.pricelist'].price_get(
cr, uid, [sale_line.order_id.pricelist_id.id],
move.product_id.id, move.product_uom_qty or 1.0,
sale_line.order_id.partner_id, context=context)[sale_line.order_id.pricelist_id.id]
else:
res['price_unit'] = sale_line.price_unit
uos_coeff = move.product_uom_qty and move.product_uos_qty / move.product_uom_qty or 1.0
res['price_unit'] = res['price_unit'] / uos_coeff
return res
def _get_moves_taxes(self, cr, uid, moves, context=None):
is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, context=context)
for move in moves:
if move.procurement_id and move.procurement_id.sale_line_id:
is_extra_move[move.id] = False
extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.procurement_id.sale_line_id.tax_id])]
return (is_extra_move, extra_move_tax)
class stock_location_route(osv.osv):
_inherit = "stock.location.route"
_columns = {
'sale_selectable': fields.boolean("Selectable on Sales Order Line")
}
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Inherit the original function of the 'stock' module
We select the partner of the sales order as the partner of the customer invoice
"""
saleorder_ids = self.pool['sale.order'].search(cr, uid, [('procurement_group_id' ,'=', picking.group_id.id)], context=context)
saleorders = self.pool['sale.order'].browse(cr, uid, saleorder_ids, context=context)
if saleorders and saleorders[0] and saleorders[0].order_policy == 'picking':
saleorder = saleorders[0]
return saleorder.partner_invoice_id.id
return super(stock_picking, self)._get_partner_to_invoice(cr, uid, picking, context=context)
def _get_sale_id(self, cr, uid, ids, name, args, context=None):
sale_obj = self.pool.get("sale.order")
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
if picking.group_id:
sale_ids = sale_obj.search(cr, uid, [('procurement_group_id', '=', picking.group_id.id)], context=context)
if sale_ids:
res[picking.id] = sale_ids[0]
return res
_columns = {
'sale_id': fields.function(_get_sale_id, type="many2one", relation="sale.order", string="Sale Order"),
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
sale_obj = self.pool.get('sale.order')
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
sale = move.picking_id.sale_id
if sale:
inv_vals.update({
'fiscal_position': sale.fiscal_position.id,
'payment_term': sale.payment_term.id,
'user_id': sale.user_id.id,
'section_id': sale.section_id.id,
'name': sale.client_order_ref or '',
})
return inv_vals
|
rixrix/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/lint.py | 979 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
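# Rough wiring sketch for this filter (names follow html5lib's public helpers;
# the document variable is a placeholder):
#   walker = html5lib.getTreeWalker("etree")
#   for token in Filter(walker(document)):
#       pass  # tokens stream through unchanged unless a LintError is raised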
|
HoracioAlvarado/fwd | refs/heads/master | venv/Lib/site-packages/pkg_resources/__init__.py | 6 | """
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import token
import symbol
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
try:
import parser
except ImportError:
pass
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*'+part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
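# Behaviour sketch for parse_version (doctest-style, values chosen for
# illustration):
#   >>> parse_version("1.9.1") < parse_version("1.10")
#   True
#   >>> parse_version("2.0.dev1") < parse_version("2.0")
#   True
#   >>> isinstance(parse_version("not!a!version"), SetuptoolsLegacyVersion)
#   True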
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_'+v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_'+_state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__+repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
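# Illustrative registration (pkg_resources wires up its own providers in the
# same way later in this module, e.g. mapping zip importers to ZipProvider):
#   register_loader_type(zipimport.zipimporter, ZipProvider)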
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided==required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
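# Concrete reading of the Mac OS X branch above (platform strings are
# illustrative):
#   compatible_platforms('macosx-10.6-intel', 'macosx-10.9-intel')  # True:
#       an egg built for 10.6 may run on a 10.9 interpreter
#   compatible_platforms('macosx-10.9-intel', 'macosx-10.6-intel')  # False:
#       an egg requiring 10.9 is rejected when only 10.6 is available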
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
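    # Illustrative call against the global working set ('console_scripts' is
    # just a commonly used group name; any group string works):
    #   for ep in working_set.iter_entry_points('console_scripts'):
    #       print(ep.name, ep.module_name)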
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key]=1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
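        Example usage (illustrative; ``FooBar`` is a hypothetical project)::
            for dist in working_set.require("FooBar>=1.0"):
                print(dist.project_name, dist.version)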
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
def subscribe(self, callback):
"""Invoke `callback` for all distributions (including existing ones)"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version==self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
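        For example (illustrative; ``FooBar`` is a hypothetical project)::
            env = Environment()
            newest_first = env['FooBar']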
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
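        A minimal sketch (illustrative; ``FooBar`` is a hypothetical project
        and ``working_set`` is the usual module-level instance)::
            env = Environment()
            req = Requirement.parse("FooBar>=1.0")
            dist = env.best_match(req, working_set)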
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
        In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
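    For example (an illustrative sketch; ``somepkg`` and the resource name
    are hypothetical)::
        try:
            path = ResourceManager().resource_filename('somepkg', 'data.cfg')
        except ExtractionError as exc:
            print("could not extract into", exc.cache_path)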
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
)
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
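    For example (illustrative)::
        safe_name('foo_bar')     # -> 'foo-bar'
        safe_name('My Project')  # -> 'My-Project'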
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
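    For example (illustrative)::
        safe_version('2.7.2')     # -> '2.7.2'
        safe_version('1.0 beta')  # -> '1.0.beta' (not PEP 440 parseable)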
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
    Any runs of characters other than letters, digits, and dots are replaced
    with a single '_', and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
class MarkerEvaluation(object):
values = {
'os_name': lambda: os.name,
'sys_platform': lambda: sys.platform,
'python_full_version': platform.python_version,
'python_version': lambda: platform.python_version()[:3],
'platform_version': platform.version,
'platform_machine': platform.machine,
'platform_python_implementation': platform.python_implementation,
'python_implementation': platform.python_implementation,
}
@classmethod
def is_invalid_marker(cls, text):
"""
        Validate text as a PEP 426 environment marker; return an exception
        instance if invalid, or False otherwise.
"""
try:
cls.evaluate_marker(text)
except SyntaxError as e:
return cls.normalize_exception(e)
return False
@staticmethod
def normalize_exception(exc):
"""
Given a SyntaxError from a marker evaluation, normalize the error
message:
- Remove indications of filename and line number.
- Replace platform-specific error messages with standard error
messages.
"""
subs = {
'unexpected EOF while parsing': 'invalid syntax',
'parenthesis is never closed': 'invalid syntax',
}
exc.filename = None
exc.lineno = None
exc.msg = subs.get(exc.msg, exc.msg)
return exc
@classmethod
def and_test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.and_, items)
@classmethod
def test(cls, nodelist):
# MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
items = [
cls.interpret(nodelist[i])
for i in range(1, len(nodelist), 2)
]
return functools.reduce(operator.or_, items)
@classmethod
def atom(cls, nodelist):
t = nodelist[1][0]
if t == token.LPAR:
if nodelist[2][0] == token.RPAR:
raise SyntaxError("Empty parentheses")
return cls.interpret(nodelist[2])
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@classmethod
def comparison(cls, nodelist):
if len(nodelist) > 4:
msg = "Chained comparison not allowed in environment markers"
raise SyntaxError(msg)
comp = nodelist[2][1]
cop = comp[1]
if comp[0] == token.NAME:
if len(nodelist[2]) == 3:
if cop == 'not':
cop = 'not in'
else:
cop = 'is not'
try:
cop = cls.get_op(cop)
except KeyError:
msg = repr(cop) + " operator not allowed in environment markers"
raise SyntaxError(msg)
return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3]))
@classmethod
def get_op(cls, op):
ops = {
symbol.test: cls.test,
symbol.and_test: cls.and_test,
symbol.atom: cls.atom,
symbol.comparison: cls.comparison,
'not in': lambda x, y: x not in y,
'in': lambda x, y: x in y,
'==': operator.eq,
'!=': operator.ne,
'<': operator.lt,
'>': operator.gt,
'<=': operator.le,
'>=': operator.ge,
}
if hasattr(symbol, 'or_test'):
ops[symbol.or_test] = cls.test
return ops[op]
@classmethod
def evaluate_marker(cls, text, extra=None):
"""
Evaluate a PEP 426 environment marker on CPython 2.4+.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
        This implementation uses the 'parser' module, which is not
        implemented on Jython and has been superseded by the 'ast' module in
        Python 2.6 and later.
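        A minimal usage sketch (illustrative)::
            MarkerEvaluation.evaluate_marker('os_name == "posix"')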
"""
return cls.interpret(parser.expr(text).totuple(1)[1])
@staticmethod
def _translate_metadata2(env):
"""
Markerlib implements Metadata 1.2 (PEP 345) environment markers.
Translate the variables to Metadata 2.0 (PEP 426).
"""
return dict(
(key.replace('.', '_'), value)
for key, value in env.items()
)
@classmethod
def _markerlib_evaluate(cls, text):
"""
Evaluate a PEP 426 environment marker using markerlib.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
"""
import _markerlib
env = cls._translate_metadata2(_markerlib.default_environment())
try:
result = _markerlib.interpret(text, env)
except NameError as e:
raise SyntaxError(e.args[0])
return result
if 'parser' not in globals():
# Fall back to less-complete _markerlib implementation if 'parser' module
# is not available.
evaluate_marker = _markerlib_evaluate
@classmethod
def interpret(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
try:
op = cls.get_op(nodelist[0])
except KeyError:
raise SyntaxError("Comparison or logical expression expected")
return op(nodelist)
@classmethod
def evaluate(cls, nodelist):
        while len(nodelist) == 2:
            nodelist = nodelist[1]
kind = nodelist[0]
name = nodelist[1]
if kind==token.NAME:
try:
op = cls.values[name]
except KeyError:
raise SyntaxError("Unknown name %r" % name)
return op()
if kind==token.STRING:
s = nodelist[1]
if not cls._safe_string(s):
raise SyntaxError(
"Only plain strings allowed in environment markers")
return s[1:-1]
msg = "Language feature not supported in environment markers"
raise SyntaxError(msg)
@staticmethod
def _safe_string(cand):
return (
cand[:1] in "'\"" and
not cand.startswith('"""') and
not cand.startswith("'''") and
'\\' not in cand
)
invalid_marker = MarkerEvaluation.is_invalid_marker
evaluate_marker = MarkerEvaluation.evaluate_marker
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
if sys.version_info <= (3,):
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name))
else:
def get_metadata(self, name):
if not self.egg_info:
return ""
return self._get(self._fn(self.egg_info, name)).decode("utf-8")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/'+script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename,'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path!=old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive+os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre+zip_path
if fspath.startswith(self.egg_root+os.sep):
return fspath[len(self.egg_root)+1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name=='nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size!=size or stat.st_mtime!=timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name=='PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name=='PKG-INFO':
with io.open(self.path, encoding='utf-8') as f:
metadata = f.read()
return metadata
raise KeyError("No metadata except PKG-INFO is available")
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive+os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders = {})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item,'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
for entry in os.listdir(path_item):
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module,'__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def position_in_sys_path(p):
"""
Return the ordinal of the path based on its position in sys.path
"""
parts = p.split(os.sep)
parts = parts[:-(package_name.count('.') + 1)]
return sys_path.index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
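# Example usage (illustrative): a namespace package conventionally calls
# declare_namespace from its __init__.py so that portions installed under
# different sys.path entries are merged into one package:
#     declare_namespace(__name__)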
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent,()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item)==normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
# whitespace and comment
LINE_END = re.compile(r"\s*(#.*)?$").match
# line continuation
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match
# Distribution or extra
DISTRO = re.compile(r"\s*((\w|[-.])+)").match
# ver. info
VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match
# comma between items
COMMA = re.compile(r"\s*,").match
OBRACKET = re.compile(r"\s*\[").match
CBRACKET = re.compile(r"\s*\]").match
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional.
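        For example (illustrative; the names are hypothetical)::
            ep = EntryPoint.parse('mycmd = mypkg.cli:main [extra1]')
            # ep.name == 'mycmd', ep.module_name == 'mypkg.cli',
            # ep.attrs == ('main',), ep.extras == ('extra1',)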
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name]=ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non-PEP 440 version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs=[]
elif not evaluate_marker(marker):
reqs=[]
extra = safe_extra(extra) or None
dm.setdefault(extra,[]).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=True)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group,{})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Insert self.location in path before its nearest parent directory"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath= [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
break
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p+1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self,**kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) were built with
        an older safe_version, so their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae becomes
        1.11.0.dev0_2329eae). Such distributions will not be parsed properly
        downstream by Distribution and safe_version, so take an extra step
        and try to get the version number from the metadata file itself
        instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _preparse_requirement(self, requires_dist):
"""Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz')
Split environment marker, add == prefix to version specifiers as
        necessary, and remove parentheses.
"""
parts = requires_dist.split(';', 1) + ['']
distvers = parts[0].strip()
mark = parts[1].strip()
distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers)
distvers = distvers.replace('(', '').replace(')', '')
return (distvers, mark)
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
from _markerlib import compile as compile_marker
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
distvers, mark = self._preparse_requirement(req)
parsed = next(parse_requirements(distvers))
parsed.marker_fn = compile_marker(mark)
reqs.append(parsed)
def reqs_for_extra(extra):
for req in reqs:
if req.marker_fn(override={'extra':extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
extra = safe_extra(extra.strip())
dm[extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args,**kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras)
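# Illustrative call (not part of the original module): parse_requirements()
# handles one specification per line, e.g.
#   list(parse_requirements("FooProject>=1.2\nBarProject[extra1]==0.3"))
# yields two Requirement objects, the second carrying the 'extra1' extra.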
class Requirement:
def __init__(self, project_name, specs, extras):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
self.unsafe_name, project_name = project_name, safe_name(project_name)
self.project_name, self.key = project_name, project_name.lower()
self.specifier = packaging.specifiers.SpecifierSet(
",".join(["".join([x, y]) for x, y in specs])
)
self.specs = specs
self.extras = tuple(map(safe_extra, extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
)
self.__hash = hash(self.hashCmp)
def __str__(self):
extras = ','.join(self.extras)
if extras:
extras = '[%s]' % extras
return '%s%s%s' % (self.project_name, extras, self.specifier)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
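    # Example usage (illustrative, not from the original source):
    #   req = Requirement.parse("FooProject[extra1]>=1.2")
    #   req.key == 'fooproject'; '1.3' in req is True via the SpecifierSet.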
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
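# Illustrative behaviour (assumed example, not in the original file):
#   list(split_sections("[console_scripts]\nfoo = pkg:main"))
#   -> [('console_scripts', ['foo = pkg:main'])]
# Any lines appearing before the first header come back under a None section.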
def _mkstemp(*args,**kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args,**kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
|
saisaizhang/Food | refs/heads/master | flask/lib/python2.7/site-packages/werkzeug/testsuite/wsgi.py | 146 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from os import path
from contextlib import closing
from werkzeug.testsuite import WerkzeugTestCase, get_temporary_directory
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
from werkzeug._compat import StringIO, BytesIO, NativeStringIO, to_native
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield b'NOT FOUND'
test_dir = get_temporary_directory()
with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file:
test_file.write(u'FOUND')
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared'),
'/foo': test_dir
})
for p in '/test.txt', '/sources/test.txt', '/foo/äöü':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
self.assert_equal(status, '200 OK')
with closing(app_iter) as app_iter:
data = b''.join(app_iter).strip()
self.assert_equal(data, b'FOUND')
app_iter, status, headers = run_wsgi_app(
app, create_environ('/pkg/debugger.js'))
with closing(app_iter) as app_iter:
contents = b''.join(app_iter)
self.assert_in(b'$(function() {', contents)
app_iter, status, headers = run_wsgi_app(
app, create_environ('/missing'))
self.assert_equal(status, '404 NOT FOUND')
self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND')
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.org')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.org')),
'example.org')
def test_get_host_multiple_forwarded(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.com, example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env), 'example.com')
self.assert_equal(
wsgi.get_host(create_environ('/', 'http://example.com')),
'example.com')
def test_get_host_validation(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
self.assert_equal(wsgi.get_host(env, trusted_hosts=['.example.org']),
'example.org')
self.assert_raises(BadRequest, wsgi.get_host, env,
trusted_hosts=['example.com'])
def test_responder(self):
def foo(environ, start_response):
return BaseResponse(b'Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
self.assert_equal(response.status_code, 200)
self.assert_equal(response.data, b'Test')
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
self.assert_equal(env.get('SCRIPT_NAME'), script_name)
self.assert_equal(env.get('PATH_INFO'), path_info)
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
self.assert_equal(pop(), 'a')
assert_tuple('/foo/a', '/b///c')
self.assert_equal(pop(), 'b')
assert_tuple('/foo/a/b', '///c')
self.assert_equal(pop(), 'c')
assert_tuple('/foo/a/b///c', '')
self.assert_is_none(pop())
def test_peek_path_info(self):
env = {
'SCRIPT_NAME': '/foo',
'PATH_INFO': '/aaa/b///c'
}
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env), 'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa')
def test_path_info_and_script_name_fetching(self):
env = create_environ(u'/\N{SNOWMAN}', u'http://example.com/\N{COMET}/')
self.assert_equal(wsgi.get_path_info(env), u'/\N{SNOWMAN}')
self.assert_equal(wsgi.get_path_info(env, charset=None), u'/\N{SNOWMAN}'.encode('utf-8'))
self.assert_equal(wsgi.get_script_name(env), u'/\N{COMET}')
self.assert_equal(wsgi.get_script_name(env, charset=None), u'/\N{COMET}'.encode('utf-8'))
def test_query_string_fetching(self):
env = create_environ(u'/?\N{SNOWMAN}=\N{COMET}')
qs = wsgi.get_query_string(env)
self.assert_strict_equal(qs, '%E2%98%83=%E2%98%84')
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.read(), b'123')
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456')
stream = RaisingLimitedStream(io, 3)
self.assert_strict_equal(stream.tell(), 0)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.tell(), 1)
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.tell(), 2)
self.assert_strict_equal(stream.read(1), b'3')
self.assert_strict_equal(stream.tell(), 3)
self.assert_raises(BadRequest, stream.read)
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(), b'123456\n')
self.assert_strict_equal(stream.readline(), b'ab')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(), [b'123456\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(2), [b'12'])
self.assert_strict_equal(stream.readlines(2), [b'34'])
self.assert_strict_equal(stream.readlines(), [b'56\n', b'ab'])
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readline(100), b'123456\n')
io = BytesIO(b'123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
self.assert_strict_equal(stream.readlines(100), [b'123456\n', b'ab'])
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(1), b'1')
self.assert_strict_equal(stream.read(1), b'2')
self.assert_strict_equal(stream.read(), b'3')
self.assert_strict_equal(stream.read(), b'')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 3)
self.assert_strict_equal(stream.read(-1), b'123')
io = BytesIO(b'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), b'')
io = StringIO(u'123456')
stream = wsgi.LimitedStream(io, 0)
self.assert_strict_equal(stream.read(-1), u'')
io = StringIO(u'123\n456\n')
stream = wsgi.LimitedStream(io, 8)
self.assert_strict_equal(list(stream), [u'123\n', u'456\n'])
def test_limited_stream_disconnection(self):
io = BytesIO(b'A bit of content')
# disconnect detection on out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because file close
io = BytesIO(b'x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
self.assert_equal(x, u'/hello')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
self.assert_equal(x, u'/')
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
self.assert_equal(x, u'/fööbär')
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
self.assert_is_none(x)
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
self.assert_is_none(x)
def test_get_host_fallback(self):
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}), 'foobar.example.com')
self.assert_equal(wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}), 'foobar.example.com:81')
def test_get_current_url_unicode(self):
env = create_environ()
env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
rv = wsgi.get_current_url(env)
self.assert_strict_equal(rv,
u'http://localhost/?foo=bar&baz=blah&meh=\ufffd')
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
data = 'abc\r\nThis line is broken by the buffer length.' \
'\r\nFoo bar baz'
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, ['abc\r\n', 'This line is broken by the '
'buffer length.\r\n', 'Foo bar baz'])
def test_multi_part_line_breaks_bytes(self):
data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=16))
self.assert_equal(lines, [b'abcdef\r\n', b'ghijkl\r\n',
b'mnopqrstuvwxyz\r\n', b'ABCDEFGHIJK'])
data = b'abc\r\nThis line is broken by the buffer length.' \
b'\r\nFoo bar baz'
test_stream = BytesIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=24))
self.assert_equal(lines, [b'abc\r\n', b'This line is broken by the '
b'buffer length.\r\n', b'Foo bar baz'])
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in range(1, 10):
test_stream = NativeStringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
buffer_size=4))
self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])
def test_iter_functions_support_iterators(self):
data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
lines = list(wsgi.make_line_iter(data))
self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])
def test_make_chunk_iter(self):
data = [u'abcdefXghi', u'jklXmnopqrstuvwxyzX', u'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
data = u'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = StringIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
u'ABCDEFGHIJK'])
def test_make_chunk_iter_bytes(self):
data = [b'abcdefXghi', b'jklXmnopqrstuvwxyzX', b'ABCDEFGHIJK']
rv = list(wsgi.make_chunk_iter(data, 'X'))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
test_stream = BytesIO(data)
rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
buffer_size=4))
self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
b'ABCDEFGHIJK'])
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in range(1, 15):
lines = list(wsgi.make_line_iter(NativeStringIO(data), limit=len(data),
buffer_size=4))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
|
roboime/pyroboime | refs/heads/master | roboime/core/skills/drivetoball.py | 1 | #
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
from numpy import remainder
from .drivetoobject import DriveToObject
class DriveToBall(DriveToObject):
"""
This skill is a DriveToObject except that the object is the ball
and that some parameters are optimized for getting on the ball.
"""
#enter_angle = 30.0
enter_angle = 10.0
exit_angle = 20.0
def __init__(self, robot, **kwargs):
# TODO: magic parameters
super(DriveToBall, self).__init__(robot, point=robot.world.ball, **kwargs)
self.avoid = robot.world.ball
def _step(self):
#if self.target is not None and self.lookpoint is not None:
# base_angle = self.target.angle_to_point(self.ball)
# robot_angle = self.robot.angle_to_point(self.ball)
# delta = remainder(robot_angle - base_angle, 360)
# delta = min(abs(delta), abs(delta - 360))
# if delta >= self.exit_angle:
# self.should_avoid = True
# if self.should_avoid:
# if delta <= self.enter_angle:
# self.should_avoid = False
super(DriveToBall, self)._step()
|
rawg/levis | refs/heads/master | examples/knapsack01.py | 1 | """
Genetic solution to the 0/1 Knapsack Problem.
usage: knapsack01.py [-h] [--data-file DATA_FILE]
[--population-size POPULATION_SIZE]
[--iterations MAX_ITERATIONS] [--mutation MUTATION_PROB]
[--crossover CROSSOVER_PROB] [--seed SEED]
[--stats-file STATS_FILE]
[--population-file POPULATION_FILE] [--verbose]
[--elitism ELITISM] [--uniform_cx] [--generate]
[--items NUM_ITEMS]
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
#from builtins import str
#from builtins import range
import math
import random
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from levis import (configuration, crossover, mutation, FitnessLoggingGA,
ProportionateGA, ElitistGA)
class Knapsack01GA(ElitistGA, ProportionateGA, FitnessLoggingGA):
"""Genetic solution to the 0/1 Knapsack Problem."""
def __init__(self, config={}):
"""Initialize a 0/1 knapsack solver.
Raises:
AttributeError: If ``items`` is not in the configuration dict.
"""
super(self.__class__, self).__init__(config)
self.max_weight = self.config.setdefault("max_weight", 15)
self.items = self.config.setdefault("items", [])
self.chromosome_length = len(self.items)
self.uniform_cx = config.setdefault("uniform_cx", False)
for i, item in enumerate(self.items):
item["mask"] = 2 ** i
def assess(self, chromosome):
"""Return a 2-tuple of the total weight and value of a chromosome."""
weight = 0
value = 0
# pylint: disable=unused-variable
for locus, _ in enumerate(self.items):
if chromosome & 2 ** locus:
item = self.items[locus]
weight += item["weight"]
value += item["value"]
return (weight, value)
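    # Illustrative note (not part of the original file): a chromosome is a
    # plain integer bit mask -- bit i set means items[i] is packed, so e.g.
    # chromosome 0b101 packs items[0] and items[2], and assess() sums their
    # weights and values.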
def score(self, chromosome):
weight, value = self.assess(chromosome)
if weight > self.max_weight:
return 0.0
return value
def create(self):
# The below generates chromosomes, but the majority are too heavy
# return random.randint(0, 2 ** self.chromosome_length - 1)
items = list(self.items)
random.shuffle(items)
weight = 0
chromosome = 0
for i in items:
if weight + i["weight"] <= self.max_weight:
weight += i["weight"]
chromosome |= i["mask"]
return chromosome
def crossover(self):
parent1 = self.select()
parent2 = self.select()
length = self.chromosome_length
if self.uniform_cx:
return crossover.uniform_bin(parent1, parent2, length)
else:
return crossover.single_point_bin(parent1, parent2, length)
def mutate(self, chromosome):
return mutation.toggle(chromosome, self.chromosome_length,
self.mutation_prob)
def chromosome_str(self, chromosome):
sack = []
for locus, _ in enumerate(self.items):
item = self.items[locus]["name"]
            packed = 0
if chromosome & 2 ** locus:
packed = 1
sack.append("%s: %i" % (item, packed))
weight, value = self.assess(chromosome)
vals = (weight, value, ", ".join(sack))
return "{weight: %0.2f, value: %0.2f, contents: [%s]}" % vals
def chromosome_repr(self, chromosome):
return bin(chromosome)[2:].zfill(self.chromosome_length)
def create_data(config={}):
"""Create data and write to a JSON file."""
max_weight = config.setdefault("max_weight", 15)
items = []
if "num_items" in config:
num_items = config["num_items"]
del config["num_items"]
else:
num_items = 32
# Generate items
digits = int(math.ceil(math.log(num_items, 16)))
fmt = "%0" + str(digits) + "X"
for i in range(0, num_items):
name = fmt % (i + 1)
weight = random.triangular(1.0, max_weight // 3, max_weight)
value = random.random() * 100
items.append({"name": name, "weight": weight, "value": value})
config["items"] = items
configuration.write_file(config)
def main():
"""Main method to parse args and run."""
defaults = {
"population_size": 10,
"max_iterations": 10,
"elitism_pct": 0.01,
"population_file": "population.log",
"stats_file": "stats.csv"
}
description = "Genetic solution to the 0/1 Knapsack Problem"
parent = [Knapsack01GA.arg_parser()]
parser = configuration.get_parser(description, "knapsack01.json", parent)
parser.add_argument("--uniform_cx", action="store_true",
help="Use uniform crossover instead of single-point")
parser.add_argument("--generate", action="store_true",
help="Generate and store problem data")
group = parser.add_argument_group("data generation options")
group.add_argument("--items", type=int, dest="num_items", default=32,
help="Number of items to generate")
args = configuration.read_args(parser)
if args["generate"]:
del args["generate"]
create_data(args)
else:
config_file = configuration.read_file(args)
config = configuration.merge(defaults, config_file, args)
solver = Knapsack01GA(config)
solver.solve()
print(solver.chromosome_str(solver.best()))
if __name__ == "__main__":
main()
|
lindycoder/fake-switches | refs/heads/master | fake_switches/brocade/command_processor/config_vrf.py | 4 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.base_command_processor import BaseCommandProcessor
class ConfigVrfCommandProcessor(BaseCommandProcessor):
def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
super(ConfigVrfCommandProcessor, self).init(switch_configuration, terminal_controller, logger, piping_processor)
self.vrf = args[0]
def get_prompt(self):
return "SSH@%s(config-vrf-%s)#" % (self.switch_configuration.name, self.vrf.name)
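    # Example prompt (illustrative values, not from the original source): with
    # a switch named "my_switch" and a vrf named "CUST-A" this renders as
    # "SSH@my_switch(config-vrf-CUST-A)#".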
def do_exit(self):
self.is_done = True
|
tailorian/Sick-Beard | refs/heads/ThePirateBay | lib/unidecode/x04f.py | 252 | data = (
'Zhong ', # 0x00
'Qi ', # 0x01
'Pei ', # 0x02
'Yu ', # 0x03
'Diao ', # 0x04
'Dun ', # 0x05
'Wen ', # 0x06
'Yi ', # 0x07
'Xin ', # 0x08
'Kang ', # 0x09
'Yi ', # 0x0a
'Ji ', # 0x0b
'Ai ', # 0x0c
'Wu ', # 0x0d
'Ji ', # 0x0e
'Fu ', # 0x0f
'Fa ', # 0x10
'Xiu ', # 0x11
'Jin ', # 0x12
'Bei ', # 0x13
'Dan ', # 0x14
'Fu ', # 0x15
'Tang ', # 0x16
'Zhong ', # 0x17
'You ', # 0x18
'Huo ', # 0x19
'Hui ', # 0x1a
'Yu ', # 0x1b
'Cui ', # 0x1c
'Chuan ', # 0x1d
'San ', # 0x1e
'Wei ', # 0x1f
'Chuan ', # 0x20
'Che ', # 0x21
'Ya ', # 0x22
'Xian ', # 0x23
'Shang ', # 0x24
'Chang ', # 0x25
'Lun ', # 0x26
'Cang ', # 0x27
'Xun ', # 0x28
'Xin ', # 0x29
'Wei ', # 0x2a
'Zhu ', # 0x2b
'[?] ', # 0x2c
'Xuan ', # 0x2d
'Nu ', # 0x2e
'Bo ', # 0x2f
'Gu ', # 0x30
'Ni ', # 0x31
'Ni ', # 0x32
'Xie ', # 0x33
'Ban ', # 0x34
'Xu ', # 0x35
'Ling ', # 0x36
'Zhou ', # 0x37
'Shen ', # 0x38
'Qu ', # 0x39
'Si ', # 0x3a
'Beng ', # 0x3b
'Si ', # 0x3c
'Jia ', # 0x3d
'Pi ', # 0x3e
'Yi ', # 0x3f
'Si ', # 0x40
'Ai ', # 0x41
'Zheng ', # 0x42
'Dian ', # 0x43
'Han ', # 0x44
'Mai ', # 0x45
'Dan ', # 0x46
'Zhu ', # 0x47
'Bu ', # 0x48
'Qu ', # 0x49
'Bi ', # 0x4a
'Shao ', # 0x4b
'Ci ', # 0x4c
'Wei ', # 0x4d
'Di ', # 0x4e
'Zhu ', # 0x4f
'Zuo ', # 0x50
'You ', # 0x51
'Yang ', # 0x52
'Ti ', # 0x53
'Zhan ', # 0x54
'He ', # 0x55
'Bi ', # 0x56
'Tuo ', # 0x57
'She ', # 0x58
'Yu ', # 0x59
'Yi ', # 0x5a
'Fo ', # 0x5b
'Zuo ', # 0x5c
'Kou ', # 0x5d
'Ning ', # 0x5e
'Tong ', # 0x5f
'Ni ', # 0x60
'Xuan ', # 0x61
'Qu ', # 0x62
'Yong ', # 0x63
'Wa ', # 0x64
'Qian ', # 0x65
'[?] ', # 0x66
'Ka ', # 0x67
'[?] ', # 0x68
'Pei ', # 0x69
'Huai ', # 0x6a
'He ', # 0x6b
'Lao ', # 0x6c
'Xiang ', # 0x6d
'Ge ', # 0x6e
'Yang ', # 0x6f
'Bai ', # 0x70
'Fa ', # 0x71
'Ming ', # 0x72
'Jia ', # 0x73
'Er ', # 0x74
'Bing ', # 0x75
'Ji ', # 0x76
'Hen ', # 0x77
'Huo ', # 0x78
'Gui ', # 0x79
'Quan ', # 0x7a
'Tiao ', # 0x7b
'Jiao ', # 0x7c
'Ci ', # 0x7d
'Yi ', # 0x7e
'Shi ', # 0x7f
'Xing ', # 0x80
'Shen ', # 0x81
'Tuo ', # 0x82
'Kan ', # 0x83
'Zhi ', # 0x84
'Gai ', # 0x85
'Lai ', # 0x86
'Yi ', # 0x87
'Chi ', # 0x88
'Kua ', # 0x89
'Guang ', # 0x8a
'Li ', # 0x8b
'Yin ', # 0x8c
'Shi ', # 0x8d
'Mi ', # 0x8e
'Zhu ', # 0x8f
'Xu ', # 0x90
'You ', # 0x91
'An ', # 0x92
'Lu ', # 0x93
'Mou ', # 0x94
'Er ', # 0x95
'Lun ', # 0x96
'Tong ', # 0x97
'Cha ', # 0x98
'Chi ', # 0x99
'Xun ', # 0x9a
'Gong ', # 0x9b
'Zhou ', # 0x9c
'Yi ', # 0x9d
'Ru ', # 0x9e
'Jian ', # 0x9f
'Xia ', # 0xa0
'Jia ', # 0xa1
'Zai ', # 0xa2
'Lu ', # 0xa3
'Ko ', # 0xa4
'Jiao ', # 0xa5
'Zhen ', # 0xa6
'Ce ', # 0xa7
'Qiao ', # 0xa8
'Kuai ', # 0xa9
'Chai ', # 0xaa
'Ning ', # 0xab
'Nong ', # 0xac
'Jin ', # 0xad
'Wu ', # 0xae
'Hou ', # 0xaf
'Jiong ', # 0xb0
'Cheng ', # 0xb1
'Zhen ', # 0xb2
'Zuo ', # 0xb3
'Chou ', # 0xb4
'Qin ', # 0xb5
'Lu ', # 0xb6
'Ju ', # 0xb7
'Shu ', # 0xb8
'Ting ', # 0xb9
'Shen ', # 0xba
'Tuo ', # 0xbb
'Bo ', # 0xbc
'Nan ', # 0xbd
'Hao ', # 0xbe
'Bian ', # 0xbf
'Tui ', # 0xc0
'Yu ', # 0xc1
'Xi ', # 0xc2
'Cu ', # 0xc3
'E ', # 0xc4
'Qiu ', # 0xc5
'Xu ', # 0xc6
'Kuang ', # 0xc7
'Ku ', # 0xc8
'Wu ', # 0xc9
'Jun ', # 0xca
'Yi ', # 0xcb
'Fu ', # 0xcc
'Lang ', # 0xcd
'Zu ', # 0xce
'Qiao ', # 0xcf
'Li ', # 0xd0
'Yong ', # 0xd1
'Hun ', # 0xd2
'Jing ', # 0xd3
'Xian ', # 0xd4
'San ', # 0xd5
'Pai ', # 0xd6
'Su ', # 0xd7
'Fu ', # 0xd8
'Xi ', # 0xd9
'Li ', # 0xda
'Fu ', # 0xdb
'Ping ', # 0xdc
'Bao ', # 0xdd
'Yu ', # 0xde
'Si ', # 0xdf
'Xia ', # 0xe0
'Xin ', # 0xe1
'Xiu ', # 0xe2
'Yu ', # 0xe3
'Ti ', # 0xe4
'Che ', # 0xe5
'Chou ', # 0xe6
'[?] ', # 0xe7
'Yan ', # 0xe8
'Lia ', # 0xe9
'Li ', # 0xea
'Lai ', # 0xeb
'[?] ', # 0xec
'Jian ', # 0xed
'Xiu ', # 0xee
'Fu ', # 0xef
'He ', # 0xf0
'Ju ', # 0xf1
'Xiao ', # 0xf2
'Pai ', # 0xf3
'Jian ', # 0xf4
'Biao ', # 0xf5
'Chu ', # 0xf6
'Fei ', # 0xf7
'Feng ', # 0xf8
'Ya ', # 0xf9
'An ', # 0xfa
'Bei ', # 0xfb
'Yu ', # 0xfc
'Xin ', # 0xfd
'Bi ', # 0xfe
'Jian ', # 0xff
)
|
vast-io/vast | refs/heads/master | aux/lz4/tests/test-lz4-versions.py | 12 | #!/usr/bin/env python3
"""Test LZ4 interoperability between versions"""
#
# Copyright (C) 2011-present, Takayuki Matsuoka
# All rights reserved.
# GPL v2 License
#
import glob
import subprocess
import filecmp
import os
import shutil
import sys
import hashlib
repo_url = 'https://github.com/lz4/lz4.git'
tmp_dir_name = 'tests/versionsTest'
make_cmd = 'make'
git_cmd = 'git'
test_dat_src = 'README.md'
test_dat = 'test_dat'
head = 'v999'
def proc(cmd_args, pipe=True, dummy=False):
if dummy:
return
if pipe:
subproc = subprocess.Popen(cmd_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
subproc = subprocess.Popen(cmd_args)
return subproc.communicate()
def make(args, pipe=True):
return proc([make_cmd] + args, pipe)
def git(args, pipe=True):
return proc([git_cmd] + args, pipe)
def get_git_tags():
stdout, stderr = git(['tag', '-l', 'r[0-9][0-9][0-9]'])
tags = stdout.decode('utf-8').split()
stdout, stderr = git(['tag', '-l', 'v[1-9].[0-9].[0-9]'])
tags += stdout.decode('utf-8').split()
return tags
# https://stackoverflow.com/a/19711609/2132223
def sha1_of_file(filepath):
with open(filepath, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
if __name__ == '__main__':
error_code = 0
base_dir = os.getcwd() + '/..' # /path/to/lz4
tmp_dir = base_dir + '/' + tmp_dir_name # /path/to/lz4/tests/versionsTest
clone_dir = tmp_dir + '/' + 'lz4' # /path/to/lz4/tests/versionsTest/lz4
programs_dir = base_dir + '/programs' # /path/to/lz4/programs
os.makedirs(tmp_dir, exist_ok=True)
# since Travis clones limited depth, we should clone full repository
if not os.path.isdir(clone_dir):
git(['clone', repo_url, clone_dir])
shutil.copy2(base_dir + '/' + test_dat_src, tmp_dir + '/' + test_dat)
# Retrieve all release tags
print('Retrieve all release tags :')
os.chdir(clone_dir)
tags = [head] + get_git_tags()
print(tags);
# Build all release lz4c and lz4c32
for tag in tags:
os.chdir(base_dir)
dst_lz4c = '{}/lz4c.{}' .format(tmp_dir, tag) # /path/to/lz4/test/lz4test/lz4c.<TAG>
dst_lz4c32 = '{}/lz4c32.{}'.format(tmp_dir, tag) # /path/to/lz4/test/lz4test/lz4c32.<TAG>
if not os.path.isfile(dst_lz4c) or not os.path.isfile(dst_lz4c32) or tag == head:
if tag != head:
r_dir = '{}/{}'.format(tmp_dir, tag) # /path/to/lz4/test/lz4test/<TAG>
os.makedirs(r_dir, exist_ok=True)
os.chdir(clone_dir)
git(['--work-tree=' + r_dir, 'checkout', tag, '--', '.'], False)
os.chdir(r_dir + '/programs') # /path/to/lz4/lz4test/<TAG>/programs
else:
os.chdir(programs_dir)
make(['clean', 'lz4c'], False)
shutil.copy2('lz4c', dst_lz4c)
make(['clean', 'lz4c32'], False)
shutil.copy2('lz4c32', dst_lz4c32)
# Compress test.dat by all released lz4c and lz4c32
print('Compress test.dat by all released lz4c and lz4c32')
os.chdir(tmp_dir)
for lz4 in glob.glob("*.lz4"):
os.remove(lz4)
for tag in tags:
proc(['./lz4c.' + tag, '-1fz', test_dat, test_dat + '_1_64_' + tag + '.lz4'])
proc(['./lz4c.' + tag, '-9fz', test_dat, test_dat + '_9_64_' + tag + '.lz4'])
proc(['./lz4c32.' + tag, '-1fz', test_dat, test_dat + '_1_32_' + tag + '.lz4'])
proc(['./lz4c32.' + tag, '-9fz', test_dat, test_dat + '_9_32_' + tag + '.lz4'])
print('Full list of compressed files')
lz4s = sorted(glob.glob('*.lz4'))
for lz4 in lz4s:
print(lz4 + ' : ' + repr(os.path.getsize(lz4)))
# Remove duplicated .lz4 files
print('')
print('Duplicated files')
lz4s = sorted(glob.glob('*.lz4'))
for i, lz4 in enumerate(lz4s):
if not os.path.isfile(lz4):
continue
for j in range(i+1, len(lz4s)):
lz4t = lz4s[j]
if not os.path.isfile(lz4t):
continue
if filecmp.cmp(lz4, lz4t):
os.remove(lz4t)
print('{} == {}'.format(lz4, lz4t))
print('Enumerate only different compressed files')
lz4s = sorted(glob.glob('*.lz4'))
for lz4 in lz4s:
print(lz4 + ' : ' + repr(os.path.getsize(lz4)) + ', ' + sha1_of_file(lz4))
# Decompress remained .lz4 files by all released lz4c and lz4c32
print('Decompression tests and verifications')
lz4s = sorted(glob.glob('*.lz4'))
for dec in glob.glob("*.dec"):
os.remove(dec)
for lz4 in lz4s:
print(lz4, end=" ")
for tag in tags:
print(tag, end=" ")
proc(['./lz4c.' + tag, '-df', lz4, lz4 + '_d64_' + tag + '.dec'])
proc(['./lz4c32.' + tag, '-df', lz4, lz4 + '_d32_' + tag + '.dec'])
print(' OK') # well, here, decompression has worked; but file is not yet verified
# Compare all '.dec' files with test_dat
decs = glob.glob('*.dec')
for dec in decs:
if not filecmp.cmp(dec, test_dat):
print('ERR : ' + dec)
error_code = 1
else:
print('OK : ' + dec)
os.remove(dec)
if error_code != 0:
print('ERROR')
sys.exit(error_code)
|
ex0hunt/redrat | refs/heads/master | common/connector.py | 1 | import configparser
import os
from redmine import Redmine
def redmine():
rootdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
config_path = os.path.join(rootdir, 'settings.conf')
config = configparser.ConfigParser()
config.read(config_path)
host = config.get('RedmineServer', 'host')
username = config.get('RedmineServer', 'username')
password = config.get('RedmineServer', 'password')
redmine = Redmine(host, username=username, password=password)
return redmine |
OmkarPathak/Python-Programs | refs/heads/master | OOP/P05_FirstProgramusingOOP.py | 1 | #Author: OMKAR PATHAK
#In this assignment we would see the use of OOP
class MaxSizeList(object):
def __init__(self, value):
self.myList = []
self.value = value
def push(self, String):
try:
String = str(String)
self.myList.append(String)
except ValueError:
print('You can only push strings!')
def getList(self):
print(self.myList[-self.value:])
if __name__ == '__main__':
a = MaxSizeList(3)
b = MaxSizeList(1)
a.push('Hey')
a.push('Hello')
a.push('Hi')
a.push('Let\'s')
a.push('Go')
b.push('Hey')
b.push('Hello')
b.push('Hi')
b.push('Let\'s')
b.push('Go')
a.getList()
b.getList()
|
csunny/blog_project | refs/heads/master | source/libs/analysis/src/8_箱形图/homework-lesson8.py | 2 | import numpy as np
import matplotlib.pyplot as plt
np.random.seed(100)
data = np.random.normal(size=(100, 5), loc=0.0, scale=1.0)
labels = ['A','B','C','D','E']
plt.boxplot(data, labels=labels,sym='o',whis=1.25)
plt.show()
|
stevenmizuno/QGIS | refs/heads/master | tests/src/python/test_qgsconditionalstyle.py | 24 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the memory layer provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nathan.Woodrow'
__date__ = '2015-08-11'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsConditionalStyle,
QgsFeature,
QgsFields,
QgsField,
QgsExpressionContextUtils
)
from qgis.testing import (start_app,
unittest,
)
from utilities import unitTestDataPath
from qgis.PyQt.QtCore import QVariant
#
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsConditionalStyle(unittest.TestCase):
def new_context(self):
feature = QgsFeature()
fields = QgsFields()
fields.append(QgsField("testfield", QVariant.Int))
feature.setFields(fields, True)
feature["testfield"] = 20
context = QgsExpressionContextUtils.createFeatureBasedContext(feature, fields)
return context
def test_MatchesReturnsTrueForSimpleMatch(self):
style = QgsConditionalStyle("@value > 10")
context = QgsExpressionContextUtils.createFeatureBasedContext(QgsFeature(), QgsFields())
assert style.matches(20, context)
def test_MatchesReturnsTrueForComplexMatch(self):
style = QgsConditionalStyle("@value > 10 and @value = 20")
context = QgsExpressionContextUtils.createFeatureBasedContext(QgsFeature(), QgsFields())
assert style.matches(20, context)
def test_MatchesTrueForFields(self):
style = QgsConditionalStyle('"testfield" = @value')
context = self.new_context()
assert style.matches(20, context)
def test_MatchingStylesReturnsListOfCorrectStyles(self):
styles = []
style = QgsConditionalStyle("@value > 10")
style.setName("1")
styles.append(style)
style = QgsConditionalStyle("@value > 10")
style.setName("2")
styles.append(style)
style = QgsConditionalStyle("@value < 5")
style.setName("3")
styles.append(style)
context = self.new_context()
out = QgsConditionalStyle.matchingConditionalStyles(styles, 20, context)
assert len(out) == 2
        assert out[0].name() == "1"
        assert out[1].name() == "2"
if __name__ == '__main__':
unittest.main()
|
janhahne/nest-simulator | refs/heads/master | pynest/nest/tests/test_sp/test_sp_manager.py | 6 | # -*- coding: utf-8 -*-
#
# test_sp_manager.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
from .utils import extract_dict_a_from_b
__author__ = 'naveau'
class TestStructuralPlasticityManager(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_INFO')
self.exclude_synapse_model = [
'stdp_dopamine_synapse',
'stdp_dopamine_synapse_lbl',
'stdp_dopamine_synapse_hpc',
'stdp_dopamine_synapse_hpc_lbl',
'gap_junction',
'gap_junction_lbl',
'diffusion_connection',
'diffusion_connection_lbl',
'rate_connection_instantaneous',
'rate_connection_instantaneous_lbl',
'rate_connection_delayed',
'rate_connection_delayed_lbl'
]
def test_register_synapses(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.SetDefaults(syn_model, {'delay': 0.5})
syn_dict = {
'synapse_model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetKernelStatus({
'min_delay': 0.1,
'max_delay': 1.0,
'structural_plasticity_synapses': {'syn1': syn_dict}
})
kernel_status = nest.GetKernelStatus(
'structural_plasticity_synapses')
self.assertIn('syn1', kernel_status)
self.assertEqual(kernel_status['syn1'], extract_dict_a_from_b(
kernel_status['syn1'], syn_dict))
def test_min_max_delay_using_default_delay(self):
nest.ResetKernel()
delay = 1.0
syn_model = 'static_synapse'
nest.SetStructuralPlasticityStatus(
{
'structural_plasticity_synapses': {
'syn1': {
'synapse_model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2',
}
}
}
)
self.assertLessEqual(nest.GetKernelStatus('min_delay'), delay)
self.assertGreaterEqual(nest.GetKernelStatus('max_delay'), delay)
def test_synapse_creation(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
syn_dict = {
'synapse_model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {'syn1': syn_dict}
})
neurons = nest.Create('iaf_psc_alpha', 2, {
'synaptic_elements': {
'SE1': {'z': 10.0, 'growth_rate': 0.0},
'SE2': {'z': 10.0, 'growth_rate': 0.0}
}
})
nest.EnableStructuralPlasticity()
nest.Simulate(10.0)
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
self.assertEqual(
20, len(nest.GetConnections(neurons, neurons, syn_model)))
break
def suite():
test_suite = unittest.makeSuite(TestStructuralPlasticityManager, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
|
flar2/ville-bulletproof | refs/heads/master | scripts/build-all.py | 1250 | #! /usr/bin/env python
# Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
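# Illustrative result (hypothetical target name, not from the original file):
# scan_configs() maps each defconfig basename minus the "_defconfig" suffix to
# its path, e.g. {'msm8960': 'arch/arm/configs/msm8960_defconfig'}.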
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
elemel/drillion | refs/heads/master | drillion/shader.py | 1 | #
# Copyright Tristam Macdonald 2008.
#
# Distributed under the Boost Software License, Version 1.0
# (see http://www.boost.org/LICENSE_1_0.txt)
#
from ctypes import *
from pyglet.gl import *
class Shader:
# vert, frag and geom take arrays of source strings
    # the arrays will be concatenated into one string by OpenGL
def __init__(self, vert = [], frag = [], geom = []):
# create the program handle
self.handle = glCreateProgram()
# we are not linked yet
self.linked = False
# create the vertex shader
self.createShader(vert, GL_VERTEX_SHADER)
# create the fragment shader
self.createShader(frag, GL_FRAGMENT_SHADER)
# the geometry shader will be the same, once pyglet supports the extension
# self.createShader(frag, GL_GEOMETRY_SHADER_EXT)
# attempt to link the program
self.link()
def createShader(self, strings, type):
count = len(strings)
# if we have no source code, ignore this shader
if count < 1:
return
# create the shader handle
shader = glCreateShader(type)
# convert the source strings into a ctypes pointer-to-char array, and upload them
# this is deep, dark, dangerous black magick - don't try stuff like this at home!
src = (c_char_p * count)(*strings)
glShaderSource(shader, count, cast(pointer(src), POINTER(POINTER(c_char))), None)
# compile the shader
glCompileShader(shader)
temp = c_int(0)
# retrieve the compile status
glGetShaderiv(shader, GL_COMPILE_STATUS, byref(temp))
# if compilation failed, print the log
if not temp:
# retrieve the log length
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(temp))
# create a buffer for the log
buffer = create_string_buffer(temp.value)
# retrieve the log text
glGetShaderInfoLog(shader, temp, None, buffer)
# print the log to the console
print buffer.value
else:
# all is well, so attach the shader to the program
glAttachShader(self.handle, shader);
def link(self):
# link the program
glLinkProgram(self.handle)
temp = c_int(0)
# retrieve the link status
glGetProgramiv(self.handle, GL_LINK_STATUS, byref(temp))
# if linking failed, print the log
if not temp:
# retrieve the log length
glGetProgramiv(self.handle, GL_INFO_LOG_LENGTH, byref(temp))
# create a buffer for the log
buffer = create_string_buffer(temp.value)
# retrieve the log text
glGetProgramInfoLog(self.handle, temp, None, buffer)
# print the log to the console
print buffer.value
else:
# all is well, so we are linked
self.linked = True
def bind(self):
# bind the program
glUseProgram(self.handle)
def unbind(self):
# unbind whatever program is currently bound - not necessarily this program,
# so this should probably be a class method instead
glUseProgram(0)
# upload a floating point uniform
# this program must be currently bound
def uniformf(self, name, *vals):
# check there are 1-4 values
if len(vals) in range(1, 5):
# select the correct function
{ 1 : glUniform1f,
2 : glUniform2f,
3 : glUniform3f,
4 : glUniform4f
# retrieve the uniform location, and set
}[len(vals)](glGetUniformLocation(self.handle, name), *vals)
# upload an integer uniform
# this program must be currently bound
def uniformi(self, name, *vals):
# check there are 1-4 values
if len(vals) in range(1, 5):
# select the correct function
{ 1 : glUniform1i,
2 : glUniform2i,
3 : glUniform3i,
4 : glUniform4i
# retrieve the uniform location, and set
}[len(vals)](glGetUniformLocation(self.handle, name), *vals)
# upload a uniform matrix
# works with matrices stored as lists,
# as well as euclid matrices
def uniform_matrixf(self, name, mat):
        # obtain the uniform location
        loc = glGetUniformLocation(self.handle, name)
        # upload the 4x4 floating point matrix
        glUniformMatrix4fv(loc, 1, False, (c_float * 16)(*mat))
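    # Illustrative usage (assumed, not part of the original source):
    #   shader = Shader(vert=[vertex_src], frag=[fragment_src])
    #   shader.bind(); shader.uniformf('fade_factor', 0.5); shader.unbind()
    # where vertex_src / fragment_src are GLSL source strings from the caller
    # and 'fade_factor' is a hypothetical uniform name.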
|
michelts/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/contrib/sitemaps/models.py | 914 | # This file intentionally left blank |