repo_name (string, 5-100 chars) | path (string, 4-251 chars) | copies (string, 990 classes) | size (string, 4-7 chars) | content (string, 499-1.05M chars) | license (string, 15 classes) |
---|---|---|---|---|---|
pasqualguerrero/django | django/core/management/commands/showmigrations.py | 438 | 4901 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
def add_arguments(self, parser):
parser.add_argument('app_labels', nargs='*',
help='App labels of applications to limit the output to.')
parser.add_argument('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.')
formats = parser.add_mutually_exclusive_group()
formats.add_argument('--list', '-l', action='store_const', dest='format', const='list',
help='Shows a list of all migrations and which are applied.')
formats.add_argument('--plan', '-p', action='store_const', dest='format', const='plan',
help='Shows all migrations in the order they will be applied.')
parser.set_defaults(format='list')
def handle(self, *args, **options):
self.verbosity = options.get('verbosity')
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
if options['format'] == "plan":
return self.show_plan(connection)
else:
return self.show_list(connection, options['app_labels'])
def show_list(self, connection, app_names=None):
"""
Shows a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
# Mark it as applied/unapplied
if plan_node in loader.applied_migrations:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)
def show_plan(self, connection):
"""
Shows all known migrations in the order they will be applied
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
targets = graph.leaf_nodes()
plan = []
seen = set()
# Generate the plan
for target in targets:
for migration in graph.forwards_plan(target):
if migration not in seen:
plan.append(graph.nodes[migration])
seen.add(migration)
# Output
def print_deps(migration):
out = []
for dep in migration.dependencies:
if dep[1] == "__first__":
roots = graph.root_nodes(dep[0])
dep = roots[0] if roots else (dep[0], "__first__")
out.append("%s.%s" % dep)
if out:
return " ... (%s)" % ", ".join(out)
return ""
for migration in plan:
deps = ""
if self.verbosity >= 2:
deps = print_deps(migration)
if (migration.app_label, migration.name) in loader.applied_migrations:
self.stdout.write("[X] %s%s" % (migration, deps))
else:
self.stdout.write("[ ] %s%s" % (migration, deps))
| bsd-3-clause |
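
The command above is normally invoked as `python manage.py showmigrations`; below is a minimal sketch of driving it programmatically through Django's call_command API. The app label 'polls' is a hypothetical placeholder, not something defined in the file above.

from django.core.management import call_command

# Equivalent to: python manage.py showmigrations polls --list
call_command('showmigrations', 'polls', format='list')

# Equivalent to: python manage.py showmigrations --plan
call_command('showmigrations', format='plan')
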
RotorWidgets/base-station | base_station/races/migrations/0002_auto_20160324_0525.py | 1 | 1978 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-24 05:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('races', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='raceheat',
name='ended_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Heat ended time'),
),
migrations.AddField(
model_name='raceheat',
name='number',
field=models.PositiveSmallIntegerField(default=1, verbose_name='Heat number'),
),
migrations.AddField(
model_name='raceheat',
name='started_time',
field=models.DateTimeField(blank=True, null=True, verbose_name='Heat started time'),
),
migrations.AlterField(
model_name='heatevent',
name='heat',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='triggered_events', to='races.RaceHeat'),
),
migrations.AlterField(
model_name='heatevent',
name='tracker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='triggered_events', to='trackers.Tracker'),
),
migrations.AlterField(
model_name='heatevent',
name='trigger',
field=models.PositiveSmallIntegerField(choices=[(0, 'Gate Trigger'), (1, 'Area Entered Trigger'), (2, 'Area Exit Trigger'), (3, 'Crash Trigger'), (4, 'Land Trigger'), (5, 'Takeoff Trigger'), (6, 'Arm Trigger'), (7, 'Disarm Trigger'), (8, 'Start Trigger'), (9, 'End Trigger')], verbose_name='trigger'),
),
migrations.AlterUniqueTogether(
name='raceheat',
unique_together=set([('number', 'event')]),
),
]
| gpl-3.0 |
dtrodrigues/nifi-minifi-cpp | docker/test/integration/MiNiFi_integration_test_driver.py | 2 | 15679 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import uuid
from pydoc import locate
from minifi.core.InputPort import InputPort
from minifi.core.DockerTestCluster import DockerTestCluster
from minifi.validators.EmptyFilesOutPutValidator import EmptyFilesOutPutValidator
from minifi.validators.NoFileOutPutValidator import NoFileOutPutValidator
from minifi.validators.SingleFileOutputValidator import SingleFileOutputValidator
from minifi.validators.MultiFileOutputValidator import MultiFileOutputValidator
from minifi.validators.SingleOrMultiFileOutputValidator import SingleOrMultiFileOutputValidator
from minifi.validators.NoContentCheckFileNumberValidator import NoContentCheckFileNumberValidator
from minifi.validators.NumFileRangeValidator import NumFileRangeValidator
from minifi.validators.SingleJSONFileOutputValidator import SingleJSONFileOutputValidator
from minifi.core.utils import decode_escaped_str
class MiNiFi_integration_test:
def __init__(self, context):
self.test_id = context.test_id
self.cluster = DockerTestCluster(context)
self.connectable_nodes = []
# Remote process groups are not connectables
self.remote_process_groups = []
self.file_system_observer = None
self.docker_directory_bindings = context.directory_bindings
self.cluster.set_directory_bindings(self.docker_directory_bindings.get_directory_bindings(self.test_id), self.docker_directory_bindings.get_data_directories(self.test_id))
def __del__(self):
self.cleanup()
def cleanup(self):
self.cluster.cleanup()
def acquire_container(self, name, engine='minifi-cpp', command=None):
return self.cluster.acquire_container(name, engine, command)
def wait_for_container_startup_to_finish(self, container_name):
startup_success = self.cluster.wait_for_startup_log(container_name, 120)
if not startup_success:
logging.error("Cluster startup failed for %s", container_name)
self.cluster.log_app_output()
return startup_success
def start_kafka_broker(self):
self.cluster.acquire_container('kafka-broker', 'kafka-broker')
self.cluster.deploy('zookeeper')
self.cluster.deploy('kafka-broker')
assert self.wait_for_container_startup_to_finish('kafka-broker')
def start_splunk(self):
self.cluster.acquire_container('splunk', 'splunk')
self.cluster.deploy('splunk')
assert self.wait_for_container_startup_to_finish('splunk')
assert self.cluster.enable_splunk_hec_indexer('splunk', 'splunk_hec_token')
def start_elasticsearch(self):
self.cluster.acquire_container('elasticsearch', 'elasticsearch')
self.cluster.deploy('elasticsearch')
assert self.wait_for_container_startup_to_finish('elasticsearch')
def start_opensearch(self):
self.cluster.acquire_container('opensearch', 'opensearch')
self.cluster.deploy('opensearch')
assert self.wait_for_container_startup_to_finish('opensearch')
def start(self, container_name=None):
if container_name is not None:
logging.info("Starting container %s", container_name)
self.cluster.deploy_flow(container_name)
assert self.wait_for_container_startup_to_finish(container_name)
return
logging.info("MiNiFi_integration_test start")
self.cluster.deploy_flow()
for container_name in self.cluster.containers:
assert self.wait_for_container_startup_to_finish(container_name)
def stop(self, container_name):
logging.info("Stopping container %s", container_name)
self.cluster.stop_flow(container_name)
def kill(self, container_name):
logging.info("Killing container %s", container_name)
self.cluster.kill_flow(container_name)
def restart(self, container_name):
logging.info("Restarting container %s", container_name)
self.cluster.restart_flow(container_name)
def add_node(self, processor):
if processor.get_name() in (elem.get_name() for elem in self.connectable_nodes):
raise Exception("Trying to register processor with an already registered name: \"%s\"" % processor.get_name())
self.connectable_nodes.append(processor)
def get_or_create_node_by_name(self, node_name):
node = self.get_node_by_name(node_name)
if node is None:
if node_name == "RemoteProcessGroup":
raise Exception("Trying to register RemoteProcessGroup without an input port or address.")
node = locate("minifi.processors." + node_name + "." + node_name)()
node.set_name(node_name)
self.add_node(node)
return node
def get_node_by_name(self, name):
for node in self.connectable_nodes:
if name == node.get_name():
return node
raise Exception("Trying to fetch unknown node: \"%s\"" % name)
def add_remote_process_group(self, remote_process_group):
if remote_process_group.get_name() in (elem.get_name() for elem in self.remote_process_groups):
raise Exception("Trying to register remote_process_group with an already registered name: \"%s\"" % remote_process_group.get_name())
self.remote_process_groups.append(remote_process_group)
def get_remote_process_group_by_name(self, name):
for node in self.remote_process_groups:
if name == node.get_name():
return node
raise Exception("Trying to fetch unknow node: \"%s\"" % name)
@staticmethod
def generate_input_port_for_remote_process_group(remote_process_group, name):
input_port_node = InputPort(name, remote_process_group)
# Generate an MD5 hash unique to the remote process group id
input_port_node.set_uuid(uuid.uuid3(remote_process_group.get_uuid(), "input_port"))
return input_port_node
def add_test_data(self, path, test_data, file_name=None):
if file_name is None:
file_name = str(uuid.uuid4())
test_data = decode_escaped_str(test_data)
self.docker_directory_bindings.put_file_to_docker_path(self.test_id, path, file_name, test_data.encode('utf-8'))
def put_test_resource(self, file_name, contents):
self.docker_directory_bindings.put_test_resource(self.test_id, file_name, contents)
def rm_out_child(self):
self.docker_directory_bindings.rm_out_child(self.test_id)
def add_file_system_observer(self, file_system_observer):
self.file_system_observer = file_system_observer
def check_for_no_files_generated(self, wait_time_in_seconds):
output_validator = NoFileOutPutValidator()
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output_after_time_period(wait_time_in_seconds, output_validator)
def check_for_single_file_with_content_generated(self, content, timeout_seconds):
output_validator = SingleFileOutputValidator(decode_escaped_str(content))
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator, 1)
def check_for_single_json_file_with_content_generated(self, content, timeout_seconds):
output_validator = SingleJSONFileOutputValidator(content)
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator, 1)
def check_for_multiple_files_generated(self, file_count, timeout_seconds, expected_content=[]):
output_validator = MultiFileOutputValidator(file_count, [decode_escaped_str(content) for content in expected_content])
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator, file_count)
def check_for_at_least_one_file_with_content_generated(self, content, timeout_seconds):
output_validator = SingleOrMultiFileOutputValidator(decode_escaped_str(content))
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator)
def check_for_num_files_generated(self, num_flowfiles, timeout_seconds):
output_validator = NoContentCheckFileNumberValidator(num_flowfiles)
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator, max(1, num_flowfiles))
def check_for_num_file_range_generated(self, min_files, max_files, wait_time_in_seconds):
output_validator = NumFileRangeValidator(min_files, max_files)
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output_after_time_period(wait_time_in_seconds, output_validator)
def check_for_an_empty_file_generated(self, timeout_seconds):
output_validator = EmptyFilesOutPutValidator()
output_validator.set_output_dir(self.file_system_observer.get_output_dir())
self.__check_output(timeout_seconds, output_validator, 1)
def __check_output_after_time_period(self, wait_time_in_seconds, output_validator):
time.sleep(wait_time_in_seconds)
self.__validate(output_validator)
def __check_output(self, timeout_seconds, output_validator, max_files=0):
result = self.file_system_observer.validate_output(timeout_seconds, output_validator, max_files)
self.cluster.log_app_output()
assert not self.cluster.segfault_happened()
assert result
def __validate(self, validator):
self.cluster.log_app_output()
assert not self.cluster.segfault_happened()
assert validator.validate()
def check_s3_server_object_data(self, s3_container_name, object_data):
assert self.cluster.check_s3_server_object_data(s3_container_name, object_data)
def check_s3_server_object_metadata(self, s3_container_name, content_type):
assert self.cluster.check_s3_server_object_metadata(s3_container_name, content_type)
def check_empty_s3_bucket(self, s3_container_name):
assert self.cluster.is_s3_bucket_empty(s3_container_name)
def check_http_proxy_access(self, http_proxy_container_name, url):
assert self.cluster.check_http_proxy_access(http_proxy_container_name, url)
def check_azure_storage_server_data(self, azure_container_name, object_data):
assert self.cluster.check_azure_storage_server_data(azure_container_name, object_data)
def wait_for_kafka_consumer_to_be_registered(self, kafka_container_name):
assert self.cluster.wait_for_kafka_consumer_to_be_registered(kafka_container_name)
def check_splunk_event(self, splunk_container_name, query):
assert self.cluster.check_splunk_event(splunk_container_name, query)
def check_splunk_event_with_attributes(self, splunk_container_name, query, attributes):
assert self.cluster.check_splunk_event_with_attributes(splunk_container_name, query, attributes)
def check_google_cloud_storage(self, gcs_container_name, content):
assert self.cluster.check_google_cloud_storage(gcs_container_name, content)
def check_empty_gcs_bucket(self, gcs_container_name):
assert self.cluster.is_gcs_bucket_empty(gcs_container_name)
def check_empty_elastic(self, elastic_container_name):
assert self.cluster.is_elasticsearch_empty(elastic_container_name)
def elastic_generate_apikey(self, elastic_container_name):
return self.cluster.elastic_generate_apikey(elastic_container_name)
def create_doc_elasticsearch(self, elastic_container_name, index_name, doc_id):
assert self.cluster.create_doc_elasticsearch(elastic_container_name, index_name, doc_id)
def check_elastic_field_value(self, elastic_container_name, index_name, doc_id, field_name, field_value):
assert self.cluster.check_elastic_field_value(elastic_container_name, index_name, doc_id, field_name, field_value)
def add_elastic_user_to_opensearch(self, container_name):
assert self.cluster.add_elastic_user_to_opensearch(container_name)
def check_minifi_log_contents(self, line, timeout_seconds=60, count=1):
self.check_container_log_contents("minifi-cpp", line, timeout_seconds, count)
def check_minifi_log_matches_regex(self, regex, timeout_seconds=60, count=1):
for container in self.cluster.containers.values():
if container.get_engine() == "minifi-cpp":
line_found = self.cluster.wait_for_app_logs_regex(container.get_name(), regex, timeout_seconds, count)
if line_found:
return
assert False
def check_container_log_contents(self, container_engine, line, timeout_seconds=60, count=1):
for container in self.cluster.containers.values():
if container.get_engine() == container_engine:
line_found = self.cluster.wait_for_app_logs(container.get_name(), line, timeout_seconds, count)
if line_found:
return
assert False
def check_minifi_log_does_not_contain(self, line, wait_time_seconds):
time.sleep(wait_time_seconds)
for container in self.cluster.containers.values():
if container.get_engine() == "minifi-cpp":
_, logs = self.cluster.get_app_log(container.get_name())
if logs is not None and 1 <= logs.decode("utf-8").count(line):
assert False
def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds):
assert self.cluster.check_query_results(postgresql_container_name, query, number_of_rows, timeout_seconds)
def check_container_log_matches_regex(self, container_name, log_pattern, timeout_seconds, count=1):
assert self.cluster.wait_for_app_logs_regex(container_name, log_pattern, timeout_seconds, count)
def add_test_blob(self, blob_name, content, with_snapshot):
self.cluster.add_test_blob(blob_name, content, with_snapshot)
def check_azure_blob_storage_is_empty(self, timeout_seconds):
assert self.cluster.check_azure_blob_storage_is_empty(timeout_seconds)
def check_azure_blob_and_snapshot_count(self, blob_and_snapshot_count, timeout_seconds):
assert self.cluster.check_azure_blob_and_snapshot_count(blob_and_snapshot_count, timeout_seconds)
def check_metric_class_on_prometheus(self, metric_class, timeout_seconds):
assert self.cluster.wait_for_metric_class_on_prometheus(metric_class, timeout_seconds)
def check_processor_metric_on_prometheus(self, metric_class, timeout_seconds, processor_name):
assert self.cluster.wait_for_processor_metric_on_prometheus(metric_class, timeout_seconds, processor_name)
| apache-2.0 |
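
A rough sketch of how this driver tends to be exercised from a test step follows; the container name, expected content, and timeout are hypothetical placeholders, and the behave-style fixtures that normally prepare `context` and register a file system observer are omitted.

# Hypothetical usage, assuming `context` (test_id, directory bindings) was
# prepared by the test framework's fixtures.
test = MiNiFi_integration_test(context)

# Acquire a MiNiFi C++ container for the flow under test
test.acquire_container('transient-minifi', engine='minifi-cpp')

# Deploy the flow and wait for startup, then validate the generated output;
# assumes a file system observer was registered via add_file_system_observer()
test.start()
test.check_for_single_file_with_content_generated('test content', timeout_seconds=60)
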
SunPower/pvfactors | pvfactors/geometry/pvground.py | 1 | 45147 | """Classes for implementation of ground geometry"""
from pvfactors import PVFactorsError
from pvfactors.config import (
MAX_X_GROUND, MIN_X_GROUND, Y_GROUND, DISTANCE_TOLERANCE, COLOR_DIC)
from pvfactors.geometry.base import (
BaseSide, PVSegment, ShadeCollection, PVSurface)
from pvfactors.geometry.timeseries import (
TsShadeCollection, TsLineCoords, TsPointCoords, TsSurface,
_get_params_at_idx)
from shapely.geometry import LineString
import numpy as np
from copy import deepcopy
class TsGround(object):
"""Timeseries ground class: this class is a vectorized version of the
PV ground geometry class, and it will store timeseries shaded ground
and illuminated ground elements, as well as pv row cut points."""
# TODO: this needs to be passed at initialization for flexibility
x_min = MIN_X_GROUND
x_max = MAX_X_GROUND
def __init__(self, shadow_elements, illum_elements, param_names=None,
flag_overlap=None, cut_point_coords=None, y_ground=None):
"""Initialize timeseries ground using list of timeseries surfaces
for the ground shadows
Parameters
----------
shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Timeseries shaded ground elements
illum_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Timeseries illuminated ground elements
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`, \
optional
List of cut point coordinates, as calculated for timeseries PV rows
(Default = None)
y_ground : float, optional
Y coordinate of flat ground [m] (Default=None)
"""
# Lists of timeseries ground elements
self.shadow_elements = shadow_elements
self.illum_elements = illum_elements
# Shade collections
list_shaded_surf = []
list_illum_surf = []
for shadow_el in shadow_elements:
list_shaded_surf += shadow_el.all_ts_surfaces
for illum_el in illum_elements:
list_illum_surf += illum_el.all_ts_surfaces
self.illum = TsShadeCollection(list_illum_surf, False)
self.shaded = TsShadeCollection(list_shaded_surf, True)
# Other ground attributes
self.param_names = [] if param_names is None else param_names
self.flag_overlap = flag_overlap
self.cut_point_coords = [] if cut_point_coords is None \
else cut_point_coords
self.y_ground = y_ground
self.shaded_params = dict.fromkeys(self.param_names)
self.illum_params = dict.fromkeys(self.param_names)
@classmethod
def from_ts_pvrows_and_angles(cls, list_ts_pvrows, alpha_vec, rotation_vec,
y_ground=Y_GROUND, flag_overlap=None,
param_names=None):
"""Create timeseries ground from list of timeseries PV rows, and
PV array and solar angles.
Parameters
----------
list_ts_pvrows : \
list of :py:class:`~pvfactors.geometry.pvrow.TsPVRow`
Timeseries PV rows to use to calculate timeseries ground shadows
alpha_vec : np.ndarray
Angle made by 2d solar vector and PV array x-axis [rad]
rotation_vec : np.ndarray
Timeseries rotation values of the PV row [deg]
y_ground : float, optional
Fixed y coordinate of flat ground [m] (Default = Y_GROUND constant)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
"""
rotation_vec = np.deg2rad(rotation_vec)
n_steps = len(rotation_vec)
# Calculate coords of ground shadows and cutting points
ground_shadow_coords = []
cut_point_coords = []
for ts_pvrow in list_ts_pvrows:
# Get pvrow coords
x1s_pvrow = ts_pvrow.full_pvrow_coords.b1.x
y1s_pvrow = ts_pvrow.full_pvrow_coords.b1.y
x2s_pvrow = ts_pvrow.full_pvrow_coords.b2.x
y2s_pvrow = ts_pvrow.full_pvrow_coords.b2.y
# --- Shadow coords calculation
# Calculate x coords of shadow
x1s_shadow = x1s_pvrow - (y1s_pvrow - y_ground) / np.tan(alpha_vec)
x2s_shadow = x2s_pvrow - (y2s_pvrow - y_ground) / np.tan(alpha_vec)
# Order x coords from left to right
x1s_on_left = x1s_shadow <= x2s_shadow
xs_left_shadow = np.where(x1s_on_left, x1s_shadow, x2s_shadow)
xs_right_shadow = np.where(x1s_on_left, x2s_shadow, x1s_shadow)
# Append shadow coords to list
ground_shadow_coords.append(
[[xs_left_shadow, y_ground * np.ones(n_steps)],
[xs_right_shadow, y_ground * np.ones(n_steps)]])
# --- Cutting points coords calculation
dx = (y1s_pvrow - y_ground) / np.tan(rotation_vec)
cut_point_coords.append(
TsPointCoords(x1s_pvrow - dx, y_ground * np.ones(n_steps)))
ground_shadow_coords = np.array(ground_shadow_coords)
return cls.from_ordered_shadows_coords(
ground_shadow_coords, flag_overlap=flag_overlap,
cut_point_coords=cut_point_coords, param_names=param_names,
y_ground=y_ground)
@classmethod
def from_ordered_shadows_coords(cls, shadow_coords, flag_overlap=None,
param_names=None, cut_point_coords=None,
y_ground=Y_GROUND):
"""Create timeseries ground from list of ground shadow coordinates.
Parameters
----------
shadow_coords : np.ndarray
List of ordered ground shadow coordinates (from left to right)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`, \
optional
List of cut point coordinates, as calculated for timeseries PV rows
(Default = None)
y_ground : float, optional
Fixed y coordinate of flat ground [m] (Default = Y_GROUND constant)
"""
# Get cut point coords if any
cut_point_coords = cut_point_coords or []
# Create shadow coordinate objects
list_shadow_coords = [TsLineCoords.from_array(coords)
for coords in shadow_coords]
# If the overlap flags were passed, make sure shadows don't overlap
if flag_overlap is not None:
if len(list_shadow_coords) > 1:
for idx, coords in enumerate(list_shadow_coords[:-1]):
coords.b2.x = np.where(flag_overlap,
list_shadow_coords[idx + 1].b1.x,
coords.b2.x)
# Create shaded ground elements
ts_shadows_elements = cls._shadow_elements_from_coords_and_cut_pts(
list_shadow_coords, cut_point_coords, param_names)
# Create illuminated ground elements
ts_illum_elements = cls._illum_elements_from_coords_and_cut_pts(
ts_shadows_elements, cut_point_coords, param_names, y_ground)
return cls(ts_shadows_elements, ts_illum_elements,
param_names=param_names, flag_overlap=flag_overlap,
cut_point_coords=cut_point_coords, y_ground=y_ground)
def at(self, idx, x_min_max=None, merge_if_flag_overlap=True,
with_cut_points=True):
"""Generate a PV ground geometry for the desired index. This will
only return non-point surfaces within the ground bounds, i.e.
surfaces that are not points, and which are within x_min and x_max.
Parameters
----------
idx : int
Index to use to generate PV ground geometry
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
merge_if_flag_overlap : bool, optional
Decide whether to merge all shadows if they overlap or not
(Default = True)
with_cut_points : bool, optional
Decide whether to include the saved cut points in the created
PV ground geometry (Default = True)
Returns
-------
pvground : :py:class:`~pvfactors.geometry.pvground.PVGround`
"""
# Get shadow elements that are not points at the given index
non_pt_shadow_elements = [
shadow_el for shadow_el in self.shadow_elements
if shadow_el.coords.length[idx] > DISTANCE_TOLERANCE]
if with_cut_points:
# We want the ground surfaces broken up at the cut points
if merge_if_flag_overlap:
# We want to merge the shadow surfaces when they overlap
list_shadow_surfaces = self._merge_shadow_surfaces(
idx, non_pt_shadow_elements)
else:
# No need to merge the shadow surfaces
list_shadow_surfaces = []
for shadow_el in non_pt_shadow_elements:
list_shadow_surfaces += \
shadow_el.non_point_surfaces_at(idx)
# Get the illuminated surfaces
list_illum_surfaces = []
for illum_el in self.illum_elements:
list_illum_surfaces += illum_el.non_point_surfaces_at(idx)
else:
# No need to break up the surfaces at the cut points
# We will need to build up new surfaces (since not done by classes)
# Get the parameters at the given index
illum_params = _get_params_at_idx(idx, self.illum_params)
shaded_params = _get_params_at_idx(idx, self.shaded_params)
if merge_if_flag_overlap and (self.flag_overlap is not None):
# We want to merge the shadow surfaces when they overlap
is_overlap = self.flag_overlap[idx]
if is_overlap and (len(non_pt_shadow_elements) > 1):
coords = [non_pt_shadow_elements[0].b1.at(idx),
non_pt_shadow_elements[-1].b2.at(idx)]
list_shadow_surfaces = [PVSurface(
coords, shaded=True, param_names=self.param_names,
params=shaded_params)]
else:
# No overlap for the given index or config
list_shadow_surfaces = [
PVSurface(shadow_el.coords.at(idx),
shaded=True, params=shaded_params,
param_names=self.param_names)
for shadow_el in non_pt_shadow_elements
if shadow_el.coords.length[idx]
> DISTANCE_TOLERANCE]
else:
# No need to merge the shadow surfaces
list_shadow_surfaces = [
PVSurface(shadow_el.coords.at(idx),
shaded=True, params=shaded_params,
param_names=self.param_names)
for shadow_el in non_pt_shadow_elements
if shadow_el.coords.length[idx]
> DISTANCE_TOLERANCE]
# Get the illuminated surfaces
list_illum_surfaces = [PVSurface(illum_el.coords.at(idx),
shaded=False, params=illum_params,
param_names=self.param_names)
for illum_el in self.illum_elements
if illum_el.coords.length[idx]
> DISTANCE_TOLERANCE]
# Pass the created lists to the PVGround builder
return PVGround.from_lists_surfaces(
list_shadow_surfaces, list_illum_surfaces,
param_names=self.param_names, y_ground=self.y_ground,
x_min_max=x_min_max)
def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
color_illum=COLOR_DIC['pvrow_illum'], x_min_max=None,
merge_if_flag_overlap=True, with_cut_points=True,
with_surface_index=False):
"""Plot timeseries ground at a certain index.
Parameters
----------
idx : int
Index to use to plot timeseries side
ax : :py:class:`matplotlib.pyplot.axes` object
Axes for plotting
color_shaded : str, optional
Color to use for plotting the shaded surfaces (Default =
COLOR_DIC['pvrow_shaded'])
color_illum : str, optional
Color to use for plotting the illuminated surfaces (Default =
COLOR_DIC['pvrow_illum'])
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
merge_if_flag_overlap : bool, optional
Decide whether to merge all shadows if they overlap or not
(Default = True)
with_cut_points : bool, optional
Decide whether to include the saved cut points in the created
PV ground geometry (Default = True)
with_surface_index : bool, optional
Plot the surfaces with their index values (Default = False)
"""
pvground = self.at(idx, x_min_max=x_min_max,
merge_if_flag_overlap=merge_if_flag_overlap,
with_cut_points=with_cut_points)
pvground.plot(ax, color_shaded=color_shaded, color_illum=color_illum,
with_index=with_surface_index)
def update_params(self, new_dict):
"""Update the illuminated parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.update_illum_params(new_dict)
self.update_shaded_params(new_dict)
def update_illum_params(self, new_dict):
"""Update the illuminated parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.illum_params.update(new_dict)
for illum_el in self.illum_elements:
illum_el.params.update(new_dict)
for surf in illum_el.surface_list:
surf.params.update(new_dict)
def update_shaded_params(self, new_dict):
"""Update the shaded parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.shaded_params.update(new_dict)
for shaded_el in self.shadow_elements:
shaded_el.params.update(new_dict)
for surf in shaded_el.surface_list:
surf.params.update(new_dict)
def get_param_weighted(self, param):
"""Get timeseries parameter for the ts ground, after weighting by
surface length.
Parameters
----------
param : str
Name of parameter
Returns
-------
np.ndarray
Weighted parameter values
"""
return self.get_param_ww(param) / self.length
def get_param_ww(self, param):
"""Get timeseries parameter from the ground's surfaces with weight,
i.e. after multiplying by the surface lengths.
Parameters
----------
param: str
Surface parameter to return
Returns
-------
np.ndarray
Timeseries parameter values multiplied by weights
Raises
------
KeyError
if the parameter name is not in the surface parameters
"""
value = 0.
for shadow_el in self.shadow_elements:
value += shadow_el.get_param_ww(param)
for illum_el in self.illum_elements:
value += illum_el.get_param_ww(param)
return value
def shadow_coords_left_of_cut_point(self, idx_cut_pt):
"""Get coordinates of shadows located on the left side of the cut point
with given index. The coordinates of the shadows will be bounded
by the coordinates of the cut point and the default minimum
ground x values.
Parameters
----------
idx_cut_pt : int
Index of the cut point of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadows on the left side of the cut point
"""
cut_pt_coords = self.cut_point_coords[idx_cut_pt]
return [shadow_el._coords_left_of_cut_point(shadow_el.coords,
cut_pt_coords)
for shadow_el in self.shadow_elements]
def shadow_coords_right_of_cut_point(self, idx_cut_pt):
"""Get coordinates of shadows located on the right side of the cut
point with given index. The coordinates of the shadows will be bounded
by the coordinates of the cut point and the default maximum
ground x values.
Parameters
----------
idx_cut_pt : int
Index of the cut point of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadows on the right side of the cut point
"""
cut_pt_coords = self.cut_point_coords[idx_cut_pt]
return [shadow_el._coords_right_of_cut_point(shadow_el.coords,
cut_pt_coords)
for shadow_el in self.shadow_elements]
def ts_surfaces_side_of_cut_point(self, side, idx_cut_pt):
"""Get a list of all the ts ground surfaces an a request side of
a cut point
Parameters
----------
side : str
Side of the cut point, either 'left' or 'right'
idx_cut_pt : int
Index of the cut point, on whose side we want to get the ground
surfaces
Returns
-------
list
List of timeseries ground surfaces on the side of the cut point
"""
list_ts_surfaces = []
for shadow_el in self.shadow_elements:
list_ts_surfaces += shadow_el.surface_dict[idx_cut_pt][side]
for illum_el in self.illum_elements:
list_ts_surfaces += illum_el.surface_dict[idx_cut_pt][side]
return list_ts_surfaces
@property
def n_ts_surfaces(self):
"""Number of timeseries surfaces in the ts ground"""
return self.n_ts_shaded_surfaces + self.n_ts_illum_surfaces
@property
def n_ts_shaded_surfaces(self):
"""Number of shaded timeseries surfaces in the ts ground"""
n_ts_surfaces = 0
for shadow_el in self.shadow_elements:
n_ts_surfaces += shadow_el.n_ts_surfaces
return n_ts_surfaces
@property
def n_ts_illum_surfaces(self):
"""Number of illuminated timeseries surfaces in the ts ground"""
n_ts_surfaces = 0
for illum_el in self.illum_elements:
n_ts_surfaces += illum_el.n_ts_surfaces
return n_ts_surfaces
@property
def all_ts_surfaces(self):
"""Number of timeseries surfaces in the ts ground"""
all_ts_surfaces = []
for shadow_el in self.shadow_elements:
all_ts_surfaces += shadow_el.all_ts_surfaces
for illum_el in self.illum_elements:
all_ts_surfaces += illum_el.all_ts_surfaces
return all_ts_surfaces
@property
def length(self):
"""Length of the timeseries ground"""
length = 0
for shadow_el in self.shadow_elements:
length += shadow_el.length
for illum_el in self.illum_elements:
length += illum_el.length
return length
@property
def shaded_length(self):
"""Length of the timeseries ground"""
length = 0
for shadow_el in self.shadow_elements:
length += shadow_el.length
return length
def non_point_shaded_surfaces_at(self, idx):
"""Return a list of shaded surfaces, that are not points
at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
list_surfaces = []
for shadow_el in self.shadow_elements:
list_surfaces += shadow_el.non_point_surfaces_at(idx)
return list_surfaces
def non_point_illum_surfaces_at(self, idx):
"""Return a list of illuminated surfaces, that are not
points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
list_surfaces = []
for illum_el in self.illum_elements:
list_surfaces += illum_el.non_point_surfaces_at(idx)
return list_surfaces
def non_point_surfaces_at(self, idx):
"""Return a list of all surfaces that are not
points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
return self.non_point_illum_surfaces_at(idx) \
+ self.non_point_shaded_surfaces_at(idx)
def n_non_point_surfaces_at(self, idx):
"""Return the number of :py:class:`~pvfactors.geometry.base.PVSurface`
that are not points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
int
"""
return len(self.non_point_surfaces_at(idx))
@staticmethod
def _shadow_elements_from_coords_and_cut_pts(
list_shadow_coords, cut_point_coords, param_names):
"""Create ground shadow elements from a list of ordered shadow
coordinates (from left to right), and the ground cut point coordinates.
Notes
-----
This method will clip the shadow coords to the limit of ground,
i.e. the shadow coordinates shouldn't be outside of the range
[MIN_X_GROUND, MAX_X_GROUND].
Parameters
----------
list_shadow_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
List of ordered ground shadow coordinates (from left to right)
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
List of cut point coordinates (from left to right)
param_names : list
List of parameter names for the ground elements
Returns
-------
list_shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Ordered list of shadow elements (from left to right)
"""
list_shadow_elements = []
# FIXME: x_min and x_max should be passed as inputs
for shadow_coords in list_shadow_coords:
shadow_coords.b1.x = np.clip(shadow_coords.b1.x, MIN_X_GROUND,
MAX_X_GROUND)
shadow_coords.b2.x = np.clip(shadow_coords.b2.x, MIN_X_GROUND,
MAX_X_GROUND)
list_shadow_elements.append(
TsGroundElement(shadow_coords,
list_ordered_cut_pts_coords=cut_point_coords,
param_names=param_names, shaded=True))
return list_shadow_elements
@staticmethod
def _illum_elements_from_coords_and_cut_pts(
list_shadow_elements, cut_pt_coords, param_names, y_ground):
"""Create ground illuminated elements from a list of ordered shadow
elements (from left to right), and the ground cut point coordinates.
This method will make sure that the illuminated ground elements are
all within the ground limits [MIN_X_GROUND, MAX_X_GROUND].
Parameters
----------
list_shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
List of ordered ground shadow elements (from left to right)
cut_pt_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
List of cut point coordinates (from left to right)
param_names : list
List of parameter names for the ground elements
y_ground : float
Fixed y coordinate of flat ground [m]
Returns
-------
list_illum_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Ordered list of illuminated ground elements (from left to right)
"""
list_illum_elements = []
if len(list_shadow_elements) == 0:
msg = """There must be at least one shadow element on the ground,
otherwise it probably means that no PV rows were created, so
there's no point in running a simulation..."""
raise PVFactorsError(msg)
n_steps = len(list_shadow_elements[0].coords.b1.x)
y_ground_vec = y_ground * np.ones(n_steps)
# FIXME: x_min and x_max should be passed as inputs
next_x = MIN_X_GROUND * np.ones(n_steps)
# Build the ground elements from left to right, starting at x_min
# and covering the ground with illuminated elements where there's no
# shadow
for shadow_element in list_shadow_elements:
x1 = next_x
x2 = shadow_element.coords.b1.x
coords = TsLineCoords.from_array(
np.array([[x1, y_ground_vec], [x2, y_ground_vec]]))
list_illum_elements.append(TsGroundElement(
coords, list_ordered_cut_pts_coords=cut_pt_coords,
param_names=param_names, shaded=False))
next_x = shadow_element.coords.b2.x
# Add the last illuminated element to the list
coords = TsLineCoords.from_array(
np.array([[next_x, y_ground_vec],
[MAX_X_GROUND * np.ones(n_steps), y_ground_vec]]))
list_illum_elements.append(TsGroundElement(
coords, list_ordered_cut_pts_coords=cut_pt_coords,
param_names=param_names, shaded=False))
return list_illum_elements
def _merge_shadow_surfaces(self, idx, non_pt_shadow_elements):
"""Merge the shadow surfaces in a list of shadow elements
at the shadow boundaries only, at a given index, but keep the shadow
surfaces broken up at the cut points.
Parameters
----------
idx : int
Index at which we want to merge the surfaces
non_pt_shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
List of non point shadow elements
Returns
-------
list_shadow_surfaces : \
list of :py:class:`~pvfactors.geometry.base.PVSurface`
List of shadow surfaces at a given index
(ordered from left to right)
"""
# TODO: check if it would be faster to merge the ground elements first,
# and then break it down with the cut points
# Decide whether to merge all shadows or not
list_shadow_surfaces = []
if self.flag_overlap is not None:
# Get the overlap flags
is_overlap = self.flag_overlap[idx]
n_shadow_elements = len(non_pt_shadow_elements)
if is_overlap and (n_shadow_elements > 1):
# If there's only one shadow, there's no point in going through this
# Now go from left to right and merge shadow surfaces
surface_to_merge = None
for i_el, shadow_el in enumerate(non_pt_shadow_elements):
surfaces = shadow_el.non_point_surfaces_at(idx)
n_surf = len(surfaces)
for i_surf, surface in enumerate(surfaces):
if i_surf == n_surf - 1:
# last surface, could also be first
if i_surf == 0:
# Need to merge with preceding if exists
if surface_to_merge is not None:
coords = [surface_to_merge.boundary[0],
surface.boundary[1]]
surface = PVSurface(
coords, shaded=True,
param_names=self.param_names,
params=surface.params,
index=surface.index)
if i_el == n_shadow_elements - 1:
# last surface of last shadow element
list_shadow_surfaces.append(surface)
else:
# keep for merging with next element
surface_to_merge = surface
elif i_surf == 0:
# first surface but definitely not last either
if surface_to_merge is not None:
coords = [surface_to_merge.boundary[0],
surface.boundary[1]]
list_shadow_surfaces.append(
PVSurface(coords, shaded=True,
param_names=self.param_names,
params=surface.params,
index=surface.index))
else:
list_shadow_surfaces.append(surface)
else:
# not first nor last surface
list_shadow_surfaces.append(surface)
else:
# There's no need to merge anything
for shadow_el in non_pt_shadow_elements:
list_shadow_surfaces += \
shadow_el.non_point_surfaces_at(idx)
else:
# There's no need to merge anything
for shadow_el in non_pt_shadow_elements:
list_shadow_surfaces += shadow_el.non_point_surfaces_at(idx)
return list_shadow_surfaces
class TsGroundElement(object):
"""Special class for timeseries ground elements: a ground element has known
timeseries coordinate boundaries, but it will also have a break down of
its area into n+1 timeseries surfaces located in the n+1 ground zones
defined by the n ground cutting points.
This is crucial to calculate view factors in a vectorized way."""
def __init__(self, coords, list_ordered_cut_pts_coords=None,
param_names=None, shaded=False):
"""Initialize the timeseries ground element using its timeseries
line coordinates, and build the timeseries surfaces for all the
cut point zones.
Parameters
----------
coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates of the ground element
list_ordered_cut_pts_coords : list, optional
List of all the cut point timeseries coordinates
(Default = [])
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
shaded : bool, optional
Flag specifying whether the element is a shadow or not (Default = False)
"""
self.coords = coords
self.param_names = param_names or []
self.params = dict.fromkeys(self.param_names)
self.shaded = shaded
self.surface_dict = None # will be necessary for view factor calcs
self.surface_list = [] # will be necessary for vf matrix formation
list_ordered_cut_pts_coords = list_ordered_cut_pts_coords or []
if len(list_ordered_cut_pts_coords) > 0:
self._create_all_ts_surfaces(list_ordered_cut_pts_coords)
self.n_ts_surfaces = len(self.surface_list)
@property
def b1(self):
"""Timeseries coordinates of first boundary point"""
return self.coords.b1
@property
def b2(self):
"""Timeseries coordinates of second boundary point"""
return self.coords.b2
@property
def centroid(self):
"""Timeseries point coordinates of the element's centroid"""
return self.coords.centroid
@property
def length(self):
"""Timeseries length of the ground"""
return self.coords.length
@property
def all_ts_surfaces(self):
"""List of all ts surfaces making up the ts ground element"""
return self.surface_list
def surfaces_at(self, idx):
"""Return list of surfaces (from left to right) at given index that
make up the ground element.
Parameters
----------
idx : int
Index of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
return [surface.at(idx)
for surface in self.surface_list]
def non_point_surfaces_at(self, idx):
"""Return list of non-point surfaces (from left to right) at given
index that make up the ground element.
Parameters
----------
idx : int
Index of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
return [surface.at(idx)
for surface in self.surface_list
if surface.length[idx] > DISTANCE_TOLERANCE]
def get_param_weighted(self, param):
"""Get timeseries parameter for the ground element, after weighting by
surface length.
Parameters
----------
param : str
Name of parameter
Returns
-------
np.ndarray
Weighted parameter values
"""
return self.get_param_ww(param) / self.length
def get_param_ww(self, param):
"""Get timeseries parameter from the ground element with weight,
i.e. after multiplying by the surface lengths.
Parameters
----------
param: str
Surface parameter to return
Returns
-------
np.ndarray
Timeseries parameter values multiplied by weights
Raises
------
KeyError
if the parameter name is not in the surface parameters
"""
value = 0.
for ts_surf in self.surface_list:
value += ts_surf.length * ts_surf.get_param(param)
return value
def _create_all_ts_surfaces(self, list_ordered_cut_pts):
"""Create all the n+1 timeseries surfaces that make up the timeseries
ground element, and which are located in the n+1 zones defined by
the n cut points.
Parameters
----------
list_ordered_cut_pts : list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
List of timeseries coordinates of all cut points, ordered from
left to right
"""
# Initialize dict
self.surface_dict = {i: {'right': [], 'left': []}
for i in range(len(list_ordered_cut_pts))}
n_cut_pts = len(list_ordered_cut_pts)
next_coords = self.coords
for idx_pt, cut_pt_coords in enumerate(list_ordered_cut_pts):
# Get coords on left of cut pt
coords_left = self._coords_left_of_cut_point(next_coords,
cut_pt_coords)
# Save that surface in the required structures
surface_left = TsSurface(coords_left, param_names=self.param_names,
shaded=self.shaded)
self.surface_list.append(surface_left)
for i in range(idx_pt, n_cut_pts):
self.surface_dict[i]['left'].append(surface_left)
for j in range(0, idx_pt):
self.surface_dict[j]['right'].append(surface_left)
next_coords = self._coords_right_of_cut_point(next_coords,
cut_pt_coords)
# Save the right most portion
next_surface = TsSurface(next_coords, param_names=self.param_names,
shaded=self.shaded)
self.surface_list.append(next_surface)
for j in range(0, n_cut_pts):
self.surface_dict[j]['right'].append(next_surface)
@staticmethod
def _coords_right_of_cut_point(coords, cut_pt_coords):
"""Calculate timeseries line coordinates that are right of the given
cut point coordinates, but still within the ground area
Parameters
----------
coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Original timeseries coordinates
cut_pt_coords :
:py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries coordinates of cut point
Returns
-------
:py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates that are located right of the cut
point
"""
coords = deepcopy(coords)
# FIXME: should be using x_min x_max inputs instead of global constant
coords.b1.x = np.maximum(coords.b1.x, cut_pt_coords.x)
coords.b1.x = np.minimum(coords.b1.x, MAX_X_GROUND)
coords.b2.x = np.maximum(coords.b2.x, cut_pt_coords.x)
coords.b2.x = np.minimum(coords.b2.x, MAX_X_GROUND)
return coords
@staticmethod
def _coords_left_of_cut_point(coords, cut_pt_coords):
"""Calculate timeseries line coordinates that are left of the given
cut point coordinates, but still within the ground area
Parameters
----------
coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Original timeseries coordinates
cut_pt_coords :
:py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
Timeseries coordinates of cut point
Returns
-------
:py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Timeseries line coordinates that are located left of the cut
point
"""
coords = deepcopy(coords)
# FIXME: should be using x_min x_max inputs instead of global constant
coords.b1.x = np.minimum(coords.b1.x, cut_pt_coords.x)
coords.b1.x = np.maximum(coords.b1.x, MIN_X_GROUND)
coords.b2.x = np.minimum(coords.b2.x, cut_pt_coords.x)
coords.b2.x = np.maximum(coords.b2.x, MIN_X_GROUND)
return coords
class PVGround(BaseSide):
"""Class that defines the ground geometry in PV arrays."""
def __init__(self, list_segments=None, original_linestring=None):
"""Initialize PV ground geometry.
Parameters
----------
list_segments : list of :py:class:`~pvfactors.geometry.base.PVSegment`, optional
List of PV segments that will constitute the ground (Default = [])
original_linestring : :py:class:`shapely.geometry.LineString`, optional
Full continuous linestring that the ground will be made of
(Default = None)
"""
list_segments = list_segments or []
self.original_linestring = original_linestring
super(PVGround, self).__init__(list_segments)
@classmethod
def as_flat(cls, x_min_max=None, shaded=False, y_ground=Y_GROUND,
param_names=None):
"""Build a horizontal flat ground surface, made of 1 PV segment.
Parameters
----------
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
shaded : bool, optional
Shaded status of the created PV surfaces (Default = False)
y_ground : float, optional
Location of flat ground on y axis in [m] (Default = Y_GROUND)
param_names : list of str, optional
Names of the surface parameters, eg reflectivity, total incident
irradiance, temperature, etc. (Default = [])
Returns
-------
PVGround object
"""
param_names = param_names or []
# Get ground boundaries
if x_min_max is None:
x_min, x_max = MIN_X_GROUND, MAX_X_GROUND
else:
x_min, x_max = x_min_max
# Create PV segment for flat ground
coords = [(x_min, y_ground), (x_max, y_ground)]
seg = PVSegment.from_linestring_coords(coords, shaded=shaded,
normal_vector=[0., 1.],
param_names=param_names)
return cls(list_segments=[seg], original_linestring=LineString(coords))
@classmethod
def from_lists_surfaces(
cls, list_shaded_surfaces, list_illum_surfaces, x_min_max=None,
y_ground=Y_GROUND, param_names=None):
"""Create ground from lists of shaded and illuminated PV surfaces.
Parameters
----------
list_shaded_surfaces : \
list of :py:class:`~pvfactors.geometry.base.PVSurface`
List of shaded ground PV surfaces
list_illum_surfaces : \
list of :py:class:`~pvfactors.geometry.base.PVSurface`
List of illuminated ground PV surfaces
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
y_ground : float, optional
Location of flat ground on y axis in [m] (Default = Y_GROUND)
param_names : list of str, optional
Names of the surface parameters, eg reflectivity, total incident
irradiance, temperature, etc. (Default = [])
Returns
-------
PVGround object
"""
param_names = param_names or []
# Get ground boundaries
if x_min_max is None:
x_min, x_max = MIN_X_GROUND, MAX_X_GROUND
else:
x_min, x_max = x_min_max
full_extent_coords = [(x_min, y_ground), (x_max, y_ground)]
# Create the shade collections
shaded_collection = ShadeCollection(
list_surfaces=list_shaded_surfaces, shaded=True,
param_names=param_names)
illum_collection = ShadeCollection(
list_surfaces=list_illum_surfaces, shaded=False,
param_names=param_names)
# Create the ground segment
segment = PVSegment(illum_collection=illum_collection,
shaded_collection=shaded_collection)
return cls(list_segments=[segment],
original_linestring=LineString(full_extent_coords))
@property
def boundary(self):
"""Boundaries of the ground's original linestring."""
return self.original_linestring.boundary
| bsd-3-clause |
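
Below is a small, hedged sketch of the two main entry points defined in this module; the coordinates, x bounds, and the 'qinc' parameter name are arbitrary placeholders rather than values prescribed by the library.

import numpy as np
from pvfactors.geometry.pvground import PVGround, TsGround

# Flat, fully illuminated ground between two arbitrary x bounds
flat_ground = PVGround.as_flat(x_min_max=(-20., 20.), param_names=['qinc'])

# Timeseries ground built from one ordered shadow over two timesteps:
# each shadow is [[x_left_vec, y_vec], [x_right_vec, y_vec]]
shadow_coords = np.array([
    [[[-3.0, -2.0], [0.0, 0.0]],
     [[1.0, 2.0], [0.0, 0.0]]],
])
ts_ground = TsGround.from_ordered_shadows_coords(shadow_coords,
                                                 param_names=['qinc'])
# Materialize a PVGround geometry for the first timestep
pv_ground_t0 = ts_ground.at(0, with_cut_points=False)
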
pasqualguerrero/django | django/contrib/auth/tokens.py | 429 | 2803 | from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
hash = salted_hmac(
self.key_salt,
self._make_hash_value(user, timestamp),
).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _make_hash_value(self, user, timestamp):
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
return (
six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp)
)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
| bsd-3-clause |
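
A short sketch of the typical make/check round trip for the generator above; the username is a placeholder and the user is assumed to already exist in the auth database.

from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import default_token_generator

# Fetch an existing user (placeholder username)
user = get_user_model().objects.get(username='alice')

# The token embeds a base36 day count plus an HMAC of the user's internal state
token = default_token_generator.make_token(user)

# Valid until the password or last_login changes, or the timestamp expires
assert default_token_generator.check_token(user, token)
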
MauHernandez/cyclope | cyclope/migrations/0026_auto__chg_field_sitesettings_font_size.py | 2 | 13275 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
def backwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
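    # Illustrative note (not part of the original migration): forwards() widens
    # SiteSettings.font_size to DecimalField(max_digits=4, decimal_places=2) and
    # backwards() restores the PositiveSmallIntegerField. Under South this is
    # applied with the usual management command, e.g.:
    #
    #     python manage.py migrate cyclope 0026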
models = {
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'})
},
'cyclope.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.menu': {
'Meta': {'object_name': 'Menu'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cyclope.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_entries'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'custom_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'menu_items'", 'to': "orm['cyclope.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cyclope.MenuItem']"}),
'persistent_layout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site_home': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"})
},
'cyclope.regionview': {
'Meta': {'object_name': 'RegionView'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'region_views'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.sitesettings': {
'Meta': {'object_name': 'SiteSettings'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'YES'", 'max_length': '4'}),
'body_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'body_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'default_layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enable_abuse_reports': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_comments_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_follow_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_ratings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_share_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'font_size': ('django.db.models.fields.DecimalField', [], {'default': '12', 'max_digits': '4', 'decimal_places': '2'}),
'global_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'head_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hide_content_icons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'moderate_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'newsletter_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['collections.Collection']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'rss_content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'symmetrical': 'False'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'AUTHOR'", 'max_length': '6'}),
'show_head_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'social_follow_services': ('jsonfield.fields.JSONField', [], {'default': '\'[["twitter","USERNAME"],["facebook","USERNAME"],["google","USERNAME"],["flickr","USERNAME"],["linkedin","USERNAME"],["vimeo","USERNAME"],["youtube","USERNAME"]]\''}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'titles_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'titles_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cyclope'] | gpl-3.0 |
ghickman/django | tests/serializers/test_natural.py | 13 | 9129 | from django.core import serializers
from django.db import connection
from django.test import TestCase
from .models import (
Child, FKAsPKNoNaturalKey, FKDataNaturalKey, NaturalKeyAnchor,
NaturalKeyThing, NaturalPKWithDefault,
)
from .tests import register_tests
class NaturalKeySerializerTests(TestCase):
pass
def natural_key_serializer_test(self, format):
# Create all the objects defined in the test data
with connection.constraint_checks_disabled():
objects = [
NaturalKeyAnchor.objects.create(id=1100, data="Natural Key Anghor"),
FKDataNaturalKey.objects.create(id=1101, data_id=1100),
FKDataNaturalKey.objects.create(id=1102, data_id=None),
]
# Serialize the test database
serialized_data = serializers.serialize(format, objects, indent=2, use_natural_foreign_keys=True)
for obj in serializers.deserialize(format, serialized_data):
obj.save()
# Assert that the deserialized data is the same
# as the original source
for obj in objects:
instance = obj.__class__.objects.get(id=obj.pk)
self.assertEqual(
obj.data, instance.data,
"Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
obj.pk, obj.data, type(obj.data), instance, type(instance.data),
)
)
def natural_key_test(self, format):
book1 = {
'data': '978-1590597255',
'title': 'The Definitive Guide to Django: Web Development Done Right',
}
book2 = {'data': '978-1590599969', 'title': 'Practical Django Projects'}
# Create the books.
adrian = NaturalKeyAnchor.objects.create(**book1)
james = NaturalKeyAnchor.objects.create(**book2)
# Serialize the books.
string_data = serializers.serialize(
format, NaturalKeyAnchor.objects.all(), indent=2,
use_natural_foreign_keys=True, use_natural_primary_keys=True,
)
# Delete one book (to prove that the natural key generation will only
# restore the primary keys of books found in the database via the
    # get_by_natural_key manager method).
james.delete()
# Deserialize and test.
books = list(serializers.deserialize(format, string_data))
self.assertEqual(len(books), 2)
self.assertEqual(books[0].object.title, book1['title'])
self.assertEqual(books[0].object.pk, adrian.pk)
self.assertEqual(books[1].object.title, book2['title'])
self.assertIsNone(books[1].object.pk)
def natural_pk_mti_test(self, format):
"""
If serializing objects in a multi-table inheritance relationship using
natural primary keys, the natural foreign key for the parent is output in
the fields of the child so it's possible to relate the child to the parent
when deserializing.
"""
child_1 = Child.objects.create(parent_data='1', child_data='1')
child_2 = Child.objects.create(parent_data='2', child_data='2')
string_data = serializers.serialize(
format,
[child_1.parent_ptr, child_2.parent_ptr, child_2, child_1],
use_natural_foreign_keys=True, use_natural_primary_keys=True,
)
child_1.delete()
child_2.delete()
for obj in serializers.deserialize(format, string_data):
obj.save()
children = Child.objects.all()
self.assertEqual(len(children), 2)
for child in children:
# If it's possible to find the superclass from the subclass and it's
# the correct superclass, it's working.
self.assertEqual(child.child_data, child.parent_data)
def forward_ref_fk_test(self, format):
t1 = NaturalKeyThing.objects.create(key='t1')
t2 = NaturalKeyThing.objects.create(key='t2', other_thing=t1)
t1.other_thing = t2
t1.save()
string_data = serializers.serialize(
format, [t1, t2], use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(format, string_data, handle_forward_references=True):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
for obj in objs_with_deferred_fields:
obj.save_deferred_fields()
t1 = NaturalKeyThing.objects.get(key='t1')
t2 = NaturalKeyThing.objects.get(key='t2')
self.assertEqual(t1.other_thing, t2)
self.assertEqual(t2.other_thing, t1)
def forward_ref_fk_with_error_test(self, format):
t1 = NaturalKeyThing.objects.create(key='t1')
t2 = NaturalKeyThing.objects.create(key='t2', other_thing=t1)
t1.other_thing = t2
t1.save()
string_data = serializers.serialize(
format, [t1], use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(format, string_data, handle_forward_references=True):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
obj = objs_with_deferred_fields[0]
msg = 'NaturalKeyThing matching query does not exist'
with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
obj.save_deferred_fields()
def forward_ref_m2m_test(self, format):
t1 = NaturalKeyThing.objects.create(key='t1')
t2 = NaturalKeyThing.objects.create(key='t2')
t3 = NaturalKeyThing.objects.create(key='t3')
t1.other_things.set([t2, t3])
string_data = serializers.serialize(
format, [t1, t2, t3], use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(format, string_data, handle_forward_references=True):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
for obj in objs_with_deferred_fields:
obj.save_deferred_fields()
t1 = NaturalKeyThing.objects.get(key='t1')
t2 = NaturalKeyThing.objects.get(key='t2')
t3 = NaturalKeyThing.objects.get(key='t3')
self.assertCountEqual(t1.other_things.all(), [t2, t3])
def forward_ref_m2m_with_error_test(self, format):
t1 = NaturalKeyThing.objects.create(key='t1')
t2 = NaturalKeyThing.objects.create(key='t2')
t3 = NaturalKeyThing.objects.create(key='t3')
t1.other_things.set([t2, t3])
t1.save()
string_data = serializers.serialize(
format, [t1, t2], use_natural_primary_keys=True,
use_natural_foreign_keys=True,
)
NaturalKeyThing.objects.all().delete()
objs_with_deferred_fields = []
for obj in serializers.deserialize(format, string_data, handle_forward_references=True):
obj.save()
if obj.deferred_fields:
objs_with_deferred_fields.append(obj)
obj = objs_with_deferred_fields[0]
msg = 'NaturalKeyThing matching query does not exist'
with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
obj.save_deferred_fields()
def pk_with_default(self, format):
"""
The deserializer works with natural keys when the primary key has a default
value.
"""
obj = NaturalPKWithDefault.objects.create(name='name')
string_data = serializers.serialize(
format, NaturalPKWithDefault.objects.all(), use_natural_foreign_keys=True,
use_natural_primary_keys=True,
)
objs = list(serializers.deserialize(format, string_data))
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0].object.pk, obj.pk)
def fk_as_pk_natural_key_not_called(self, format):
"""
The deserializer doesn't rely on natural keys when a model has a custom
primary key that is a ForeignKey.
"""
o1 = NaturalKeyAnchor.objects.create(data='978-1590599969')
o2 = FKAsPKNoNaturalKey.objects.create(pk_fk=o1)
serialized_data = serializers.serialize(format, [o1, o2])
deserialized_objects = list(serializers.deserialize(format, serialized_data))
self.assertEqual(len(deserialized_objects), 2)
for obj in deserialized_objects:
self.assertEqual(obj.object.pk, o1.pk)
# Dynamically register tests for each serializer
register_tests(NaturalKeySerializerTests, 'test_%s_natural_key_serializer', natural_key_serializer_test)
register_tests(NaturalKeySerializerTests, 'test_%s_serializer_natural_keys', natural_key_test)
register_tests(NaturalKeySerializerTests, 'test_%s_serializer_natural_pks_mti', natural_pk_mti_test)
register_tests(NaturalKeySerializerTests, 'test_%s_forward_references_fks', forward_ref_fk_test)
register_tests(NaturalKeySerializerTests, 'test_%s_forward_references_fk_errors', forward_ref_fk_with_error_test)
register_tests(NaturalKeySerializerTests, 'test_%s_forward_references_m2ms', forward_ref_m2m_test)
register_tests(NaturalKeySerializerTests, 'test_%s_forward_references_m2m_errors', forward_ref_m2m_with_error_test)
register_tests(NaturalKeySerializerTests, 'test_%s_pk_with_default', pk_with_default)
register_tests(
NaturalKeySerializerTests,
'test_%s_fk_as_pk_natural_key_not_called',
fk_as_pk_natural_key_not_called,
)
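# Illustrative sketch (not part of the original module): register_tests comes
# from .tests and is assumed to attach one concrete test method per available
# serializer format, roughly along these lines:
#
#     def register_tests(test_class, method_name, test_func, exclude=()):
#         for format_ in serializers.get_serializer_formats():
#             if format_ in exclude:
#                 continue
#             setattr(test_class, method_name % format_,
#                     lambda self, fmt=format_: test_func(self, fmt))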
| bsd-3-clause |
KevinXuxuxu/datahub_lsems | src/service/handler.py | 3 | 5474 | import hashlib
from account.manager import *
from core.db.connection import DataHubConnection
from core.db.manager import DataHubManager
from datahub import DataHub
from datahub.constants import *
from datahub.account.constants import *
'''
@author: anant bhardwaj
@date: Oct 9, 2013
DataHub Handler
'''
def construct_result_set(res):
tuples = [Tuple(
cells=[bytes(val) for val in t]) for t in res['tuples']]
field_names = [bytes(field['name']) for field in res['fields']]
field_types = [bytes(field['type']) for field in res['fields']]
return ResultSet(status=res['status'],
num_tuples=res['row_count'],
num_more_tuples=0,
tuples=tuples,
field_names=field_names,
field_types=field_types)
class DataHubHandler:
def __init__(self):
self.sessions={}
pass
def get_version(self):
return VERSION
def open_connection(self, con_params):
try:
repo_base = con_params.user
if con_params.repo_base and con_params.repo_base != '':
repo_base = con_params.repo_base
user = ''
is_app = False
if con_params.user:
user = con_params.user
DataHubConnection(
user=con_params.user,
password=hashlib.sha1(con_params.password).hexdigest(),
repo_base=repo_base)
else:
user = con_params.app_id
is_app = True
DataHubConnection(
user=con_params.app_id,
password=hashlib.sha1(con_params.app_token).hexdigest(),
repo_base=repo_base)
'''
res = DataHubManager.has_base_privilege(user, repo_base, 'CONNECT')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
'''
con = Connection(
user=user,
is_app=is_app,
repo_base=repo_base)
return con
except Exception, e:
raise DBException(message=str(e))
def create_repo(self, con, repo_name):
try:
'''
res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CREATE')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
'''
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.create_repo(repo=repo_name)
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def list_repos(self, con):
try:
'''
res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CONNECT')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
'''
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.list_repos()
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def delete_repo(self, con, repo_name, force_if_non_empty):
try:
'''
res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CREATE')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
res = DataHubManager.has_repo_privilege(con.user, con.repo_base, repo_name, 'CREATE')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
'''
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.delete_repo(repo=repo_name, force=force_if_non_empty)
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def list_tables(self, con, repo_name):
try:
'''
res = DataHubManager.has_repo_privilege(con.user, con.repo_base, repo_name, 'USAGE')
if not (res and res['tuples'][0][0]):
raise Exception('Access denied. Missing required privileges.')
'''
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.list_tables(repo=repo_name)
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def get_schema(self, con, table_name):
try:
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.get_schema(table=table_name)
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def execute_sql(self, con, query, query_params=None):
try:
manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
res = manager.execute_sql(query=query, params=query_params)
return construct_result_set(res)
except Exception, e:
raise DBException(message=str(e))
def create_account(
self, username, email, password, repo_name, app_id=None, app_token=None):
try:
account_register(
username=username,
email=email,
password=password,
repo_name=repo_name,
app_id=app_id,
app_token=app_token)
return True
except Exception, e:
raise AccountException(message=str(e))
def remove_account(self, username, app_id=None, app_token=None):
try:
account_remove(
username=username,
app_id=app_id,
app_token=app_token)
return True
except Exception, e:
raise AccountException(message=str(e))
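# Illustrative usage sketch (not part of the original module), assuming a
# Thrift-generated DataHub service module and the standard Thrift Python
# runtime; the exact generated module layout is an assumption:
#
#     from thrift.transport import TSocket, TTransport
#     from thrift.protocol import TBinaryProtocol
#     from thrift.server import TServer
#
#     handler = DataHubHandler()
#     processor = DataHub.Processor(handler)
#     server = TServer.TThreadedServer(
#         processor,
#         TSocket.TServerSocket(port=9000),
#         TTransport.TBufferedTransportFactory(),
#         TBinaryProtocol.TBinaryProtocolFactory())
#     server.serve()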
| mit |
ghickman/django | django/db/models/options.py | 13 | 36791 | import bisect
import copy
import inspect
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = (
'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
'auto_created', 'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name', 'required_db_features',
'required_db_vendor', 'base_manager_name', 'default_manager_name',
'indexes', 'constraints',
)
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = option_together[0]
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
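# Illustrative examples (not part of the original module) of the normalization
# performed by normalize_together():
#
#     normalize_together(('first_name', 'last_name'))  # -> (('first_name', 'last_name'),)
#     normalize_together([['a', 'b'], ['c', 'd']])     # -> (('a', 'b'), ('c', 'd'))
#     normalize_together(None)                         # -> ()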
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
FORWARD_PROPERTIES = {
'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
'_forward_fields_map', 'managers', 'managers_map', 'base_manager',
'default_manager',
}
REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
default_apps = apps
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.local_fields = []
self.local_many_to_many = []
self.private_fields = []
self.local_managers = []
self.base_manager_name = None
self.default_manager_name = None
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.indexes = []
self.constraints = []
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete', 'view')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = {}
self.auto_created = False
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = self.default_apps
self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# App label/class name interpolation for names of constraints and
# indexes.
if not getattr(cls._meta, 'abstract', False):
for attr_name in {'constraints', 'indexes'}:
objs = getattr(self, attr_name, [])
setattr(self, attr_name, self._format_names_with_class(cls, objs))
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
else:
self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _format_names_with_class(self, cls, objs):
"""App label/class name interpolation for object names."""
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name = obj.name % {
'app_label': cls._meta.app_label.lower(),
'class': cls.__name__.lower(),
}
new_objs.append(obj)
return new_objs
def _get_default_pk_class(self):
pk_class_path = getattr(
self.app_config,
'default_auto_field',
settings.DEFAULT_AUTO_FIELD,
)
if self.app_config and self.app_config._is_default_auto_field_overridden:
app_config_class = type(self.app_config)
source = (
f'{app_config_class.__module__}.'
f'{app_config_class.__qualname__}.default_auto_field'
)
else:
source = 'DEFAULT_AUTO_FIELD'
if not pk_class_path:
raise ImproperlyConfigured(f'{source} must not be empty.')
try:
pk_class = import_string(pk_class_path)
except ImportError as e:
msg = (
f"{source} refers to the module '{pk_class_path}' that could "
f"not be imported."
)
raise ImproperlyConfigured(msg) from e
if not issubclass(pk_class, AutoField):
raise ValueError(
f"Primary key '{pk_class_path}' referred by {source} must "
f"subclass AutoField."
)
return pk_class
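    # Illustrative note (not part of the original module): the class returned by
    # _get_default_pk_class() is controlled by settings.DEFAULT_AUTO_FIELD or by
    # an AppConfig override, e.g.
    #
    #     # settings.py
    #     DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
    #
    #     # apps.py
    #     class MyAppConfig(AppConfig):
    #         default_auto_field = 'django.db.models.BigAutoField'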
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(iter(self.parents.values()))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
pk_class = self._get_default_pk_class()
auto = pk_class(verbose_name='ID', primary_key=True, auto_created=True)
model.add_to_class('id', auto)
def add_manager(self, manager):
self.local_managers.append(manager)
self._expire_cache()
def add_field(self, field, private=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if private:
self.private_fields.append(field)
elif field.is_relation and field.many_to_many:
bisect.insort(self.local_many_to_many, field)
else:
bisect.insort(self.local_fields, field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""Return the untranslated verbose name."""
with override(None):
return str(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
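    # Illustrative note (not part of the original module): the canonical use of
    # the swappable machinery is the user model. With
    #
    #     AUTH_USER_MODEL = 'myapp.CustomUser'
    #
    # in settings, auth.User._meta.swapped returns 'myapp.CustomUser'; on a
    # project that keeps the default user model it returns None.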
@cached_property
def managers(self):
managers = []
seen_managers = set()
bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
for depth, base in enumerate(bases):
for manager in base._meta.local_managers:
if manager.name in seen_managers:
continue
manager = copy.copy(manager)
manager.model = self.model
seen_managers.add(manager.name)
managers.append((depth, manager.creation_counter, manager))
return make_immutable_fields_list(
"managers",
(m[2] for m in sorted(managers)),
)
@cached_property
def managers_map(self):
return {manager.name: manager for manager in self.managers}
@cached_property
def base_manager(self):
base_manager_name = self.base_manager_name
if not base_manager_name:
# Get the first parent's base_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
if parent._base_manager.name != '_base_manager':
base_manager_name = parent._base_manager.name
break
if base_manager_name:
try:
return self.managers_map[base_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
base_manager_name,
)
)
manager = Manager()
manager.name = '_base_manager'
manager.model = self.model
manager.auto_created = True
return manager
@cached_property
def default_manager(self):
default_manager_name = self.default_manager_name
if not default_manager_name and not self.local_managers:
# Get the first parent's default_manager_name if there's one.
for parent in self.model.mro()[1:]:
if hasattr(parent, '_meta'):
default_manager_name = parent._meta.default_manager_name
break
if default_manager_name:
try:
return self.managers_map[default_manager_name]
except KeyError:
raise ValueError(
"%s has no manager named %r" % (
self.object_name,
default_manager_name,
)
)
if self.managers:
return self.managers[0]
@cached_property
def fields(self):
"""
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not private or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
        # The third filter function is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
def is_not_an_m2m_field(f):
return not (f.is_relation and f.many_to_many)
def is_not_a_generic_relation(f):
return not (f.is_relation and f.one_to_many)
def is_not_a_generic_foreign_key(f):
return not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False)
if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
)
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent's
                # link
return self.parents[parent] or parent_link
def get_path_to_parent(self, parent):
"""
Return a list of PathInfos containing the path from the current
model to the parent model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
# Skip the chain of proxy to the concrete proxied model.
proxied_model = self.concrete_model
path = []
opts = self
for int_model in self.get_base_chain(parent):
if int_model is proxied_model:
opts = int_model._meta
else:
final_field = opts.parents[int_model]
targets = (final_field.remote_field.get_related_field(),)
opts = int_model._meta
path.append(PathInfo(
from_opts=final_field.model._meta,
to_opts=opts,
target_fields=targets,
join_field=final_field,
m2m=False,
direct=True,
filtered_relation=None,
))
return path
def get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent)
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
opts = model._meta
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if opts.abstract:
continue
fields_with_relations = (
f for f in opts._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, str):
remote_label = f.remote_field.model._meta.concrete_model._meta.label
related_objects_graph[remote_label].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta.concrete_model._meta.label]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
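    # Illustrative usage sketch (not part of the original module):
    #
    #     from django.contrib.auth.models import User
    #     [f.name for f in User._meta.get_fields()]
    #     # -> concrete fields ('id', 'password', 'username', ...), the
    #     #    many-to-many fields ('groups', 'user_permissions') and reverse
    #     #    relations contributed by other installed models.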
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = seen_models is None
if topmost_call:
seen_models = set()
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# In order to avoid list manipulation. Always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model:
fields.append(obj)
if reverse and not self.proxy:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields += self.local_fields
fields += self.local_many_to_many
# Private fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the private fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields += self.private_fields
# In order to avoid list manipulation. Always
# return a shallow copy of the results
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
@cached_property
def total_unique_constraints(self):
"""
        Return a list of total unique constraints. Useful for determining the
        set of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if isinstance(constraint, UniqueConstraint) and constraint.condition is None
]
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
@cached_property
def db_returning_fields(self):
"""
Private API intended only to be used by Django itself.
Fields to be returned after a database insert.
"""
return [
field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS)
if getattr(field, 'db_returning', False)
]
| bsd-3-clause |
omnirom/android_kernel_lge_x3 | tools/perf/scripts/python/netdev-times.py | 11266 | 15048 | # Display the flow of packets and the time spent at each processing stage.
# It helps us to investigate networking or network device behaviour.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only things related to the specified device
# debug: work with debug mode. It shows buffer status.
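# Illustrative usage (not part of the original script); the typical flow with
# perf's script framework is assumed to be:
#
#   perf script record netdev-times
#   perf script report netdev-times tx rx dev=eth0 debug
#
# where the trailing arguments (tx, rx, dev=, debug) are the options listed above.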
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
all-of-us/raw-data-repository | rdr_service/alembic/versions/f3fdb9d05ab3_bq_sync_pk_fix.py | 1 | 3626 | """bq_sync_pk_fix
Revision ID: f3fdb9d05ab3
Revises: 7d63fbc6d9ca
Create Date: 2019-08-14 12:10:16.423602
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'f3fdb9d05ab3'
down_revision = '7d63fbc6d9ca'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.execute('TRUNCATE TABLE bigquery_sync') # We want to start over now, these are some big changes.
op.add_column('bigquery_sync', sa.Column('pk_id', sa.Integer(), nullable=False))
op.execute('ALTER TABLE bigquery_sync ADD COLUMN `project_id` VARCHAR(80) NOT NULL AFTER modified')
# op.add_column('bigquery_sync', sa.Column('project_id', sa.String(length=80), nullable=True))
op.drop_constraint(u'bigquery_sync_ibfk_1', 'bigquery_sync', type_='foreignkey')
op.drop_index('ix_participant_ds_table', table_name='bigquery_sync')
op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `dataset` `dataset_id` VARCHAR(80) NOT NULL')
op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `table` `table_id` VARCHAR(80) NOT NULL')
op.create_index('ix_participant_ds_table', 'bigquery_sync', ['pk_id', 'project_id', 'dataset_id', 'table_id'], unique=False)
op.drop_column('bigquery_sync', 'participant_id')
# ### end Alembic commands ###
pass
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.execute('TRUNCATE TABLE bigquery_sync') # We want to start over now, these are some big changes.
op.drop_index('ix_participant_ds_table', table_name='bigquery_sync')
op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `dataset_id` `dataset` VARCHAR(80) NOT NULL')
op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `table_id` `table` VARCHAR(80) NOT NULL')
op.add_column('bigquery_sync',
sa.Column('participant_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))
op.create_foreign_key(u'bigquery_sync_ibfk_1', 'bigquery_sync', 'participant', ['participant_id'],
['participant_id'])
op.create_index('ix_participant_ds_table', 'bigquery_sync', ['participant_id', 'dataset', 'table'], unique=False)
op.drop_column('bigquery_sync', 'pk_id')
op.drop_column('bigquery_sync', 'project_id')
# ### end Alembic commands ###
pass
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause |
keflavich/spectral-cube | spectral_cube/tests/test_analysis_functions.py | 3 | 13651 |
import pytest
import warnings
import numpy as np
import astropy.units as u
# from astropy.modeling import models, fitting
from ..analysis_utilities import stack_spectra, fourier_shift, stack_cube
from .utilities import generate_gaussian_cube, gaussian
from ..utils import BadVelocitiesWarning
def test_shift():
amp = 1
v0 = 0 * u.m / u.s
sigma = 8
spectral_axis = np.arange(-50, 51) * u.m / u.s
true_spectrum = gaussian(spectral_axis.value,
amp, v0.value, sigma)
# Shift is an integer, so rolling is equivalent
rolled_spectrum = np.roll(true_spectrum, 10)
shift_spectrum = fourier_shift(true_spectrum, 10)
np.testing.assert_allclose(shift_spectrum,
rolled_spectrum,
rtol=1e-4)
# With part masked
masked_spectrum = true_spectrum.copy()
mask = np.abs(spectral_axis.value) <= 30
masked_spectrum[~mask] = np.NaN
rolled_mask = np.roll(mask, 10)
rolled_masked_spectrum = rolled_spectrum.copy()
rolled_masked_spectrum[~rolled_mask] = np.NaN
shift_spectrum = fourier_shift(masked_spectrum, 10)
np.testing.assert_allclose(shift_spectrum,
rolled_masked_spectrum,
rtol=1e-4)
def test_stacking(use_dask):
'''
Use a set of identical Gaussian profiles randomly offset to ensure the
shifted spectrum has the correct properties.
'''
amp = 1.
v0 = 0. * u.km / u.s
sigma = 8.
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, use_dask=use_dask)
true_spectrum = gaussian(test_cube.spectral_axis.value,
amp, v0.value, sigma)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=False)
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
# Now fit a Gaussian to the mean stacked profile.
# fit_vals = fit_gaussian(stacked.spectral_axis.value, stacked.value)[0]
# np.testing.assert_allclose(fit_vals, np.array([amp, v0.value, sigma]),
# atol=1e-3)
# The stacked spectrum should have the same spectral axis
np.testing.assert_allclose(stacked.spectral_axis.value,
test_cube.spectral_axis.value)
def test_cube_stacking(use_dask):
'''
Test passing a list of cubes
This test simply averages two copies of the same thing.
A more thorough test might be to verify that cubes with different frequency
supports also yield good results.
'''
amp = 1.
sigma = 8.
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, use_dask=use_dask)
test_cube1 = test_cube.with_spectral_unit(u.GHz, rest_value=1*u.GHz, velocity_convention='radio')
test_cube2 = test_cube.with_spectral_unit(u.GHz, rest_value=2*u.GHz, velocity_convention='radio')
vmin = -10*u.km/u.s
vmax = 10*u.km/u.s
# Stack two cubes
stacked = stack_cube([test_cube1, test_cube2], linelist=[1.,2.]*u.GHz,
vmin=vmin, vmax=vmax, average=np.nanmean,
convolve_beam=None, return_cutouts=False)
np.testing.assert_allclose(stacked.filled_data[:],
test_cube.spectral_slab(vmin, vmax).filled_data[:])
# Stack one cube with two frequencies, one that's out of band
stacked = stack_cube(test_cube1, linelist=[1.,2.]*u.GHz,
vmin=vmin, vmax=vmax, average=np.nanmean,
convolve_beam=None, return_cutouts=False)
np.testing.assert_allclose(stacked.filled_data[:],
test_cube.spectral_slab(vmin, vmax).filled_data[:])
# TODO: add tests of multiple lines in the same cube
# (this requires a different test cube setup)
def test_stacking_badvels(use_dask):
'''
Regression test for #493: don't include bad velocities when stacking
'''
amp = 1.
v0 = 0. * u.km / u.s
sigma = 8.
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, use_dask=use_dask)
true_spectrum = gaussian(test_cube.spectral_axis.value,
amp, v0.value, sigma)
test_vels[12,11] = 500*u.km/u.s
with pytest.warns(BadVelocitiesWarning,
match='Some velocities are outside the allowed range and will be'):
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=False)
# Calculate residuals (the one bad value shouldn't have caused a problem)
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
def test_stacking_reversed_specaxis(use_dask):
'''
Use a set of identical Gaussian profiles randomly offset to ensure the
shifted spectrum has the correct properties.
'''
amp = 1.
v0 = 0. * u.km / u.s
sigma = 8.
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, spec_scale=-1. * u.km / u.s, use_dask=use_dask)
true_spectrum = gaussian(test_cube.spectral_axis.value,
amp, v0.value, sigma)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=False)
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
# The stacked spectrum should have the same spectral axis
np.testing.assert_allclose(stacked.spectral_axis.value,
test_cube.spectral_axis.value)
def test_stacking_wpadding(use_dask):
'''
Use a set of identical Gaussian profiles randomly offset to ensure the
shifted spectrum has the correct properties.
'''
amp = 1.
sigma = 8.
v0 = 0. * u.km / u.s
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma, noise=noise, use_dask=use_dask)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=True)
true_spectrum = gaussian(stacked.spectral_axis.value,
amp, v0.value, sigma)
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
# Now fit a Gaussian to the mean stacked profile.
# fit_vals = fit_gaussian(stacked.spectral_axis.value, stacked.value)[0]
# np.testing.assert_allclose(fit_vals, np.array([amp, 0.0, sigma]),
# atol=1e-3)
# The spectral axis should be padded by ~25% on each side
stack_shape = int(test_cube.shape[0] * 1.5)
# This is rounded, so the shape could be +/- 1
assert (stacked.size == stack_shape) or (stacked.size == stack_shape - 1) \
or (stacked.size == stack_shape + 1)
def test_padding_direction(use_dask):
amp = 1.
sigma = 8.
v0 = 0. * u.km / u.s
noise = None
shape = (100, 2, 2)
vel_surface = np.array([[0, 5], [5, 10]])
test_cube, test_vels = \
generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma, noise=noise,
vel_surface=vel_surface, use_dask=use_dask)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=True)
true_spectrum = gaussian(stacked.spectral_axis.value,
amp, v0.value, sigma)
# now check that the stacked spectral axis is right
# (all shifts are negative, so vmin < -50 km/s, should be -60?)
assert stacked.spectral_axis.min() == -60*u.km/u.s
assert stacked.spectral_axis.max() == 49*u.km/u.s
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
def test_stacking_woffset(use_dask):
'''
Use a set of identical Gaussian profiles randomly offset to ensure the
shifted spectrum has the correct properties.
Make sure the operations aren't affected by absolute velocity offsets
'''
amp = 1.
sigma = 8.
v0 = 100. * u.km / u.s
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma, noise=noise,
v0=v0.value, use_dask=use_dask)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=True)
true_spectrum = gaussian(stacked.spectral_axis.value,
amp, v0.value, sigma)
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= 1e-3
# The spectral axis should be padded by ~25% on each side
stack_shape = int(test_cube.shape[0] * 1.5)
# This is rounded, so the shape could be +/- 1
assert (stacked.size == stack_shape) or (stacked.size == stack_shape - 1) \
or (stacked.size == stack_shape + 1)
def test_stacking_shape_failure(use_dask):
"""
Regression test for #466
"""
amp = 1.
v0 = 0. * u.km / u.s
sigma = 8.
noise = None
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, use_dask=use_dask)
# make the test_vels array the wrong shape
test_vels = test_vels[:-1, :-1]
with pytest.raises(ValueError) as exc:
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=False)
assert 'Velocity surface map does not match' in exc.value.args[0]
test_vels = np.ones(shape[1:], dtype='float') + np.nan
with pytest.raises(ValueError) as exc:
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False, pad_edges=False)
assert "velocity_surface contains no finite values" in exc.value.args[0]
def test_stacking_noisy(use_dask):
# Test stack w/ S/N of 0.2
# This is cheating b/c we know the correct peak velocities, but serves as
# a good test that the stacking is working.
amp = 1.
sigma = 8.
v0 = 0 * u.km / u.s
noise = 5.0
shape = (100, 25, 25)
test_cube, test_vels = \
generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
shape=shape, use_dask=use_dask)
# Stack the spectra in the cube
stacked = \
stack_spectra(test_cube, test_vels, v0=v0,
stack_function=np.nanmean,
xy_posns=None, num_cores=1,
chunk_size=-1,
progressbar=False,
pad_edges=True)
true_spectrum = gaussian(stacked.spectral_axis.value,
amp, v0.value, sigma)
# Calculate residuals
resid = np.abs(stacked.value - true_spectrum)
assert np.std(resid) <= noise / np.sqrt(shape[1] * shape[2])
# Now fit a Gaussian to the mean stacked profile.
# fit_vals, fit_errs = fit_gaussian(stacked.spectral_axis.value,
# stacked.value)
# Check that the fit is consistent with the true values within 1-sigma err
# for fit_val, fit_err, true_val in zip(fit_vals, fit_errs,
# [amp, v0.value, sigma]):
# np.testing.assert_allclose(fit_val, true_val,
# atol=fit_err)
# def fit_gaussian(vels, data):
# g_init = models.Gaussian1D()
# fit_g = fitting.LevMarLSQFitter()
# g_fit = fit_g(g_init, vels, data)
# cov = fit_g.fit_info['param_cov']
# if cov is None:
# cov = np.zeros((3, 3)) * np.NaN
# parvals = g_fit.parameters
# parerrs = np.sqrt(np.diag(cov))
# return parvals, parerrs
| bsd-3-clause |
rob356/SickRage | lib/imdb/parser/sql/dbschema.py | 117 | 20506 | #-*- encoding: utf-8 -*-
"""
parser.sql.dbschema module (imdb.parser.sql package).
This module provides the schema used to describe the layout of the
database used by the imdb.parser.sql package; functions to create/drop
tables and indexes are also provided.
Copyright 2005-2012 Davide Alberani <da@erlug.linux.it>
2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
_dbschema_logger = logging.getLogger('imdbpy.parser.sql.dbschema')
# Placeholders for column types.
INTCOL = 1
UNICODECOL = 2
STRINGCOL = 3
_strMap = {1: 'INTCOL', 2: 'UNICODECOL', 3: 'STRINGCOL'}
class DBCol(object):
"""Define column objects."""
def __init__(self, name, kind, **params):
self.name = name
self.kind = kind
self.index = None
self.indexLen = None
# If not None, two notations are accepted: 'TableName'
# and 'TableName.ColName'; in the first case, 'id' is assumed
# as the name of the pointed column.
self.foreignKey = None
if 'index' in params:
self.index = params['index']
del params['index']
if 'indexLen' in params:
self.indexLen = params['indexLen']
del params['indexLen']
if 'foreignKey' in params:
self.foreignKey = params['foreignKey']
del params['foreignKey']
self.params = params
def __str__(self):
"""Class representation."""
s = '<DBCol %s %s' % (self.name, _strMap[self.kind])
if self.index:
s += ' INDEX'
if self.indexLen:
s += '[:%d]' % self.indexLen
if self.foreignKey:
s += ' FOREIGN'
if 'default' in self.params:
val = self.params['default']
if val is not None:
val = '"%s"' % val
s += ' DEFAULT=%s' % val
for param in self.params:
if param == 'default': continue
s += ' %s' % param.upper()
s += '>'
return s
def __repr__(self):
"""Class representation."""
s = '<DBCol(name="%s", %s' % (self.name, _strMap[self.kind])
if self.index:
s += ', index="%s"' % self.index
if self.indexLen:
s += ', indexLen=%d' % self.indexLen
if self.foreignKey:
s += ', foreignKey="%s"' % self.foreignKey
for param in self.params:
val = self.params[param]
if isinstance(val, (unicode, str)):
val = u'"%s"' % val
s += ', %s=%s' % (param, val)
s += ')>'
return s
class DBTable(object):
"""Define table objects."""
def __init__(self, name, *cols, **kwds):
self.name = name
self.cols = cols
# Default values.
self.values = kwds.get('values', {})
def __str__(self):
"""Class representation."""
return '<DBTable %s (%d cols, %d values)>' % (self.name,
len(self.cols), sum([len(v) for v in self.values.values()]))
def __repr__(self):
"""Class representation."""
s = '<DBTable(name="%s"' % self.name
col_s = ', '.join([repr(col).rstrip('>').lstrip('<')
for col in self.cols])
if col_s:
s += ', %s' % col_s
if self.values:
s += ', values=%s' % self.values
s += ')>'
return s
# Default values to insert in some tables: {'column': (list, of, values, ...)}
kindTypeDefs = {'kind': ('movie', 'tv series', 'tv movie', 'video movie',
'tv mini series', 'video game', 'episode')}
companyTypeDefs = {'kind': ('distributors', 'production companies',
'special effects companies', 'miscellaneous companies')}
infoTypeDefs = {'info': ('runtimes', 'color info', 'genres', 'languages',
'certificates', 'sound mix', 'tech info', 'countries', 'taglines',
'keywords', 'alternate versions', 'crazy credits', 'goofs',
'soundtrack', 'quotes', 'release dates', 'trivia', 'locations',
'mini biography', 'birth notes', 'birth date', 'height',
'death date', 'spouse', 'other works', 'birth name',
'salary history', 'nick names', 'books', 'agent address',
'biographical movies', 'portrayed in', 'where now', 'trade mark',
'interviews', 'article', 'magazine cover photo', 'pictorial',
'death notes', 'LD disc format', 'LD year', 'LD digital sound',
'LD official retail price', 'LD frequency response', 'LD pressing plant',
'LD length', 'LD language', 'LD review', 'LD spaciality', 'LD release date',
'LD production country', 'LD contrast', 'LD color rendition',
'LD picture format', 'LD video noise', 'LD video artifacts',
'LD release country', 'LD sharpness', 'LD dynamic range',
'LD audio noise', 'LD color information', 'LD group genre',
'LD quality program', 'LD close captions-teletext-ld-g',
'LD category', 'LD analog left', 'LD certification',
'LD audio quality', 'LD video quality', 'LD aspect ratio',
'LD analog right', 'LD additional information',
'LD number of chapter stops', 'LD dialogue intellegibility',
'LD disc size', 'LD master format', 'LD subtitles',
'LD status of availablility', 'LD quality of source',
'LD number of sides', 'LD video standard', 'LD supplement',
'LD original title', 'LD sound encoding', 'LD number', 'LD label',
'LD catalog number', 'LD laserdisc title', 'screenplay-teleplay',
'novel', 'adaption', 'book', 'production process protocol',
'printed media reviews', 'essays', 'other literature', 'mpaa',
'plot', 'votes distribution', 'votes', 'rating',
'production dates', 'copyright holder', 'filming dates', 'budget',
'weekend gross', 'gross', 'opening weekend', 'rentals',
'admissions', 'studios', 'top 250 rank', 'bottom 10 rank')}
compCastTypeDefs = {'kind': ('cast', 'crew', 'complete', 'complete+verified')}
linkTypeDefs = {'link': ('follows', 'followed by', 'remake of', 'remade as',
'references', 'referenced in', 'spoofs', 'spoofed in',
'features', 'featured in', 'spin off from', 'spin off',
'version of', 'similar to', 'edited into',
'edited from', 'alternate language version of',
'unknown link')}
roleTypeDefs = {'role': ('actor', 'actress', 'producer', 'writer',
'cinematographer', 'composer', 'costume designer',
'director', 'editor', 'miscellaneous crew',
'production designer', 'guest')}
# Schema of tables in our database.
# XXX: Foreign keys can be used to create constraints between tables,
# but they create indexes in the database, and this
# means poor performance at insert time.
DB_SCHEMA = [
DBTable('Name',
# namePcodeCf is the soundex of the name in the canonical format.
# namePcodeNf is the soundex of the name in the normal format, if
# different from namePcodeCf.
# surnamePcode is the soundex of the surname, if different from the
# other two values.
# The 'id' column is simply skipped by SQLObject (it's a default);
# the alternateID attribute here will be ignored by SQLAlchemy.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None, index='idx_imdb_id'),
DBCol('gender', STRINGCOL, length=1, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CharName',
# namePcodeNf is the soundex of the name in the normal format.
# surnamePcode is the soundex of the surname, if different
# from namePcodeNf.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyName',
# namePcodeNf is the soundex of the name in the normal format.
# namePcodeSf is the soundex of the name plus the country code.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('countryCode', UNICODECOL, length=255, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('namePcodeSf', STRINGCOL, length=5, default=None,
index='idx_pcodesf'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('KindType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=15, default=None, alternateID=True),
values=kindTypeDefs
),
DBTable('Title',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('title', UNICODECOL, notNone=True,
index='idx_title', indexLen=10),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('imdbID', INTCOL, default=None, index="idx_imdb_id"),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='Title'),
DBCol('seasonNr', INTCOL, default=None, index="idx_season_nr"),
DBCol('episodeNr', INTCOL, default=None, index="idx_episode_nr"),
# Maximum observed length is 44; 49 can store 5 comma-separated
# year-year pairs.
DBCol('seriesYears', STRINGCOL, length=49, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, default=None, alternateID=True),
values=companyTypeDefs
),
DBTable('AkaName',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_person',
foreignKey='Name'),
DBCol('name', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('AkaTitle',
# XXX: It's safer to set notNone to False, here.
        # aliases for akas are stored completely in the AkaTitle table;
        # this means that episodes will also set a "tv series" alias name.
# Reading the aka-title.list file it looks like there are
# episode titles with aliases to different titles for both
# the episode and the series title, while for just the series
# there are no aliases.
# E.g.:
# aka title original title
# "Series, The" (2005) {The Episode} "Other Title" (2005) {Other Title}
# But there is no:
# "Series, The" (2005) "Other Title" (2005)
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_movieid',
foreignKey='Title'),
DBCol('title', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='AkaTitle'),
DBCol('seasonNr', INTCOL, default=None),
DBCol('episodeNr', INTCOL, default=None),
DBCol('note', UNICODECOL, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('RoleType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('role', STRINGCOL, length=32, notNone=True, alternateID=True),
values=roleTypeDefs
),
DBTable('CastInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('personRoleID', INTCOL, default=None, index='idx_cid',
foreignKey='CharName'),
DBCol('note', UNICODECOL, default=None),
DBCol('nrOrder', INTCOL, default=None),
DBCol('roleID', INTCOL, notNone=True, foreignKey='RoleType')
),
DBTable('CompCastType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, notNone=True, alternateID=True),
values=compCastTypeDefs
),
DBTable('CompleteCast',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, index='idx_mid', foreignKey='Title'),
DBCol('subjectID', INTCOL, notNone=True, foreignKey='CompCastType'),
DBCol('statusID', INTCOL, notNone=True, foreignKey='CompCastType')
),
DBTable('InfoType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('info', STRINGCOL, length=32, notNone=True, alternateID=True),
values=infoTypeDefs
),
DBTable('LinkType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('link', STRINGCOL, length=32, notNone=True, alternateID=True),
values=linkTypeDefs
),
DBTable('Keyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
# XXX: can't use alternateID=True, because it would create
# a UNIQUE index; unfortunately (at least with a common
# collation like utf8_unicode_ci) MySQL will consider
# some different keywords identical - like
# "fiancée" and "fiancee".
DBCol('keyword', UNICODECOL, notNone=True,
index='idx_keyword', indexLen=5),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode')
),
DBTable('MovieKeyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('keywordID', INTCOL, notNone=True, index='idx_keywordid',
foreignKey='Keyword')
),
DBTable('MovieLink',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('linkedMovieID', INTCOL, notNone=True, foreignKey='Title'),
DBCol('linkTypeID', INTCOL, notNone=True, foreignKey='LinkType')
),
DBTable('MovieInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
),
# This table is identical to MovieInfo, except that both 'infoTypeID'
# and 'info' are indexed.
DBTable('MovieInfoIdx',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, index='idx_infotypeid',
foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True, index='idx_info', indexLen=10),
DBCol('note', UNICODECOL, default=None)
),
DBTable('MovieCompanies',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('companyID', INTCOL, notNone=True, index='idx_cid',
foreignKey='CompanyName'),
DBCol('companyTypeID', INTCOL, notNone=True, foreignKey='CompanyType'),
DBCol('note', UNICODECOL, default=None)
),
DBTable('PersonInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
)
]
# Functions to manage tables.
def dropTables(tables, ifExists=True):
"""Drop the tables."""
# In reverse order (useful to avoid errors about foreign keys).
DB_TABLES_DROP = list(tables)
DB_TABLES_DROP.reverse()
for table in DB_TABLES_DROP:
_dbschema_logger.info('dropping table %s', table._imdbpyName)
table.dropTable(ifExists)
def createTables(tables, ifNotExists=True):
"""Create the tables and insert default values."""
for table in tables:
# Create the table.
_dbschema_logger.info('creating table %s', table._imdbpyName)
table.createTable(ifNotExists)
# Insert default values, if any.
if table._imdbpySchema.values:
_dbschema_logger.info('inserting values into table %s',
table._imdbpyName)
for key in table._imdbpySchema.values:
for value in table._imdbpySchema.values[key]:
table(**{key: unicode(value)})
def createIndexes(tables, ifNotExists=True):
"""Create the indexes in the database.
Return a list of errors, if any."""
errors = []
for table in tables:
_dbschema_logger.info('creating indexes for table %s',
table._imdbpyName)
try:
table.addIndexes(ifNotExists)
except Exception, e:
errors.append(e)
continue
return errors
def createForeignKeys(tables, ifNotExists=True):
"""Create Foreign Keys.
Return a list of errors, if any."""
errors = []
mapTables = {}
for table in tables:
mapTables[table._imdbpyName] = table
for table in tables:
_dbschema_logger.info('creating foreign keys for table %s',
table._imdbpyName)
try:
table.addForeignKeys(mapTables, ifNotExists)
except Exception, e:
errors.append(e)
continue
return errors
| gpl-3.0 |
bracket/rasterizer | handsome/TileCache.py | 2 | 1374 | from .Coordinate import Coordinate
from .Pixel import Pixel
from .Tile import Tile
class TileCache:
def __init__(self, tile_shape, sample_rate=1, dtype=Pixel):
self.tiles = { }
self.tile_shape = tile_shape
self.sample_rate = sample_rate
self.dtype = dtype
def tile_origin_for_coordinate(self, coordinate):
width, height = self.tile_shape
return (
int(coordinate[0] // width * width),
int(coordinate[1] // height * height)
)
def get_tile(self, coordinate):
origin = self.tile_origin_for_coordinate(coordinate)
tile = self.tiles.get(origin)
if tile is not None:
return tile
tile = Tile(origin, self.tile_shape, self.sample_rate, self.dtype)
self.tiles[origin] = tile
return tile
def get_tiles_for_bounds(self, bounds):
width, height = self.tile_shape
left, bottom = self.tile_origin_for_coordinate((bounds.left, bounds.bottom))
right, top = self.tile_origin_for_coordinate((bounds.right + width, bounds.top + height))
for x in range(left, right, width):
for y in range(bottom, top, height):
yield self.get_tile((x, y))
def composite_into(self, target):
for source in self.tiles.values():
target.composite_from(source)
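# Usage sketch (illustrative values; assumes the Tile class imported above
# provides composite_from(), which composite_into() relies on):
#
#   cache = TileCache(tile_shape=(64, 64), sample_rate=4)
#   tile = cache.get_tile((130, 70))    # lazily creates the tile with origin (128, 64)
#   cache.composite_into(target_tile)   # target_tile: a hypothetical destination Tile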
| bsd-2-clause |
AutoGentoo/AutoGentoo | autogentoo/network/__init__.py | 2 | 1109 | import struct
import sys
from typing import Union, Optional, Dict
from .autogentoo_network import Message, TCPServer, send_message
def build_message(
token: int, *args: Union[int, float], **kwargs: Optional[bytes]
) -> Message:
if len(args) > 6:
raise TypeError("Message only supports up to 6 parameters")
def convert_to_bin(s: Union[float, int]) -> bytes:
if isinstance(s, float):
return struct.pack("d", s)
elif isinstance(s, int):
return s.to_bytes(8, signed=False, byteorder=sys.byteorder)
return b""
parsed_args = []
for arg in args:
parsed_args.append(convert_to_bin(arg))
for i in range(6 - len(args)):
parsed_args.append(convert_to_bin(0))
data_param: Optional[bytes] = None
if "data" in kwargs:
data_param = kwargs["data"]
return Message(
(
token,
parsed_args[0],
parsed_args[1],
parsed_args[2],
parsed_args[3],
parsed_args[4],
parsed_args[5],
data_param,
)
)
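# Usage sketch (hypothetical token value and payload; Message itself comes from
# the compiled autogentoo_network extension, so only build_message is shown):
#
#   msg = build_message(0x01, 42, 3.14, data=b"payload")
#
# Integers are packed as unsigned 8-byte values, floats as 8-byte doubles, and
# any of the six parameter slots not supplied are zero-filled.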
| gpl-3.0 |
ActiveState/code | recipes/Python/259112_Improved_Gray_Scale_Quantization/recipe-259112.py | 1 | 2796 | """
Improved Gray Scale (IGS) Quantization implementation
IGS codes are used for the elimination of false contouring in images,
and image compression.
This Python program generates the IGS codes for a set of input
gray level values.
(c) 2003 Premshree Pillai (24/12/03)
http://www.qiksearch.com/
"""
import sys
def dec2bin(num):
out = []
while(num != 0):
out.append(num % 2)
num = num / 2
out = pad(out)
return out
def pad(the_list):
while(len(the_list) != pix_depth):
the_list.append(0)
return the_list
def bin2dec(the_list):
sum = 0
i = 0
while(i < len(the_list)):
sum = sum + pow(2,i) * the_list[i]
i = i + 1
return sum
def rev(the_list):
i = 0
while(i < len(the_list)/2):
temp = the_list[i]
the_list[i] = the_list[len(the_list) - i - 1]
the_list[len(the_list) - i - 1] = temp
i = i + 1
return the_list
def extractHigherBits(the_list):
out = []
i = 0
while(len(out) != igs_len):
out.append(the_list[len(the_list) - 1 - i])
i = i + 1
return(rev(out))
def extractLowerBits(the_list):
out = []
i = 0
while(len(out) != igs_len):
out.append(the_list[i])
i = i + 1
return(out)
def add(list1,list2):
out = []
carry = 0
i = 0
while(i < len(list1)):
out.append(list1[len(list1) - 1 - i] ^ list2[len(list1) - 1 - i] ^ carry)
if(list1[len(list1) - 1 - i] == list2[len(list1) - 1 - i] == 1):
carry = 1
else:
carry = 0
i = i + 1
return rev(out)
def allOnes(the_list):
if(0 in the_list):
return 0
else:
return 1
def main():
global pix_depth,igs_len
pix_depth = int(raw_input("Enter pixel depth (i.e., bits per pixel): "))
igs_len = pix_depth / 2
num_pixels = int(raw_input("Enter number of pixels: "))
pixels = []
igs = []
i = 0
while(len(pixels) != num_pixels):
print "Enter pixel ",(i + 1),":"
pixels.append(int(raw_input()))
if(pixels[i] > pow(2,pix_depth) - 1):
print "With a pixel depth of", pix_depth,", maximum allowable gray level is", pow(2,pix_depth) - 1
print "Please run the program again!"
sys.exit()
pixels[i] = dec2bin(pixels[i])
i = i + 1
pixels2 = []
pixels2 = pixels
sum = []
sum = pad(sum)
sum = pixels[0]
sum = rev(sum)
igs.append(extractLowerBits(sum))
i = 1
while(len(igs) != num_pixels):
toAdd = rev(pad(extractLowerBits(rev(sum))))
if(not(allOnes(extractHigherBits(pixels2[i - 1])))):
sum = add(rev(pixels[i]),toAdd)
else:
sum = rev(pixels[i])
igs.append(extractLowerBits(sum))
i = i + 1
j = 0
print "\nDecimal\t\tGray Level\t\t\tIGS Code"
print "-------------------------------------------------------------"
while(j < len(igs)):
if(j == 0):
num = bin2dec(pixels[j])
else:
num = bin2dec(rev(pixels[j]))
print num, "\t\t", rev(pixels[j]), "\t", igs[j]
j = j + 1
main()
print "\nPress <enter> to exit..."
if(raw_input()):
exit
| mit |
ghickman/django | django/contrib/admin/templatetags/base.py | 21 | 1318 | from inspect import getfullargspec
from django.template.library import InclusionNode, parse_bits
class InclusionAdminNode(InclusionNode):
"""
Template tag that allows its template to be overridden per model, per app,
or globally.
"""
def __init__(self, parser, token, func, template_name, takes_context=True):
self.template_name = template_name
params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(func)
bits = token.split_contents()
args, kwargs = parse_bits(
parser, bits[1:], params, varargs, varkw, defaults, kwonly,
kwonly_defaults, takes_context, bits[0],
)
super().__init__(func, takes_context, args, kwargs, filename=None)
def render(self, context):
opts = context['opts']
app_label = opts.app_label.lower()
object_name = opts.object_name.lower()
# Load template for this render call. (Setting self.filename isn't
# thread-safe.)
context.render_context[self] = context.template.engine.select_template([
'admin/%s/%s/%s' % (app_label, object_name, self.template_name),
'admin/%s/%s' % (app_label, self.template_name),
'admin/%s' % self.template_name,
])
return super().render(context)
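# Registration sketch (based on how the admin's own template tags use this
# node; the tag name, function, and template below are illustrative):
#
#   @register.tag(name='submit_row')
#   def submit_row_tag(parser, token):
#       return InclusionAdminNode(parser, token, func=submit_row,
#                                 template_name='submit_line.html')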
| bsd-3-clause |
ActiveState/code | recipes/Python/578919_PythPathfinding_Binary/recipe-578919.py | 2 | 2483 | # Author: Christian Careaga (christian.careaga7@gmail.com)
# A* Pathfinding in Python (2.7)
# Please give credit if used
import numpy
from heapq import *
def heuristic(a, b):
return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
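# Note: this is the squared Euclidean distance; it is cheap and fine for this
# demo, but it can overestimate the remaining cost, so it is not an admissible
# heuristic in the strict A* sense.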
def astar(array, start, goal):
neighbors = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]
close_set = set()
came_from = {}
gscore = {start:0}
fscore = {start:heuristic(start, goal)}
oheap = []
heappush(oheap, (fscore[start], start))
while oheap:
current = heappop(oheap)[1]
if current == goal:
data = []
while current in came_from:
data.append(current)
current = came_from[current]
return data
close_set.add(current)
for i, j in neighbors:
neighbor = current[0] + i, current[1] + j
tentative_g_score = gscore[current] + heuristic(current, neighbor)
if 0 <= neighbor[0] < array.shape[0]:
if 0 <= neighbor[1] < array.shape[1]:
if array[neighbor[0]][neighbor[1]] == 1:
continue
else:
# array bound y walls
continue
else:
# array bound x walls
continue
if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):
continue
if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1]for i in oheap]:
came_from[neighbor] = current
gscore[neighbor] = tentative_g_score
fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
heappush(oheap, (fscore[neighbor], neighbor))
return False
'''Here is an example of using my algo with a numpy array:
astar(array, start, destination)
The astar function returns a list of points (the shortest path), ordered from
the goal back toward the start and excluding the start point itself.'''
nmap = numpy.array([
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,0,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,0,1,1,1,1,1,1,1,1,1,1,1,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,1,1,1,1,1,1,1,1,1,1,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
print astar(nmap, (0,0), (10,13))
| mit |
zulip/zulip | zerver/webhooks/trello/view/board_actions.py | 1 | 3921 | from typing import Mapping, Optional, Tuple
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.validator import WildValue, check_string
SUPPORTED_BOARD_ACTIONS = [
"removeMemberFromBoard",
"addMemberToBoard",
"createList",
"updateBoard",
]
REMOVE_MEMBER = "removeMemberFromBoard"
ADD_MEMBER = "addMemberToBoard"
CREATE_LIST = "createList"
CHANGE_NAME = "changeName"
TRELLO_BOARD_URL_TEMPLATE = "[{board_name}]({board_url})"
ACTIONS_TO_MESSAGE_MAPPER = {
REMOVE_MEMBER: "removed {member_name} from {board_url_template}.",
ADD_MEMBER: "added {member_name} to {board_url_template}.",
CREATE_LIST: "added {list_name} list to {board_url_template}.",
CHANGE_NAME: "renamed the board from {old_name} to {board_url_template}.",
}
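# Example of a rendered body (illustrative names): for a createList action,
# get_body() below would produce something like
# "Jane Doe added Backlog list to [Dev Board](https://trello.com/b/abc123)."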
def process_board_action(
payload: WildValue, action_type: Optional[str]
) -> Optional[Tuple[str, str]]:
action_type = get_proper_action(payload, action_type)
if action_type is not None:
return get_subject(payload), get_body(payload, action_type)
return None
def get_proper_action(payload: WildValue, action_type: Optional[str]) -> Optional[str]:
if action_type == "updateBoard":
data = get_action_data(payload)
# we don't support events for when a board's background
# is changed
if "background" in data["old"].get("prefs", {}):
return None
elif data["old"]["name"].tame(check_string):
return CHANGE_NAME
raise UnsupportedWebhookEventTypeError(action_type)
return action_type
def get_subject(payload: WildValue) -> str:
return get_action_data(payload)["board"]["name"].tame(check_string)
def get_body(payload: WildValue, action_type: str) -> str:
message_body = ACTIONS_TO_FILL_BODY_MAPPER[action_type](payload, action_type)
creator = payload["action"]["memberCreator"]["fullName"].tame(check_string)
return f"{creator} {message_body}"
def get_managed_member_body(payload: WildValue, action_type: str) -> str:
data = {
"member_name": payload["action"]["member"]["fullName"].tame(check_string),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_create_list_body(payload: WildValue, action_type: str) -> str:
data = {
"list_name": get_action_data(payload)["list"]["name"].tame(check_string),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_change_name_body(payload: WildValue, action_type: str) -> str:
data = {
"old_name": get_action_data(payload)["old"]["name"].tame(check_string),
}
return fill_appropriate_message_content(payload, action_type, data)
def fill_appropriate_message_content(
payload: WildValue, action_type: str, data: Mapping[str, str] = {}
) -> str:
data = dict(data)
if "board_url_template" not in data:
data["board_url_template"] = get_filled_board_url_template(payload)
message_body = get_message_body(action_type)
return message_body.format(**data)
def get_filled_board_url_template(payload: WildValue) -> str:
return TRELLO_BOARD_URL_TEMPLATE.format(
board_name=get_board_name(payload), board_url=get_board_url(payload)
)
def get_board_name(payload: WildValue) -> str:
return get_action_data(payload)["board"]["name"].tame(check_string)
def get_board_url(payload: WildValue) -> str:
return "https://trello.com/b/{}".format(
get_action_data(payload)["board"]["shortLink"].tame(check_string)
)
def get_message_body(action_type: str) -> str:
return ACTIONS_TO_MESSAGE_MAPPER[action_type]
def get_action_data(payload: WildValue) -> WildValue:
return payload["action"]["data"]
ACTIONS_TO_FILL_BODY_MAPPER = {
REMOVE_MEMBER: get_managed_member_body,
ADD_MEMBER: get_managed_member_body,
CREATE_LIST: get_create_list_body,
CHANGE_NAME: get_change_name_body,
}
| apache-2.0 |
ActiveState/code | recipes/Python/578010_Simple_Morse_Code_Practice/recipe-578010.py | 1 | 3174 | # MPO2x.py
#
# A DEMO, very crude, Morse Code Practice Oscillator...
# Tested on Debian 6.0.0 using Python 2.6.6 and 2.7.2 and PCLinuxOS 2009 using Python 2.5.2.
# It may well work on earlier Python versions but is untested.
#
# (C)2011-2012, B.Walker, G0LCU. Now issued as Public Domain...
#
# The device, "/dev/audio" is required for this to work. Install "oss-compat" from your
# distro's repository if you haven't got "/dev/audio". Ensure the sound system is NOT
# in use by other programs and use the OS's mixing facilities to set the levels.
#
# Copy the file to the Lib folder (/drawer/directory), or to wherever the modules
# reside, as "MPO2x.py"...
#
# For a quick way to run just use at the ">>>" prompt:-
#
# >>> import MPO2x<RETURN/ENTER>
#
# And away we go...
#
# Written in such a way that youngsters can understand what is going on.
#
# Enjoy finding simple solutions to often very difficult problems... ;o)
def main():
# Just three imports required for this DEMO.
import sys
import termios
import tty
# Set as globals, my choice... ;o)
global character
global delay
global n
character="(C)2011-2012, B.Walker, G0LCU."
delay=75
n=0
# This is a working function; something akin to the BASIC INKEY$ function...
# Reference:- http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
# Many thanks to Danny Yoo for the above code, modified to suit this program...
# In THIS FUNCTION some special keys do a "break" similar to the "Esc" key inside the program.
# Be aware of this...
def inkey():
fd=sys.stdin.fileno()
remember_attributes=termios.tcgetattr(fd)
tty.setraw(sys.stdin.fileno())
character=sys.stdin.read(1)
termios.tcsetattr(fd, termios.TCSADRAIN, remember_attributes)
return character
while 1:
# A simple clear screen and user display...
for n in range(0,32,1):
print "\n"
print "A simple crude Morse Code Practice Oscillator...\n"
print "Press the 'o', 'p' or 'Esc' keys...\n"
print "Pseudo-paddle simulation, 'o' is the 'dah' and 'p' is the 'dit'...\n"
print "(C)2011-2012, B.Walker, G0LCU. Issued as Public Domain...\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
# Open "/dev/audio" in write mode...
audio=open("/dev/audio", "wb")
# Use the "inkey()" function to get a key character.
character=inkey()
# Get out ensuring that the audio device is closed.
if character==chr(27):
audio.close()
break
# This is a VERY crude simulation of a paddle key to send your Morse Code.
# It IS quirky, but, is there a better way using standard Text Mode Python?
# It uses only the keys "o", "O", "p", "P" and "Esc"...
# Lower case is the slowest speed, upper case the fastest speed.
delay=0
if character=="p": delay=75
if character=="P": delay=50
if character=="o": delay=225
if character=="O": delay=150
# Play a crude sine-wave note at 1KHz of length "delay"...
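		# (At /dev/audio's default 8 kHz rate, the eight one-byte samples written
		# per pass approximate one cycle of a 1 kHz tone, so "delay" is roughly the
		# note length in milliseconds: 75/225 ms for 'p'/'o', 50/150 ms for 'P'/'O'.)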
for n in range(0,delay,1):
audio.write(chr(15)+chr(45)+chr(63)+chr(45)+chr(15)+chr(3)+chr(0)+chr(3))
# Ensure that the audio device is closed after each beep!
audio.close()
main()
# End of MPO2x.py DEMO...
# Enjoy finding simple solutions to often very difficult problems... ;o)
| mit |
pelya/commandergenius | project/jni/python/src/Tools/scripts/h2py.py | 55 | 5750 | #! /usr/bin/env python
# Read #define's and translate to Python code.
# Handle #include statements.
# Handle #define macros with one argument.
# Anything that isn't recognized or doesn't translate into valid
# Python is ignored.
# Without filename arguments, acts as a filter.
# If one or more filenames are given, output is written to corresponding
# filenames in the local directory, translated to all uppercase, with
# the extension replaced by ".py".
# By passing one or more options of the form "-i regular_expression"
# you can specify additional strings to be ignored. This is useful
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
# XXX To do:
# - turn trailing C comments into Python comments
# - turn C Boolean operators "&& || !" into Python "and or not"
# - what to do about #if(def)?
# - what to do about macros with multiple parameters?
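# For illustration (hypothetical inputs, based on the translation rules
# implemented below): running "h2py.py /usr/include/fcntl.h" writes FCNTL.py
# in the current directory; a line such as "#define MAX_LINES 100" is emitted
# as "MAX_LINES = 100", and a one-argument macro such as
# "#define DOUBLE(x) ((x)*2)" becomes "def DOUBLE(x): return ((x)*2)".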
import sys, re, getopt, os
p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
p_macro = re.compile(
'^[\t ]*#[\t ]*define[\t ]+'
'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([a-zA-Z0-9_/\.]+)')
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile('//.*')
ignores = [p_comment, p_cpp_comment]
p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
filedict = {}
importable = {}
try:
searchdirs=os.environ['include'].split(';')
except KeyError:
try:
searchdirs=os.environ['INCLUDE'].split(';')
except KeyError:
try:
if sys.platform.find("beos") == 0:
searchdirs=os.environ['BEINCLUDES'].split(';')
elif sys.platform.startswith("atheos"):
searchdirs=os.environ['C_INCLUDE_PATH'].split(':')
else:
raise KeyError
except KeyError:
searchdirs=['/usr/include']
def main():
global filedict
opts, args = getopt.getopt(sys.argv[1:], 'i:')
for o, a in opts:
if o == '-i':
ignores.append(re.compile(a))
if not args:
args = ['-']
for filename in args:
if filename == '-':
sys.stdout.write('# Generated by h2py from stdin\n')
process(sys.stdin, sys.stdout)
else:
fp = open(filename, 'r')
outfile = os.path.basename(filename)
i = outfile.rfind('.')
if i > 0: outfile = outfile[:i]
modname = outfile.upper()
outfile = modname + '.py'
outfp = open(outfile, 'w')
outfp.write('# Generated by h2py from %s\n' % filename)
filedict = {}
for dir in searchdirs:
if filename[:len(dir)] == dir:
filedict[filename[len(dir)+1:]] = None # no '/' trailing
importable[filename[len(dir)+1:]] = modname
break
process(fp, outfp)
outfp.close()
fp.close()
def pytify(body):
# replace ignored patterns by spaces
for p in ignores:
body = p.sub(' ', body)
# replace char literals by ord(...)
body = p_char.sub('ord(\\0)', body)
# Compute negative hexadecimal constants
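    # (For example, on a 32-bit build 0xFFFFFFFF exceeds sys.maxint and is rewritten
    #  as "(-1)", so the exec'd assignment keeps the intended signed value - an
    #  illustrative case, not part of the original comments.)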
start = 0
UMAX = 2*(sys.maxint+1)
while 1:
m = p_hex.search(body, start)
if not m: break
s,e = m.span()
val = long(body[slice(*m.span(1))], 16)
if val > sys.maxint:
val -= UMAX
body = body[:s] + "(" + str(val) + ")" + body[e:]
start = s + 1
return body
def process(fp, outfp, env = {}):
lineno = 0
while 1:
line = fp.readline()
if not line: break
lineno = lineno + 1
match = p_define.match(line)
if match:
# gobble up continuation lines
while line[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: break
lineno = lineno + 1
line = line + nextline
name = match.group(1)
body = line[match.end():]
body = pytify(body)
ok = 0
stmt = '%s = %s\n' % (name, body.strip())
try:
exec stmt in env
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_macro.match(line)
if match:
macro, arg = match.group(1, 2)
body = line[match.end():]
body = pytify(body)
stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
try:
exec stmt in env
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_include.match(line)
if match:
regs = match.regs
a, b = regs[1]
filename = line[a:b]
if importable.has_key(filename):
outfp.write('from %s import *\n' % importable[filename])
elif not filedict.has_key(filename):
filedict[filename] = None
inclfp = None
for dir in searchdirs:
try:
inclfp = open(dir + '/' + filename)
break
except IOError:
pass
if inclfp:
outfp.write(
'\n# Included from %s\n' % filename)
process(inclfp, outfp, env)
else:
sys.stderr.write('Warning - could not find file %s\n' %
filename)
if __name__ == '__main__':
main()
| lgpl-2.1 |
TestInABox/openstackinabox | openstackinabox/models/keystone/db/users.py | 2 | 7273 | from openstackinabox.models.keystone import exceptions
from openstackinabox.models.keystone.db.base import KeystoneDbBase
SQL_ADD_USER = '''
INSERT INTO keystone_users
(tenantid, username, email, password, apikey, enabled)
VALUES (:tenant_id, :username, :email, :password, :apikey, :enabled)
'''
SQL_DELETE_USER = '''
DELETE FROM keystone_users
WHERE tenantid = :tenant_id
AND userid = :user_id
'''
SQL_GET_MAX_USER_ID = '''
SELECT MAX(userid)
FROM keystone_users
'''
SQL_GET_USER_BY_USERNAME_AND_TENANT = '''
SELECT tenantid, userid, username, email, password, apikey, enabled
FROM keystone_users
WHERE tenantid = :tenant_id AND
username = :username
'''
SQL_GET_USER_BY_TENANT_ONLY = '''
SELECT tenantid, userid, username, email, password, apikey, enabled
FROM keystone_users
WHERE tenantid = :tenant_id
'''
SQL_GET_USER_BY_USERNAME_ONLY = '''
SELECT tenantid, userid, username, email, password, apikey, enabled
FROM keystone_users
WHERE username = :username
'''
SQL_GET_USER_BY_USERID = '''
SELECT tenantid, userid, username, email, password, apikey, enabled
FROM keystone_users
WHERE tenantid = :tenant_id AND
userid = :user_id
'''
SQL_UPDATE_USER_BY_USERID = '''
UPDATE keystone_users
SET enabled = :enabled,
email = :email,
password = :password,
apikey = :apikey
WHERE tenantid = :tenant_id AND
userid = :user_id
'''
SQL_GET_USERS_FOR_TENANT_ID = '''
SELECT tenantid, userid, username, email, password, apikey, enabled
FROM keystone_users
WHERE tenantid = :tenant_id
'''
class KeystoneDbUsers(KeystoneDbBase):
def __init__(self, master, db):
super(KeystoneDbUsers, self).__init__("KeystoneUsers", master, db)
self.__admin_user_id = None
def initialize(self):
        # Create the admin user and grant it the admin role
self.__admin_user_id = self.add(
tenant_id=self.master.tenants.admin_tenant_id,
username='system',
email='system@stackinabox',
password='stackinabox',
apikey='537461636b496e41426f78'
)
self.master.roles.add_user_role_by_id(
tenant_id=self.master.tenants.admin_tenant_id,
user_id=self.admin_user_id,
role_id=self.master.roles.admin_role_id
)
@property
def admin_user_id(self):
return self.__admin_user_id
def add(self, tenant_id=None, username=None, email=None,
password=None, apikey=None, enabled=True):
args = {
'tenant_id': tenant_id,
'username': username,
'email': email,
'password': password,
'apikey': apikey,
'enabled': self.bool_to_database(enabled)
}
dbcursor = self.database.cursor()
dbcursor.execute(SQL_ADD_USER, args)
self.database.commit()
dbcursor.execute(SQL_GET_MAX_USER_ID)
user_data = dbcursor.fetchone()
if user_data is None:
raise exceptions.KeystoneUserError('Unable to add user')
user_id = user_data[0]
self.log_debug(
'Added user {1} with user id {2} to tenant id {0}'.format(
tenant_id,
username,
user_id
)
)
return user_id
def delete(self, tenant_id=None, user_id=None):
args = {
'tenant_id': tenant_id,
'user_id': user_id
}
dbcursor = self.database.cursor()
dbcursor.execute(SQL_DELETE_USER, args)
dbcursor.fetchone()
self.database.commit()
def get_by_id(self, tenant_id=None, user_id=None):
dbcursor = self.database.cursor()
args = {
'tenant_id': tenant_id,
'user_id': user_id
}
dbcursor.execute(SQL_GET_USER_BY_USERID, args)
user_data = dbcursor.fetchone()
if user_data is None:
raise exceptions.KeystoneUnknownUserError(
'Invalid tenant_id or user_id'
)
return {
'tenant_id': user_data[0],
'user_id': user_data[1],
'username': user_data[2],
'email': user_data[3],
'password': user_data[4],
'apikey': user_data[5],
'enabled': self.bool_from_database(user_data[6])
}
def get_by_name(self, tenant_id=None, username=None):
dbcursor = self.database.cursor()
args = {
'tenant_id': tenant_id,
'username': username
}
dbcursor.execute(SQL_GET_USER_BY_USERNAME_AND_TENANT, args)
user_data = dbcursor.fetchone()
if user_data is None:
raise exceptions.KeystoneUnknownUserError(
'Invalid tenant_id or username'
)
return {
'tenant_id': user_data[0],
'user_id': user_data[1],
'username': user_data[2],
'email': user_data[3],
'password': user_data[4],
'apikey': user_data[5],
'enabled': self.bool_from_database(user_data[6])
}
def get_by_name_or_tenant_id(self, tenant_id=None, username=None):
sql_query = None
args = {}
if username is not None:
sql_query = SQL_GET_USER_BY_USERNAME_ONLY
args['username'] = username
else:
sql_query = SQL_GET_USER_BY_TENANT_ONLY
args['tenant_id'] = tenant_id
dbcursor = self.database.cursor()
for user_data in dbcursor.execute(sql_query, args):
yield {
'tenant_id': user_data[0],
'user_id': user_data[1],
'username': user_data[2],
'email': user_data[3],
'password': user_data[4],
'apikey': user_data[5],
'enabled': self.bool_from_database(user_data[6])
}
def update_by_id(self, tenant_id=None, user_id=None, email=None,
password=None, apikey=None, enabled=True):
dbcursor = self.database.cursor()
args = {
'tenant_id': tenant_id,
'user_id': user_id,
'email': email,
'password': password,
'apikey': apikey,
'enabled': enabled
}
dbcursor.execute(SQL_UPDATE_USER_BY_USERID, args)
if not dbcursor.rowcount:
raise exceptions.KeystoneUnknownUserError(
'unable to update user - {0}'.format(args)
)
self.database.commit()
def get_for_tenant_id(self, tenant_id):
dbcursor = self.database.cursor()
args = {
'tenant_id': tenant_id
}
results = []
for user_data in dbcursor.execute(SQL_GET_USERS_FOR_TENANT_ID, args):
results.append({
'tenant_id': user_data[0],
'user_id': user_data[1],
'username': user_data[2],
'email': user_data[3],
'password': user_data[4],
'apikey': user_data[5],
'enabled': self.bool_from_database(user_data[6])
})
return results
| apache-2.0 |
pferreir/indico-backup | indico/MaKaC/webinterface/pages/abstracts.py | 2 | 70361 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
from xml.sax.saxutils import quoteattr
import urllib
from pytz import timezone
import MaKaC.webinterface.wcomponents as wcomponents
import MaKaC.webinterface.urlHandlers as urlHandlers
import MaKaC.webinterface.navigation as navigation
import MaKaC.review as review
from MaKaC.webinterface.pages.conferences import WPConferenceModifBase, WPConferenceDefaultDisplayBase, WPConferenceModifAbstractBase
from MaKaC.webinterface.pages.conferences import WConfDisplayBodyBase
from indico.core.config import Config
from MaKaC.webinterface.common.abstractStatusWrapper import AbstractStatusList
from MaKaC.i18n import _
from indico.util.i18n import i18nformat
from indico.util.date_time import format_time, format_date, format_datetime
from MaKaC.common.timezoneUtils import nowutc, getAdjustedDate, DisplayTZ
from indico.core import config as Configuration
from MaKaC.common.fossilize import fossilize
from MaKaC.fossils.conference import ILocalFileAbstractMaterialFossil
from MaKaC.review import AbstractStatusSubmitted
from MaKaC.review import AbstractTextField
from MaKaC.common.TemplateExec import render
from indico.util.string import render_markdown
class WConfCFADeactivated(WConfDisplayBodyBase):
_linkname = "CFA"
def __init__(self, aw, conf):
self._conf = conf
self._aw = aw
def getVars(self):
wvars = wcomponents.WTemplated.getVars(self)
wvars["body_title"] = self._getTitle()
return wvars
class WPCFAInactive(WPConferenceDefaultDisplayBase):
def _getBody(self, params):
wc = WConfCFADeactivated(self._getAW(), self._conf)
return wc.getHTML()
class WCFANotYetOpened(WConfDisplayBodyBase):
_linkname = "SubmitAbstract"
def __init__(self, aw, conf):
self._conf = conf
self._aw = aw
def getVars(self):
cfaMgr = self._conf.getAbstractMgr()
wvars = wcomponents.WTemplated.getVars(self)
wvars["body_title"] = self._getTitle()
wvars["start_date"] = format_date(cfaMgr.getStartSubmissionDate(), "long")
return wvars
class WPCFANotYetOpened(WPConferenceDefaultDisplayBase):
def _getBody(self, params):
wc = WCFANotYetOpened(self._getAW(), self._conf)
return wc.getHTML()
def _defineSectionMenu(self):
WPConferenceDefaultDisplayBase._defineSectionMenu(self)
self._sectionMenu.setCurrentItem(self._cfaNewSubmissionOpt)
class WCFAClosed(WConfDisplayBodyBase):
_linkname = "SubmitAbstract"
def __init__(self, aw, conf):
self._conf = conf
self._aw = aw
def getVars(self):
cfaMgr = self._conf.getAbstractMgr()
wvars = wcomponents.WTemplated.getVars(self)
wvars["body_title"] = self._getTitle()
wvars["end_date"] = format_date(cfaMgr.getEndSubmissionDate(), "long")
return wvars
class WPCFAClosed(WPConferenceDefaultDisplayBase):
def __init__(self, rh, conf, is_modif):
WPConferenceDefaultDisplayBase.__init__(self, rh, conf)
self._is_modif = is_modif
def _getBody(self, params):
wc = WCFAClosed(self._getAW(), self._conf)
return wc.getHTML({'is_modif': self._is_modif})
def _defineSectionMenu(self):
WPConferenceDefaultDisplayBase._defineSectionMenu(self)
self._sectionMenu.setCurrentItem(self._cfaNewSubmissionOpt)
class WConfCFA(WConfDisplayBodyBase):
_linkname = "CFA"
def __init__(self, aw, conf):
self._conf = conf
self._aw = aw
def _getActionsHTML(self):
html = ""
cfa = self._conf.getAbstractMgr()
if nowutc() < cfa.getStartSubmissionDate():
return html
else:
submitOpt = ""
if cfa.inSubmissionPeriod():
submitOpt = i18nformat("""<li><a href="%s"> _("Submit a new abstract")</a></li>""") % (
urlHandlers.UHAbstractSubmission.getURL(self._conf))
html = i18nformat("""
<b> _("Possible actions you can carry out"):</b>
<ul>
%s
<li><a href="%s"> _("View or modify your already submitted abstracts")</a></li>
</ul>
""") % (submitOpt, urlHandlers.UHUserAbstracts.getURL(self._conf))
return html
def getVars(self):
wvars = wcomponents.WTemplated.getVars(self)
cfa = self._conf.getAbstractMgr()
if cfa.inSubmissionPeriod():
wvars["status"] = _("OPENED")
else:
wvars["status"] = _("CLOSED")
wvars["startDate"] = cfa.getStartSubmissionDate().strftime("%d %B %Y")
wvars["endDate"] = cfa.getEndSubmissionDate().strftime("%d %B %Y")
wvars["actions"] = self._getActionsHTML()
wvars["announcement"] = cfa.getAnnouncement()
wvars["body_title"] = self._getTitle()
return wvars
class WPConferenceCFA( WPConferenceDefaultDisplayBase ):
navigationEntry = navigation.NEConferenceCFA
def _getBody(self, params):
wc = WConfCFA(self._getAW(), self._conf)
return wc.getHTML()
def _defineSectionMenu( self ):
WPConferenceDefaultDisplayBase._defineSectionMenu( self )
self._sectionMenu.setCurrentItem(self._cfaOpt)
class WPAbstractSubmission( WPConferenceDefaultDisplayBase ):
navigationEntry = navigation.NEAbstractSubmission
def getCSSFiles(self):
return WPConferenceDefaultDisplayBase.getCSSFiles(self) + \
self._asset_env['contributions_sass'].urls()
def getJSFiles(self):
return WPConferenceDefaultDisplayBase.getJSFiles(self) + \
self._includeJSPackage('Management') + \
self._asset_env['abstracts_js'].urls()
def _getHeadContent(self):
return WPConferenceDefaultDisplayBase._getHeadContent(self) + render('js/mathjax.config.js.tpl') + \
'\n'.join(['<script src="{0}" type="text/javascript"></script>'.format(url)
for url in self._asset_env['mathjax_js'].urls()])
def _getBody( self, params ):
params["postURL"] = urlHandlers.UHAbstractSubmission.getURL( self._conf )
params["origin"] = "display"
wc = WAbstractDataModification( self._conf )
return wc.getHTML( params )
def _defineSectionMenu( self ):
WPConferenceDefaultDisplayBase._defineSectionMenu( self )
self._sectionMenu.setCurrentItem(self._cfaNewSubmissionOpt)
class WUserAbstracts(WConfDisplayBodyBase):
_linkname = "ViewAbstracts"
def __init__(self, aw, conf):
self._aw = aw
self._conf = conf
def _getAbstractStatus(self, abstract):
status = abstract.getCurrentStatus()
if isinstance(status, review.AbstractStatusAccepted):
statusLabel = _("Accepted")
if status.getType() is not None and status.getType() != "":
return "%s as %s" % (statusLabel, status.getType().getName())
elif isinstance(status, review.AbstractStatusRejected):
return _("Rejected")
elif isinstance(status, review.AbstractStatusWithdrawn):
return _("Withdrawn")
elif isinstance(status, review.AbstractStatusDuplicated):
return _("Duplicated")
elif isinstance(status, review.AbstractStatusMerged):
return _("Merged")
elif isinstance(status, (review.AbstractStatusProposedToAccept, review.AbstractStatusProposedToReject)):
return _("Under Review")
elif isinstance(status, (review.AbstractInConflict)):
return _("In Conflict")
return _("Submitted")
def getVars(self):
wvars = wcomponents.WTemplated.getVars(self)
cfaMgr = self._conf.getAbstractMgr()
abstracts = cfaMgr.getAbstractListForAvatar(self._aw.getUser())
abstracts += cfaMgr.getAbstractListForAuthorEmail(self._aw.getUser().getEmail())
wvars["body_title"] = self._getTitle()
wvars["abstracts"] = sorted(set(abstracts), key=lambda i: int(i.getId()))
wvars["formatDate"] = lambda date: format_date(date, "d MMM yyyy")
wvars["formatTime"] = lambda time: format_time(time, format="short", timezone=timezone(DisplayTZ(self._aw, self._conf).getDisplayTZ()))
wvars["getAbstractStatus"] = lambda abstract: self._getAbstractStatus(abstract)
wvars["conf"] = self._conf
return wvars
class WPUserAbstracts( WPConferenceDefaultDisplayBase ):
navigationEntry = navigation.NEUserAbstracts
def _getBody( self, params ):
wc = WUserAbstracts( self._getAW(), self._conf )
return wc.getHTML()
def _defineSectionMenu( self ):
WPConferenceDefaultDisplayBase._defineSectionMenu( self )
self._sectionMenu.setCurrentItem(self._cfaViewSubmissionsOpt)
class WPAbstractDisplayBase( WPConferenceDefaultDisplayBase ):
def __init__( self, rh, abstract ):
conf = abstract.getConference()
WPConferenceDefaultDisplayBase.__init__( self, rh, conf )
self._navigationTarget = self._abstract = abstract
def getCSSFiles(self):
return WPConferenceDefaultDisplayBase.getCSSFiles(self) + \
self._asset_env['contributions_sass'].urls()
def getJSFiles(self):
return WPConferenceDefaultDisplayBase.getJSFiles(self) + \
self._includeJSPackage('Management') + \
self._asset_env['abstracts_js'].urls()
class WAbstractCannotBeModified(wcomponents.WTemplated):
def __init__(self, abstract):
self._abstract = abstract
def getVars(self):
wvars = wcomponents.WTemplated.getVars(self)
wvars['underReview'] = not isinstance( self._abstract.getCurrentStatus(), AbstractStatusSubmitted)
return wvars
class WPAbstractCannotBeModified( WPAbstractDisplayBase ):
def _getBody( self, params ):
wc = WAbstractCannotBeModified( self._abstract )
return wc.getHTML()
class WAbstractSubmissionConfirmation(wcomponents.WTemplated):
def __init__(self, aw, abstract):
self._aw = aw
self._abstract = abstract
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["displayURL"] = quoteattr(str(urlHandlers.UHAbstractDisplay.getURL(self._abstract)))
vars["displayURLText"] = self.htmlText(str(urlHandlers.UHAbstractDisplay.getURL(self._abstract)))
conf = self._abstract.getConference()
vars["userAbstractsURL"] = quoteattr(str(urlHandlers.UHUserAbstracts.getURL(conf)))
vars["userAbstractsURLText"] = self.htmlText(str(urlHandlers.UHUserAbstracts.getURL(conf)))
vars["CFAURL"] = quoteattr(str(urlHandlers.UHConferenceCFA.getURL(conf)))
vars["abstractId"] = self._abstract.getId()
return vars
class WPAbstractSubmissionConfirmation(WPAbstractDisplayBase):
navigationEntry = navigation.NEAbstractSubmissionConfirmation
def _getBody(self, params):
wc = WAbstractSubmissionConfirmation(self._getAW(), self._abstract)
return wc.getHTML()
class WAbstractDisplay(wcomponents.WTemplated):
def __init__(self, aw, abstract):
self._abstract = abstract
self._aw = aw
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
tzUtil = DisplayTZ(self._aw, self._abstract.getConference())
tz = tzUtil.getDisplayTZ()
status = self._abstract.getCurrentStatus()
if isinstance(status, review.AbstractStatusAccepted):
vars["contribType"] = status.getType()
vars["tracks"] = status.getTrack()
else:
vars["tracks"] = self._abstract.getTrackListSorted()
vars["contribType"] = self._abstract.getContribType()
vars["modifyURL"] = str(urlHandlers.UHAbstractModify.getURL(self._abstract))
vars["withdrawURL"] = str(urlHandlers.UHAbstractWithdraw.getURL(self._abstract))
vars["recoverURL"] = str(urlHandlers.UHAbstractRecovery.getURL(self._abstract))
vars["attachments"] = fossilize(self._abstract.getAttachments().values(), ILocalFileAbstractMaterialFossil)
vars["abstract"] = self._abstract
vars["formatDate"] = lambda date: format_date(date, "d MMM yyyy")
vars["formatTime"] = lambda time: format_time(time, format="short", timezone=timezone(tz))
vars["modifyDisabled"] = isinstance(status, (review.AbstractStatusAccepted,
review.AbstractStatusRejected, review.AbstractStatusDuplicated, review.AbstractStatusMerged))
vars["withdrawDisabled"] = isinstance(status, (review.AbstractStatusAccepted, review.AbstractStatusRejected,
review.AbstractStatusWithdrawn, review.AbstractStatusDuplicated, review.AbstractStatusMerged))
status = self._abstract.getCurrentStatus()
if isinstance(status, review.AbstractStatusAccepted):
vars["statusText"] = _("ACCEPTED ")
if status.getType() is not None and status.getType() != "":
vars["statusText"] += "as %s" % status.getType().getName()
vars["statusClass"] = "abstractStatusAccepted"
vars["statusComments"] = ""
elif isinstance(status, review.AbstractStatusRejected):
vars["statusText"] = _("REJECTED")
vars["statusClass"] = "abstractStatusRejected"
vars["statusComments"] = ""
elif isinstance(status, review.AbstractStatusWithdrawn):
vars["statusText"] = _("Withdrawn")
vars["statusClass"] = "abstractStatusWithdrawn"
vars["statusComments"] = i18nformat("""_("Withdrawn") by %s _("on") %s %s""") % (self.htmlText(status.getResponsible().getFullName()), format_date(status.getDate(), "d MMM yyyy"), format_time(status.getDate(), format="short", timezone=timezone(tz)))
elif isinstance(status, review.AbstractStatusDuplicated):
vars["statusText"] = _("Duplicated")
vars["statusClass"] = "abstractStatusDuplicated"
vars["statusComments"] = ""
elif isinstance(status, review.AbstractStatusMerged):
vars["statusText"] = _("Merged")
vars["statusClass"] = "abstractStatusMerged"
vars["statusComments"] = i18nformat("""_("Merged") into %s-%s""") % (self.htmlText(status.getTargetAbstract().getId()), self.htmlText(status.getTargetAbstract().getTitle()))
elif isinstance(status, (review.AbstractStatusProposedToAccept, review.AbstractStatusProposedToReject)):
vars["statusText"] = _("Under Review")
vars["statusClass"] = "abstractStatusUnderReview"
vars["statusComments"] = ""
else:
vars["statusText"] = _("Submitted")
vars["statusClass"] = "abstractStatusSubmitted"
vars["statusComments"] = ""
vars["accessWrapper"] = self._aw
return vars
class WPAbstractDisplay(WPAbstractDisplayBase):
navigationEntry = navigation.NEAbstractDisplay
def _getHeadContent(self):
return WPAbstractDisplayBase._getHeadContent(self) + render('js/mathjax.config.js.tpl') + \
'\n'.join(['<script src="{0}" type="text/javascript"></script>'.format(url)
for url in self._asset_env['mathjax_js'].urls()])
def _getBody(self, params):
wc = WAbstractDisplay(self._getAW(), self._abstract)
return wc.getHTML()
class WAbstractDataModification(WConfDisplayBodyBase):
_linkname = "SubmitAbstract"
def __init__(self, conf):
self._conf = conf
self._limitedFieldList = []
        self._mandatoryFieldList = [] # ids of all mandatory fields, except those that are also limited
def _setMandatoryAndLimitedFields(self):
abfm = self._conf.getAbstractMgr().getAbstractFieldsMgr()
for f in abfm.getFields():
id = f.getId()
if f.isActive():
if isinstance(f, AbstractTextField):
maxLength = int(f.getMaxLength())
limitation = f.getLimitation()
if maxLength > 0: # it means there is a limit for the field in words or in characters
self._limitedFieldList.append(["f_"+id, maxLength, "maxLimitionCounter_"+id.replace(" ", "_"), limitation, str(f.isMandatory())]) # append the textarea/input id
if f.isMandatory():
self._mandatoryFieldList.append("f_"+id)
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["body_title"] = self._getTitle()
vars["postURL"] = quoteattr(str(vars["postURL"]))
vars["origin"] = vars.get("origin", "display")
vars["abstractTitle"] = quoteattr(str(vars.get("title", "")))
vars["prAuthors"] = fossilize(vars.get("prAuthors", []))
vars["coAuthors"] = fossilize(vars.get("coAuthors", []))
cfaMgr = self._conf.getAbstractMgr()
vars["tracksMandatory"] = cfaMgr.areTracksMandatory()
vars["tracks"] = self._conf.getTrackList()
if cfaMgr.getMultipleTracks():
vars["trackListType"] = "checkbox"
else:
vars["trackListType"] = "radio"
vars["tracksSelected"] = vars.get("tracksSelectedList", []) # list of track ids that had been selected
vars["types"] = self._conf.getContribTypeList()
vars["typeSelected"] = vars.get("type", None)
vars["comments"] = str(vars.get("comments", ""))
fieldDict = {}
for field in cfaMgr.getAbstractFieldsMgr().getFields():
f_id = "f_" + field.getId()
fieldDict[f_id] = vars.get(f_id, "")
vars["fieldDict"] = fieldDict
vars["additionalFields"] = cfaMgr.getAbstractFieldsMgr().getFields()
self._setMandatoryAndLimitedFields()
vars["limitedFieldList"] = self._limitedFieldList
vars["mandatoryFieldList"] = self._mandatoryFieldList
vars["attachedFilesAllowed"] = cfaMgr.canAttachFiles()
vars["showSelectAsSpeaker"] = cfaMgr.showSelectAsSpeaker()
vars["isSelectSpeakerMandatory"] = cfaMgr.isSelectSpeakerMandatory()
        #TODO: In case of error we will lose the attached files; we should keep them somehow
vars["attachments"] = fossilize(vars.get("attachments", []), ILocalFileAbstractMaterialFossil)
return vars
class WPAbstractModify(WPAbstractDisplayBase):
navigationEntry = navigation.NEAbstractModify
def _getHeadContent(self):
return WPAbstractDisplayBase._getHeadContent(self) + render('js/mathjax.config.js.tpl') + \
'\n'.join(['<script src="{0}" type="text/javascript"></script>'.format(url)
for url in self._asset_env['mathjax_js'].urls()])
def getJSFiles(self):
return WPAbstractDisplayBase.getJSFiles(self) + \
self._includeJSPackage('Management')
def getCSSFiles(self):
return WPAbstractDisplayBase.getCSSFiles(self) + \
self._asset_env['contributions_sass'].urls()
def _getBody(self, params):
params["postURL"] = urlHandlers.UHAbstractModify.getURL(self._abstract)
wc = WAbstractDataModification(self._abstract.getConference())
return wc.getHTML(params)
class WAbstractWithdraw(wcomponents.WTemplated):
def __init__(self, abstract):
self._abstract = abstract
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["title"] = self.htmlText(self._abstract.getTitle())
vars["postURL"] = urlHandlers.UHAbstractWithdraw.getURL(self._abstract)
return vars
class WPAbstractWithdraw( WPAbstractDisplayBase ):
navigationEntry = navigation.NEAbstractWithdraw
def _getBody( self, params ):
wc = WAbstractWithdraw( self._abstract )
return wc.getHTML()
class WAbstractRecovery( wcomponents.WTemplated ):
def __init__( self, abstract ):
self._abstract = abstract
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["title"] = self.htmlText( self._abstract.getTitle() )
vars["postURL"] = urlHandlers.UHAbstractRecovery.getURL( self._abstract )
return vars
class WPAbstractRecovery( WPAbstractDisplayBase ):
navigationEntry = navigation.NEAbstractRecovery
def _getBody( self, params ):
wc = WAbstractRecovery( self._abstract )
return wc.getHTML()
class WPAbstractManagementBase( WPConferenceModifBase ):
def __init__( self, rh, abstract ):
self._abstract = self._target = abstract
WPConferenceModifBase.__init__( self, rh, self._abstract.getConference() )
def _getNavigationDrawer(self):
pars = {"target": self._abstract, "isModif": True}
return wcomponents.WNavigationDrawer( pars )
def _createTabCtrl( self ):
self._tabCtrl = wcomponents.TabControl()
self._tabMain = self._tabCtrl.newTab("main", _("Main"), \
urlHandlers.UHAbstractManagment.getURL( self._abstract ) )
self._tabTracks = self._tabCtrl.newTab("tracks", _("Track judgments"), \
urlHandlers.UHAbstractTrackProposalManagment.getURL(self._abstract))
#self._tabAC=self._tabCtrl.newTab("ac", "Access control", \
# urlHandlers.UHAbstractModAC.getURL( self._abstract))
nComments=""
if len(self._abstract.getIntCommentList()) > 0:
nComments = " (%s)"%len(self._abstract.getIntCommentList())
self._tabComments=self._tabCtrl.newTab("comments", _("Internal comments%s")%nComments,\
urlHandlers.UHAbstractModIntComments.getURL( self._abstract))
self._tabNotifLog=self._tabCtrl.newTab("notif_log", _("Notification log"),\
urlHandlers.UHAbstractModNotifLog.getURL( self._abstract))
self._tabTools=self._tabCtrl.newTab("tools", _("Tools"),\
urlHandlers.UHAbstractModTools.getURL( self._abstract))
# Sub tabs for the track judgements
self._subTabTrack = self._tabTracks.newSubTab( "byTrack", "Judgement details",\
urlHandlers.UHAbstractTrackProposalManagment.getURL(self._abstract))
self._subTabRating = self._tabTracks.newSubTab( "byRating", "Rating per question",\
urlHandlers.UHAbstractTrackOrderByRating.getURL(self._abstract))
self._setActiveTab()
def _getPageContent( self, params ):
self._createTabCtrl()
banner = wcomponents.WAbstractBannerModif(self._abstract).getHTML()
html = wcomponents.WTabControl( self._tabCtrl, self._getAW() ).getHTML( self._getTabContent( params ) )
return banner + html
def _setActiveSideMenuItem(self):
self._abstractMenuItem.setActive(True)
def _getTabContent( self, params ):
return "nothing"
def _getHeadContent(self):
return WPConferenceModifBase._getHeadContent(self) + render('js/mathjax.config.js.tpl') + \
'\n'.join(['<script src="{0}" type="text/javascript"></script>'.format(url)
for url in self._asset_env['mathjax_js'].urls()])
def getJSFiles(self):
return WPConferenceModifBase.getJSFiles(self) + \
self._asset_env['abstracts_js'].urls()
def getCSSFiles(self):
return WPConferenceModifBase.getCSSFiles(self) + \
self._asset_env['contributions_sass'].urls()
class WAbstractManagment(wcomponents.WTemplated):
def __init__(self, aw, abstract):
self._abstract = abstract
self._aw = aw
self._conf = abstract.getOwner().getOwner()
def _getAuthorHTML(self, auth):
tmp = "%s (%s)" % (auth.getFullName(), auth.getAffiliation())
tmp = self.htmlText(tmp)
if auth.getEmail() != "":
mailtoSubject = i18nformat("""[%s] _("Abstract") %s: %s""") % (self._conf.getTitle(), self._abstract.getId(), self._abstract.getTitle())
mailtoURL = "mailto:%s?subject=%s" % (auth.getEmail(), urllib.quote(mailtoSubject))
href = quoteattr(mailtoURL)
tmp = """<a href=%s>%s</a>""" % (href, tmp)
return tmp
def _getStatusHTML(self):
status = self._abstract.getCurrentStatus()
html = """<b>%s</b>""" % AbstractStatusList.getInstance().getCaption(status.__class__).upper()
tzUtil = DisplayTZ(self._aw, self._conf)
tz = tzUtil.getDisplayTZ()
if hasattr(status, 'getResponsible'):
respPerson = i18nformat(""" _("by") %s""") % self._getAuthorHTML(status.getResponsible()) if status.getResponsible() else ""
else:
respPerson = ""
if status.__class__ == review.AbstractStatusAccepted:
trackTitle, contribTitle = "", ""
if status.getTrack():
trackTitle = " for %s" % self.htmlText(status.getTrack().getTitle())
if status.getType():
contribTitle = " as %s" % self.htmlText(status.getType().getName())
html = i18nformat("""%s%s%s<br><font size="-1">%s _("on") %s</font>""") % (
html,
trackTitle,
contribTitle,
respPerson,
getAdjustedDate(status.getDate(), tz=tz).strftime("%d %B %Y %H:%M")
)
if status.getComments() != "":
html = """%s<br><font size="-1"><i>%s</i></font>""" % (
html,
status.getComments()
)
elif status.__class__ == review.AbstractStatusRejected:
html = i18nformat("""%s<br><font size="-1">%s _("on") %s</font>""") % (
html,
respPerson,
getAdjustedDate(status.getDate(), tz=tz).strftime("%d %B %Y %H:%M")
)
if status.getComments() != "":
html = """%s<br><font size="-1"><i>%s</i></font>""" % (
html,
status.getComments()
)
elif status.__class__ == review.AbstractStatusWithdrawn:
html = i18nformat("""%s<font size="-1">%s _("on") %s</font>""") % (
html,
respPerson,
getAdjustedDate(status.getDate(), tz=tz).strftime("%d %B %Y %H:%M")
)
if status.getComments() != "":
html = """%s<br><font size="-1"><i>%s</i></font>""" % (
html,
status.getComments()
)
elif status.__class__ == review.AbstractStatusDuplicated:
original = status.getOriginal()
url = urlHandlers.UHAbstractManagment.getURL(original)
html = i18nformat("""%s (<a href=%s>%s-<i>%s</i></a>) <font size="-1">%s _("on") %s</font>""") % (
html,
quoteattr(str(url)),
self.htmlText(original.getId()),
self.htmlText(original.getTitle()),
respPerson,
getAdjustedDate(status.getDate(), tz=tz).strftime("%d %B %Y %H:%M")
)
if status.getComments() != "":
html = """%s<br><font size="-1"><i>%s</i></font>""" % (
html,
status.getComments()
)
elif status.__class__ == review.AbstractStatusMerged:
target = status.getTargetAbstract()
url = urlHandlers.UHAbstractManagment.getURL(target)
html = i18nformat("""<font color="black"><b>%s</b></font> (<a href=%s>%s-<i>%s</i></a>) <font size="-1">%s _("on") %s</font>""") % (
html,
quoteattr(str(url)),
self.htmlText(target.getId()),
self.htmlText(target.getTitle()),
respPerson,
getAdjustedDate(status.getDate(), tz=tz).strftime("%d %B %Y %H:%M")
)
if status.getComments() != "":
html = """%s<br><font size="-1"><i>%s</i></font>""" % (
html,
status.getComments()
)
return html
def _getTracksHTML(self):
prog = []
for track in self._abstract.getTrackListSorted():
jud = self._abstract.getTrackJudgement(track)
if jud.__class__ == review.AbstractAcceptance:
cTypeCaption = ""
if jud.getContribType() is not None:
cTypeCaption = jud.getContribType().getName()
st = i18nformat(""" - _("Proposed to accept")""")
if cTypeCaption:
st += self.htmlText(cTypeCaption)
color = """ color="#009933" """
elif jud.__class__ == review.AbstractRejection:
st = i18nformat("""- _("Proposed to reject")""")
color = """ color="red" """
elif jud.__class__ == review.AbstractReallocation:
st = i18nformat("""- _("Proposed for other tracks")""")
color = """ color="black" """
elif jud.__class__ == review.AbstractInConflict:
st = i18nformat("""- _("Conflict")""")
color = """ color="red" """
else:
st = ""
color = ""
if st != "":
prog.append("""<li>%s <font size="-1" %s> %s </font></li>""" % (self.htmlText(track.getTitle()), color, st))
else:
prog.append("""<li>%s</li>""" % (self.htmlText(track.getTitle())))
return "<ul>%s</ul>" % "".join(prog)
def _getContributionHTML(self):
res = ""
contrib = self._abstract.getContribution()
if contrib:
url = urlHandlers.UHContributionModification.getURL(contrib)
title = self.htmlText(contrib.getTitle())
id = self.htmlText(contrib.getId())
res = """<a href=%s>%s - %s</a>""" % (quoteattr(str(url)), id, title)
return res
def _getMergeFromHTML(self):
abstracts = self._abstract.getMergeFromList()
if not abstracts:
return ""
l = []
for abstract in abstracts:
if abstract.getOwner():
l.append("""<a href="%s">%s : %s</a><br>\n""" % (urlHandlers.UHAbstractManagment.getURL(abstract), abstract.getId(), abstract.getTitle()))
else:
l.append("""%s : %s [DELETED]<br>\n""" % (abstract.getId(), abstract.getTitle()))
return i18nformat("""<tr>
<td class="dataCaptionTD" nowrap><span class="dataCaptionFormat"> _("Merged from")</span></td>
<td bgcolor="white" valign="top" colspan="3">%s</td>
</tr>""") % "".join(l)
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["abstract"] = self._abstract
afm = self._abstract.getConference().getAbstractMgr().getAbstractFieldsMgr()
vars["additionalFields"] = afm.getActiveFields()
vars["organisation"] = self.htmlText(self._abstract.getSubmitter().getAffiliation())
vars["status"] = self._getStatusHTML()
vars["statusName"] = AbstractStatusList.getInstance().getCaption(self._abstract.getCurrentStatus().__class__).upper()
vars["showBackToSubmitted"] = isinstance(self._abstract.getCurrentStatus(), (review.AbstractStatusWithdrawn,
review.AbstractStatusRejected,
review.AbstractStatusAccepted))
#for author in self._abstract.getAuthorList():
# if self._abstract.isPrimaryAuthor( author ):
# primary_authors.append( self._getAuthorHTML( author ) )
# else:
# co_authors.append( self._getAuthorHTML( author ) )
primary_authors = []
for author in self._abstract.getPrimaryAuthorList():
primary_authors.append(self._getAuthorHTML(author))
co_authors = []
for author in self._abstract.getCoAuthorList():
co_authors.append(self._getAuthorHTML(author))
vars["primary_authors"] = "<br>".join(primary_authors)
vars["co_authors"] = "<br>".join(co_authors)
speakers = []
for spk in self._abstract.getSpeakerList():
speakers.append(self._getAuthorHTML(spk))
vars["speakers"] = "<br>".join(speakers)
vars["tracks"] = self._getTracksHTML()
vars["type"] = ""
if self._abstract.getContribType() is not None:
vars["type"] = self._abstract.getContribType().getName()
vars["submitDate"] = self._abstract.getSubmissionDate().strftime("%d %B %Y %H:%M")
vars["modificationDate"] = self._abstract.getModificationDate().strftime("%d %B %Y %H:%M")
vars["disable"] = ""
vars["dupDisable"] = ""
vars["mergeDisable"] = ""
if self._abstract.getCurrentStatus().__class__ in [review.AbstractStatusAccepted,
review.AbstractStatusRejected,
review.AbstractStatusWithdrawn]:
vars["disable"] = "disabled"
vars["mergeDisable"] = "disabled"
vars["dupDisable"] = "disabled"
vars["duplicatedButton"] = _("mark as duplicated")
vars["duplicateURL"] = quoteattr(str(urlHandlers.UHAbstractModMarkAsDup.getURL(self._abstract)))
if self._abstract.getCurrentStatus().__class__ == review.AbstractStatusDuplicated:
vars["duplicatedButton"] = _("unmark as duplicated")
vars["duplicateURL"] = quoteattr(str(urlHandlers.UHAbstractModUnMarkAsDup.getURL(self._abstract)))
vars["mergeDisable"] = "disabled"
vars["disable"] = "disabled"
vars["mergeButton"] = _("merge into")
vars["mergeIntoURL"] = quoteattr(str(urlHandlers.UHAbstractModMergeInto.getURL(self._abstract)))
if self._abstract.getCurrentStatus().__class__ == review.AbstractStatusMerged:
vars["mergeIntoURL"] = quoteattr(str(urlHandlers.UHAbstractModUnMerge.getURL(self._abstract)))
vars["mergeButton"] = _("unmerge")
vars["dupDisable"] = "disabled"
vars["disable"] = "disabled"
vars["mergeFrom"] = self._getMergeFromHTML()
vars["abstractListURL"] = quoteattr(str(urlHandlers.UHConfAbstractManagment.getURL(self._conf)))
vars["viewTrackDetailsURL"] = quoteattr(str(urlHandlers.UHAbstractTrackProposalManagment.getURL(self._abstract)))
vars["comments"] = self._abstract.getComments()
vars["contribution"] = self._getContributionHTML()
vars["abstractPDF"] = urlHandlers.UHAbstractConfManagerDisplayPDF.getURL(self._abstract)
vars["printIconURL"] = Config.getInstance().getSystemIconURL("pdf")
vars["abstractXML"] = urlHandlers.UHAbstractToXML.getURL(self._abstract)
vars["xmlIconURL"] = Config.getInstance().getSystemIconURL("xml")
vars["acceptURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentAccept.getURL(self._abstract)))
vars["rejectURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentReject.getURL(self._abstract)))
vars["changeTrackURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentChangeTrack.getURL(self._abstract)))
vars["backToSubmittedURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentBackToSubmitted.getURL(self._abstract)))
vars["modDataURL"] = quoteattr(str(urlHandlers.UHAbstractModEditData.getURL(self._abstract)))
vars["propToAccURL"] = quoteattr(str(urlHandlers.UHConfModAbstractPropToAcc.getURL(self._abstract)))
vars["propToRejURL"] = quoteattr(str(urlHandlers.UHConfModAbstractPropToRej.getURL(self._abstract)))
vars["withdrawURL"] = quoteattr(str(urlHandlers.UHConfModAbstractWithdraw.getURL(self._abstract)))
vars["disableWithdraw"] = ""
if self._abstract.getCurrentStatus().__class__ not in \
[review.AbstractStatusSubmitted, review.AbstractStatusAccepted,
review.AbstractStatusInConflict,
review.AbstractStatusUnderReview,
review.AbstractStatusProposedToReject,
review.AbstractStatusProposedToAccept]:
vars["disableWithdraw"] = " disabled"
rating = self._abstract.getRating()
if rating is None:
vars["rating"] = ""
else:
vars["rating"] = "%.2f" % rating
vars["scaleLower"] = self._abstract.getConference().getConfAbstractReview().getScaleLower()
vars["scaleHigher"] = self._abstract.getConference().getConfAbstractReview().getScaleHigher()
vars["attachments"] = fossilize(self._abstract.getAttachments().values(), ILocalFileAbstractMaterialFossil)
vars["confId"] = self._conf.getId()
vars["confTitle"] = self._conf.getTitle()
vars["submitterFullName"] = self._abstract.getSubmitter().getFullName()
vars["submitterAffiliation"] = self._abstract.getSubmitter().getAffiliation()
vars["submitterEmail"] = self._abstract.getSubmitter().getEmail()
vars["abstractAccepted"] = isinstance(self._abstract.getCurrentStatus(), review.AbstractStatusAccepted)
return vars
class WPAbstractManagment(WPAbstractManagementBase):
def _setActiveTab( self ):
self._tabMain.setActive()
def _getTabContent( self, params ):
wc = WAbstractManagment( self._getAW(), self._target )
return wc.getHTML( params )
class WPModEditData(WPAbstractManagment):
def __init__(self, rh, abstract, abstractData):
WPAbstractManagment.__init__(self, rh, abstract)
def _getTabContent(self,params):
params["postURL"] = urlHandlers.UHAbstractModEditData.getURL(self._abstract)
params["origin"] = "management"
wc = WAbstractDataModification(self._conf)
return wc.getHTML(params)
class WAbstractManagmentAccept( wcomponents.WTemplated ):
def __init__( self, aw, abstract, track=None ):
self._abstract = abstract
self._track = track
self._aw = aw
self._conf = abstract.getOwner().getOwner()
def _getTypeItemsHTML( self ):
items = [ i18nformat("""<option value="not_defined">--_("not defined")--</option>""")]
status = self._abstract.getCurrentStatus()
isPropToAcc = isinstance(status, review.AbstractStatusProposedToAccept)
for type in self._conf.getContribTypeList():
title,default = type.getName(), ""
if isPropToAcc and status.getType() == type:
title = "[*] %s"%title
default = " selected"
items.append( """<option value=%s%s>%s</option>"""%(\
quoteattr(type.getId()), default, self.htmlText(title)))
return items
def _getTrackItemsHTML( self ):
items = [ i18nformat("""<option value="conf">--_("no track")--</option>""")]
for track in self._conf.getTrackList():
#the indicator legend:
# [*] -> suggested for that track
# [A] -> track proposed to accept
# [R] -> track proposed to reject
# [C] -> track in conflict
indicator, selected = "", ""
if self._abstract.hasTrack( track ):
indicator = "[*] "
jud = self._abstract.getTrackJudgement( track )
if isinstance(jud, review.AbstractAcceptance):
if self._abstract.getCurrentStatus().__class__ == review.AbstractStatusProposedToAccept:
selected = " selected"
indicator = "[A] "
elif isinstance(jud, review.AbstractRejection):
indicator = "[R] "
elif isinstance(jud, review.AbstractInConflict):
indicator = "[C] "
items.append("""<option value="%s"%s>%s%s</option>"""%(track.getId(), selected, indicator, track.getTitle()))
return items
def _getSessionItemsHTML( self ):
items = [ i18nformat("""<option value="conf">--_("no session")--</option>""")]
for session in self._conf.getSessionList():
items.append("""<option value="%s">%s</option>"""%(session.getId(), session.getTitle()))
return items
def _checkNotificationTpl(self):
for notificationTpl in self._abstract.getOwner().getNotificationTplList():
for condition in notificationTpl.getConditionList():
if isinstance(condition, review.NotifTplCondAccepted):
return True
return False
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractName"] = self._abstract.getTitle()
vars["tracks"] = "".join( self._getTrackItemsHTML() )
vars["sessions"] = "".join( self._getSessionItemsHTML() )
vars["types"] = "".join( self._getTypeItemsHTML() )
vars["showNotifyCheckbox"] = self._checkNotificationTpl()
if self._track == None:
vars["acceptURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentAccept.getURL(self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHAbstractManagment.getURL(self._abstract)))
vars["trackTitle"] = ""
else:
vars["acceptURL"] = quoteattr(str(urlHandlers.UHTrackAbstractAccept.getURL(self._track, self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHTrackAbstractModif.getURL(self._track, self._abstract)))
vars["trackTitle"] = self._track.getTitle()
return vars
class WAbstractManagmentAcceptMultiple( wcomponents.WTemplated):
def __init__( self, abstracts ):
wcomponents.WTemplated.__init__(self)
self._abstracts = abstracts
        # we suppose that we always have at least one abstract:
self._conf = abstracts[0].getOwner().getOwner()
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractsQuantity"] = len(self._abstracts)
vars["tracks"] = self._conf.getTrackList()
vars["sessions"] = self._conf.getSessionList()
vars["types"] = self._conf.getContribTypeList()
vars["listOfAbstracts"] = []
acceptURL = urlHandlers.UHAbstractManagmentAcceptMultiple.getURL(self._conf)
IDs = []
for abstract in self._abstracts:
IDs.append(abstract.getId())
vars["listOfAbstracts"].append("[%s] %s"%(abstract.getId(), abstract.getTitle()))
acceptURL.addParams({'abstracts':IDs})
vars["acceptURL"] = quoteattr(str(acceptURL))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHConfAbstractManagment.getURL(self._conf)))
return vars
class WPAbstractManagmentAccept(WPAbstractManagment):
def _getTabContent( self, params ):
wc = WAbstractManagmentAccept( self._getAW(), self._target )
return wc.getHTML()
class WPAbstractManagmentAcceptMultiple(WPConferenceModifAbstractBase):
def __init__( self, rh, abstracts ):
WPConferenceModifAbstractBase.__init__(self, rh, abstracts[0].getConference())
self._abstracts = abstracts
def _getPageContent( self, params ):
wc = WAbstractManagmentAcceptMultiple( self._abstracts )
return wc.getHTML()
class WPAbstractManagmentRejectMultiple(WPConferenceModifAbstractBase):
def __init__( self, rh, abstracts ):
WPConferenceModifAbstractBase.__init__(self, rh, abstracts[0].getConference())
self._abstracts = abstracts
def _getPageContent( self, params ):
wc = WAbstractManagmentRejectMultiple( self._abstracts )
return wc.getHTML()
class WAbsModAcceptConfirmation(wcomponents.WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["track"]=quoteattr(vars["track"])
vars["comments"]=quoteattr(vars["comments"])
vars["type"]=quoteattr(vars["type"])
vars["acceptURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentAccept.getURL(self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHAbstractManagment.getURL(self._abstract)))
return vars
class WPModAcceptConfirmation(WPAbstractManagment):
def _getTabContent(self,params):
wc = WAbsModAcceptConfirmation(self._target)
p={"track":params["track"],
"session":params["session"],
"comments":params["comments"],
"type":params["type"]}
return wc.getHTML(p)
class WAbsModRejectConfirmation(wcomponents.WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["comments"]=quoteattr(vars["comments"])
vars["rejectURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentReject.getURL(self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHAbstractManagment.getURL(self._abstract)))
return vars
class WPModRejectConfirmation(WPAbstractManagment):
def _getTabContent(self,params):
wc = WAbsModRejectConfirmation(self._target)
p={ "comments":params["comments"] }
return wc.getHTML(p)
class WAbstractManagmentReject( wcomponents.WTemplated ):
def __init__( self, aw, abstract, track=None ):
self._abstract = abstract
self._track = track
self._aw = aw
self._conf = abstract.getOwner().getOwner()
def _checkNotificationTpl(self):
for notificationTpl in self._abstract.getOwner().getNotificationTplList():
for condition in notificationTpl.getConditionList():
if isinstance(condition, review.NotifTplCondRejected):
return True
return False
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractName"] = self._abstract.getTitle()
vars["showNotifyCheckbox"] = self._checkNotificationTpl()
if self._track == None:
vars["rejectURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentReject.getURL(self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHAbstractManagment.getURL(self._abstract)))
vars["trackTitle"] = ""
else:
vars["rejectURL"] = quoteattr(str(urlHandlers.UHTrackAbstractReject.getURL(self._track, self._abstract)))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHTrackAbstractModif.getURL(self._track, self._abstract)))
vars["trackTitle"] = self._track.getTitle()
return vars
class WAbstractManagmentRejectMultiple( wcomponents.WTemplated ):
def __init__( self, abstracts ):
self._abstracts = abstracts
        # we suppose that we always have at least one abstract:
self._conf = abstracts[0].getOwner().getOwner()
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractsQuantity"] = len(self._abstracts)
vars["listOfAbstracts"] = []
rejectURL = urlHandlers.UHAbstractManagmentRejectMultiple.getURL(self._conf)
IDs = []
for abstract in self._abstracts:
IDs.append(abstract.getId())
vars["listOfAbstracts"].append("[%s] %s"%(abstract.getId(), abstract.getTitle()))
rejectURL.addParams({'abstracts':IDs})
vars["rejectURL"] = quoteattr(str(rejectURL))
vars["cancelURL"] = quoteattr(str(urlHandlers.UHConfAbstractManagment.getURL(self._conf)))
return vars
class WPAbstractManagmentReject(WPAbstractManagment):
def _getTabContent( self, params ):
wc = WAbstractManagmentReject( self._getAW(), self._target )
return wc.getHTML( params )
class WPModMarkAsDup(WPAbstractManagment):
def _getTabContent(self, params):
wc = wcomponents.WAbstractModMarkAsDup(self._target)
p = {"comments": params.get("comments", ""),
"id": params.get("originalId", ""),
"duplicateURL": urlHandlers.UHAbstractModMarkAsDup.getURL(self._abstract),
"cancelURL": urlHandlers.UHAbstractManagment.getURL(self._abstract),
"error": params.get('errorMsg', '')}
return wc.getHTML(p)
class WPModUnMarkAsDup(WPAbstractManagment):
def _getTabContent(self, params):
wc = wcomponents.WAbstractModUnMarkAsDup(self._target)
p = {"comments": params.get("comments", ""),
"unduplicateURL": urlHandlers.UHAbstractModUnMarkAsDup.getURL(self._abstract),
"cancelURL": urlHandlers.UHAbstractManagment.getURL(self._abstract)}
return wc.getHTML(p)
class WAbstractModMergeInto(wcomponents.WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["mergeURL"]=quoteattr(str(vars["mergeURL"]))
vars["cancelURL"]=quoteattr(str(vars["cancelURL"]))
vars["includeAuthorsChecked"]=""
if vars.get("includeAuthors",False):
vars["includeAuthorsChecked"]=" checked"
vars["notifyChecked"]=""
if vars.get("doNotify",False):
vars["notifyChecked"]=" checked"
return vars
class WPModMergeInto(WPAbstractManagment):
def _getTabContent(self, params):
wc = WAbstractModMergeInto(self._target)
p = {"cancelURL": urlHandlers.UHAbstractManagment.getURL(self._abstract),
"mergeURL": urlHandlers.UHAbstractModMergeInto.getURL(self._abstract),
"comments": params.get("comments", ""),
"id": params.get("targetId", ""),
"includeAuthors": params.get("includeAuthors", False),
"doNotify": params.get("notify", False),
"error": params.get('errorMsg', '')}
return wc.getHTML(p)
class WAbstractModUnMerge(wcomponents.WTemplated):
def __init__(self,abstract):
self._abstract=abstract
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["unmergeURL"]=quoteattr(str(vars["unmergeURL"]))
vars["cancelURL"]=quoteattr(str(vars["cancelURL"]))
return vars
class WPModUnMerge(WPAbstractManagment):
def _getTabContent(self, params):
wc = WAbstractModUnMerge(self._target)
p = {"comments": params.get("comments", ""),
"unmergeURL": urlHandlers.UHAbstractModUnMerge.getURL(self._abstract),
"cancelURL": urlHandlers.UHAbstractManagment.getURL(self._abstract),
"error": params.get('errorMsg', '')}
return wc.getHTML(p)
class WConfModAbstractPropToAcc(wcomponents.WTemplated):
def __init__(self,aw,abstract):
self._abstract=abstract
self._aw=aw
def _getTracksHTML(self):
res=[]
for track in self._abstract.getTrackListSorted():
u=self._aw.getUser()
if not self._abstract.canUserModify(u) and \
not track.canUserCoordinate(u):
continue
id=quoteattr(str(track.getId()))
legend=""
jud=self._abstract.getTrackJudgement(track)
if isinstance(jud,review.AbstractAcceptance):
legend="[PA]"
elif isinstance(jud,review.AbstractRejection):
legend="[PR]"
elif isinstance(jud,review.AbstractReallocation):
legend="[PM]"
elif isinstance(jud,review.AbstractInConflict):
legend="[C]"
caption="%s%s"%(legend,self.htmlText(track.getTitle()))
res.append("""<option value=%s>%s</option>"""%(id,caption))
return "".join(res)
def _getContribTypesHTML(self):
res=["""<option value="">--none--</option>"""]
for cType in self._abstract.getConference().getContribTypeList():
id=quoteattr(str(cType.getId()))
caption=self.htmlText(cType.getName())
res.append("""<option value=%s>%s</option>"""%(id,caption))
return res
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["postURL"]=quoteattr(str(urlHandlers.UHConfModAbstractPropToAcc.getURL(self._abstract)))
vars["tracks"]=self._getTracksHTML()
vars["contribTypes"]=self._getContribTypesHTML()
vars["comment"]=""
vars["changeTrackURL"]=quoteattr(str(urlHandlers.UHAbstractManagmentChangeTrack.getURL(self._abstract)))
vars["abstractReview"] = self._abstract.getConference().getConfAbstractReview()
return vars
class WPModPropToAcc(WPAbstractManagment):
def _getTabContent( self, params ):
wc=WConfModAbstractPropToAcc(self._rh.getAW(),self._abstract)
return wc.getHTML()
class WConfModAbstractPropToRej(wcomponents.WTemplated):
def __init__(self,aw,abstract):
self._abstract=abstract
self._aw=aw
def _getTracksHTML(self):
res=[]
u=self._aw.getUser()
for track in self._abstract.getTrackListSorted():
if not self._abstract.canUserModify(u) and \
not track.canUserCoordinate(u):
continue
id=quoteattr(str(track.getId()))
legend=""
jud=self._abstract.getTrackJudgement(track)
if isinstance(jud,review.AbstractAcceptance):
legend="[PA]"
elif isinstance(jud,review.AbstractRejection):
legend="[PR]"
elif isinstance(jud,review.AbstractReallocation):
legend="[PM]"
elif isinstance(jud,review.AbstractInConflict):
legend="[C]"
caption="%s%s"%(legend,self.htmlText(track.getTitle()))
res.append("""<option value=%s>%s</option>"""%(id,caption))
return "".join(res)
def getVars(self):
vars=wcomponents.WTemplated.getVars(self)
vars["postURL"]=quoteattr(str(urlHandlers.UHConfModAbstractPropToRej.getURL(self._abstract)))
vars["tracks"]=self._getTracksHTML()
vars["comment"]=""
vars["changeTrackURL"]=quoteattr(str(urlHandlers.UHAbstractManagmentChangeTrack.getURL(self._abstract)))
vars["abstractReview"] = self._abstract.getConference().getConfAbstractReview()
return vars
class WPModPropToRej(WPAbstractManagment):
def _getTabContent( self, params ):
wc=WConfModAbstractPropToRej(self._rh.getAW(),self._abstract)
return wc.getHTML()
class WAbstractManagmentChangeTrack( wcomponents.WTemplated ):
def __init__( self, aw, abstract ):
self._abstract = abstract
self._aw = aw
self._conf = abstract.getOwner().getOwner()
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
vars["abstractName"] = self._abstract.getTitle()
tracks = ""
for track in self._conf.getTrackList():
checked = ""
if self._abstract.hasTrack( track ):
checked = "checked"
tracks += "<input %s name=\"tracks\" type=\"checkbox\" value=\"%s\"> %s<br>\n"%(checked,track.getId(), track.getTitle())
vars["tracks"] = tracks
return vars
class WPAbstractManagmentChangeTrack(WPAbstractManagementBase):
def _setActiveTab( self ):
self._tabMain.setActive()
def _getTabContent( self, params ):
wc = WAbstractManagmentChangeTrack( self._getAW(), self._target )
params["saveURL"] = quoteattr(str(urlHandlers.UHAbstractManagmentChangeTrack.getURL(self._abstract)))
params["cancelURL"] = quoteattr(str(urlHandlers.UHAbstractManagment.getURL(self._abstract)))
return wc.getHTML( params )
class WAbstractTrackManagment(wcomponents.WTemplated):
def __init__( self, aw, abstract ):
self._abstract = abstract
self._aw = aw
self._conf = abstract.getOwner().getOwner()
def _getResponsibleHTML( self, track, res ):
tmp = "%s (%s)"%(res.getFullName(), res.getAffiliation())
tmp = self.htmlText( tmp )
if res.getEmail() != "":
mailtoSubject = _("[%s] Abstract %s: %s")%( self._conf.getTitle(), self._abstract.getId(), self._abstract.getTitle() )
mailtoBody=""
if track is not None:
mailtoBody = _("You can access the abstract at [%s]")%str( urlHandlers.UHTrackAbstractModif.getURL( track, self._abstract ) )
mailtoURL = "mailto:%s?subject=%s&body=%s"%( res.getEmail(), \
urllib.quote( mailtoSubject ), \
urllib.quote( mailtoBody ) )
href = quoteattr( mailtoURL )
tmp = """<a href=%s><font size=\"-2\">%s</font></a>"""%(href, tmp)
return tmp
def getVars( self ):
vars = wcomponents.WTemplated.getVars( self )
tracks = ""
tzUtil = DisplayTZ(self._aw,self._conf)
tz = tzUtil.getDisplayTZ()
judgements = False # this var shows if there is any judgement to show in the table
for track in self._abstract.getTrackListSorted():
firstJudg=True
for status in self._abstract.getJudgementHistoryByTrack(track):
judgements = True
if status.__class__ == review.AbstractAcceptance:
contribType = ""
if status.getContribType() is not None:
contribType = "(%s)"%status.getContribType().getName()
st = _("Proposed to accept %s")%(self.htmlText(contribType))
color = "#D2FFE9"
modifDate = getAdjustedDate(status.getDate(),tz=tz).strftime("%d %B %Y %H:%M")
modifier = self._getResponsibleHTML( track, status.getResponsible() )
comments = status.getComment()
elif status.__class__ == review.AbstractRejection:
st = _("Proposed to reject")
color = "#FFDDDD"
modifDate = getAdjustedDate(status.getDate(),tz=tz).strftime("%d %B %Y %H:%M")
modifier = self._getResponsibleHTML( track, status.getResponsible() )
comments = status.getComment()
elif status.__class__ == review.AbstractReallocation:
l = []
for propTrack in status.getProposedTrackList():
l.append( self.htmlText( propTrack.getTitle() ) )
st = i18nformat(""" _("Proposed for other tracks"):<font size="-1"><table style="padding-left:10px"><tr><td>%s</td></tr></table></font>""")%"<br>".join(l)
color = "#F6F6F6"
modifDate = getAdjustedDate(status.getDate(),tz=tz).strftime("%d %B %Y %H:%M")
modifier = self._getResponsibleHTML( track, status.getResponsible() )
comments = status.getComment()
elif status.__class__ == review.AbstractMarkedAsDuplicated:
st = _("""Marked as duplicated: original abstract has id '%s'""")%(status.getOriginalAbstract().getId())
color = "#DDDDFF"
modifDate = getAdjustedDate(status.getDate(),tz=tz).strftime("%d %B %Y %H:%M")
modifier = self._getResponsibleHTML( track, status.getResponsible() )
comments = status.getComment()
elif status.__class__ == review.AbstractUnMarkedAsDuplicated:
st = _("""Unmarked as duplicated""")
color = "#FFFFCC"
modifDate = getAdjustedDate(status.getDate(),tz=tz).strftime("%d %B %Y %H:%M")
modifier = self._getResponsibleHTML( track, status.getResponsible() )
comments = status.getComment()
else:
st = " "
color = "white"
modifDate = " "
modifier = " "
comments = " "
trackTitle = _("General Judgment")
if track is not None and firstJudg:
trackTitle = track.getTitle()
if firstJudg:
firstJudg=False
else:
trackTitle=" "
if status.getJudValue() == None:
                    # There were no questions when the abstract was judged; the value 0 is wrong for this case
# because it is possible to have answered questions and a final value of 0.
rating = "-"
detailsImg = ""
else:
# Get the list of questions and the answers values
questionNames = []
answerValues = []
answers = status.getAnswers()
for ans in answers:
questionNames.append(ans.getQuestion().getText())
answerValues.append("%.2f" % ans.getValue())
rating = "%.2f" % status.getJudValue()
total = "%.2f" % status.getTotalJudValue()
imgIcon = Configuration.Config.getInstance().getSystemIconURL("itemCollapsed")
detailsImg = """<img src="%s" onClick = "showQuestionDetails(%s,%s,%s,%s)" style="cursor: pointer;">"""% (imgIcon, questionNames, answerValues, rating, total)
tracks += "<tr bgcolor=\"%s\">"%color
tracks += "<td nowrap class=\"blacktext\" style=\"padding-right:10px;background-color:white\"><b> %s</b></td>"%(trackTitle)
tracks += "<td nowrap class=\"blacktext\" style=\"padding-right:10px\"> %s</td>"%st
tracks += "<td nowrap class=\"blacktext\" style=\"padding-right:10px\"> %s</td>"%modifier
tracks += "<td nowrap class=\"blacktext\" style=\"padding-right:10px\"><font size=\"-2\"> %s</font></td>"%modifDate
tracks += "<td nowrap class=\"blacktext\" style=\"padding-right:10px\"> %s %s</td>"%(rating, detailsImg)
tracks += """<td class=\"blacktext\"> %s</td>"""%comments
tracks += "</tr>"
if self._abstract.getJudgementHistoryByTrack(track) != []:
tracks+="""
<tr><td> </td></tr>
"""
if self._abstract.getRating():
vars["ratingAverage"] = "%.2f" % self._abstract.getRating()
else:
vars["ratingAverage"] = None
vars["judgements"] = judgements
vars["tracks"] = tracks
vars["scaleLower"] = self._conf.getConfAbstractReview().getScaleLower()
vars["scaleHigher"] = self._conf.getConfAbstractReview().getScaleHigher()
return vars
class WPAbstractTrackManagment(WPAbstractManagementBase):
def _setActiveTab( self ):
self._tabTracks.setActive()
self._subTabTrack.setActive()
def _getTabContent( self, params ):
wc = WAbstractTrackManagment( self._getAW(), self._target )
return wc.getHTML( params )
class WAbstractTrackOrderByRating(wcomponents.WTemplated):
def __init__( self, aw, abstract ):
self._abstract = abstract
self._conf = abstract.getOwner().getOwner()
def getVars( self ):
questionIds = self._abstract.getQuestionsAverage().keys()
answerValues = self._abstract.getQuestionsAverage().values()
i = 0
questions = {}
for qText in questionIds:
questions[qText] = "%.2f" % answerValues[i]
i += 1
vars = wcomponents.WTemplated.getVars( self )
vars["questions"] = questions
if self._abstract.getRating():
vars["ratingAverage"] = "%.2f" % self._abstract.getRating()
else:
vars["ratingAverage"] = None
vars["scaleLower"] = self._conf.getConfAbstractReview().getScaleLower()
vars["scaleHigher"] = self._conf.getConfAbstractReview().getScaleHigher()
return vars
class WPAbstractTrackOrderByRating(WPAbstractManagementBase):
def _setActiveTab(self):
self._tabTracks.setActive()
self._subTabRating.setActive()
def _getTabContent(self, params):
wc = WAbstractTrackOrderByRating(self._getAW(), self._target)
return wc.getHTML(params)
class WPModIntComments(WPAbstractManagementBase):
def _setActiveTab(self):
self._tabComments.setActive()
def _getTabContent(self, params):
wc = wcomponents.WAbstractModIntComments(self._getAW(), self._target)
p = {"newCommentURL": urlHandlers.UHAbstractModNewIntComment.getURL(self._abstract),
"commentEditURLGen": urlHandlers.UHAbstractModIntCommentEdit.getURL,
"commentRemURLGen": urlHandlers.UHAbstractModIntCommentRem.getURL
}
return wc.getHTML(p)
class WPModNewIntComment(WPModIntComments):
def _getTabContent(self, params):
wc = wcomponents.WAbstractModNewIntComment(self._getAW(), self._target)
p = {"postURL": urlHandlers.UHAbstractModNewIntComment.getURL(self._abstract)}
return wc.getHTML(p)
class WPModIntCommentEdit(WPModIntComments):
def __init__(self, rh, comment):
self._comment = comment
WPModIntComments.__init__(self, rh, comment.getAbstract())
def _getTabContent(self, params):
wc = wcomponents.WAbstractModIntCommentEdit(self._comment)
p = {"postURL": urlHandlers.UHAbstractModIntCommentEdit.getURL(self._comment)}
return wc.getHTML(p)
class WAbstractModNotifLog(wcomponents.WTemplated):
def __init__(self, abstract):
self._abstract = abstract
def _getResponsibleHTML(self, res):
conf = self._abstract.getConference()
tmp = "%s (%s)" % (res.getFullName(), res.getAffiliation())
tmp = self.htmlText(tmp)
if res.getEmail() != "":
mailtoSubject = _("[%s] Abstract %s: %s") % (conf.getTitle(), self._abstract.getId(), self._abstract.getTitle())
mailtoURL = "mailto:%s?subject=%s" % (res.getEmail(), \
urllib.quote(mailtoSubject))
href = quoteattr(mailtoURL)
tmp = """<a href=%s>%s</a>""" % (href, tmp)
return tmp
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
res = []
for entry in self._abstract.getNotificationLog().getEntryList():
d = entry.getDate().strftime("%Y-%m-%d %H:%M")
resp = entry.getResponsible()
tplCaption = entry.getTpl().getName()
tplLink = i18nformat("""
<b>%s</b> <font color="red"> _("(This template doesn't exist anymore)")</font>
""") % tplCaption
if entry.getTpl().getOwner() is not None:
url = urlHandlers.UHAbstractModNotifTplDisplay.getURL(entry.getTpl())
tplLink = "<a href=%s>%s</a>" % (quoteattr(str(url)), self.htmlText(tplCaption))
res.append(i18nformat("""
<tr>
<td bgcolor="white">
%s _("by") %s
<br>
_("notification template used"): %s
</td>
</tr>
""") % (self.htmlText(d), self._getResponsibleHTML(resp), tplLink))
vars["entries"] = "".join(res)
return vars
class WPModNotifLog(WPAbstractManagementBase):
def _setActiveTab(self):
self._tabNotifLog.setActive()
def _getTabContent(self, params):
wc = WAbstractModNotifLog(self._target)
return wc.getHTML()
class WConfModAbstractWithdraw(wcomponents.WTemplated):
def __init__(self, aw, abstract):
self._abstract = abstract
self._aw = aw
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["postURL"] = quoteattr(str(urlHandlers.UHConfModAbstractWithdraw.getURL(self._abstract)))
vars["comment"] = ""
return vars
class WPModWithdraw(WPAbstractManagment):
def _getTabContent(self, params):
wc = WConfModAbstractWithdraw(self._rh.getAW(), self._abstract)
return wc.getHTML()
class WAbstractModifTool(wcomponents.WTemplated):
def __init__(self, contrib):
self._contrib = contrib
def getVars(self):
vars = wcomponents.WTemplated.getVars(self)
vars["deleteIconURL"] = Config.getInstance().getSystemIconURL("delete")
return vars
class WPModTools(WPAbstractManagment):
def _setActiveTab(self):
self._tabTools.setActive()
def _getTabContent(self, params):
wc = WAbstractModifTool(self._target)
pars = { \
"deleteContributionURL": urlHandlers.UHAbstractDelete.getURL(self._target)
}
return wc.getHTML(pars)
class WPModRemConfirmation(WPModTools):
def __init__(self, rh, abs):
WPAbstractManagment.__init__(self, rh, abs)
self._abs = abs
def _getTabContent(self, params):
wc = wcomponents.WConfirmation()
msg = {'challenge': _("Are you sure you want to delete the abstract?"),
'target': self._abs.getTitle(),
'subtext': None
}
url = urlHandlers.UHAbstractDelete.getURL(self._abs)
return wc.getHTML(msg, url, {},
severity="danger")
| gpl-3.0 |
cloudify-examples/cloudify-bigip-plugin | bigip_plugin/external/bigsuds.py | 2 | 24205 | #!/usr/bin/env python
"""An iControl client library.
See the documentation for the L{BIGIP} class for usage examples.
"""
import httplib
import logging
import os
import re
import urllib2
from xml.sax import SAXParseException
import suds.client
from suds.cache import ObjectCache
from suds.sudsobject import Object as SudsObject
from suds.client import Client
from suds.xsd.doctor import ImportDoctor, Import
from suds.transport import TransportError
from suds import WebFault, TypeNotFound, MethodNotFound as _MethodNotFound
__version__ = '1.0.1'
# We need to monkey-patch the Client's ObjectCache due to a suds bug:
# https://fedorahosted.org/suds/ticket/376
suds.client.ObjectCache = lambda **kwargs: None
log = logging.getLogger('bigsuds')
class OperationFailed(Exception):
"""Base class for bigsuds exceptions."""
class ServerError(OperationFailed, WebFault):
"""Raised when the BIGIP returns an error via the iControl interface."""
class ConnectionError(OperationFailed):
"""Raised when the connection to the BIGIP fails."""
class ParseError(OperationFailed):
"""Raised when parsing data from the BIGIP as a soap message fails.
This is also raised when an invalid iControl namespace
is looked up on the BIGIP (e.g. <bigip>.LocalLB.Bad).
"""
class MethodNotFound(OperationFailed, _MethodNotFound):
"""Raised when a particular iControl method does not exist."""
class ArgumentError(OperationFailed):
"""Raised when too many arguments or incorrect keyword arguments
are passed to an iControl method."""
class BIGIP(object):
"""This class exposes the BIGIP's iControl interface.
Example usage:
>>> b = BIGIP('bigip-hostname')
>>> print b.LocalLB.Pool.get_list()
['/Common/test_pool']
>>> b.LocalLB.Pool.add_member(['/Common/test_pool'], \
[[{'address': '10.10.10.10', 'port': 20030}]])
>>> print b.LocalLB.Pool.get_member(['/Common/test_pool'])
[[{'port': 20020, 'address': '10.10.10.10'},
{'port': 20030, 'address': '10.10.10.10'}]]
Some notes on Exceptions:
* The looking up of iControl namespaces on the L{BIGIP} instance can raise
L{ParseError} and L{ServerError}.
* The looking up of an iControl method can raise L{MethodNotFound}.
* Calling an iControl method can raise L{ServerError} when the BIGIP
reports an error via iControl, L{ConnectionError}, or L{MethodNotFound},
or L{ParseError} when the BIGIP return non-SOAP data, or
L{ArgumentError} when too many arguments are passed or invalid
keyword arguments are passed.
* All of these exceptions derive from L{OperationFailed}.
"""
def __init__(self, hostname, username='admin', password='admin',
debug=False, cachedir=None):
"""init
@param hostname: The IP address or hostname of the BIGIP.
@param username: The admin username on the BIGIP.
@param password: The admin password on the BIGIP.
@param debug: When True sets up additional interactive features
like the ability to introspect/tab-complete the list of method
names.
@param cachedir: The directory to cache wsdls in. None indicates
that caching should be disabled.
"""
self._hostname = hostname
self._username = username
self._password = password
self._debug = debug
self._cachedir = cachedir
if debug:
self._instantiate_namespaces()
def with_session_id(self, session_id=None):
"""Returns a new instance of L{BIGIP} that uses a unique session id.
@param session_id: The integer session id to use. If None, a new
session id will be requested from the BIGIP.
@return: A new instance of L{BIGIP}. All iControl calls made through
this new instance will use the unique session id. All calls made
through the L{BIGIP} that with_session_id() was called on will
        continue to use that instance's session id (or no session id if
it did not have one).
@raise: MethodNotFound: When no session_id is specified and the BIGIP
does not support sessions. Sessions are new in 11.0.0.
        @raise: OperationFailed: When getting the session_id from the BIGIP
fails for some other reason.
"""
if session_id is None:
session_id = self.System.Session.get_session_identifier()
return _BIGIPSession(self._hostname, session_id, self._username,
self._password, self._debug, self._cachedir)
def __getattr__(self, attr):
if attr.startswith('__'):
return getattr(super(BIGIP, self), attr)
if '_' in attr:
            # Backwards compatibility with pycontrol:
first, second = attr.split('_', 1)
return getattr(getattr(self, first), second)
ns = _Namespace(attr, self._create_client)
setattr(self, attr, ns)
return ns
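    # A minimal usage sketch (hostname and credentials are placeholders); both
    # lookups below resolve to the same "LocalLB.Pool" client, the second via
    # the pycontrol-style underscore handling in __getattr__ above:
    #
    #   b = BIGIP('bigip-hostname')
    #   b.LocalLB.Pool.get_list()
    #   b.LocalLB_Pool.get_list()  # equivalent, kept for backwards compatibility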
def _create_client(self, wsdl_name):
try:
client = get_client(self._hostname, wsdl_name, self._username,
self._password, self._cachedir)
except SAXParseException, e:
raise ParseError('%s\nFailed to parse wsdl. Is "%s" a valid '
'namespace?' % (e, wsdl_name))
# One situation that raises TransportError is when credentials are bad.
except (urllib2.URLError, TransportError), e:
raise ConnectionError(str(e))
return self._create_client_wrapper(client, wsdl_name)
def _create_client_wrapper(self, client, wsdl_name):
return _ClientWrapper(client,
self._arg_processor_factory,
_NativeResultProcessor,
wsdl_name,
self._debug)
def _arg_processor_factory(self, client, method):
return _DefaultArgProcessor(method, client.factory)
def _instantiate_namespaces(self):
wsdl_hierarchy = get_wsdls(self._hostname, self._username,
self._password)
for namespace, attr_list in wsdl_hierarchy.iteritems():
ns = getattr(self, namespace)
ns.set_attr_list(attr_list)
class Transaction(object):
"""This class is a context manager for iControl transactions.
Upon successful exit of the with statement, the transaction will be
submitted, otherwise it will be rolled back.
NOTE: This feature was added to BIGIP in version 11.0.0.
Example:
> bigip = BIGIP(<args>)
> with Transaction(bigip):
> <perform actions inside a transaction>
Example which creates a new session id for the transaction:
> bigip = BIGIP(<args>)
    > with Transaction(bigip.with_session_id()) as bigip:
> <perform actions inside a transaction>
"""
def __init__(self, bigip):
self.bigip = bigip
def __enter__(self):
self.bigip.System.Session.start_transaction()
return self.bigip
    def __exit__(self, exc_type, exc_value, exc_tb):
if exc_tb is None:
self.bigip.System.Session.submit_transaction()
else:
try:
self.bigip.System.Session.rollback_transaction()
# Ignore ServerError. This happens if the transaction is already
# timed out. We don't want to ignore other errors, like
# ConnectionErrors.
except ServerError:
pass
def get_client(hostname, wsdl_name, username='admin', password='admin',
cachedir=None):
"""Returns and instance of suds.client.Client.
A separate client is used for each iControl WSDL/Namespace (e.g.
"LocalLB.Pool").
This function allows any suds exceptions to propagate up to the caller.
@param hostname: The IP address or hostname of the BIGIP.
@param wsdl_name: The iControl namespace (e.g. "LocalLB.Pool")
@param username: The admin username on the BIGIP.
@param password: The admin password on the BIGIP.
@param cachedir: The directory to cache wsdls in. None indicates
that caching should be disabled.
"""
url = 'https://%s/iControl/iControlPortal.cgi?WSDL=%s' % (
hostname, wsdl_name)
imp = Import('http://schemas.xmlsoap.org/soap/encoding/')
imp.filter.add('urn:iControl')
if cachedir is not None:
cachedir = ObjectCache(location=os.path.expanduser(cachedir), days=1)
doctor = ImportDoctor(imp)
client = Client(url, doctor=doctor, username=username, password=password,
cache=cachedir)
# Without this, subsequent requests will use the actual hostname of the
# BIGIP, which is often times invalid.
client.set_options(location=url.split('?')[0])
client.factory.separator('_')
return client
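# A small sketch of calling get_client directly (hostname and credentials are
# placeholders); the returned suds client exposes the raw iControl methods of
# the requested namespace through client.service:
#
#   client = get_client('bigip-hostname', 'LocalLB.Pool',
#                       username='admin', password='admin')
#   client.service.get_list()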
def get_wsdls(hostname, username='admin', password='admin'):
"""Returns the set of all available WSDLs on this server
Used for providing introspection into the available namespaces and WSDLs
dynamically (e.g. when using iPython)
@param hostname: The IP address or hostname of the BIGIP.
@param username: The admin username on the BIGIP.
@param password: The admin password on the BIGIP.
"""
url = 'https://%s/iControl/iControlPortal.cgi' % (hostname)
regex = re.compile(r'/iControl/iControlPortal.cgi\?WSDL=([^"]+)"')
auth_handler = urllib2.HTTPBasicAuthHandler()
# 10.1.0 has a realm of "BIG-IP"
auth_handler.add_password(uri='https://%s/' % (hostname), user=username, passwd=password,
realm="BIG-IP")
# 11.3.0 has a realm of "BIG-\IP". I'm not sure exactly when it changed.
auth_handler.add_password(uri='https://%s/' % (hostname), user=username, passwd=password,
realm="BIG\-IP")
opener = urllib2.build_opener(auth_handler)
try:
result = opener.open(url)
except urllib2.URLError, e:
raise ConnectionError(str(e))
wsdls = {}
for line in result.readlines():
result = regex.search(line)
if result:
namespace, rest = result.groups()[0].split(".", 1)
if namespace not in wsdls:
wsdls[namespace] = []
wsdls[namespace].append(rest)
return wsdls
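# Sketch of the mapping returned by get_wsdls (top-level namespace -> list of
# WSDL suffixes); the exact contents depend on the BIGIP version:
#
#   wsdls = get_wsdls('bigip-hostname')
#   # e.g. {'LocalLB': ['Pool', ...], 'System': ['Session', ...], ...}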
class _BIGIPSession(BIGIP):
def __init__(self, hostname, session_id, username='admin', password='admin',
debug=False, cachedir=None):
super(_BIGIPSession, self).__init__(hostname, username=username,
password=password, debug=debug, cachedir=cachedir)
self._headers = {'X-iControl-Session': str(session_id)}
def _create_client_wrapper(self, client, wsdl_name):
client.set_options(headers=self._headers)
return super(_BIGIPSession, self)._create_client_wrapper(client, wsdl_name)
class _Namespace(object):
"""Represents a top level iControl namespace.
Examples of this are "LocalLB", "System", etc.
The purpose of this class is to store context allowing iControl clients
to be looked up using only the remaining part of the namespace.
Example:
<LocalLB namespace>.Pool returns the iControl client for "LocalLB.Pool"
"""
def __init__(self, name, client_creator):
"""init
@param name: The high-level namespace (e.g "LocalLB").
@param client_creator: A function that will be passed the full
namespace string (e.g. "LocalLB.Pool") and should return
some type of iControl client.
"""
self._name = name
self._client_creator = client_creator
self._attrs = []
def __dir__(self):
return sorted(set(dir(type(self)) + list(self.__dict__) +
self._attrs))
def __getattr__(self, attr):
if attr.startswith('__'):
return getattr(super(_Namespace, self), attr)
client = self._client_creator('%s.%s' % (self._name, attr))
setattr(self, attr, client)
return client
def set_attr_list(self, attr_list):
self._attrs = attr_list
class _ClientWrapper(object):
"""A wrapper class that abstracts/extends the suds client API.
"""
def __init__(self, client, arg_processor_factory, result_processor_factory,
wsdl_name, debug=False):
"""init
@param client: An instance of suds.client.Client.
@param arg_processor_factory: This will be called to create processors
for arguments before they are passed to suds methods. This callable
will be passed the suds method and factory and should return an
instance of L{_ArgProcessor}.
@param result_processor_factory: This will be called to create
processors for results returned from suds methods. This callable
will be passed no arguments and should return an instance of
L{_ResultProcessor}.
"""
self._client = client
self._arg_factory = arg_processor_factory
self._result_factory = result_processor_factory
self._wsdl_name = wsdl_name
self._usage = {}
# This populates self.__dict__. Helpful for tab completion.
# I'm not sure if this slows things down much. Maybe we should just
# always do it.
if debug:
# Extract the documentation from the WSDL (before populating
# self.__dict__)
binding_el = client.wsdl.services[0].ports[0].binding[0]
for op in binding_el.getChildren("operation"):
usage = None
doc = op.getChild("documentation")
if doc is not None:
usage = doc.getText().strip()
self._usage[op.get("name")] = usage
for method in client.sd[0].ports[0][1]:
getattr(self, method[0])
def __getattr__(self, attr):
# Looks up the corresponding suds method and returns a wrapped version.
try:
method = getattr(self._client.service, attr)
except _MethodNotFound, e:
e.__class__ = MethodNotFound
raise
wrapper = _wrap_method(method,
self._wsdl_name,
self._arg_factory(self._client, method),
self._result_factory(),
attr in self._usage and self._usage[attr] or None)
setattr(self, attr, wrapper)
return wrapper
def __str__(self):
        # The suds client's string contains the entire soap API. This is really
        # useful, so let's expose it.
return str(self._client)
def _wrap_method(method, wsdl_name, arg_processor, result_processor, usage):
"""
This function wraps a suds method and returns a new function which
provides argument/result processing.
Each time a method is called, the incoming args will be passed to the
specified arg_processor before being passed to the suds method.
The return value from the underlying suds method will be passed to the
specified result_processor prior to being returned to the caller.
@param method: A suds method (can be obtained via
client.service.<method_name>).
@param arg_processor: An instance of L{_ArgProcessor}.
@param result_processor: An instance of L{_ResultProcessor}.
"""
icontrol_sig = "iControl signature: %s" % _method_string(method)
if usage:
usage += "\n\n%s" % icontrol_sig
else:
usage = "Wrapper for %s.%s\n\n%s" % (
wsdl_name, method.method.name, icontrol_sig)
def wrapped_method(*args, **kwargs):
log.debug('Executing iControl method: %s.%s(%s, %s)',
wsdl_name, method.method.name, args, kwargs)
args, kwargs = arg_processor.process(args, kwargs)
        # This exception wrapping is purely for pycontrol compatibility.
# Maybe we want to make this optional and put it in a separate class?
try:
result = method(*args, **kwargs)
except AttributeError:
            # Oddly, this seems to happen when the wrong password is used.
raise ConnectionError('iControl call failed, possibly invalid '
'credentials.')
except _MethodNotFound, e:
e.__class__ = MethodNotFound
raise
except WebFault, e:
e.__class__ = ServerError
raise
except urllib2.URLError, e:
raise ConnectionError('URLError: %s' % str(e))
except httplib.BadStatusLine, e:
raise ConnectionError('BadStatusLine: %s' % e)
except SAXParseException, e:
raise ParseError("Failed to parse the BIGIP's response. This "
"was likely caused by a 500 error message.")
return result_processor.process(result)
wrapped_method.__doc__ = usage
wrapped_method.__name__ = method.method.name.encode("utf-8")
# It's occasionally convenient to be able to grab the suds object directly
wrapped_method._method = method
return wrapped_method
class _ArgProcessor(object):
"""Base class for suds argument processors."""
def process(self, args, kwargs):
"""This method is passed the user-specified args and kwargs.
        @param args: The user specified positional arguments.
        @param kwargs: The user specified keyword arguments.
@return: A tuple of (args, kwargs).
"""
raise NotImplementedError('process')
class _DefaultArgProcessor(_ArgProcessor):
def __init__(self, method, factory):
self._factory = factory
self._method = method
self._argspec = self._make_argspec(method)
def _make_argspec(self, method):
# Returns a list of tuples indicating the arg names and types.
# E.g., [('pool_names', 'Common.StringSequence')]
spec = []
for part in method.method.soap.input.body.parts:
spec.append((part.name, part.type[0]))
return spec
def process(self, args, kwargs):
return (self._process_args(args), self._process_kwargs(kwargs))
def _process_args(self, args):
newargs = []
for i, arg in enumerate(args):
try:
newargs.append(self._process_arg(self._argspec[i][1], arg))
except IndexError:
raise ArgumentError(
'Too many arguments passed to method: %s' % (
_method_string(self._method)))
return newargs
def _process_kwargs(self, kwargs):
newkwargs = {}
for name, value in kwargs.items():
try:
argtype = [x[1] for x in self._argspec if x[0] == name][0]
newkwargs[name] = self._process_arg(argtype, value)
except IndexError:
raise ArgumentError(
'Invalid keyword argument "%s" passed to method: %s' % (
name, _method_string(self._method)))
return newkwargs
def _process_arg(self, arg_type, value):
if isinstance(value, SudsObject):
# If the user explicitly created suds objects to pass in,
# we don't want to mess with them.
return value
if '.' not in arg_type and ':' not in arg_type:
# These are not iControl namespace types, they are part of:
# ns0 = "http://schemas.xmlsoap.org/soap/encoding/"
# From what I can tell, we don't need to send these to the factory.
# Sending them to the factory as-is actually fails to resolve, the
# type names would need the "ns0:" qualifier. Some examples of
# these types are: ns0:string, ns0:long, ns0:unsignedInt.
return value
try:
obj = self._factory.create(arg_type)
except TypeNotFound:
log.error('Failed to create type: %s', arg_type)
return value
if isinstance(value, dict):
for name, value in value.items():
# The new object we created has the type of each attribute
# accessible via the attribute's class name.
try:
class_name = getattr(obj, name).__class__.__name__
except AttributeError:
valid_attrs = ', '.join([x[0] for x in obj])
raise ArgumentError(
'"%s" is not a valid attribute for %s, '
'expecting: %s' % (name, obj.__class__.__name__,
valid_attrs))
setattr(obj, name, self._process_arg(class_name, value))
return obj
array_type = self._array_type(obj)
if array_type is not None:
# This is a common mistake. We might as well catch it here.
if isinstance(value, basestring):
raise ArgumentError(
'%s needs an iterable, but was specified as a string: '
'"%s"' % (obj.__class__.__name__, value))
obj.items = [self._process_arg(array_type, x) for x in value]
return obj
# If this object doesn't have any attributes, then we know it's not
# a complex type or enum type. We'll want to skip the next validation
# step.
if not obj:
return value
# The passed in value doesn't belong to an array type and wasn't a
# complex type (no dictionary received). At this point we know that
# the object type has attributes associated with it. It's likely
# an enum, but could be an incorrect argument to a complex type (e.g.
# the user specified some other type when a dictionary is expected).
# Either way, this error is more helpful than what the BIGIP provides.
if value not in obj:
valid_values = ', '.join([x[0] for x in obj])
raise ArgumentError('"%s" is not a valid value for %s, expecting: '
'%s' % (value, obj.__class__.__name__,
valid_values))
return value
def _array_type(self, obj):
# Determines if the specified type is an array.
# If so, the type name of the elements is returned. Otherwise None
# is returned.
try:
attributes = obj.__metadata__.sxtype.attributes()
except AttributeError:
return None
# The type contained in the array is in one of the attributes.
# According to a suds docstring, the "aty" is the "soap-enc:arrayType".
# We need to find the attribute which has it.
for each in attributes:
if each[0].name == 'arrayType':
try:
return each[0].aty[0]
except AttributeError:
pass
return None
class _ResultProcessor(object):
"""Base class for suds result processors."""
def process(self, value):
"""Processes the suds return value for the caller.
@param value: The return value from a suds method.
@return: The processed value.
"""
raise NotImplementedError('process')
class _NativeResultProcessor(_ResultProcessor):
def process(self, value):
return self._convert_to_native_type(value)
def _convert_to_native_type(self, value):
if isinstance(value, list):
return [self._convert_to_native_type(x) for x in value]
elif isinstance(value, SudsObject):
d = {}
for attr_name, attr_value in value:
d[attr_name] = self._convert_to_native_type(attr_value)
return d
elif isinstance(value, unicode):
# This handles suds.sax.text.Text as well, as it derives from
# unicode.
return str(value)
elif isinstance(value, long):
return int(value)
return value
def _method_string(method):
parts = []
for part in method.method.soap.input.body.parts:
parts.append("%s %s" % (part.type[0], part.name))
return "%s(%s)" % (method.method.name, ', '.join(parts))
| apache-2.0 |
jbedorf/tensorflow | tensorflow/contrib/timeseries/python/timeseries/state_space_models/kalman_filter_test.py | 23 | 18471 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Kalman filtering."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.state_space_models import kalman_filter
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# Two-dimensional state model with "slope" and "level" components.
STATE_TRANSITION = [
[1., 1.], # Add slope to level
[0., 1.] # Maintain slope
]
# Independent noise for each component
STATE_TRANSITION_NOISE = [[0.1, 0.0], [0.0, 0.2]]
OBSERVATION_MODEL = [[[0.5, 0.0], [0.0, 1.0]]]
OBSERVATION_NOISE = [[0.0001, 0.], [0., 0.0002]]
STATE_NOISE_TRANSFORM = [[1.0, 0.0], [0.0, 1.0]]
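# Hand-checked sanity example for the model above (not used by the tests
# directly): one application of STATE_TRANSITION to a state [level=4, slope=2]
# gives [1*4 + 1*2, 0*4 + 1*2] = [6, 2], i.e. the slope is added to the level
# while the slope itself is maintained. test_predict_state_mean below
# exercises three such steps starting from [4, 2] (2*3 + 4 = 10).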
def _powers_and_sums_from_transition_matrix(
state_transition, state_transition_noise_covariance,
state_noise_transform, max_gap=1):
def _transition_matrix_powers(powers):
return math_utils.matrix_to_powers(state_transition, powers)
def _power_sums(num_steps):
power_sums_tensor = math_utils.power_sums_tensor(
max_gap + 1, state_transition,
math_ops.matmul(state_noise_transform,
math_ops.matmul(
state_transition_noise_covariance,
state_noise_transform,
adjoint_b=True)))
return array_ops.gather(power_sums_tensor, indices=num_steps)
return (_transition_matrix_powers, _power_sums)
class MultivariateTests(test.TestCase):
def _multivariate_symmetric_covariance_test_template(
self, dtype, simplified_posterior_variance_computation):
"""Check that errors aren't building up asymmetries in covariances."""
kf = kalman_filter.KalmanFilter(dtype=dtype)
observation_noise_covariance = constant_op.constant(
[[1., 0.5], [0.5, 1.]], dtype=dtype)
observation_model = constant_op.constant(
[[[1., 0., 0., 0.], [0., 0., 1., 0.]]], dtype=dtype)
state = array_ops.placeholder(shape=[1, 4], dtype=dtype)
state_var = array_ops.placeholder(shape=[1, 4, 4], dtype=dtype)
observation = array_ops.placeholder(shape=[1, 2], dtype=dtype)
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=constant_op.constant(
[[1., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 1.],
[0., 0., 0., 1.]],
dtype=dtype),
state_noise_transform=linalg_ops.eye(4, dtype=dtype),
state_transition_noise_covariance=constant_op.constant(
[[1., 0., 0.5, 0.], [0., 1., 0., 0.5], [0.5, 0., 1., 0.],
[0., 0.5, 0., 1.]],
dtype=dtype))
pred_state = kf.predict_state_mean(
prior_state=state, transition_matrices=transition_fn([1]))
pred_state_var = kf.predict_state_var(
prior_state_var=state_var, transition_matrices=transition_fn([1]),
transition_noise_sums=power_sum_fn([1]))
observed_mean, observed_var = kf.observed_from_state(
state_mean=pred_state, state_var=pred_state_var,
observation_model=observation_model,
observation_noise=observation_noise_covariance)
post_state, post_state_var = kf.posterior_from_prior_state(
prior_state=pred_state, prior_state_var=pred_state_var,
observation=observation,
observation_model=observation_model,
predicted_observations=(observed_mean, observed_var),
observation_noise=observation_noise_covariance)
with self.cached_session() as session:
evaled_state = numpy.array([[1., 1., 1., 1.]])
evaled_state_var = numpy.eye(4)[None]
for i in range(500):
evaled_state, evaled_state_var, evaled_observed_var = session.run(
[post_state, post_state_var, observed_var],
feed_dict={state: evaled_state,
state_var: evaled_state_var,
observation: [[float(i), float(i)]]})
self.assertAllClose(evaled_observed_var[0],
evaled_observed_var[0].T)
self.assertAllClose(evaled_state_var[0],
evaled_state_var[0].T)
def test_multivariate_symmetric_covariance_float32(self):
self._multivariate_symmetric_covariance_test_template(
dtypes.float32, simplified_posterior_variance_computation=False)
def test_multivariate_symmetric_covariance_float64(self):
self._multivariate_symmetric_covariance_test_template(
dtypes.float64, simplified_posterior_variance_computation=True)
class KalmanFilterNonBatchTest(test.TestCase):
"""Single-batch KalmanFilter tests."""
def setUp(self):
"""The basic model defined above, with unit batches."""
self.kalman_filter = kalman_filter.KalmanFilter()
self.transition_fn, self.power_sum_fn = (
_powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=5))
def test_observed_from_state(self):
"""Compare observation mean and noise to hand-computed values."""
with self.cached_session():
state = constant_op.constant([[2., 1.]])
state_var = constant_op.constant([[[4., 0.], [0., 3.]]])
observed_mean, observed_var = self.kalman_filter.observed_from_state(
state, state_var,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE)
observed_mean_override, observed_var_override = (
self.kalman_filter.observed_from_state(
state, state_var,
observation_model=OBSERVATION_MODEL,
observation_noise=100 * constant_op.constant(
OBSERVATION_NOISE)[None]))
self.assertAllClose(numpy.array([[1., 1.]]),
observed_mean.eval())
self.assertAllClose(numpy.array([[1., 1.]]),
observed_mean_override.eval())
self.assertAllClose(numpy.array([[[1.0001, 0.], [0., 3.0002]]]),
observed_var.eval())
self.assertAllClose(numpy.array([[[1.01, 0.], [0., 3.02]]]),
observed_var_override.eval())
def _posterior_from_prior_state_test_template(
self, state, state_var, observation, observation_model, observation_noise,
expected_state, expected_state_var):
"""Test that repeated observations converge to the expected value."""
predicted_observations = self.kalman_filter.observed_from_state(
state, state_var, observation_model,
observation_noise=observation_noise)
state_update, state_var_update = (
self.kalman_filter.posterior_from_prior_state(
state, state_var, observation,
observation_model=observation_model,
predicted_observations=predicted_observations,
observation_noise=observation_noise))
with self.cached_session() as session:
evaled_state, evaled_state_var = session.run([state, state_var])
for _ in range(300):
evaled_state, evaled_state_var = session.run(
[state_update, state_var_update],
feed_dict={state: evaled_state, state_var: evaled_state_var})
self.assertAllClose(expected_state,
evaled_state,
atol=1e-5)
self.assertAllClose(
expected_state_var,
evaled_state_var,
atol=1e-5)
def test_posterior_from_prior_state_univariate(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[0.3]]),
state_var=constant_op.constant([[[1.]]]),
observation=constant_op.constant([[1.]]),
observation_model=[[[2.]]],
observation_noise=[[[0.01]]],
expected_state=numpy.array([[0.5]]),
expected_state_var=[[[0.]]])
def test_posterior_from_prior_state_univariate_unit_noise(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[0.3]]),
state_var=constant_op.constant([[[1e10]]]),
observation=constant_op.constant([[1.]]),
observation_model=[[[2.]]],
observation_noise=[[[1.0]]],
expected_state=numpy.array([[0.5]]),
expected_state_var=[[[1. / (300. * 2. ** 2)]]])
def test_posterior_from_prior_state_multivariate_2d(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[1.9, 1.]]),
state_var=constant_op.constant([[[1., 0.], [0., 2.]]]),
observation=constant_op.constant([[1., 1.]]),
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE,
expected_state=numpy.array([[2., 1.]]),
expected_state_var=[[[0., 0.], [0., 0.]]])
def test_posterior_from_prior_state_multivariate_3d(self):
self._posterior_from_prior_state_test_template(
state=constant_op.constant([[1.9, 1., 5.]]),
state_var=constant_op.constant(
[[[200., 0., 1.], [0., 2000., 0.], [1., 0., 40000.]]]),
observation=constant_op.constant([[1., 1., 3.]]),
observation_model=constant_op.constant(
[[[0.5, 0., 0.],
[0., 10., 0.],
[0., 0., 100.]]]),
observation_noise=linalg_ops.eye(3) / 10000.,
expected_state=numpy.array([[2., .1, .03]]),
expected_state_var=numpy.zeros([1, 3, 3]))
def test_predict_state_mean(self):
"""Compare state mean transitions with simple hand-computed values."""
with self.cached_session():
state = constant_op.constant([[4., 2.]])
state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
for _ in range(2):
state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
self.assertAllClose(
numpy.array([[2. * 3. + 4., # Slope * time + base
2.]]),
state.eval())
def test_predict_state_var(self):
"""Compare a variance transition with simple hand-computed values."""
with self.cached_session():
state_var = constant_op.constant([[[1., 0.], [0., 2.]]])
state_var = self.kalman_filter.predict_state_var(
state_var, self.transition_fn([1]), self.power_sum_fn([1]))
self.assertAllClose(
numpy.array([[[3.1, 2.0], [2.0, 2.2]]]),
state_var.eval())
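  # The expected covariance above can be reproduced by hand as
  # F * P * F^T + G * Q * G^T with F = STATE_TRANSITION, P = [[1, 0], [0, 2]],
  # G = STATE_NOISE_TRANSFORM (identity) and Q = STATE_TRANSITION_NOISE:
  # F * P * F^T = [[3, 2], [2, 2]], and adding Q gives [[3.1, 2.0], [2.0, 2.2]].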
def test_do_filter(self):
"""Tests do_filter.
Tests that correct values have high probability and incorrect values
have low probability when there is low uncertainty.
"""
with self.cached_session():
state = constant_op.constant([[4., 2.]])
state_var = constant_op.constant([[[0.0001, 0.], [0., 0.0001]]])
observation = constant_op.constant([[
.5 * (
4. # Base
+ 2.), # State transition
2.
]])
estimated_state = self.kalman_filter.predict_state_mean(
state, self.transition_fn([1]))
estimated_state_covariance = self.kalman_filter.predict_state_var(
state_var, self.transition_fn([1]), self.power_sum_fn([1]))
(predicted_observation,
predicted_observation_covariance) = (
self.kalman_filter.observed_from_state(
estimated_state, estimated_state_covariance,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE))
(_, _, first_log_prob) = self.kalman_filter.do_filter(
estimated_state=estimated_state,
estimated_state_covariance=estimated_state_covariance,
predicted_observation=predicted_observation,
predicted_observation_covariance=predicted_observation_covariance,
observation=observation,
observation_model=OBSERVATION_MODEL,
observation_noise=OBSERVATION_NOISE)
self.assertGreater(first_log_prob.eval()[0], numpy.log(0.99))
def test_predict_n_ahead_mean(self):
with self.cached_session():
original_state = constant_op.constant([[4., 2.]])
n = 5
iterative_state = original_state
for i in range(n):
self.assertAllClose(
iterative_state.eval(),
self.kalman_filter.predict_state_mean(
original_state,
self.transition_fn([i])).eval())
iterative_state = self.kalman_filter.predict_state_mean(
iterative_state,
self.transition_fn([1]))
def test_predict_n_ahead_var(self):
with self.cached_session():
original_var = constant_op.constant([[[2., 3.], [4., 5.]]])
n = 5
iterative_var = original_var
for i in range(n):
self.assertAllClose(
iterative_var.eval(),
self.kalman_filter.predict_state_var(
original_var,
self.transition_fn([i]),
self.power_sum_fn([i])).eval())
iterative_var = self.kalman_filter.predict_state_var(
iterative_var,
self.transition_fn([1]),
self.power_sum_fn([1]))
class KalmanFilterBatchTest(test.TestCase):
"""KalmanFilter tests with more than one element batches."""
def test_do_filter_batch(self):
"""Tests do_filter, in batch mode.
Tests that correct values have high probability and incorrect values
have low probability when there is low uncertainty.
"""
with self.cached_session():
state = constant_op.constant([[4., 2.], [5., 3.], [6., 4.]])
state_var = constant_op.constant(3 * [[[0.0001, 0.], [0., 0.0001]]])
observation = constant_op.constant([
[
.5 * (
4. # Base
+ 2.), # State transition
2.
],
[
.5 * (
5. # Base
+ 3.), # State transition
3.
],
[3.14, 2.71]
]) # Low probability observation
kf = kalman_filter.KalmanFilter()
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
estimated_state = kf.predict_state_mean(state, transition_fn(3*[1]))
estimated_state_covariance = kf.predict_state_var(
state_var, transition_fn(3*[1]), power_sum_fn(3*[1]))
observation_model = array_ops.tile(OBSERVATION_MODEL, [3, 1, 1])
(predicted_observation,
predicted_observation_covariance) = (
kf.observed_from_state(
estimated_state, estimated_state_covariance,
observation_model=observation_model,
observation_noise=OBSERVATION_NOISE))
(state, state_var, log_prob) = kf.do_filter(
estimated_state=estimated_state,
estimated_state_covariance=estimated_state_covariance,
predicted_observation=predicted_observation,
predicted_observation_covariance=predicted_observation_covariance,
observation=observation,
observation_model=observation_model,
observation_noise=OBSERVATION_NOISE)
first_log_prob, second_log_prob, third_log_prob = log_prob.eval()
self.assertGreater(first_log_prob.sum(), numpy.log(0.99))
self.assertGreater(second_log_prob.sum(), numpy.log(0.99))
self.assertLess(third_log_prob.sum(), numpy.log(0.01))
def test_predict_n_ahead_mean(self):
with self.cached_session():
kf = kalman_filter.KalmanFilter()
transition_fn, _ = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
original_state = constant_op.constant([[4., 2.], [3., 1.], [6., 2.]])
state0 = original_state
state1 = kf.predict_state_mean(state0, transition_fn(3 * [1]))
state2 = kf.predict_state_mean(state1, transition_fn(3 * [1]))
batch_eval = kf.predict_state_mean(
original_state, transition_fn([1, 0, 2])).eval()
self.assertAllClose(state0.eval()[1], batch_eval[1])
self.assertAllClose(state1.eval()[0], batch_eval[0])
self.assertAllClose(state2.eval()[2], batch_eval[2])
def test_predict_n_ahead_var(self):
with self.cached_session():
kf = kalman_filter.KalmanFilter()
transition_fn, power_sum_fn = _powers_and_sums_from_transition_matrix(
state_transition=STATE_TRANSITION,
state_transition_noise_covariance=STATE_TRANSITION_NOISE,
state_noise_transform=STATE_NOISE_TRANSFORM,
max_gap=2)
base_var = 2.0 * numpy.identity(2) + numpy.ones([2, 2])
original_var = constant_op.constant(
numpy.array(
[base_var, 2.0 * base_var, 3.0 * base_var], dtype=numpy.float32))
var0 = original_var
var1 = kf.predict_state_var(
var0, transition_fn(3 * [1]), power_sum_fn(3 * [1]))
var2 = kf.predict_state_var(
var1, transition_fn(3 * [1]), power_sum_fn(3 * [1]))
batch_eval = kf.predict_state_var(
original_var,
transition_fn([1, 0, 2]),
power_sum_fn([1, 0, 2])).eval()
self.assertAllClose(var0.eval()[1], batch_eval[1])
self.assertAllClose(var1.eval()[0], batch_eval[0])
self.assertAllClose(var2.eval()[2], batch_eval[2])
if __name__ == "__main__":
test.main()
| apache-2.0 |
digling/cddb | datasets/Shijing/__init__.py | 1 | 1328 | from clldutils.dsv import UnicodeReader
from sinopy import sinopy
def prepare(dataset):
with UnicodeReader(dataset.get_path('raw', 'O_shijing.tsv'), delimiter='\t') as reader:
data = list(reader)
header = [h.lower() for h in data[0]]
C = [('ID', 'CHARACTER', 'PINYIN', 'DOCULECT', 'SHIJING_NAME',
        'SHIJING_NUMBER', 'STANZA', 'VERSE', 'RHYME_CLASS', 'POSITION', 'TEXT',
'ORDER', 'SOURCE'
)]
for line in data[1:]:
tmp = dict([(a, b.strip()) for a, b in zip(header, line)])
poem = '·'.join((tmp['block'], tmp['chapter'], tmp['title']))
poem_number = tmp['number']
stanza = tmp['stanza']
verse = tmp['verse']
char = tmp['character']
# get the position
pos = str(tmp['raw_section'].index(char))
text = tmp['raw_section'] + tmp['endchar']
rhymeid = tmp['rhyme']
pinyin = sinopy.pinyin(char)
order = tmp['section_number']
if '?' in pinyin or sinopy.is_chinese(pinyin):
pinyin = ''
C += [[tmp['id'], char, pinyin, 'Old_Chinese', poem, poem_number, stanza,
verse, rhymeid, pos, text, order, 'Baxter1992']]
with open(dataset.get_path('characters.tsv'), 'w') as f:
for line in C:
f.write('\t'.join(line)+'\n')
| gpl-3.0 |
lakehanne/ensenso | ensenso_detect/manikin/train.py | 1 | 13545 | #!/usr/bin/env python
from __future__ import print_function
__author__ = 'Olalekan Ogunmolu'
#py utils
import os
import json, time
import argparse
from PIL import Image
from os import listdir
#GPU utils
# try: import setGPU
# except ImportError: pass
#torch utils
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data as data
import torchvision.models as models
from torch.autograd import Variable
import torchvision.transforms as transforms
from torch.nn.functional import softmax
#cv2/numpy utils
import cv2
import numpy as np
import numpy.random as npr
from random import shuffle
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
#myne utils
from model import ResNet, ResidualBlock, StackRegressive
from utils import get_bounding_boxes as bbox
parser = argparse.ArgumentParser(description='Process environmental variables')
parser.add_argument('--cuda', action='store_true', default=False, help="use cuda or not?")
parser.add_argument('--disp', type=bool, default=False, help="populate training samples in visdom")
parser.add_argument('--cmaxIter', type=int, default=50, help="classfier max iterations")
parser.add_argument('--num_iter', type=int, default=5)
parser.add_argument('--cbatchSize', type=int, default=1, help="classifier batch size")
parser.add_argument('--clr', type=float, default=1e-3, help="classifier learning rate")
parser.add_argument('--rnnLR', type=float, default=1e-2, help="regressor learning rate")
parser.add_argument('--classifier', type=str, default='')
parser.add_argument('--cepoch', type=int, default=500)
parser.add_argument('--verbose', type=bool, default=False)
args = parser.parse_args()
print(args)
torch.set_default_tensor_type('torch.DoubleTensor')
class LoadAndParse(object):
def __init__(self, args, true_path="raw/face_images/", fake_path="raw/face_neg/"):
'''
from:
https://github.com/pytorch/examples/blob/409a7262dcfa7906a92aeac25ee7d413baa88b67/imagenet/main.py#L108-L113
https://github.com/pytorch/examples/blob/409a7262dcfa7906a92aeac25ee7d413baa88b67/imagenet/main.py#L94-L95
'''
self.args = args
self.normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
self.preprocess = transforms.Compose([
transforms.Scale(40),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()
])
#provide path to true and fake images
self.true_path= true_path
pwd = os.getcwd()
face_neg = pwd + "/raw/" + "face_neg"
face_neg_1 = face_neg + '/' + 'neg_1'
face_neg_2 = face_neg + '/' + 'neg_2'
face_neg_3 = face_neg + '/' + 'neg_3'
face_neg_4 = face_neg + '/' + 'neg_4'
self.neg_dirs = [face_neg_1, face_neg_2, face_neg_3, face_neg_4]
self.fake_path= fake_path
self.real_images = []
self.true_images = None
self.faces_bbox = None
self.left_bbox = None
self.right_bbox = None
self.face_left_right = None
#define tensors to hold the images in memory
self.real_images, self.real_labels = [], []
# #load labels file
def loadLabelsFromJson(self):
labels_file = open('labels.json').read()
labels = json.loads(labels_file)
classes = labels # 0 = fake, 1=real
return classes
def loadImages(self, path):
# return array of images
imagesList = listdir(path)
loadedImages, faces_bbox = [], []
left_bbox, right_bbox = [], []
#bounding boxes
dict_combo = bbox()
faces_dict, left_dict, right_dict = dict_combo[0], dict_combo[1], dict_combo[2]
#load serially to ensure labels match
for image in imagesList:
img = Image.open(path + image)
face = faces_dict[image]
left = left_dict[image]
right = right_dict[image]
loadedImages.append(img)
faces_bbox.append(face)
left_bbox.append(left)
right_bbox.append(right)
return loadedImages, faces_bbox, left_bbox, right_bbox
def loadNegative(self):
negative_list = []
for dirs in self.neg_dirs:
for img_path in listdir(dirs):
base, ext = os.path.splitext(img_path)
if ext == '.jpg':
img = Image.open(dirs + '/' + img_path)
negative_list.append(img)
if self.args.verbose:
print('appending {} to {} list'.format(img_path, 'negative'))
return negative_list
# get images in the dir
def getImages(self):
#load images
self.true_images, self.faces_bbox, self.left_bbox,self.right_bbox = self.loadImages(self.true_path)
# concatenate to ease numpy issues
left_right = np.concatenate((self.left_bbox, self.right_bbox), axis=1)
faces_top, faces_bot = [], []
#arrange faces_bbox into a 1x8 array
for i in range(len(self.faces_bbox)):
faces_top.append(self.faces_bbox[i][0])
faces_bot.append(self.faces_bbox[i][1])
"""
First 4 cols represent top coordinates of face boxes,
Followed by lower coordinates of face_boxes
        Next 2 cols belong to left eye centers, last 2 cols are right eye coords
"""
self.face_left_right = np.concatenate((faces_top, faces_bot, left_right), axis=1)
#define labels
self.real_labels = [1]*len(self.true_images) #faces
imagesAll = self.true_images
# Now preprocess and create list for images
for imgs in imagesAll:
images_temp = self.preprocess(imgs).double()
#Take care of non-singleton dimensions in negative images
if not images_temp.size(0) == 3:
images_temp = images_temp.expand(3, images_temp.size(1), images_temp.size(2))
self.real_images.append(images_temp)
def partitionData(self):
# retrieve the images first
self.getImages()
#Now separate true and fake to training and testing sets
portion_train = 0.8
portion_test = 0.2
X_tr = int(portion_train * len(self.real_images))
X_te = int(portion_test * len(self.real_images))
# allocate tensors memory
train_X = torch.LongTensor(X_tr, self.real_images[0].size(0), self.real_images[0].size(1),
self.real_images[0].size(2))
test_X = torch.LongTensor(X_te, self.real_images[0].size(0), self.real_images[0].size(1),
self.real_images[0].size(2))
#Now copy tensors over
train_X = torch.stack(self.real_images[:X_tr], 0)
train_Y = torch.from_numpy(np.array(self.real_labels[:X_tr]))
# bounding box data
bbox = torch.from_numpy(self.face_left_right).double()
bbox = bbox.unsqueeze(0).expand(1, bbox.size(0), bbox.size(1))
#testing set
test_X = torch.stack(self.real_images[X_tr:], 0)
test_Y = torch.from_numpy(np.array(self.real_labels[X_tr:]))
#data loaders
train_dataset = data.TensorDataset(train_X, train_Y)
train_loader = data.DataLoader(train_dataset,
batch_size=self.args.cbatchSize, shuffle=True)
#test loader
test_dataset = data.TensorDataset(test_X, test_Y)
test_loader = data.DataLoader(test_dataset,
batch_size=self.args.cbatchSize, shuffle=True)
#bbox loader
bbox_dataset = data.TensorDataset(bbox, bbox)
bbox_loader = data.DataLoader(bbox_dataset, batch_size=self.args.cbatchSize, shuffle=True)
#check size of slices
if self.args.verbose:
print('train_X and train_Y sizes: {} | {}'.format(train_X.size(), train_Y.size()))
print('test_X and test_Y sizes: {} | {}'.format(test_X.size(), test_Y.size()))
return train_loader, test_loader, bbox_loader
def trainClassifierRegressor(train_loader, bbox_loader, args):
#cnn hyperparameters
clr = args.clr
batchSize = args.cbatchSize
maxIter = args.cmaxIter
#rnn hyperparameters
numLayers, seqLength = 2, 5
noutputs, rlr = 12, args.rnnLR
inputSize, nHidden = 128, [64, 32]
resnet = ResNet(ResidualBlock, [3, 3, 3])
    #extract feature cube of last layer and reshape it
res_classifier, feature_cube = None, None
if args.classifier: #use pre-trained classifier
resnet.load_state_dict(torch.load('models_new/' + args.classifier))
print('using pretrained model')
# #freeze optimized layers
for param in resnet.parameters():
param.requires_grad = False
#extract last convolution layer
        last_layer, feat_cube = resnet.layer3, []
for param in last_layer.parameters():
if param.dim() > 1: # extract only conv cubes
feat_cube.append(param)
lt = [] # this contains the soft max
for x in xrange(len(feat_cube)):
temp = softmax(feat_cube[x])
lt.append(temp)
#determine classification loss and clsfx_optimizer
clsfx_crit = nn.CrossEntropyLoss()
clsfx_optimizer = torch.optim.Adam(resnet.parameters(), clr)
last_layer, feat_cube = resnet.fc, []
    #accumulate all the features of the fc layer into a list
for param in last_layer.parameters():
feat_cube.append(param) #will contain weights and biases
regress_input, params_bias = feat_cube[0], feat_cube[1]
#reshape regress_input
regress_input = regress_input.view(-1)
X_tr = int(0.8*len(regress_input))
X_te = int(0.2*len(regress_input))
X = len(regress_input)
#reshape inputs
rtrain_X = torch.unsqueeze(regress_input, 0).expand(seqLength, 1, X)
rtest_X = torch.unsqueeze(regress_input[X_tr:], 0).expand(seqLength, 1, X_te+1)
# Get regressor model and predict bounding boxes
regressor = StackRegressive(inputSize=128, nHidden=[64,32,12], noutputs=12,\
batchSize=args.cbatchSize, cuda=args.cuda, numLayers=2)
targ_X = None
for _, targ_X in bbox_loader:
targ_X = targ_X
if(args.cuda):
rtrain_X = rtrain_X.cuda()
rtest_X = rtest_X.cuda()
targ_X = targ_X.cuda()
# regressor = regressor.cuda()
#define optimizer
rnn_optimizer = optim.SGD(regressor.parameters(), rlr)
# Train classifier
for epoch in range(maxIter): #run through the images maxIter times
for i, (train_X, train_Y) in enumerate(train_loader):
if(args.cuda):
train_X = train_X.cuda()
train_Y = train_Y.cuda()
resnet = resnet.cuda()
images = Variable(train_X)
labels = Variable(train_Y)
#rnn input
rtargets = Variable(targ_X[:,i:i+seqLength,:])
#reshape targets for inputs
rtargets = rtargets.view(seqLength, -1)
# Forward + Backward + Optimize
clsfx_optimizer.zero_grad()
rnn_optimizer.zero_grad()
#predict classifier outs and regressor outputs
outputs = resnet(images)
routputs = regressor(rtrain_X)
#compute loss
loss = clsfx_crit(outputs, labels)
rloss = regressor.criterion(routputs, rtargets)
#backward pass
loss.backward()
rloss.backward()
# step optimizer
clsfx_optimizer.step()
rnn_optimizer.step()
print ("Epoch [%d/%d], Iter [%d] cLoss: %.8f, rLoss: %.4f" %(epoch+1, maxIter, i+1,
loss.data[0], rloss.data[0]))
if epoch % 5 == 0 and epoch >0:
clr *= 1./epoch
rlr *= 1./epoch
clsfx_optimizer = optim.Adam(resnet.parameters(), clr)
rnn_optimizer = optim.SGD(regressor.parameters(), rlr)
torch.save(regressor.state_dict(), 'regressnet_' + str(args.cmaxIter) + '.pkl')
return resnet, regressor, rtest_X
def testClassifierRegressor(test_loader, resnet, regressnet, rtest_X, args):
correct, total = 0, 0
rtest_X = rtest_X.cuda() if args.cuda else rtest_X
for test_X, test_Y in test_loader:
test_X = test_X.cuda() if args.cuda else test_X
images = Variable(test_X)
labels = test_Y
#forward
outputs = resnet(images)
# routputs = regressnet(rtest_X)
#check predictions
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted.cpu() == labels).sum()
score = 100 * correct / total
print('Accuracy of the model on the test images: %d %%' %(score))
# Save the Models
torch.save(resnet.state_dict(), 'resnet_score='+ str(score) + '.pkl')
def main(args):
#obtain training and testing data
lnp = LoadAndParse(args)
train_loader, test_loader, bbox_loader = lnp.partitionData()
# train conv+rnn nets
net, reg, rtest_X = \
trainClassifierRegressor(train_loader, bbox_loader, args)
# test conv+rnn nets
testClassifierRegressor(test_loader, net, reg, rtest_X, args)
if __name__=='__main__':
main(args)
| mit |
duncanhawthorne/robot-robot | libs/pushbullet/pushbullet.py | 1 | 9154 | import json
import requests
from .device import Device
from .channel import Channel
from .contact import Contact
from .errors import PushbulletError, InvalidKeyError, PushError
from .filetype import get_file_type
class Pushbullet(object):
DEVICES_URL = "https://api.pushbullet.com/v2/devices"
CONTACTS_URL = "https://api.pushbullet.com/v2/contacts"
CHANNELS_URL = "https://api.pushbullet.com/v2/channels"
ME_URL = "https://api.pushbullet.com/v2/users/me"
PUSH_URL = "https://api.pushbullet.com/v2/pushes"
UPLOAD_REQUEST_URL = "https://api.pushbullet.com/v2/upload-request"
EPHEMERALS_URL = "https://api.pushbullet.com/v2/ephemerals"
def __init__(self, api_key):
self.api_key = api_key
self._json_header = {'Content-Type': 'application/json'}
self._session = requests.Session()
self._session.auth = (self.api_key, "")
self._session.headers.update(self._json_header)
self.refresh()
def _get_data(self, url):
resp = self._session.get(url)
if resp.status_code == 401:
raise InvalidKeyError()
return resp.json()
def _load_devices(self):
self.devices = []
resp_dict = self._get_data(self.DEVICES_URL)
device_list = resp_dict.get("devices", [])
for device_info in device_list:
if device_info.get("active"):
d = Device(self, device_info)
self.devices.append(d)
def _load_contacts(self):
self.contacts = []
resp_dict = self._get_data(self.CONTACTS_URL)
contacts_list = resp_dict.get("contacts", [])
for contact_info in contacts_list:
if contact_info.get("active"):
c = Contact(self, contact_info)
self.contacts.append(c)
def _load_user_info(self):
self.user_info = self._get_data(self.ME_URL)
def _load_channels(self):
self.channels = []
resp_dict = self._get_data(self.CHANNELS_URL)
channel_list = resp_dict.get("channels", [])
for channel_info in channel_list:
if channel_info.get("active"):
c = Channel(self, channel_info)
self.channels.append(c)
@staticmethod
def _recipient(device=None, contact=None, email=None, channel=None):
data = dict()
if device:
data["device_iden"] = device.device_iden
elif contact:
data["email"] = contact.email
elif email:
data["email"] = email
elif channel:
data["channel_tag"] = channel.channel_tag
return data
def new_device(self, nickname):
data = {"nickname": nickname, "type": "stream"}
r = self._session.post(self.DEVICES_URL, data=json.dumps(data))
if r.status_code == requests.codes.ok:
new_device = Device(self, r.json())
self.devices.append(new_device)
return new_device
else:
raise PushbulletError(r.text)
def new_contact(self, name, email):
data = {"name": name, "email": email}
r = self._session.post(self.CONTACTS_URL, data=json.dumps(data))
if r.status_code == requests.codes.ok:
new_contact = Contact(self, r.json())
self.contacts.append(new_contact)
return new_contact
else:
raise PushbulletError(r.text)
def edit_device(self, device, nickname=None, model=None, manufacturer=None):
data = {"nickname": nickname}
iden = device.device_iden
r = self._session.post("{}/{}".format(self.DEVICES_URL, iden), data=json.dumps(data))
if r.status_code == requests.codes.ok:
new_device = Device(self, r.json())
self.devices[self.devices.index(device)] = new_device
return new_device
else:
raise PushbulletError(r.text)
def edit_contact(self, contact, name):
data = {"name": name}
iden = contact.iden
r = self._session.post("{}/{}".format(self.CONTACTS_URL, iden), data=json.dumps(data))
if r.status_code == requests.codes.ok:
new_contact = Contact(self, r.json())
self.contacts[self.contacts.index(contact)] = new_contact
return new_contact
else:
raise PushbulletError(r.text)
def remove_device(self, device):
iden = device.device_iden
r = self._session.delete("{}/{}".format(self.DEVICES_URL, iden))
if r.status_code == requests.codes.ok:
self.devices.remove(device)
else:
raise PushbulletError(r.text)
def remove_contact(self, contact):
iden = contact.iden
r = self._session.delete("{}/{}".format(self.CONTACTS_URL, iden))
if r.status_code == requests.codes.ok:
self.contacts.remove(contact)
return True
else:
raise PushbulletError(r.text)
def get_pushes(self, modified_after=None, limit=None):
data = {"modified_after": modified_after, "limit": limit}
pushes_list = []
get_more_pushes = True
while get_more_pushes:
r = self._session.get(self.PUSH_URL, params=data)
if r.status_code != requests.codes.ok:
raise PushbulletError(r.text)
pushes_list += r.json().get("pushes")
if 'cursor' in r.json() and (not limit or len(pushes_list) < limit):
data['cursor'] = r.json()['cursor']
else:
get_more_pushes = False
return True, pushes_list
def dismiss_push(self, iden):
data = {"dismissed": True}
r = self._session.post("{}/{}".format(self.PUSH_URL, iden), data=json.dumps(data))
if r.status_code != requests.codes.ok:
raise PushbulletError(r.text)
def delete_push(self, iden):
r = self._session.delete("{}/{}".format(self.PUSH_URL, iden))
if r.status_code != requests.codes.ok:
raise PushbulletError(r.text)
def upload_file(self, f, file_name, file_type=None):
if not file_type:
file_type = get_file_type(f, file_name)
data = {"file_name": file_name, "file_type": file_type}
# Request url for file upload
r = self._session.post(self.UPLOAD_REQUEST_URL, data=json.dumps(data))
if r.status_code != requests.codes.ok:
raise PushbulletError(r.text)
upload_data = r.json().get("data")
file_url = r.json().get("file_url")
upload_url = r.json().get("upload_url")
upload = requests.post(upload_url, data=upload_data, files={"file": f})
return {"file_type": file_type, "file_url": file_url, "file_name": file_name}
def push_file(self, file_name, file_url, file_type, body=None, device=None, contact=None, email=None, channel=None):
data = {"type": "file", "file_type": file_type, "file_url": file_url, "file_name": file_name}
if body:
data["body"] = body
data.update(Pushbullet._recipient(device, contact, email, channel))
return self._push(data)
def push_note(self, title, body, device=None, contact=None, email=None):
data = {"type": "note", "title": title, "body": body}
data.update(Pushbullet._recipient(device, contact, email))
return self._push(data)
def push_address(self, name, address, device=None, contact=None, email=None):
data = {"type": "address", "name": name, "address": address}
data.update(Pushbullet._recipient(device, contact, email))
return self._push(data)
def push_list(self, title, items, device=None, contact=None, email=None):
data = {"type": "list", "title": title, "items": items}
data.update(Pushbullet._recipient(device, contact, email))
return self._push(data)
def push_link(self, title, url, body=None, device=None, contact=None, email=None):
data = {"type": "link", "title": title, "url": url, "body": body}
data.update(Pushbullet._recipient(device, contact, email))
return self._push(data)
def _push(self, data):
r = self._session.post(self.PUSH_URL, data=json.dumps(data))
if r.status_code == requests.codes.ok:
return r.json()
else:
raise PushError(r.text)
def push_sms(self, device, number, message):
data = {
"type": "push",
"push": {
"type": "messaging_extension_reply",
"package_name": "com.pushbullet.android",
"source_user_iden": self.user_info['iden'],
"target_device_iden": device.device_iden,
"conversation_iden": number,
"message": message
}
}
r = self._session.post(self.EPHEMERALS_URL, data=json.dumps(data))
if r.status_code == requests.codes.ok:
return r.json()
raise PushError(r.text)
def refresh(self):
self._load_devices()
self._load_contacts()
self._load_user_info()
self._load_channels()
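# Illustrative usage sketch (not part of the original module); the API key and texts are placeholders:
#   pb = Pushbullet("o.YOUR_ACCESS_TOKEN")   # refresh() runs on construction and loads devices/contacts/channels
#   push = pb.push_note("Hello", "Sent via pushbullet.py")
#   if pb.devices:
#       pb.push_link("Pushbullet API docs", "https://docs.pushbullet.com", device=pb.devices[0])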
| mit |
mlperf/training_results_v0.6 | Google/benchmarks/resnet/implementations/tpu-v3-2048-resnet/resnet/imagenet_input.py | 6 | 18034 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from collections import namedtuple
import functools
import math
import os
from absl import flags
import tensorflow as tf
import resnet_preprocessing
FLAGS = flags.FLAGS
def image_serving_input_fn():
"""Serving input fn for raw images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = resnet_preprocessing.preprocess_image(
image_bytes=image_bytes, is_training=False)
return image
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(image_buffer, label):
"""Build an Example proto for an example.
Args:
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
Returns:
Example proto
"""
example = tf.train.Example(
features=tf.train.Features(
feature={
'image/class/label': _int64_feature(label),
'image/encoded': _bytes_feature(image_buffer)
}))
return example
class ImageNetTFExampleInput(object):
"""Base class for ImageNet input_fn generator.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
num_cores: `int` for the number of TPU cores
"""
__metaclass__ = abc.ABCMeta
def __init__(self,
is_training,
use_bfloat16,
num_cores=8,
image_size=224,
prefetch_depth_auto_tune=False,
transpose_input=False):
self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
self.is_training = is_training
self.use_bfloat16 = use_bfloat16
self.num_cores = num_cores
self.transpose_input = transpose_input
self.image_size = image_size
self.prefetch_depth_auto_tune = prefetch_depth_auto_tune
def set_shapes(self, batch_size, images, labels):
"""Statically set the batch_size dimension."""
if self.transpose_input:
if FLAGS.train_batch_size // FLAGS.num_cores > 8:
shape = [None, None, None, batch_size]
else:
shape = [None, None, batch_size, None]
images.set_shape(images.get_shape().merge_with(tf.TensorShape(shape)))
images = tf.reshape(images, [-1])
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
return images, labels
def dataset_parser(self, value):
"""Parses an image and its label from a serialized ResNet-50 TFExample.
Args:
value: serialized string containing an ImageNet TFExample.
Returns:
Returns a tuple of (image, label) from the TFExample.
"""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
# Subtract one so that labels are in [0, 1000).
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
# Return all black images for padded data.
image = tf.cond(
label < 0, lambda: self._get_null_input(None), lambda: self. # pylint: disable=g-long-lambda
image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16))
return image, label
def dataset_parser_static(self, value):
"""Parses an image and its label from a serialized ResNet-50 TFExample.
This only decodes the image, which is prepared for caching.
Args:
value: serialized string containing an ImageNet TFExample.
Returns:
Returns a tuple of (image, label) from the TFExample.
"""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image_bytes = tf.io.decode_jpeg(image_bytes, 3)
# Subtract one so that labels are in [0, 1000).
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
return image_bytes, label
def dataset_parser_dynamic(self, image_bytes, label):
return self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16), label
def pad_dataset(self, dataset, num_hosts):
"""Pad the eval dataset so that eval can have the same batch size as training."""
num_dataset_per_shard = int(
math.ceil(FLAGS.num_eval_images / FLAGS.eval_batch_size) *
FLAGS.eval_batch_size / num_hosts)
example_string = 'dummy_string'
padded_example = _convert_to_example(
str.encode(example_string), -1).SerializeToString()
padded_dataset = tf.data.Dataset.from_tensors(
tf.constant(padded_example, dtype=tf.string))
padded_dataset = padded_dataset.repeat(num_dataset_per_shard)
dataset = dataset.concatenate(padded_dataset).take(num_dataset_per_shard)
return dataset
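# For example (illustrative numbers): with FLAGS.num_eval_images=50000, FLAGS.eval_batch_size=1024 and
# num_hosts=8, num_dataset_per_shard = ceil(50000/1024) * 1024 / 8 = 6272, so every host yields the same
# fixed number of examples and no eval batch is left partially filled.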
@abc.abstractmethod
def make_source_dataset(self, index, num_hosts):
"""Makes dataset of serialized TFExamples.
The returned dataset will contain `tf.string` tensors, but these strings are
serialized `TFExample` records that will be parsed by `dataset_parser`.
If self.is_training, the dataset should be infinite.
Args:
index: current host index.
num_hosts: total number of hosts.
Returns:
A `tf.data.Dataset` object.
"""
return
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# tf.contrib.tpu.RunConfig for details.
batch_size = params['batch_size']
# TODO(dehao): Replace the following with params['context'].current_host
if 'context' in params:
current_host = params['context'].current_input_fn_deployment()[1]
num_hosts = params['context'].num_hosts
else:
if 'dataset_index' in params:
current_host = params['dataset_index']
num_hosts = params['dataset_num_shards']
else:
current_host = 0
num_hosts = 1
dataset = self.make_source_dataset(current_host, num_hosts)
if not self.is_training:
# Padding for eval.
dataset = self.pad_dataset(dataset, num_hosts)
# Use the fused map-and-batch operation.
#
# For XLA, we must use fixed shapes. Because we repeat the source training
# dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
# batches without dropping any training examples.
#
# When evaluating, `drop_remainder=True` prevents accidentally evaluating
# the same image twice by dropping the final batch if it is less than a full
# batch size. As long as this validation is done with consistent batch size,
# exactly the same images will be used.
if self.is_training and FLAGS.cache_decoded_image:
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
self.dataset_parser_dynamic,
batch_size=batch_size,
num_parallel_batches=self.num_cores,
drop_remainder=True))
else:
dataset = dataset.apply(
tf.contrib.data.map_and_batch(
self.dataset_parser,
batch_size=batch_size,
num_parallel_batches=self.num_cores,
drop_remainder=True))
# Transpose for performance on TPU
if self.transpose_input:
if FLAGS.train_batch_size // FLAGS.num_cores > 8:
transpose_array = [1, 2, 3, 0]
else:
transpose_array = [1, 2, 0, 3]
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, transpose_array), labels
),
num_parallel_calls=self.num_cores)
# Assign static batch size dimension
dataset = dataset.map(functools.partial(self.set_shapes, batch_size))
# Prefetch overlaps in-feed with training
if self.prefetch_depth_auto_tune:
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
else:
dataset = dataset.prefetch(4)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_threading.private_threadpool_size = 48
dataset = dataset.with_options(options)
return dataset
class ImageNetInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a series of TFRecord files.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
"""
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
data_dir,
image_size=224,
num_parallel_calls=64,
num_cores=8,
prefetch_depth_auto_tune=False,
cache=False):
"""Create an input from TFRecord files.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null') or implicitly False
then construct a null pipeline, consisting of empty images
and blank labels.
image_size: size of input images
num_parallel_calls: concurrency level to use when reading data from disk.
num_cores: Number of prefetch threads
prefetch_depth_auto_tune: Auto-tuning prefetch depths in input pipeline
cache: if true, fill the dataset by repeating from its cache
"""
super(ImageNetInput, self).__init__(
is_training=is_training,
image_size=image_size,
use_bfloat16=use_bfloat16,
num_cores=num_cores,
prefetch_depth_auto_tune=prefetch_depth_auto_tune,
transpose_input=transpose_input)
self.data_dir = data_dir
if self.data_dir == 'null' or not self.data_dir:
self.data_dir = None
self.num_parallel_calls = num_parallel_calls
self.cache = cache
def _get_null_input(self, data):
"""Returns a null image (all black pixels).
Args:
data: element of a dataset, ignored in this method, since it produces
the same null image regardless of the element.
Returns:
a tensor representing a null image.
"""
del data # Unused since output is constant regardless of input
return tf.zeros([self.image_size, self.image_size, 3], tf.bfloat16
if self.use_bfloat16 else tf.float32)
def dataset_parser(self, value):
"""See base class."""
if not self.data_dir:
return value, tf.constant(0, tf.int32)
return super(ImageNetInput, self).dataset_parser(value)
def make_source_dataset(self, index, num_hosts):
"""See base class."""
if not self.data_dir:
tf.logging.info('Undefined data_dir implies null input')
return tf.data.Dataset.range(1).repeat().map(self._get_null_input)
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
# For multi-host training, we want each host to always process the same
# subset of files. Each host only sees a subset of the entire dataset,
# allowing us to cache larger datasets in memory.
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
dataset = dataset.shard(num_hosts, index)
if self.is_training and not self.cache:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
fetch_dataset, cycle_length=self.num_parallel_calls, sloppy=True))
if self.is_training and FLAGS.cache_decoded_image:
dataset = dataset.map(
self.dataset_parser_static,
num_parallel_calls=self.num_parallel_calls)
if self.cache:
dataset = dataset.cache()
if self.is_training:
# We shuffle only during training, and during training, we must produce an
# infinite dataset, so apply the fused shuffle_and_repeat optimized
# dataset transformation.
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(1024 * 16))
return dataset
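# Illustrative construction (not part of the original file; the bucket path and batch size are placeholders):
#   imagenet_train = ImageNetInput(is_training=True,
#                                  use_bfloat16=True,
#                                  transpose_input=True,
#                                  data_dir='gs://your-bucket/imagenet')
#   dataset = imagenet_train.input_fn({'batch_size': 1024})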
# Defines a selection of data from a Cloud Bigtable.
BigtableSelection = namedtuple('BigtableSelection',
['project',
'instance',
'table',
'prefix',
'column_family',
'column_qualifier'])
class ImageNetBigtableInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a Bigtable for training or evaluation.
"""
def __init__(self, is_training, use_bfloat16, transpose_input, selection):
"""Constructs an ImageNet input from a BigtableSelection.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
selection: a BigtableSelection specifying a part of a Bigtable.
"""
super(ImageNetBigtableInput, self).__init__(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input)
self.selection = selection
def make_source_dataset(self, index, num_hosts):
"""See base class."""
data = self.selection
client = tf.contrib.cloud.BigtableClient(data.project, data.instance)
table = client.table(data.table)
ds = table.parallel_scan_prefix(data.prefix,
columns=[(data.column_family,
data.column_qualifier)])
# The Bigtable datasets will have the shape (row_key, data)
ds_data = ds.map(lambda index, data: data)
if self.is_training:
ds_data = ds_data.repeat()
return ds_data
| apache-2.0 |
jaeilepp/eggie | mne/time_frequency/tests/test_csd.py | 1 | 7401 | import numpy as np
from nose.tools import (assert_raises, assert_equal, assert_almost_equal,
assert_true)
from numpy.testing import assert_array_equal
from os import path as op
import warnings
import mne
from mne.io import Raw
from mne.utils import sum_squared
from mne.time_frequency import compute_epochs_csd, induced_power
warnings.simplefilter('always')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_data():
# Read raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set picks
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
# Read several epochs
event_id, tmin, tmax = 1, -0.2, 0.5
events = mne.read_events(event_fname)[0:100]
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12))
# Create an epochs object with one epoch and one channel of artificial data
event_id, tmin, tmax = 1, 0.0, 1.0
epochs_sin = mne.Epochs(raw, events[0:5], event_id, tmin, tmax, proj=True,
picks=[0], baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13))
freq = 10
epochs_sin._data = np.sin(2 * np.pi * freq
* epochs_sin.times)[None, None, :]
return epochs, epochs_sin
def test_compute_epochs_csd():
"""Test computing cross-spectral density from epochs
"""
epochs, epochs_sin = _get_data()
# Check that wrong parameters are recognized
assert_raises(ValueError, compute_epochs_csd, epochs, mode='notamode')
assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=10)
assert_raises(ValueError, compute_epochs_csd, epochs, fmin=20, fmax=20.1)
assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0.15, tmax=0.1)
assert_raises(ValueError, compute_epochs_csd, epochs, tmin=0, tmax=10)
assert_raises(ValueError, compute_epochs_csd, epochs, tmin=10, tmax=11)
data_csd_mt = compute_epochs_csd(epochs, mode='multitaper', fmin=8,
fmax=12, tmin=0.04, tmax=0.15)
data_csd_fourier = compute_epochs_csd(epochs, mode='fourier', fmin=8,
fmax=12, tmin=0.04, tmax=0.15)
# Check shape of the CSD matrix
n_chan = len(data_csd_mt.ch_names)
assert_equal(data_csd_mt.data.shape, (n_chan, n_chan))
assert_equal(data_csd_fourier.data.shape, (n_chan, n_chan))
# Check if the CSD matrix is hermitian
assert_array_equal(np.tril(data_csd_mt.data).T.conj(),
np.triu(data_csd_mt.data))
assert_array_equal(np.tril(data_csd_fourier.data).T.conj(),
np.triu(data_csd_fourier.data))
# Computing induced power for comparison
epochs.crop(tmin=0.04, tmax=0.15)
with warnings.catch_warnings(record=True): # deprecation
warnings.simplefilter('always')
power, _ = induced_power(epochs.get_data(), epochs.info['sfreq'], [10],
n_cycles=0.6)
power = np.mean(power, 2)
# Maximum PSD should occur for specific channel
max_ch_power = power.argmax()
max_ch_mt = data_csd_mt.data.diagonal().argmax()
max_ch_fourier = data_csd_fourier.data.diagonal().argmax()
assert_equal(max_ch_mt, max_ch_power)
assert_equal(max_ch_fourier, max_ch_power)
# Maximum CSD should occur for specific channel
ch_csd_mt = [np.abs(data_csd_mt.data[max_ch_power][i])
if i != max_ch_power else 0 for i in range(n_chan)]
max_ch_csd_mt = np.argmax(ch_csd_mt)
ch_csd_fourier = [np.abs(data_csd_fourier.data[max_ch_power][i])
if i != max_ch_power else 0 for i in range(n_chan)]
max_ch_csd_fourier = np.argmax(ch_csd_fourier)
assert_equal(max_ch_csd_mt, max_ch_csd_fourier)
# Check a list of CSD matrices is returned for multiple frequencies within
# a given range when fsum=False
csd_fsum = compute_epochs_csd(epochs, mode='fourier', fmin=8, fmax=20,
fsum=True)
csds = compute_epochs_csd(epochs, mode='fourier', fmin=8, fmax=20,
fsum=False)
freqs = [csd.frequencies[0] for csd in csds]
csd_sum = np.zeros_like(csd_fsum.data)
for csd in csds:
csd_sum += csd.data
assert(len(csds) == 2)
assert(len(csd_fsum.frequencies) == 2)
assert_array_equal(csd_fsum.frequencies, freqs)
assert_array_equal(csd_fsum.data, csd_sum)
def test_compute_epochs_csd_on_artificial_data():
"""Test computing CSD on artificial data
"""
epochs, epochs_sin = _get_data()
sfreq = epochs_sin.info['sfreq']
# Computing signal power in the time domain
signal_power = sum_squared(epochs_sin._data)
signal_power_per_sample = signal_power / len(epochs_sin.times)
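# By Parseval's theorem the total squared signal in the time domain should match the integrated
# spectral power, which is what the Fourier and multitaper CSD estimates below are checked against.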
# Computing signal power in the frequency domain
data_csd_fourier = compute_epochs_csd(epochs_sin, mode='fourier')
data_csd_mt = compute_epochs_csd(epochs_sin, mode='multitaper')
fourier_power = np.abs(data_csd_fourier.data[0, 0]) * sfreq
mt_power = np.abs(data_csd_mt.data[0, 0]) * sfreq
assert_true(abs(fourier_power - signal_power) <= 0.5)
assert_true(abs(mt_power - signal_power) <= 1)
# Power per sample should not depend on time window length
for tmax in [0.2, 0.4, 0.6, 0.8]:
for add_n_fft in [30, 0, 30]:
t_mask = (epochs_sin.times >= 0) & (epochs_sin.times <= tmax)
n_samples = sum(t_mask)
n_fft = n_samples + add_n_fft
data_csd_fourier = compute_epochs_csd(epochs_sin, mode='fourier',
tmin=None, tmax=tmax, fmin=0,
fmax=np.inf, n_fft=n_fft)
fourier_power_per_sample = np.abs(data_csd_fourier.data[0, 0]) *\
sfreq / data_csd_fourier.n_fft
assert_true(abs(signal_power_per_sample -
fourier_power_per_sample) < 0.003)
# Power per sample should not depend on number of tapers
for n_tapers in [1, 2, 3, 5]:
for add_n_fft in [30, 0, 30]:
mt_bandwidth = sfreq / float(n_samples) * (n_tapers + 1)
data_csd_mt = compute_epochs_csd(epochs_sin, mode='multitaper',
tmin=None, tmax=tmax, fmin=0,
fmax=np.inf,
mt_bandwidth=mt_bandwidth,
n_fft=n_fft)
mt_power_per_sample = np.abs(data_csd_mt.data[0, 0]) *\
sfreq / data_csd_mt.n_fft
# The estimate of power gets worse for small time windows when
# more tapers are used
if n_tapers == 5 and tmax == 0.2:
delta = 0.05
else:
delta = 0.004
assert_true(abs(signal_power_per_sample - mt_power_per_sample)
< delta)
| bsd-2-clause |
samarthmed/emacs-config | .python-environments/default/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 1003 | 2281 | import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
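# Illustrative usage sketch (not part of the original module); the field name and boundary are made up:
#   body, content_type = encode_multipart_formdata({'field': 'value'}, boundary='exampleboundary')
#   # content_type == 'multipart/form-data; boundary=exampleboundary'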
| gpl-2.0 |
GripQA/commit-entropy | lib/commit_entropy/settings.py | 1 | 1084 | #!/usr/bin/env python
# encoding: utf-8
#------------------------------------------------------------------------------
# Application Name
#------------------------------------------------------------------------------
app_name = 'entropy'
#------------------------------------------------------------------------------
# Version Number
#------------------------------------------------------------------------------
major_version = "0"
minor_version = "2"
patch_version = "0"
#------------------------------------------------------------------------------
# Debug Flag (switch to False for production release code)
#------------------------------------------------------------------------------
debug = True
#------------------------------------------------------------------------------
# Usage String
#------------------------------------------------------------------------------
usage = ''
#------------------------------------------------------------------------------
# Help String
#------------------------------------------------------------------------------
help = ''
| apache-2.0 |
sacnayak/ssnayak-viz | lib/httplib2/__init__.py | 106 | 71120 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.9.2"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = [
'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError',
'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
try:
# Users can optionally provide a module that tells us where the CA_CERTS
# are located.
import ca_certs_locater
CA_CERTS = ca_certs_locater.get()
except ImportError:
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
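# Illustrative example (not part of the original module): a header value of
#   'Basic realm="example"'
# under the 'www-authenticate' key parses to {'basic': {'realm': 'example'}}, i.e. one inner dict per auth scheme.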
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
We will never return a stale document as
fresh as a design decision, and thus the non-implementation
of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
# Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
Over-ride this in sub-classes if necessary.
Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'])
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer (t.broyer@ltgt.net)"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
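# Illustrative sketch, not part of the original httplib2 source: how FileCache
# can be exercised on its own. The directory name and cache key below are
# hypothetical; Http drives this class itself when given a directory name.
def _example_filecache_usage():
    cache = FileCache(".cache_example")            # directory is created if missing
    cache.set("http://example.org/", "cached response bytes")
    assert cache.get("http://example.org/") == "cached response bytes"
    cache.delete("http://example.org/")
    assert cache.get("http://example.org/") is None  # a miss returns None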
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=True, proxy_user=None, proxy_pass=None):
"""
Args:
proxy_type: The type of proxy server. This must be set to one of
socks.PROXY_TYPE_XXX constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
proxy_host: The hostname or IP address of the proxy server.
proxy_port: The port that the proxy server is running on.
proxy_rdns: If True (default), DNS queries will not be performed
locally, and instead, handed to the proxy to resolve. This is useful
if the network does not allow resolution of non-local names. In
httplib2 0.9 and earlier, this defaulted to False.
proxy_user: The username used to authenticate with the proxy server.
proxy_pass: The password used to authenticate with the proxy server.
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
def proxy_info_from_environment(method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = proxy_info_from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
def proxy_info_from_url(url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return ProxyInfo(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
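# Illustrative sketch, not part of the original httplib2 source: parsing a proxy
# URL of the kind found in an http_proxy environment variable, then marking a
# domain as bypassed. Host names and credentials below are hypothetical.
def _example_proxy_info_usage():
    pi = proxy_info_from_url("http://user:secret@proxy.example.org:3128")
    assert pi.proxy_host == "proxy.example.org" and pi.proxy_port == 3128
    assert pi.isgood()
    pi.bypass_hosts = ['.internal.example.org']
    assert not pi.applies_to("db.internal.example.org")  # matched bypass suffix
    assert pi.applies_to("www.example.com")              # goes through the proxy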
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in address_info:
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
                self.sock = _ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
except (ImportError, AttributeError):
from google3.apphosting.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google3.apphosting.api.urlfetch import fetch
from google3.apphosting.api.urlfetch import InvalidURLError
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(url, payload=None, method="GET", headers={},
allow_truncated=False, follow_redirects=True,
deadline=None):
if deadline is None:
deadline = socket.getdefaulttimeout() or 5
return fetch(url, payload=payload, method=method, headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects, deadline=deadline,
validate_certificate=validate_certificate)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
"""Use httplib on App Engine, but compensate for its weirdness.
The parameters key_file, cert_file, proxy_info, ca_certs, and
disable_ssl_certificate_validation are all dropped on the ground.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPConnection.__init__(self, host, port=port,
strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict,
timeout=timeout)
self._fetch = _new_fixed_fetch(
not disable_ssl_certificate_validation)
    # Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except (ImportError, AttributeError):
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=proxy_info_from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
            proxy_info_from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if 'request' in state_dict:
del state_dict['request']
if 'connections' in state_dict:
del state_dict['connections']
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
i = 0
seen_bad_status_line = False
while i < RETRIES:
i += 1
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err in (errno.ENETUNREACH, errno.EADDRNOTAVAIL) and i < RETRIES:
continue # retry on potentially transient socket errors
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if hasattr(conn, 'sock') and conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except httplib.BadStatusLine:
# If we get a BadStatusLine on the first try then that means
# the connection just went stale, so retry regardless of the
# number of RETRIES set.
if not seen_bad_status_line and i == 1:
i = 0
seen_bad_status_line = True
conn.close()
conn.connect()
continue
else:
conn.close()
raise
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(
location, method=redirect_method,
body=body, headers=headers,
redirections=redirections - 1)
response.previous = old_response
else:
                    raise RedirectLimit("Redirected more times than redirection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri.encode('utf-8')
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
                        raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
(response, new_content) = self.request(
info['-x-permanent-redirect-url'], method='GET',
headers=headers, redirections=redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response({
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response({
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
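# Illustrative sketch, not part of the original httplib2 source: a minimal GET
# through Http, assuming the URL below is reachable. The cache directory name
# and URL are hypothetical.
def _example_http_get():
    h = Http(".cache_example")                     # cache responses on disk
    resp, content = h.request("http://example.org/", "GET")
    # resp is a Response: a dict of lower-cased headers plus .status/.reason;
    # content is the (already decompressed) entity body as a string.
    return resp.status, resp.get('content-type'), len(content)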
| apache-2.0 |
berserkerbernhard/Lidskjalv | code/networkmonitor/modules/lidskjalvservicetools.py | 1 | 1272 | import time
from modules.serviceutilities.http import HTTP
from modules.serviceutilities.rdp import RDP
from modules.serviceutilities.ssh import SSH
class ServiceTools():
def __init__(self):
self.http = HTTP()
self.rdp = RDP()
self.ssh = SSH()
def parse_tag(self, tag, site, host):
print("tag: %s" % tag)
print("site: %s" % site)
print("host: %s" % host)
time.sleep(3)
port = 0
if "TCP" in tag:
try:
port = int(tag.split("TCP")[1])
            except ValueError:
port = 0
print("TCP port:", port)
if "UDP" in tag:
try:
port = int(tag.split("UDP")[1])
            except ValueError:
port = 0
print("UDP port:", port)
if port:
print("Finding associated app.")
if port == 80:
self.http.show_http_menu(site, host, port)
time.sleep(4)
if port == 3389:
self.rdp.show_rdp_menu(site, host, port)
time.sleep(4)
if port == 22:
self.ssh.show_menu(site, host)
# if tag == "23":
# print("Telnet menu")
# self.telnet_menu(site, host)
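# Illustrative sketch, not part of the original module: how parse_tag is meant
# to be driven. The site and host values are hypothetical placeholders; in
# Lidskjalv they come from the network monitor's inventory.
def _example_parse_tag():
    tools = ServiceTools()
    # "TCP80" opens the HTTP menu, "TCP3389" the RDP menu, "TCP22" the SSH menu;
    # other ports are only parsed and printed.
    tools.parse_tag("TCP80", "example-site", "192.0.2.10")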
| gpl-3.0 |
apporc/neutron | neutron/tests/unit/plugins/ml2/test_security_group.py | 5 | 6979 | # Copyright (c) 2013 OpenStack Foundation
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import mock
from neutron.common import constants as const
from neutron import context
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests import tools
from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.extensions import test_securitygroup as test_sg
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
self.useFixture(tools.AttributeMapMemento())
super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
def tearDown(self):
super(Ml2SecurityGroupsTestCase, self).tearDown()
class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
def setUp(self):
super(TestMl2SecurityGroups, self).setUp()
self.ctx = context.get_admin_context()
plugin = manager.NeutronManager.get_plugin()
plugin.start_rpc_listeners()
def _make_port_with_new_sec_group(self, net_id):
sg = self._make_security_group(self.fmt, 'name', 'desc')
port = self._make_port(
self.fmt, net_id, security_groups=[sg['security_group']['id']])
return port['port']
def _make_port_without_sec_group(self, net_id):
port = self._make_port(
self.fmt, net_id, security_groups=[])
return port['port']
def test_security_group_get_ports_from_devices(self):
with self.network() as n:
with self.subnet(n):
orig_ports = [
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_without_sec_group(n['network']['id'])
]
plugin = manager.NeutronManager.get_plugin()
# should match full ID and starting chars
ports = plugin.get_ports_from_devices(self.ctx,
[orig_ports[0]['id'], orig_ports[1]['id'][0:8],
orig_ports[2]['id']])
self.assertEqual(len(orig_ports), len(ports))
for port_dict in ports:
p = next(p for p in orig_ports
if p['id'] == port_dict['id'])
self.assertEqual(p['id'], port_dict['id'])
self.assertEqual(p['security_groups'],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([p['fixed_ips'][0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', p['id'])
def test_security_group_get_ports_from_devices_with_bad_id(self):
plugin = manager.NeutronManager.get_plugin()
ports = plugin.get_ports_from_devices(self.ctx, ['bad_device_id'])
self.assertFalse(ports)
def test_security_group_no_db_calls_with_no_ports(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch(
'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port'
) as get_mock:
self.assertFalse(plugin.get_ports_from_devices(self.ctx, []))
self.assertFalse(get_mock.called)
def test_large_port_count_broken_into_parts(self):
plugin = manager.NeutronManager.get_plugin()
max_ports_per_query = 5
ports_to_query = 73
for max_ports_per_query in (1, 2, 5, 7, 9, 31):
with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY',
new=max_ports_per_query),\
mock.patch(
'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port',
return_value={}) as get_mock:
plugin.get_ports_from_devices(self.ctx,
['%s%s' % (const.TAP_DEVICE_PREFIX, i)
for i in range(ports_to_query)])
all_call_args = [x[1][1] for x in get_mock.mock_calls]
last_call_args = all_call_args.pop()
# all but last should be getting MAX_PORTS_PER_QUERY ports
self.assertTrue(
all(map(lambda x: len(x) == max_ports_per_query,
all_call_args))
)
remaining = ports_to_query % max_ports_per_query
if remaining:
self.assertEqual(remaining, len(last_call_args))
# should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls
self.assertEqual(
math.ceil(ports_to_query / float(max_ports_per_query)),
get_mock.call_count
)
def test_full_uuids_skip_port_id_lookup(self):
plugin = manager.NeutronManager.get_plugin()
# when full UUIDs are provided, the _or statement should only
# have one matching 'IN' criteria for all of the IDs
with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\
mock.patch('sqlalchemy.orm.Session.query') as qmock:
fmock = qmock.return_value.outerjoin.return_value.filter
# return no ports to exit the method early since we are mocking
# the query
fmock.return_value = []
plugin.get_ports_from_devices(self.ctx,
[test_base._uuid(),
test_base._uuid()])
# the or_ function should only have one argument
or_mock.assert_called_once_with(mock.ANY)
class TestMl2SGServerRpcCallBack(
Ml2SecurityGroupsTestCase,
test_sg_rpc.SGServerRpcCallBackTestCase):
pass
| apache-2.0 |
GRArmstrong/invenio-inspire-ops | modules/webcomment/lib/webcomment_unit_tests.py | 16 | 2189 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import unittest
from invenio.webcomment import calculate_start_date
from invenio.testutils import make_test_suite, run_test_suite
class TestCalculateStartDate(unittest.TestCase):
"""Test for calculating previous date."""
def test_previous_year(self):
"""webcomment - calculate_start_date, values bigger than one year"""
self.assert_(int(calculate_start_date('1y')[:4]) > 2007)
self.assert_(int(calculate_start_date('13m')[:4]) > 2007)
self.assert_(int(calculate_start_date('55w')[:4]) > 2007)
self.assert_(int(calculate_start_date('370d')[:4]) > 2007)
def test_with_random_values(self):
"""webcomment - calculate_start_date, various random values"""
self.assert_(calculate_start_date('1d') > '2009-07-08 14:39:39')
self.assert_(calculate_start_date('2w') > '2009-07-08 14:39:39')
self.assert_(calculate_start_date('2w') > '2009-06-25 14:46:31')
self.assert_(calculate_start_date('2y') > '2007-07-09 14:50:43')
self.assert_(calculate_start_date('6m') > '2009-01-09 14:51:10')
self.assert_(calculate_start_date('77d') > '2009-04-23 14:51:31')
self.assert_(calculate_start_date('20d') > '2009-06-19 14:51:55')
TEST_SUITE = make_test_suite(TestCalculateStartDate)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
GRArmstrong/invenio-inspire-ops | modules/webstyle/lib/goto_plugins/goto_plugin_cern_hr_documents.py | 8 | 7087 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This implements a redirection for CERN HR Documents in the CERN Document
Server. It's useful as a reference on how goto plugins could be implemented.
"""
import time
import re
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibdocfile import BibRecDocs
def make_cern_ssr_docname(lang, edition, modif=0):
if modif:
return "CERN_SSR_%(lang)s_ed%(edition)02d_modif%(modif)02d" % {
'lang': lang,
'edition': edition,
'modif': modif
}
else:
return "CERN_SSR_%(lang)s_ed%(edition)02d" % {
'lang': lang,
'edition': edition,
}
_RE_REVISION = re.compile(r"rev(\d\d)")
def _get_revision(docname):
"""
Return the revision in a docname. E.g.:
CERN_Circ_Op_en_02_rev01_Implementation measures.pdf -> 1
CERN_Circ_Op_en_02_rev02_Implementation measures.PDF -> 2
"""
g = _RE_REVISION.search(docname)
if g:
return int(g.group(1))
return 0
def _register_document(documents, docname, key):
"""
Register in the documents mapping the docname to key, but only if the
docname has a revision higher of the docname already associated with a key
"""
if key in documents:
if _get_revision(docname) > _get_revision(documents[key]):
documents[key] = docname
else:
documents[key] = docname
def goto(type, document='', number=0, lang='en', modif=0):
today = time.strftime('%Y-%m-%d')
if type == 'SSR':
## We would like a CERN Staff Rules and Regulations
recids = perform_request_search(cc='Staff Rules and Regulations', f="925__a:1996-01-01->%s 925__b:%s->9999-99-99" % (today, today))
recid = recids[-1]
reportnumber = get_fieldvalues(recid, '037__a')[0]
edition = int(reportnumber[-2:]) ## e.g. CERN-STAFF-RULES-ED08
return BibRecDocs(recid).get_bibdoc(make_cern_ssr_docname(lang, edition, modif)).get_file('.pdf').get_url()
elif type == "OPER-CIRC":
recids = perform_request_search(cc="Operational Circulars", p="reportnumber=\"CERN-OPER-CIRC-%s-*\"" % number, sf="925__a")
recid = recids[-1]
documents = {}
bibrecdocs = BibRecDocs(recid)
for docname in bibrecdocs.get_bibdoc_names():
ldocname = docname.lower()
if 'implementation' in ldocname:
_register_document(documents, docname, 'implementation_en')
elif 'application' in ldocname:
_register_document(documents, docname, 'implementation_fr')
elif 'archiving' in ldocname:
_register_document(documents, docname, 'archiving_en')
elif 'archivage' in ldocname:
_register_document(documents, docname, 'archiving_fr')
elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
_register_document(documents, docname, 'annex_fr')
elif 'annexes_en' in ldocname or 'annex' in ldocname:
_register_document(documents, docname, 'annex_en')
elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
_register_document(documents, docname, 'en')
elif '_fr_' in ldocname:
_register_document(documents, docname, 'fr')
return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
elif type == 'ADMIN-CIRC':
recids = perform_request_search(cc="Administrative Circulars", p="reportnumber=\"CERN-ADMIN-CIRC-%s-*\"" % number, sf="925__a")
recid = recids[-1]
documents = {}
bibrecdocs = BibRecDocs(recid)
for docname in bibrecdocs.get_bibdoc_names():
ldocname = docname.lower()
if 'implementation' in ldocname:
_register_document(documents, docname, 'implementation-en')
elif 'application' in ldocname:
_register_document(documents, docname, 'implementation-fr')
elif 'archiving' in ldocname:
_register_document(documents, docname, 'archiving-en')
elif 'archivage' in ldocname:
_register_document(documents, docname, 'archiving-fr')
elif 'annexe' in ldocname or 'annexes_fr' in ldocname:
_register_document(documents, docname, 'annex-fr')
elif 'annexes_en' in ldocname or 'annex' in ldocname:
_register_document(documents, docname, 'annex-en')
elif '_en_' in ldocname or '_eng_' in ldocname or '_angl_' in ldocname:
_register_document(documents, docname, 'en')
elif '_fr_' in ldocname:
_register_document(documents, docname, 'fr')
return bibrecdocs.get_bibdoc(documents[document]).get_file('.pdf').get_url()
def register_hr_redirections():
"""
Run this only once
"""
from invenio.goto_engine import register_redirection
plugin = 'goto_plugin_cern_hr_documents'
## Staff rules and regulations
for modif in range(1, 20):
for lang in ('en', 'fr'):
register_redirection('hr-srr-modif%02d-%s' % (modif, lang), plugin, parameters={'type': 'SSR', 'lang': lang, 'modif': modif})
for lang in ('en', 'fr'):
register_redirection('hr-srr-%s' % lang, plugin, parameters={'type': 'SSR', 'lang': lang, 'modif': 0})
## Operational Circulars
for number in range(1, 10):
for lang in ('en', 'fr'):
register_redirection('hr-oper-circ-%s-%s' % (number, lang), plugin, parameters={'type': 'OPER-CIRC', 'document': lang, 'number': number})
for number, special_document in ((2, 'implementation'), (2, 'annex'), (3, 'archiving'), (3, 'annex')):
for lang in ('en', 'fr'):
register_redirection('hr-circ-%s-%s-%s' % (number, special_document, lang), plugin, parameters={'type': 'OPER-CIRC', 'document': '%s-%s' % (special_document, lang), 'number': number})
## Administrative Circulars:
for number in range(1, 32):
for lang in ('en', 'fr'):
register_redirection('hr-admin-circ-%s-%s' % (number, lang), plugin, parameters={'type': 'ADMIN-CIRC', 'document': lang, 'number': number})
if __name__ == "__main__":
register_hr_redirections()
| gpl-2.0 |
Passw/gn_GFW | buildtools/third_party/libc++/trunk/utils/not.py | 13 | 1142 | #===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
"""not.py is a utility for inverting the return code of commands.
It acts similar to llvm/utils/not.
ex: python /path/to/not.py ' echo hello
echo $? // (prints 1)
"""
import distutils.spawn
import subprocess
import sys
def main():
argv = list(sys.argv)
del argv[0]
if len(argv) > 0 and argv[0] == '--crash':
del argv[0]
expectCrash = True
else:
expectCrash = False
if len(argv) == 0:
return 1
prog = distutils.spawn.find_executable(argv[0])
if prog is None:
sys.stderr.write('Failed to find program %s' % argv[0])
return 1
rc = subprocess.call(argv)
if rc < 0:
return 0 if expectCrash else 1
if expectCrash:
return 1
return rc == 0
if __name__ == '__main__':
exit(main())
| gpl-3.0 |
Learningtribes/edx-platform | common/lib/xmodule/xmodule/tests/test_utils_escape_html_characters.py | 116 | 1632 | """Tests for methods defined in util/misc.py"""
from xmodule.util.misc import escape_html_characters
from unittest import TestCase
class UtilHtmlEscapeTests(TestCase):
"""
Tests for methods exposed in util/misc
"""
final_content = " This is a paragraph. "
def test_escape_html_comments(self):
html_content = """
<!--This is a comment. Comments are not displayed in the browser-->
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
def test_escape_cdata_comments(self):
html_content = """
<![CDATA[
function matchwo(a,b)
{
if (a < b && a < 0) then
{
return 1;
}
else
{
return 0;
}
}
]]>
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
def test_escape_non_breaking_space(self):
html_content = """
<![CDATA[
function matchwo(a,b)
{
if (a < b && a < 0) then
{
return 1;
}
else
{
return 0;
}
}
]]>
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
| agpl-3.0 |
bjornlevi/5thpower | nefndaralit/env/lib/python3.6/site-packages/urllib3/_collections.py | 58 | 10746 | from __future__ import absolute_import
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
from collections import OrderedDict
from .exceptions import InvalidHeader
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called. Callback which will get called
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
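# Illustrative sketch, not part of the original urllib3 source: eviction order
# and the dispose callback of RecentlyUsedContainer. Keys and values below are
# hypothetical.
def _example_recently_used_container():
    disposed = []
    rc = RecentlyUsedContainer(maxsize=2, dispose_func=disposed.append)
    rc['a'] = 1
    rc['b'] = 2
    rc['a']              # touching 'a' makes it the most recently used entry
    rc['c'] = 3          # exceeds maxsize, so 'b' (least recently used) is evicted
    assert disposed == [2]
    assert sorted(rc.keys()) == ['a', 'c']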
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = OrderedDict()
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = [key, val]
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
# Using the MutableMapping function directly fails due to the private marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = [key, val]
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
vals.append(val)
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key, default=__marker):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
if default is self.__marker:
return []
return default
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
# Backwards compatibility for http.cookiejar
get_all = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
obs_fold_continued_leaders = (' ', '\t')
headers = []
for line in message.headers:
if line.startswith(obs_fold_continued_leaders):
if not headers:
# We received a header line that starts with OWS as described
# in RFC-7230 S3.2.4. This indicates a multiline header, but
# there exists no previous header to which we can attach it.
raise InvalidHeader(
'Header continuation with no previous header: %s' % line
)
else:
key, value = headers[-1]
headers[-1] = (key, value + ' ' + line.strip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
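# --- Hedged usage sketch (editor's addition, not part of urllib3) ---
# Illustrates the case-insensitive, multi-valued behaviour implemented above.
#   h = HTTPHeaderDict()
#   h['Content-Type'] = 'text/html'
#   h.add('Set-Cookie', 'a=1')
#   h.add('set-cookie', 'b=2')      # same field, different casing
#   h['SET-COOKIE']                 # -> 'a=1, b=2' (values merged on lookup)
#   h.getlist('Set-Cookie')         # -> ['a=1', 'b=2']
#   list(h.iteritems())             # one (name, value) pair per header line,
#                                   # keeping the originally supplied casing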
| mit |
neuesleben123/rt-thread | tools/vs.py | 19 | 3710 | import os
import sys
import string
import building
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def VS_AddGroup(ProjectFiles, parent, name, files, project_path):
Filter = SubElement(parent, 'Filter')
    Filter.set('Name', name)  # set the group name on the Filter node
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
File = SubElement(Filter, 'File')
File.set('RelativePath', path.decode(fs_encoding))
def VS_AddHeadFilesGroup(program, elem, project_path):
building.source_ext = []
building.source_ext = ["h"]
for item in program:
building.walk_children(item)
building.source_list.sort()
# print building.source_list
for f in building.source_list:
path = _make_path_relative(project_path, f)
File = SubElement(elem, 'File')
File.set('RelativePath', path.decode(fs_encoding))
def VSProject(target, script, program):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template.vcproj')
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8"?>\r\n')
ProjectFiles = []
# add "*.c" files group
for elem in tree.iter(tag='Filter'):
if elem.attrib['Name'] == 'Source Files':
#print elem.tag, elem.attrib
break
for group in script:
group_xml = VS_AddGroup(ProjectFiles, elem, group['name'], group['src'], project_path)
# add "*.h" files group
for elem in tree.iter(tag='Filter'):
if elem.attrib['Name'] == 'Header Files':
break
VS_AddHeadFilesGroup(program, elem, project_path)
    # write header include paths
if building.Env.has_key('CPPPATH'):
cpp_path = building.Env['CPPPATH']
paths = set()
for path in cpp_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
cpp_path = ';'.join(paths)
# write include path, definitions
for elem in tree.iter(tag='Tool'):
if elem.attrib['Name'] == 'VCCLCompilerTool':
#print elem.tag, elem.attrib
break
elem.set('AdditionalIncludeDirectories', cpp_path)
    # write preprocessor definition flags
if building.Env.has_key('CPPDEFINES'):
definitions = ';'.join(building.Env['CPPDEFINES'])
elem.set('PreprocessorDefinitions', definitions)
# write link flags
# write lib dependence
if building.Env.has_key('LIBS'):
for elem in tree.iter(tag='Tool'):
if elem.attrib['Name'] == 'VCLinkerTool':
break
libs_with_extention = [i+'.lib' for i in building.Env['LIBS']]
libs = ' '.join(libs_with_extention)
elem.set('AdditionalDependencies', libs)
# write lib include path
if building.Env.has_key('LIBPATH'):
lib_path = building.Env['LIBPATH']
paths = set()
for path in lib_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
lib_paths = ';'.join(paths)
elem.set('AdditionalLibraryDirectories', lib_paths)
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
| gpl-2.0 |
jingweicao/SampleCode | python/flask/skills_portal_api/api_v1/helloworld.py | 1 | 3162 | from . import api
from flask import request, jsonify
from flask_restful import reqparse
from webargs import fields
from webargs.flaskparser import use_args
import time
# This is a simple endpoint with no parameters
@api.route('/',methods=['GET'])
def index():
return "Hello World 3! " + time.strftime("%d/%m/%Y %H:%M:%S")
multiply_args = {
'leftval': fields.Int(required=True),
'rightval': fields.Int(required=True)
}
#http://localhost:5000/api/v1/multiply-webargs-validate
#This is a multiply end point using webargs for parameter validation
@api.route('/multiply-webargs-validate',methods=['GET'])
@use_args(multiply_args)
def multiplyWebargsValidate(args):
    # If they pass parameter validation, output the result
leftVal = 0
rightVal = 0
leftVal = args['leftval']
rightVal = args['rightval']
return "{0} x {1} = {2}".format(leftVal, rightVal, leftVal * rightVal)
#http://localhost:5000/api/v1/multiply-reqparse-validate
# This is an endpoint with 2 parameters using reqparse for parameter validation
@api.route('/multiply-reqparse-validate',methods=['GET'])
def multiplyReparseValidate():
    # If leftval or rightval is not an integer, output an error message
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('leftval',type=int,required=True)
parser.add_argument('rightval',type=int,required=True)
args = parser.parse_args()
    # If they pass parameter validation, output the result
    leftVal = 0
    rightVal = 0
leftVal = args['leftval']
rightVal = args['rightval']
return "{0} x {1} = {2}".format(leftVal, rightVal, leftVal * rightVal)
# Return a JSON response containing a list
@api.route('/fuzzy-matching-results',methods=['GET'])
def fuzzyMatchingResults():
#Debug a Flask app
#import pdb; pdb.set_trace()
#Whatever happens, return a fake JSON for now
returnVal = {
"Request": {
"ParameterList":[
{
"ParameterName": "InputSkill",
"ParameterValue": "3D"
}
]
},
"SkillList": [
{
"SkillName": "3D Printing",
"MatchScore": 0.78,
"DataVersion": "US-33"
},
{
"SkillName": "3D Modeling",
"MatchScore": 0.96,
"DataVersion": "US-34-RC2"
},
{
"SkillName": "3D Design",
"MatchScore": 0.30,
"DataVersion": "US-334"
}
],
"Summary": {
"TotalMatch": 3
}
}
    # import pdb; pdb.set_trace()  # debug breakpoint disabled so the endpoint can return
return jsonify(returnVal)
@api.errorhandler(422)
def handle_error(err):
# webargs attaches additional metadata, including
# validation errors, to the `data` attribute
return jsonify({
'message': str(err.data['messages'])
}), 422
@api.errorhandler(400)
def handle_error(err):
# webargs attaches additional metadata, including
# validation errors, to the `data` attribute
return jsonify({
'message': str(err.data['message'])
}), 400 | mit |
FlintHill/SUAS-Competition | env/lib/python2.7/site-packages/setuptools/command/py36compat.py | 283 | 4986 | import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults:
pass
| mit |
SCOAP3/invenio | invenio/modules/upgrader/upgrades/invenio_2014_08_31_next_collection_tree.py | 15 | 1202 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
import warnings
depends_on = ['invenio_release_1_1_0']
def info():
return "Update collection tree definition to match next structure"
def pre_upgrade():
warnings.warn("Please check that you are upgrading from latest major release.")
def do_upgrade():
"""Change the score to the opposite order."""
pass
def estimate():
return 1
def post_upgrade():
pass
| gpl-2.0 |
MinFu/youtube-dl | youtube_dl/extractor/streetvoice.py | 129 | 1677 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import unified_strdate
class StreetVoiceIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://streetvoice.com/skippylu/songs/94440/',
'md5': '15974627fc01a29e492c98593c2fd472',
'info_dict': {
'id': '94440',
'ext': 'mp3',
'filesize': 4167053,
'title': '輸',
'description': 'Crispy脆樂團 - 輸',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 260,
'upload_date': '20091018',
'uploader': 'Crispy脆樂團',
'uploader_id': '627810',
}
}, {
'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
'only_matching': True,
}]
def _real_extract(self, url):
song_id = self._match_id(url)
song = self._download_json(
'http://streetvoice.com/music/api/song/%s' % song_id, song_id)
title = song['name']
author = song['musician']['name']
return {
'id': song_id,
'url': song['file'],
'filesize': song.get('size'),
'title': title,
'description': '%s - %s' % (author, title),
'thumbnail': self._proto_relative_url(song.get('image'), 'http:'),
'duration': song.get('length'),
'upload_date': unified_strdate(song.get('created_at')),
'uploader': author,
'uploader_id': compat_str(song['musician']['id']),
}
| unlicense |
roscoeZA/GeoGigSync | pycharm-debug.egg_FILES/third_party/pep8/lib2to3/lib2to3/fixes/fix_itertools_imports.py | 322 | 2094 | """ Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import BlankLine, syms, token
class FixItertoolsImports(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
import_from< 'from' 'itertools' 'import' imports=any >
""" %(locals())
def transform(self, node, results):
imports = results['imports']
if imports.type == syms.import_as_name or not imports.children:
children = [imports]
else:
children = imports.children
for child in children[::2]:
if child.type == token.NAME:
member = child.value
name_node = child
elif child.type == token.STAR:
# Just leave the import as is.
return
else:
assert child.type == syms.import_as_name
name_node = child.children[0]
member_name = name_node.value
if member_name in (u'imap', u'izip', u'ifilter'):
child.value = None
child.remove()
elif member_name in (u'ifilterfalse', u'izip_longest'):
node.changed()
name_node.value = (u'filterfalse' if member_name[1] == u'f'
else u'zip_longest')
# Make sure the import statement is still sane
children = imports.children[:] or [imports]
remove_comma = True
for child in children:
if remove_comma and child.type == token.COMMA:
child.remove()
else:
remove_comma ^= True
while children and children[-1].type == token.COMMA:
children.pop().remove()
# If there are no imports left, just get rid of the entire statement
if (not (imports.children or getattr(imports, 'value', None)) or
imports.parent is None):
p = node.prefix
node = BlankLine()
node.prefix = p
return node
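# --- Hedged illustration (editor's addition) ---
# Effect of this fixer on a typical import statement (imported names are just
# examples):
#   before: from itertools import imap, izip, ifilterfalse, count
#   after:  from itertools import filterfalse, count
# imap/izip/ifilter are dropped (the Python 3 builtins replace them) and
# ifilterfalse/izip_longest are renamed to filterfalse/zip_longest.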
| cc0-1.0 |
dupontke/unique_angle_calc | distance_functions.py | 1 | 2338 | #!/mnt/lustre_fs/users/mjmcc/apps/python2.7/bin/python
##!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# USAGE:
# from distance_functions import *
# PREAMBLE:
import math
import numpy as np
sqrt = np.sqrt
sums = np.sum
square = np.square
zeros = np.zeros
# SUBROUTINES:
def RMSD(x,y,n):
""" Calculates the Root Mean Squared Distance between two arrays of the same size
Usage: rmsd = RMSD(x,y,n)
Arguments:
x, y: numpy arrays with the same shape (n X 3)
n: number of particles being summed over; ex: number of atoms in the atom selection being analyzed;
if n = 1, this function calculates the distance between x and y arrays
"""
return sqrt(sums(square(x-y))/n)
def MSD(x,y,n):
""" Calculates the Mean Squared Distance between two arrays of the same size
Usage: msd = MSD(x,y,n)
Arguments:
x, y: numpy arrays with the same shape
n: number of particles being summed over; ex: number of atoms in the atom selection being analyzed;
if n = 1, this function calculates the distance squared between x and y arrays
"""
return sums(square(x-y))/n
def wrapping(x,dim):
""" Calculates the translation matrix needed to wrap a particle back into the original periodic box
Usage: t = wrapping(x,dim)
Arguments:
x: a numpy array of size (3) that corresponds to the xyz coordinates of an ATOM/COM/COG of a residue
dim: a numpy array of size (3) that holds the xyz dimensions of the periodic box at that timestep
"""
t = zeros(3)
dim2 = dim/2.
for i in range(3):
if (x[i]<-dim2[i]) or (x[i]>dim2[i]):
t[i] = -dim[i]*round(x[i]/dim[i])
return t
def euclid_dist(x,y):
""" Calculates the Euclidian Distance between two arrays of the same size
Usage: dist,dist2 = euclid_dist(x,y)
Arguments:
x, y: numpy arrays with the same size
"""
dist2 = sums(square(x-y))
dist = sqrt(dist2)
return dist, dist2
def computePbcDist2(r1,r2,box):
""" compute the distance between two points taking into account periodic boundary conditions
Usage: dist = computePbcDist2(r1,r2,box):
Arguments:
r1, r2: two points that are defined
box: dimensions of the box containing protein and solvent
"""
dist2 = 0
for j in range(0,3):
temp = r1[j]-r2[j]
if temp < -box[j]/2.0:
temp += box[j]
elif temp > box[j]/2.0:
temp -= box[j]
dist2 += temp*temp
dist2 = math.sqrt(dist2)
    return dist2
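# --- Hedged usage sketch (editor's addition) ---
# Minimal example of the helpers above on two 3x3 coordinate arrays:
#   x = np.zeros((3, 3))        # three particles at the origin
#   y = np.ones((3, 3))         # three particles at (1, 1, 1)
#   RMSD(x, y, 3)               # -> sqrt(9/3) ~= 1.732
#   MSD(x, y, 3)                # -> 3.0
#   euclid_dist(x[0], y[0])     # -> (sqrt(3), 3.0)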
| gpl-3.0 |
LucasRoesler/GooglePythonLessons | basic/solution/string2.py | 208 | 3094 | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
# LAB(begin solution)
if len(s) >= 3:
if s[-3:] != 'ing': s = s + 'ing'
else: s = s + 'ly'
return s
# LAB(replace solution)
# return
# LAB(end solution)
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
# LAB(begin solution)
n = s.find('not')
b = s.find('bad')
if n != -1 and b != -1 and b > n:
s = s[:n] + 'good' + s[b+3:]
return s
# LAB(replace solution)
# return
# LAB(end solution)
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
# LAB(begin solution)
# Figure out the middle position of each string.
a_middle = len(a) / 2
b_middle = len(b) / 2
if len(a) % 2 == 1: # add 1 if length is odd
a_middle = a_middle + 1
if len(b) % 2 == 1:
b_middle = b_middle + 1
return a[:a_middle] + b[:b_middle] + a[a_middle:] + b[b_middle:]
# LAB(replace solution)
# return
# LAB(end solution)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.2/django/template/loaders/eggs.py | 65 | 1434 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
Loads templates from Python eggs via pkg_resource.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
except:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
import warnings
warnings.warn(
"'django.template.loaders.eggs.load_template_source' is deprecated; use 'django.template.loaders.eggs.Loader' instead.",
PendingDeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = resource_string is not None
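# --- Hedged configuration sketch (editor's addition, not part of Django) ---
# For this loader to be consulted it would typically be listed in the
# TEMPLATE_LOADERS setting of this Django generation, e.g.:
#   TEMPLATE_LOADERS = (
#       'django.template.loaders.filesystem.Loader',
#       'django.template.loaders.app_directories.Loader',
#       'django.template.loaders.eggs.Loader',
#   )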
| bsd-3-clause |
cslzchen/osf.io | osf/models/outcome_artifacts.py | 5 | 9049 | from django.db import models, IntegrityError, transaction
from django.utils import timezone
from osf.exceptions import (
CannotFinalizeArtifactError,
IdentifierHasReferencesError,
IsPrimaryArtifactPIDError,
NoPIDError,
UnsupportedArtifactTypeError,
)
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.identifiers import Identifier
from osf.utils import outcomes as outcome_utils
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.identifiers import normalize_identifier
'''
This module defines the OutcomeArtifact model and its custom manager.
OutcomeArtifact is a through table that provides additional metadata on the relationship
between an Outcome and an external Identifier that stores materials or provides context
for the research effort described by the Outcome.
'''
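# --- Hedged illustration (editor's addition, not part of this module) ---
# Conceptually, one Outcome fans out to many OutcomeArtifact rows, each tied
# to an Identifier (a PID such as a DOI). A minimal sketch using only names
# defined in this file; the `outcome` and `identifier` objects are assumed to
# exist already, and the flow shown is illustrative rather than the exact OSF
# call sequence:
#   artifact = OutcomeArtifact.objects.create(
#       outcome=outcome,
#       identifier=identifier,
#       artifact_type=ArtifactTypes.PRIMARY,
#   )
#   artifact.finalize()   # validates PID/type, then logs on the parent Outcome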
ArtifactTypes = outcome_utils.ArtifactTypes
OutcomeActions = outcome_utils.OutcomeActions
class ArtifactManager(models.Manager):
def get_queryset(self):
'''Overrides default `get_queryset` behavior to add custom logic.
Automatically annotates the `pid` from any linked identifier and the
GUID of the primary resource for the parent artifact.
Automatically filters out deleted entries
'''
base_queryset = super().get_queryset().select_related('identifier')
return base_queryset.annotate(
pid=models.F('identifier__value'),
primary_resource_guid=outcome_utils.make_primary_resource_guid_annotation(base_queryset)
)
def for_registration(self, registration, identifier_type='doi'):
'''Retrieves all OutcomeArtifacts sharing an Outcome, given the Primary Registration.'''
registration_identifier = registration.get_identifier(identifier_type)
artifact_qs = self.get_queryset()
return artifact_qs.annotate(
primary_outcome=models.Subquery(
artifact_qs.filter(
identifier=registration_identifier,
artifact_type=ArtifactTypes.PRIMARY
).values('outcome_id')[:1],
output_field=models.IntegerField()
)
).filter(
outcome_id=models.F('primary_outcome')
).exclude(
identifier=registration_identifier
)
class OutcomeArtifact(ObjectIDMixin, BaseModel):
    '''OutcomeArtifact is a through table that connects an Outcome with Identifiers
while providing some additional, useful metadata'''
# The following fields are inherited from ObjectIdMixin
# _id (CharField)
# The following fields are inherited from BaseModel
# created (DateTimeField)
# modified (DateTimeField)
outcome = models.ForeignKey(
'osf.outcome',
on_delete=models.CASCADE,
related_name='artifact_metadata'
)
identifier = models.ForeignKey(
'osf.identifier',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='artifact_metadata'
)
artifact_type = models.IntegerField(
null=False,
choices=ArtifactTypes.choices(),
default=ArtifactTypes.UNDEFINED,
)
title = models.TextField(null=False, blank=True)
description = models.TextField(null=False, blank=True)
finalized = models.BooleanField(default=False)
deleted = NonNaiveDateTimeField(null=True, blank=True)
objects = ArtifactManager()
class Meta:
indexes = [
models.Index(fields=['artifact_type', 'outcome'])
]
ordering = ['artifact_type', 'title']
@transaction.atomic
def update(
self,
new_description=None,
new_artifact_type=None,
new_pid_value=None,
pid_type='doi',
api_request=None
):
log_params = {}
if new_description is not None:
self.description = new_description
if new_artifact_type is not None:
if new_artifact_type == ArtifactTypes.UNDEFINED != self.artifact_type:
raise UnsupportedArtifactTypeError
self.artifact_type = new_artifact_type
if new_pid_value is not None:
log_params = {
'obsolete_identifier': self.identifier.value if self.identifier else '',
'new_identifier': new_pid_value
}
self._update_identifier(new_pid_value, pid_type, api_request)
if self.finalized:
if OutcomeArtifact.objects.filter(
outcome=self.outcome,
identifier=self.identifier,
artifact_type=self.artifact_type,
finalized=True,
deleted__isnull=True
).exclude(
id=self.id
).exists():
raise IntegrityError()
self.outcome.artifact_updated(
artifact=self,
action=OutcomeActions.UPDATE if new_pid_value is not None else None,
api_request=api_request,
**log_params,
)
def _update_identifier(self, new_pid_value, pid_type='doi', api_request=None):
        '''Changes the linked Identifier to one matching the new pid_value and handles callbacks.
If `finalized` is True, will also log the change on the parent Outcome if invoked via API.
Will attempt to delete the previous identifier to avoid orphaned entries.
Should only be called from within `update` to ensure atomicity
Parameters:
new_pid_value: The string value of the new PID
pid_type (str): The string "type" of the new PID (for now, only "doi" is supported)
api_request: The api_request data from the API call that initiated the change.
'''
if not new_pid_value:
raise NoPIDError('Cannot assign an empty PID value')
normalized_pid_value = normalize_identifier(new_pid_value)
if self.identifier and normalized_pid_value == self.identifier.value:
return
new_identifier, created = Identifier.objects.get_or_create(
value=normalized_pid_value, category=pid_type
)
# Reraise these errors all the way to API
if created:
new_identifier.validate_identifier_value()
elif OutcomeArtifact.objects.filter(
outcome=self.outcome,
identifier=new_identifier,
artifact_type=ArtifactTypes.PRIMARY
).exists():
raise IsPrimaryArtifactPIDError(pid_value=new_pid_value, pid_category=pid_type)
previous_identifier = self.identifier
self.identifier = new_identifier
self.save()
if previous_identifier:
try:
previous_identifier.delete()
except IdentifierHasReferencesError:
pass
def finalize(self, api_request=None):
'''Sets `finalized` to True and handles callbacks.
Logs the change on the parent Outcome if invoked via the API.
Parameters:
api_request: The api_request data from the API call that initiated the change.
'''
incomplete_fields = []
if not (self.identifier and self.identifier.value):
incomplete_fields.append('identifier__value')
if not self.artifact_type:
incomplete_fields.append('artifact_type')
if incomplete_fields:
raise CannotFinalizeArtifactError(self, incomplete_fields)
if OutcomeArtifact.objects.filter(
outcome=self.outcome,
identifier=self.identifier,
artifact_type=self.artifact_type,
finalized=True,
deleted__isnull=True,
).exists():
raise IntegrityError()
self.finalized = True
self.save()
self.outcome.artifact_updated(
action=OutcomeActions.ADD,
artifact=self,
api_request=api_request,
new_identifier=self.identifier.value
)
def delete(self, api_request=None, **kwargs):
'''Intercept `delete` behavior on the model instance and handles callbacks.
Deletes from database if not `finalized` otherwise sets the `deleted` timestamp.
Logs the change on the parent Outcome if invoked via the API.
Attempts to delete the linked Identifier to avoid orphaned entries.
Parameters:
api_request: The api_request data from the API call that initiated the change.
'''
identifier = self.identifier
if self.finalized:
self.deleted = timezone.now()
self.save()
self.outcome.artifact_updated(
action=OutcomeActions.REMOVE,
artifact=self,
api_request=api_request,
obsolete_identifier=identifier.value
)
else:
super().delete(**kwargs)
if identifier:
try:
identifier.delete()
except IdentifierHasReferencesError:
pass
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.5/django/utils/translation/__init__.py | 112 | 4690 | """
Internationalization support.
"""
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
]
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ngettext_lazy = lazy(ngettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
ungettext_lazy = lazy(ungettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
npgettext_lazy = lazy(npgettext, six.text_type)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(object):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
self.old_language = get_language()
def __enter__(self):
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join([force_text(s) for s in strings])
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
return LANG_INFO[lang_code]
except KeyError:
raise KeyError("Unknown language code %r." % lang_code)
| bsd-3-clause |
patmun/pynetdicom | netdicom/test/getscu.py | 2 | 1885 | #
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
"""
GetSCU AE example.
This demonstrates a simple application entity that support the Patient Root
Get SOP Class as SCU. The example sets up a SCP provider listening
on port 2001 on localhost using the dcmqrscp command from the offis toolkit.
"""
import sys
sys.path.append('..')
import time
from applicationentity import AE
from SOPclass import *
import dicom
from dcmqrscp import start_dcmqrscp
from dicom.dataset import Dataset
# first create a partner
start_dcmqrscp(server_port=2001, server_AET='AE1', populate=True)
for ii in range(20):
print
# call back
def OnAssociateResponse(association):
print "Association response received"
def OnReceiveStore(SOPClass, DS):
print "Received C-STORE"
print DS
return 0
# create application entity
MyAE = AE(
'LocalAE', 9998, [PatientRootGetSOPClass, VerificationSOPClass],
[RTPlanStorageSOPClass, CTImageStorageSOPClass, MRImageStorageSOPClass,
RTImageStorageSOPClass])
MyAE.OnAssociateResponse = OnAssociateResponse
MyAE.OnReceiveStore = OnReceiveStore
# remote application entity
RemoteAE = {'Address': 'localhost', 'Port': 2001, 'AET': 'AE1'}
# create association with remote AE
print "Request association"
assoc = MyAE.RequestAssociation(RemoteAE)
# perform a DICOM ECHO
print "DICOM Echo ... ",
st = assoc.VerificationSOPClass.SCU(1)
print 'done with status "%s"' % st
# send dataset using RTPlanStorageSOPClass
print "DICOM GetSCU ... ",
d = Dataset()
d.PatientsName = '*'
d.QueryRetrieveLevel = "PATIENT"
st = assoc.PatientRootGetSOPClass.SCU(d, 1)
print 'done with status "%s"' % st
print "Release association"
assoc.Release(0)
# done
MyAE.Quit()
| mit |
isnnn/Sick-Beard-TPB | lib/html5lib/treewalkers/lxmletree.py | 33 | 5641 | from lxml import etree
from html5lib.treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
import _base
from html5lib.constants import voidElements
from html5lib import ihatexml
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self, et.docinfo.root_name,
et.docinfo.public_id,
et.docinfo.system_url))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = self.obj.text
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = self.obj.tail
else:
self.tail = None
self.isstring = isinstance(obj, basestring)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __nonzero__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return unicode(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, getattr(node, key)
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node
elif node.tag == etree.Comment:
return _base.COMMENT, node.text
elif node.tag == etree.Entity:
return _base.ENTITY, node.text[1:-1] # strip &;
else:
#This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = {}
for name, value in node.attrib.items():
match = tag_regexp.match(name)
if match:
attrs[(match.group(1),match.group(2))] = value
else:
attrs[(None,name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return node.tail and (node, "tail") or node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
| gpl-3.0 |
pyparallel/numpy | numpy/core/numerictypes.py | 69 | 28783 | """
numerictypes: Define the numeric type objects
This module is designed so "from numerictypes import \\*" is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
datetime64 timedelta64
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | half
| | single
| | float_ (double)
| | longfloat
| \\-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| void (kind=V)
|
| str_ (string_, bytes_) (kind=S) [Python 2]
| unicode_ (kind=U) [Python 2]
|
| bytes_ (string_) (kind=S) [Python 3]
| str_ (unicode_) (kind=U) [Python 3]
|
\\-> object_ (not used much) (kind=O)
"""
from __future__ import division, absolute_import, print_function
import types as _types
import sys
import numbers
from numpy.compat import bytes, long
from numpy.core.multiarray import (
typeinfo, ndarray, array, empty, dtype, datetime_data,
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
This is an internal utility function to replace calls to str.lower() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
lowered : str
Examples
--------
>>> from numpy.core.numerictypes import english_lower
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_lower('')
''
"""
lowered = s.translate(LOWER_TABLE)
return lowered
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
sctypeDict = {} # Contains all leaf-node scalar types with aliases
sctypeNA = {} # Contains all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
def bitname(obj):
"""Return a bit-width name for a given type object"""
name = obj.__name__
base = ''
char = ''
try:
if name[-1] == '_':
newname = name[:-1]
else:
newname = name
info = typeinfo[english_upper(newname)]
assert(info[-1] == obj) # sanity check
bits = info[2]
except KeyError: # bit-width name
base, bits = _evalname(name)
char = base[0]
if name == 'bool_':
char = 'b'
base = 'bool'
elif name == 'void':
char = 'V'
base = 'void'
elif name == 'object_':
char = 'O'
base = 'object'
bits = 0
elif name == 'datetime64':
char = 'M'
elif name == 'timedelta64':
char = 'm'
if sys.version_info[0] >= 3:
if name == 'bytes_':
char = 'S'
base = 'bytes'
elif name == 'str_':
char = 'U'
base = 'str'
else:
if name == 'string_':
char = 'S'
base = 'string'
elif name == 'unicode_':
char = 'U'
base = 'unicode'
bytes = bits // 8
if char != '' and bytes != 0:
char = "%s%d" % (char, bytes)
return base, bits, char
def _add_types():
for a in typeinfo.keys():
name = english_lower(a)
if isinstance(typeinfo[a], tuple):
typeobj = typeinfo[a][-1]
# define C-name and insert typenum and typechar references also
allTypes[name] = typeobj
sctypeDict[name] = typeobj
sctypeDict[typeinfo[a][0]] = typeobj
sctypeDict[typeinfo[a][1]] = typeobj
else: # generic class
allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
for a in typeinfo.keys():
name = english_lower(a)
if not isinstance(typeinfo[a], tuple):
continue
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
if base[-3:] == 'int' or char[0] in 'ui':
continue
if base != '':
myname = "%s%d" % (base, bit)
if ((name != 'longdouble' and name != 'clongdouble') or
myname not in allTypes.keys()):
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
if base == 'complex':
na_name = '%s%d' % (english_capitalize(base), bit//2)
elif base == 'bool':
na_name = english_capitalize(base)
sctypeDict[na_name] = typeobj
else:
na_name = "%s%d" % (english_capitalize(base), bit)
sctypeDict[na_name] = typeobj
sctypeNA[na_name] = typeobj
sctypeDict[na_name] = typeobj
sctypeNA[typeobj] = na_name
sctypeNA[typeinfo[a][0]] = na_name
if char != '':
sctypeDict[char] = typeobj
sctypeNA[char] = na_name
_add_aliases()
# Integers are handled so that the int32 and int64 types should agree
# exactly with NPY_INT32, NPY_INT64. We need to enforce the same checking
# as is done in arrayobject.h where the order of getting a bit-width match
# is long, longlong, int, short, char.
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
val = typeinfo[ctype]
bits = val[2]
charname = 'i%d' % (bits//8,)
ucharname = 'u%d' % (bits//8,)
intname = 'int%d' % bits
UIntname = 'UInt%d' % bits
Intname = 'Int%d' % bits
uval = typeinfo['U'+ctype]
typeobj = val[-1]
utypeobj = uval[-1]
if intname not in allTypes.keys():
uintname = 'uint%d' % bits
allTypes[intname] = typeobj
allTypes[uintname] = utypeobj
sctypeDict[intname] = typeobj
sctypeDict[uintname] = utypeobj
sctypeDict[Intname] = typeobj
sctypeDict[UIntname] = utypeobj
sctypeDict[charname] = typeobj
sctypeDict[ucharname] = utypeobj
sctypeNA[Intname] = typeobj
sctypeNA[UIntname] = utypeobj
sctypeNA[charname] = typeobj
sctypeNA[ucharname] = utypeobj
sctypeNA[typeobj] = Intname
sctypeNA[utypeobj] = UIntname
sctypeNA[val[0]] = Intname
sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('unicode_', 'unicode'),
('object_', 'object')]
if sys.version_info[0] >= 3:
type_pairs.extend([('bytes_', 'string'),
('str_', 'unicode'),
('string_', 'string')])
else:
type_pairs.extend([('str_', 'string'),
('string_', 'string'),
('bytes_', 'string')])
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
'complex', 'bool', 'string', 'datetime', 'timedelta']
if sys.version_info[0] >= 3:
# Py3K
to_remove.append('bytes')
to_remove.append('str')
to_remove.remove('unicode')
to_remove.remove('long')
for t in to_remove:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
for name in typeinfo.keys():
tup = typeinfo[name]
if isinstance(tup, tuple):
if tup[0] not in ['p', 'P']:
_sctype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool, object, str, unicode, void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
"""
Return the scalar type of highest precision of the same kind as the input.
Parameters
----------
t : dtype or dtype specifier
The input data type. This can be a `dtype` object or an object that
is convertible to a `dtype`.
Returns
-------
out : dtype
The highest precision data type of the same kind (`dtype.kind`) as `t`.
See Also
--------
obj2sctype, mintypecode, sctype2char
dtype
Examples
--------
>>> np.maximum_sctype(np.int)
<type 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<type 'numpy.uint64'>
>>> np.maximum_sctype(np.complex)
<type 'numpy.complex192'>
>>> np.maximum_sctype(str)
<type 'numpy.string_'>
>>> np.maximum_sctype('i2')
<type 'numpy.int64'>
>>> np.maximum_sctype('f4')
<type 'numpy.float96'>
"""
g = obj2sctype(t)
if g is None:
return t
t = g
name = t.__name__
base, bits = _evalname(name)
if bits == 0:
return t
else:
return sctypes[base][-1]
try:
buffer_type = _types.BufferType
except AttributeError:
# Py3K
buffer_type = memoryview
_python_types = {int: 'int_',
float: 'float_',
complex: 'complex_',
bool: 'bool_',
bytes: 'bytes_',
unicode: 'unicode_',
buffer_type: 'void',
}
if sys.version_info[0] >= 3:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, type):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
else:
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, _types.TypeType):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
Parameters
----------
rep : any
If `rep` is an instance of a scalar dtype, True is returned. If not,
False is returned.
Returns
-------
out : bool
Boolean result of check whether `rep` is a scalar dtype.
See Also
--------
issubsctype, issubdtype, obj2sctype, sctype2char
Examples
--------
>>> np.issctype(np.int32)
True
>>> np.issctype(list)
False
>>> np.issctype(1.1)
False
Strings are also a scalar type:
>>> np.issctype(np.dtype('str'))
True
"""
if not isinstance(rep, (type, dtype)):
return False
try:
res = obj2sctype(rep)
if res and res != object_:
return True
return False
except:
return False
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
Parameters
----------
rep : any
The object of which the type is returned.
default : any, optional
If given, this is returned for objects whose types can not be
determined. If not given, None is returned for those objects.
Returns
-------
dtype : dtype or Python type
The data type of `rep`.
See Also
--------
sctype2char, issctype, issubsctype, issubdtype, maximum_sctype
Examples
--------
>>> np.obj2sctype(np.int32)
<type 'numpy.int32'>
>>> np.obj2sctype(np.array([1., 2.]))
<type 'numpy.float64'>
>>> np.obj2sctype(np.array([1.j]))
<type 'numpy.complex128'>
>>> np.obj2sctype(dict)
<type 'numpy.object_'>
>>> np.obj2sctype('string')
<type 'numpy.string_'>
>>> np.obj2sctype(1, default=list)
<type 'list'>
"""
try:
if issubclass(rep, generic):
return rep
except TypeError:
pass
if isinstance(rep, dtype):
return rep.type
if isinstance(rep, type):
return _python_type(rep)
if isinstance(rep, ndarray):
return rep.dtype.type
try:
res = dtype(rep)
except:
return default
return res.type
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
except that it returns False instead of raising a TypeError if one
of the arguments is not a class.
Parameters
----------
arg1 : class
Input class. True is returned if `arg1` is a subclass of `arg2`.
arg2 : class or tuple of classes.
Input class. If a tuple of classes, True is returned if `arg1` is a
subclass of any of the tuple elements.
Returns
-------
out : bool
Whether `arg1` is a subclass of `arg2` or not.
See Also
--------
issubsctype, issubdtype, issctype
Examples
--------
>>> np.issubclass_(np.int32, np.int)
True
>>> np.issubclass_(np.int32, np.float)
False
"""
try:
return issubclass(arg1, arg2)
except TypeError:
return False
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
Parameters
----------
arg1, arg2 : dtype or dtype specifier
Data-types.
Returns
-------
out : bool
The result.
See Also
--------
issctype, issubdtype,obj2sctype
Examples
--------
>>> np.issubsctype('S8', str)
True
>>> np.issubsctype(np.array([1]), np.int)
True
>>> np.issubsctype(np.array([1]), np.float)
False
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
Parameters
----------
arg1, arg2 : dtype_like
dtype or string representing a typecode.
Returns
-------
out : bool
See Also
--------
issubsctype, issubclass_
numpy.core.numerictypes : Overview of numpy type hierarchy.
Examples
--------
>>> np.issubdtype('S1', str)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
if issubclass_(arg2, generic):
return issubclass(dtype(arg1).type, arg2)
mro = dtype(arg2).type.mro()
if len(mro) > 1:
val = mro[1]
else:
val = mro[0]
return issubclass(dtype(arg1).type, val)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
"""
Base object for a dictionary for look-up with any alias for an array dtype.
Instances of `_typedict` can not be used as dictionaries directly,
first they have to be populated.
"""
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
for name, val in typeinfo.items():
if not isinstance(val, tuple):
continue
obj = val[-1]
nbytes[obj] = val[2] // 8
_alignment[obj] = val[3]
if (len(val) > 5):
_maxvals[obj] = val[4]
_minvals[obj] = val[5]
else:
_maxvals[obj] = None
_minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
Parameters
----------
sctype : scalar dtype or object
If a scalar dtype, the corresponding string character is
returned. If an object, `sctype2char` tries to infer its scalar type
and then return the corresponding string character.
Returns
-------
typechar : str
The string character corresponding to the scalar type.
Raises
------
ValueError
If `sctype` is an object for which the type can not be inferred.
See Also
--------
obj2sctype, issctype, issubsctype, mintypecode
Examples
--------
>>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
... print np.sctype2char(sctype)
l
d
D
S
O
>>> x = np.array([1., 2-1.j])
>>> np.sctype2char(x)
'D'
>>> np.sctype2char(list)
'O'
"""
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
return _sctype2char_dict[sctype]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
try:
ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
_types.LongType, _types.BooleanType,
_types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
# Py3K
ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
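# Illustrative sketch (assumption, not part of the original module): every
# entry in ``cast`` simply wraps ``array(x, copy=False).astype(key)``, so it
# behaves as a casting callable indexed by scalar type or by type character.
#
# >>> np.cast[np.int32]([1.9, 2.1]).dtype
# dtype('int32')
# >>> np.cast['d'](np.arange(3)).dtype
# dtype('float64')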
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
if issubclass(key, allTypes['flexible']):
_typestr[key] = _sctype2char_dict[key]
else:
_typestr[key] = empty((1,), key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
if val not in sctypeDict:
sctypeDict[val] = key
# Add additional strings to the sctypeDict
if sys.version_info[0] >= 3:
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
else:
_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
('str', allTypes['string_']),
'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = name[1]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'efdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'efdgFDG',
'Datetime': 'Mm',
'All':'?bhilqpBHILQPefdgFDGSUVOMm'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# M -> datetime
# m -> timedelta
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O', 'M', 'm']
__test_types = '?'+typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
if a > b:
return a
try:
thisind = __test_types.index(a.char)
except ValueError:
return None
return _can_coerce_all([a, b], start=thisind)
# Find a data-type that all data-types in a list can be coerced to
def _can_coerce_all(dtypelist, start=0):
N = len(dtypelist)
if N == 0:
return None
if N == 1:
return dtypelist[0]
thisind = start
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
numcoerce = len([x for x in dtypelist if newdtype >= x])
if numcoerce == N:
return newdtype
thisind += 1
return None
def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
_register_types()
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
Parameters
----------
array_types : sequence
A list of dtypes or dtype convertible objects representing arrays.
scalar_types : sequence
A list of dtypes or dtype convertible objects representing scalars.
Returns
-------
datatype : dtype
The common data type, which is the maximum of `array_types` ignoring
`scalar_types`, unless the maximum of `scalar_types` is of a
different kind (`dtype.kind`). If the kind is not understood, then
None is returned.
See Also
--------
dtype, common_type, can_cast, mintypecode
Examples
--------
>>> np.find_common_type([], [np.int64, np.float32, np.complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
The standard casting rules ensure that a scalar cannot up-cast an
array unless the scalar is of a fundamentally different kind of data
    (i.e. under a different hierarchy in the data type hierarchy) than
the array:
>>> np.find_common_type([np.float32], [np.int64, np.float64])
dtype('float32')
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
>>> np.find_common_type([np.float32], [np.complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
be used instead of dtypes:
>>> np.find_common_type(['f4', 'f4', 'i4'], ['c8'])
dtype('complex128')
"""
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
maxa = _can_coerce_all(array_types)
maxsc = _can_coerce_all(scalar_types)
if maxa is None:
return maxsc
if maxsc is None:
return maxa
try:
index_a = _kind_list.index(maxa.kind)
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
if index_sc > index_a:
return _find_common_coerce(maxsc, maxa)
else:
return maxa
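# Illustrative sketch (assumption, not part of the original module): the
# private helpers above scan ``__test_types`` from "smallest" to "largest"
# dtype, so their behaviour can be spot-checked directly.  The results below
# reflect the usual promotion rules (int32 + float32 -> float64) and are an
# example only, not authoritative output.
#
# >>> _can_coerce_all([dtype('i4'), dtype('f4')])
# dtype('float64')
# >>> _find_common_coerce(dtype('f4'), dtype('i8'))
# dtype('float64')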
| bsd-3-clause |
gunchleoc/django | tests/template_tests/filter_tests/test_filesizeformat.py | 80 | 2672 | from __future__ import unicode_literals
from django.template.defaultfilters import filesizeformat
from django.test import SimpleTestCase
from django.utils import translation
class FunctionTests(SimpleTestCase):
def test_formats(self):
self.assertEqual(filesizeformat(1023), '1023\xa0bytes')
self.assertEqual(filesizeformat(1024), '1.0\xa0KB')
self.assertEqual(filesizeformat(10 * 1024), '10.0\xa0KB')
self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024.0\xa0KB')
self.assertEqual(filesizeformat(1024 * 1024), '1.0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 50), '50.0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024.0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1.0\xa0GB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1.0\xa0TB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024), '1.0\xa0PB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000), '2000.0\xa0PB')
self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0bytes')
self.assertEqual(filesizeformat(""), '0\xa0bytes')
self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"), '0\xa0bytes')
def test_localized_formats(self):
with self.settings(USE_L10N=True), translation.override('de'):
self.assertEqual(filesizeformat(1023), '1023\xa0Bytes')
self.assertEqual(filesizeformat(1024), '1,0\xa0KB')
self.assertEqual(filesizeformat(10 * 1024), '10,0\xa0KB')
self.assertEqual(filesizeformat(1024 * 1024 - 1), '1024,0\xa0KB')
self.assertEqual(filesizeformat(1024 * 1024), '1,0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 50), '50,0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 - 1), '1024,0\xa0MB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024), '1,0\xa0GB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024), '1,0\xa0TB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024), '1,0\xa0PB')
self.assertEqual(filesizeformat(1024 * 1024 * 1024 * 1024 * 1024 * 2000), '2000,0\xa0PB')
self.assertEqual(filesizeformat(complex(1, -1)), '0\xa0Bytes')
self.assertEqual(filesizeformat(""), '0\xa0Bytes')
self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"), '0\xa0Bytes')
def test_negative_numbers(self):
self.assertEqual(filesizeformat(-100), '-100\xa0bytes')
self.assertEqual(filesizeformat(-1024 * 1024 * 50), '-50.0\xa0MB')
| bsd-3-clause |
loopCM/chromium | chrome/test/functional/chromeos_wifi_compliance.py | 52 | 4102 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional
import chromeos_network # pyauto_functional must come before chromeos_network
class ChromeosWifiCompliance(chromeos_network.PyNetworkUITest):
"""Tests for ChromeOS wifi complaince.
These tests should be run within vacinity of the power strip where the wifi
routers are attached.
"""
def _BasicConnectRouterCompliance(self, router_name):
"""Generic basic test routine for connecting to a router.
Args:
router_name: The name of the router.
"""
self.InitWifiPowerStrip()
router = self.GetRouterConfig(router_name)
self.RouterPower(router_name, True)
# If the wifi network is expected to be invisible, the following
    # line should time out, which is expected.
wifi_visible = self.WaitUntilWifiNetworkAvailable(router['ssid'],
is_hidden=router.get('hidden'))
# Note, we expect wifi_visible and 'hidden' status to be opposites.
# The test fails if the network visibility is not as expected.
if wifi_visible == router.get('hidden', False):
self.fail('We expected wifi network "%s" to be %s, but it was not.' %
(router['ssid'],
{True: 'hidden', False: 'visible'}[router.get('hidden',
False)]))
# Verify connect did not have any errors.
error = self.ConnectToWifiRouter(router_name)
self.assertFalse(error, 'Failed to connect to wifi network %s. '
'Reason: %s.' % (router['ssid'], error))
# Verify the network we connected to.
ssid = self.GetConnectedWifi()
self.assertEqual(ssid, router['ssid'],
'Did not successfully connect to wifi network %s.' % ssid)
self.DisconnectFromWifiNetwork()
def testConnectBelkinG(self):
"""Test connecting to the Belkin G router."""
self._BasicConnectRouterCompliance('Belkin_G')
def testConnectBelkinNPlus(self):
"""Test connecting to the Belkin N+ router."""
self._BasicConnectRouterCompliance('Belkin_N+')
def testConnectDLinkN150(self):
"""Test connecting to the D-Link N150 router."""
self._BasicConnectRouterCompliance('D-Link_N150')
def testConnectLinksysE3000(self):
"""Test connecting to the Linksys E3000 router.
    The LinksysE3000 supports broadcasting of up to 2 SSIDs.
This test will try connecting to each of them one at a time.
"""
self._BasicConnectRouterCompliance('LinksysE3000')
self._BasicConnectRouterCompliance('LinksysE3000_2')
def testConnectLinksysWRT54G2(self):
"""Test connecting to the Linksys WRT54G2 router."""
self._BasicConnectRouterCompliance('Linksys_WRT54G2')
def testConnectLinksysWRT54GL(self):
"""Test connecting to the LinksysWRT54GL router."""
self._BasicConnectRouterCompliance('Linksys_WRT54GL')
def testConnectNetgearN300(self):
"""Test connecting to the Netgear N300 router."""
self._BasicConnectRouterCompliance('Netgear_N300')
def testConnectNetgearWGR614(self):
"""Test connecting to the Netgear WGR 614 router."""
self._BasicConnectRouterCompliance('Netgear_WGR614')
def testConnectNfiniti(self):
"""Test connecting to the Nfiniti router."""
self._BasicConnectRouterCompliance('Nfiniti')
def testConnectSMCWBR145(self):
"""Test connecting to the SMC WBR 145 router."""
self._BasicConnectRouterCompliance('SMC_WBR145')
def testConnectTrendnet_639gr(self):
"""Test connecting to the Trendnet 639gr router.
    The Trendnet 639gr supports broadcasting of up to 4 SSIDs.
This test will try connecting to each of them one at a time.
"""
self._BasicConnectRouterCompliance('Trendnet_639gr')
self._BasicConnectRouterCompliance('Trendnet_639gr_2')
self._BasicConnectRouterCompliance('Trendnet_639gr_3')
self._BasicConnectRouterCompliance('Trendnet_639gr_4')
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ldb_monitor.py | 6 | 12613 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ldb_monitor
short_description: Configure server load balancing health monitors in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ldb_monitor category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ldb_monitor:
description:
- Configure server load balancing health monitors.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
http_get:
description:
- URL used to send a GET request to check the health of an HTTP server.
type: str
http_match:
description:
- String to match the value expected in response to an HTTP-GET request.
type: str
http_max_redirects:
description:
- The maximum number of HTTP redirects to be allowed (0 - 5).
type: int
interval:
description:
                    - Time between health checks (5 - 65535 sec).
type: int
name:
description:
- Monitor name.
required: true
type: str
port:
description:
                    - Service port used to perform the health check. If 0, health check monitor inherits port configured for the server (0 - 65535).
type: int
retry:
description:
- Number health check attempts before the server is considered down (1 - 255).
type: int
timeout:
description:
- Time to wait to receive response to a health check from a server. Reaching the timeout means the health check failed (1 - 255 sec).
type: int
type:
description:
- Select the Monitor type used by the health check monitor to check the health of the server (PING | TCP | HTTP).
type: str
choices:
- ping
- tcp
- http
- passive-sip
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure server load balancing health monitors.
fortios_firewall_ldb_monitor:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ldb_monitor:
http_get: "<your_own_value>"
http_match: "<your_own_value>"
http_max_redirects: "5"
interval: "6"
name: "default_name_7"
port: "8"
retry: "9"
timeout: "10"
type: "ping"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ldb_monitor_data(json):
option_list = ['http_get', 'http_match', 'http_max_redirects',
'interval', 'name', 'port',
'retry', 'timeout', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
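# Illustrative sketch (assumption, not part of the original module): the FOS
# API expects hyphenated keys, so the helper above recursively rewrites the
# underscored Ansible parameter names before they are sent to the device,
# e.g.
#
#     underscore_to_hyphen({'http_get': '/health', 'http_max_redirects': 5})
#     # -> {'http-get': '/health', 'http-max-redirects': 5}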
def firewall_ldb_monitor(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_ldb_monitor'] and data['firewall_ldb_monitor']:
state = data['firewall_ldb_monitor']['state']
else:
state = True
firewall_ldb_monitor_data = data['firewall_ldb_monitor']
filtered_data = underscore_to_hyphen(filter_firewall_ldb_monitor_data(firewall_ldb_monitor_data))
if state == "present":
return fos.set('firewall',
'ldb-monitor',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ldb-monitor',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ldb_monitor']:
resp = firewall_ldb_monitor(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ldb_monitor": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"http_max_redirects": {"required": False, "type": "int"},
"interval": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"retry": {"required": False, "type": "int"},
"timeout": {"required": False, "type": "int"},
"type": {"required": False, "type": "str",
"choices": ["ping", "tcp", "http",
"passive-sip"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
volaya/processing_pysal | ext-libs/pysal/region/tests/test_randomregion.py | 9 | 7010 |
import unittest
import pysal
import numpy as np
import random
class Test_Random_Regions(unittest.TestCase):
def setUp(self):
self.nregs = 13
self.cards = range(2, 14) + [10]
self.w = pysal.lat2W(10, 10, rook=False)
self.ids = self.w.id_order
def test_Random_Regions(self):
random.seed(10)
np.random.seed(10)
t0 = pysal.region.Random_Regions(self.ids, permutations=2)
result = [19, 14, 43, 37, 66, 3, 79, 41, 38, 68, 2, 1, 60]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(60)
np.random.seed(60)
t0 = pysal.region.Random_Regions(self.ids, num_regions=self.nregs,
cardinality=self.cards, contiguity=self.w, permutations=2)
result = [88, 97, 98, 89, 99, 86, 78, 59, 49, 69, 68, 79, 77]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Regions(self.ids, num_regions=self.nregs,
cardinality=self.cards, permutations=2)
result = [37, 62]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Regions(self.ids,
num_regions=self.nregs, contiguity=self.w, permutations=2)
result = [71, 72, 70, 93, 51, 91, 85, 74, 63, 73, 61, 62, 82]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[1][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(60)
np.random.seed(60)
t0 = pysal.region.Random_Regions(self.ids,
cardinality=self.cards, contiguity=self.w, permutations=2)
result = [88, 97, 98, 89, 99, 86, 78, 59, 49, 69, 68, 79, 77]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Regions(
self.ids, num_regions=self.nregs, permutations=2)
result = [37, 62, 26, 41, 35, 25, 36]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Regions(
self.ids, cardinality=self.cards, permutations=2)
result = [37, 62]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[0][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Regions(
self.ids, contiguity=self.w, permutations=2)
result = [62, 52, 51, 50]
for i in range(len(result)):
self.assertEquals(t0.solutions[0].regions[1][i], result[i])
for i in range(len(t0.solutions)):
self.assertEquals(t0.solutions_feas[i], t0.solutions[i])
def test_Random_Region(self):
random.seed(10)
np.random.seed(10)
t0 = pysal.region.Random_Region(self.ids)
t0.regions[0]
result = [19, 14, 43, 37, 66, 3, 79, 41, 38, 68, 2, 1, 60]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(60)
np.random.seed(60)
t0 = pysal.region.Random_Region(self.ids, num_regions=self.nregs,
cardinality=self.cards, contiguity=self.w)
t0.regions[0]
result = [88, 97, 98, 89, 99, 86, 78, 59, 49, 69, 68, 79, 77]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Region(
self.ids, num_regions=self.nregs, cardinality=self.cards)
t0.regions[0]
result = [37, 62]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Region(
self.ids, num_regions=self.nregs, contiguity=self.w)
t0.regions[1]
result = [71, 72, 70, 93, 51, 91, 85, 74, 63, 73, 61, 62, 82]
for i in range(len(result)):
self.assertEquals(t0.regions[1][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(60)
np.random.seed(60)
t0 = pysal.region.Random_Region(
self.ids, cardinality=self.cards, contiguity=self.w)
t0.regions[0]
result = [88, 97, 98, 89, 99, 86, 78, 59, 49, 69, 68, 79, 77]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Region(self.ids, num_regions=self.nregs)
t0.regions[0]
result = [37, 62, 26, 41, 35, 25, 36]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Region(self.ids, cardinality=self.cards)
t0.regions[0]
result = [37, 62]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
random.seed(100)
np.random.seed(100)
t0 = pysal.region.Random_Region(self.ids, contiguity=self.w)
t0.regions[0]
result = [37, 27, 36, 17]
for i in range(len(result)):
self.assertEquals(t0.regions[0][i], result[i])
self.assertEquals(t0.feasible, True)
suite = unittest.TestLoader().loadTestsFromTestCase(Test_Random_Regions)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| gpl-2.0 |
allen-fdes/python_demo | venv/Lib/encodings/hp_roman8.py | 132 | 13447 | """ Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
    33471-90901, Hewlett-Packard, June 1989.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hp-roman8',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
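### Usage note (illustrative, not part of the generated file): once the
### encodings package registers this module, the codec is reachable through
### the normal codecs machinery by name, e.g.
###
### >>> 'caf\xe9'.encode('hp-roman8')   # e-acute maps to 0xC5 in this table
### b'caf\xc5'
### >>> b'caf\xc5'.decode('hp-roman8') == 'caf\xe9'
### True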
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xc0' # 0xA1 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc2' # 0xA2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc8' # 0xA3 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0xA4 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xA5 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xce' # 0xA6 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xA7 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xb4' # 0xA8 -> ACUTE ACCENT
'\u02cb' # 0xA9 -> MODIFIER LETTER GRAVE ACCENT (MANDARIN CHINESE FOURTH TONE)
'\u02c6' # 0xAA -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\xa8' # 0xAB -> DIAERESIS
'\u02dc' # 0xAC -> SMALL TILDE
'\xd9' # 0xAD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0xAE -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u20a4' # 0xAF -> LIRA SIGN
'\xaf' # 0xB0 -> MACRON
'\xdd' # 0xB1 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xB2 -> LATIN SMALL LETTER Y WITH ACUTE
'\xb0' # 0xB3 -> DEGREE SIGN
'\xc7' # 0xB4 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xe7' # 0xB5 -> LATIN SMALL LETTER C WITH CEDILLA
'\xd1' # 0xB6 -> LATIN CAPITAL LETTER N WITH TILDE
'\xf1' # 0xB7 -> LATIN SMALL LETTER N WITH TILDE
'\xa1' # 0xB8 -> INVERTED EXCLAMATION MARK
'\xbf' # 0xB9 -> INVERTED QUESTION MARK
'\xa4' # 0xBA -> CURRENCY SIGN
'\xa3' # 0xBB -> POUND SIGN
'\xa5' # 0xBC -> YEN SIGN
'\xa7' # 0xBD -> SECTION SIGN
'\u0192' # 0xBE -> LATIN SMALL LETTER F WITH HOOK
'\xa2' # 0xBF -> CENT SIGN
'\xe2' # 0xC0 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xea' # 0xC1 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xf4' # 0xC2 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xfb' # 0xC3 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xe1' # 0xC4 -> LATIN SMALL LETTER A WITH ACUTE
'\xe9' # 0xC5 -> LATIN SMALL LETTER E WITH ACUTE
'\xf3' # 0xC6 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0xC7 -> LATIN SMALL LETTER U WITH ACUTE
'\xe0' # 0xC8 -> LATIN SMALL LETTER A WITH GRAVE
'\xe8' # 0xC9 -> LATIN SMALL LETTER E WITH GRAVE
'\xf2' # 0xCA -> LATIN SMALL LETTER O WITH GRAVE
'\xf9' # 0xCB -> LATIN SMALL LETTER U WITH GRAVE
'\xe4' # 0xCC -> LATIN SMALL LETTER A WITH DIAERESIS
'\xeb' # 0xCD -> LATIN SMALL LETTER E WITH DIAERESIS
'\xf6' # 0xCE -> LATIN SMALL LETTER O WITH DIAERESIS
'\xfc' # 0xCF -> LATIN SMALL LETTER U WITH DIAERESIS
'\xc5' # 0xD0 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xee' # 0xD1 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xd8' # 0xD2 -> LATIN CAPITAL LETTER O WITH STROKE
'\xc6' # 0xD3 -> LATIN CAPITAL LETTER AE
'\xe5' # 0xD4 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xed' # 0xD5 -> LATIN SMALL LETTER I WITH ACUTE
'\xf8' # 0xD6 -> LATIN SMALL LETTER O WITH STROKE
'\xe6' # 0xD7 -> LATIN SMALL LETTER AE
'\xc4' # 0xD8 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xec' # 0xD9 -> LATIN SMALL LETTER I WITH GRAVE
'\xd6' # 0xDA -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0xDB -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xc9' # 0xDC -> LATIN CAPITAL LETTER E WITH ACUTE
'\xef' # 0xDD -> LATIN SMALL LETTER I WITH DIAERESIS
'\xdf' # 0xDE -> LATIN SMALL LETTER SHARP S (GERMAN)
'\xd4' # 0xDF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xc1' # 0xE0 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0xE1 -> LATIN CAPITAL LETTER A WITH TILDE
'\xe3' # 0xE2 -> LATIN SMALL LETTER A WITH TILDE
'\xd0' # 0xE3 -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xf0' # 0xE4 -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xcd' # 0xE5 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xcc' # 0xE6 -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd3' # 0xE7 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd2' # 0xE8 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd5' # 0xE9 -> LATIN CAPITAL LETTER O WITH TILDE
'\xf5' # 0xEA -> LATIN SMALL LETTER O WITH TILDE
'\u0160' # 0xEB -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0xEC -> LATIN SMALL LETTER S WITH CARON
'\xda' # 0xED -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0178' # 0xEE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xff' # 0xEF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xde' # 0xF0 -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xfe' # 0xF1 -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb7' # 0xF2 -> MIDDLE DOT
'\xb5' # 0xF3 -> MICRO SIGN
'\xb6' # 0xF4 -> PILCROW SIGN
'\xbe' # 0xF5 -> VULGAR FRACTION THREE QUARTERS
'\u2014' # 0xF6 -> EM DASH
'\xbc' # 0xF7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xF8 -> VULGAR FRACTION ONE HALF
'\xaa' # 0xF9 -> FEMININE ORDINAL INDICATOR
'\xba' # 0xFA -> MASCULINE ORDINAL INDICATOR
'\xab' # 0xFB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u25a0' # 0xFC -> BLACK SQUARE
'\xbb' # 0xFD -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xb1' # 0xFE -> PLUS-MINUS SIGN
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
nwbt/HydraPlatform | HydraServer/python/HydraServer/db/__init__.py | 2 | 1796 | # (c) Copyright 2013, 2014, University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
from sqlalchemy.orm import scoped_session
from sqlalchemy import create_engine
from HydraLib import config
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
import logging
log = logging.getLogger(__name__)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
global DeclarativeBase
DeclarativeBase = declarative_base()
global DBSession
DBSession = None
global engine
engine = None
def connect():
db_url = config.get('mysqld', 'url')
log.info("Connecting to database: %s", db_url)
global engine
engine = create_engine(db_url)
maker = sessionmaker(bind=engine, autoflush=False, autocommit=False,
extension=ZopeTransactionExtension())
global DBSession
DBSession = scoped_session(maker)
DeclarativeBase.metadata.create_all(engine)
def commit_transaction():
try:
transaction.commit()
except Exception as e:
log.critical(e)
transaction.abort()
def close_session():
DBSession.remove()
def rollback_transaction():
transaction.abort()
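# Illustrative usage sketch (assumption, not part of the original module):
# a typical request-scoped unit of work with the helpers defined above.
#
#     connect()                    # once at startup; reads the 'mysqld' url
#     try:
#         session = DBSession()    # scoped session bound to the engine
#         ...                      # query / mutate through `session`
#         commit_transaction()     # commits via the zope transaction manager
#     except Exception:
#         rollback_transaction()
#     finally:
#         close_session()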
| gpl-3.0 |
theeternalsw0rd/xbmc | lib/libUPnP/Platinum/Build/Tools/Scripts/GenSvnVersionHeader.py | 263 | 1526 | #! /usr/bin/python
#############################################################
# This tool is used to generate the version info file #
#############################################################
import sys
import os
# ensure that the PLATINUM_KIT_HOME environment variable has been set
if not os.environ.has_key('PLATINUM_KIT_HOME'):
print 'ERROR: PLATINUM_KIT_HOME not set'
sys.exit(1)
PLATINUM_KIT_HOME = os.environ['PLATINUM_KIT_HOME']
# ensure that the PLATINUM_KIT_HOME directory exists
if not os.path.exists(PLATINUM_KIT_HOME) :
print 'ERROR: PLATINUM_KIT_HOME ('+PLATINUM_KIT_HOME+') does not exist'
sys.exit(1)
else :
print 'PLATINUM_KIT_HOME = ' + PLATINUM_KIT_HOME
# get the SVN repo version
version = os.popen('svnversion -n').readlines()[0]
print 'current VERSION =',version
if version.endswith('P'):
version = version[0:-1]
if version.endswith('MP'):
version = version[0:-2]
try:
version_int = int(version)+1 ## add one, because when we check it in, the rev will be incremented by one
except:
print 'ERROR: you cannot run this on a modified working copy'
sys.exit(1)
output = open(PLATINUM_KIT_HOME+'/Platinum/Source/Platinum/PltSvnVersion.h', 'w+')
output.write('/* DO NOT EDIT. This file was automatically generated by GenSvnVersionHeader.py */\n')
output.write('#define PLT_SVN_VERSION '+str(version_int)+'\n')
output.write('#define PLT_SVN_VERSION_STRING "'+str(version_int)+'"\n')
output.close()
print 'upon check-in, version will be', str(version_int)
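# Illustrative example (assumption): for a hypothetical clean working copy at
# revision 1234, `svnversion -n` prints "1234" and the generated
# PltSvnVersion.h would contain:
#
#   /* DO NOT EDIT. This file was automatically generated by GenSvnVersionHeader.py */
#   #define PLT_SVN_VERSION 1235
#   #define PLT_SVN_VERSION_STRING "1235"
#
# (1235 rather than 1234 because the script adds one to anticipate the
# revision created by checking this change in.)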
| gpl-2.0 |
omakk/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_runner_xunit.py | 200 | 7133 | #
# test correct setup/teardowns at
# module, class, and instance level
def test_module_and_function_setup(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_module(module):
assert not modlevel
module.modlevel.append(42)
def teardown_module(module):
modlevel.pop()
def setup_function(function):
function.answer = 17
def teardown_function(function):
del function.answer
def test_modlevel():
assert modlevel[0] == 42
assert test_modlevel.answer == 17
class TestFromClass:
def test_module(self):
assert modlevel[0] == 42
assert not hasattr(test_modlevel, 'answer')
""")
rep = reprec.matchreport("test_modlevel")
assert rep.passed
rep = reprec.matchreport("test_module")
assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
l = []
def setup_module(module):
l.append(1)
0/0
def test_nothing():
pass
def teardown_module(module):
l.append(2)
""")
reprec.assertoutcome(failed=1)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.l == [1]
def test_setup_function_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_function(function):
modlevel.append(1)
0/0
def teardown_function(module):
modlevel.append(2)
def test_func():
pass
""")
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
cls.clslevel.append(23)
def teardown_class(cls):
cls.clslevel.pop()
def test_classlevel(self):
assert self.clslevel[0] == 23
class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
def test_classlevel_anothertime(self):
assert self.clslevel == [23]
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
assert not TestInheritedClassSetupStillWorks.clslevel
""")
reprec.assertoutcome(passed=1+2+1)
def test_class_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup:
clslevel = []
def setup_class(cls):
0/0
def teardown_class(cls):
cls.clslevel.append(1)
def test_classlevel(self):
pass
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupMethod:
def setup_method(self, meth):
self.methsetup = meth
def teardown_method(self, meth):
del self.methsetup
def test_some(self):
assert self.methsetup == self.test_some
def test_other(self):
assert self.methsetup == self.test_other
""")
reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestMethodSetup:
clslevel = []
def setup_method(self, method):
self.clslevel.append(1)
0/0
def teardown_method(self, method):
self.clslevel.append(2)
def test_method(self):
pass
def test_cleanup():
assert TestMethodSetup.clslevel == [1]
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_generator_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupTeardownOnInstance:
def setup_class(cls):
cls.classsetup = True
def setup_method(self, method):
self.methsetup = method
def test_generate(self):
assert self.classsetup
assert self.methsetup == self.test_generate
yield self.generated, 5
yield self.generated, 2
def generated(self, value):
assert self.classsetup
assert self.methsetup == self.test_generate
assert value == 5
""")
reprec.assertoutcome(passed=1, failed=1)
def test_func_generator_setup(testdir):
reprec = testdir.inline_runsource("""
import sys
def setup_module(mod):
print ("setup_module")
mod.x = []
def setup_function(fun):
print ("setup_function")
x.append(1)
def teardown_function(fun):
print ("teardown_function")
x.pop()
def test_one():
assert x == [1]
def check():
print ("check")
sys.stderr.write("e\\n")
assert x == [1]
yield check
assert x == [1]
""")
rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
assert rep.passed
def test_method_setup_uses_fresh_instances(testdir):
reprec = testdir.inline_runsource("""
class TestSelfState1:
memory = []
def test_hello(self):
self.memory.append(self)
def test_afterhello(self):
assert self != self.memory[0]
""")
reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
pytest.skip("x")
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def pytest_funcarg__hello(request):
raise ValueError("xyz43")
def test_function1(hello):
pass
def test_function2(hello):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*function1*",
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*"
])
assert "xyz43" not in result.stdout.str()
| mpl-2.0 |
jhsenjaliya/incubator-airflow | airflow/example_dags/example_docker_operator.py | 15 | 1692 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
from airflow import DAG
from airflow.operators import BashOperator
from datetime import datetime, timedelta
from airflow.operators.docker_operator import DockerOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.utcnow(),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
dag = DAG(
'docker_sample', default_args=default_args, schedule_interval=timedelta(minutes=10))
t1 = BashOperator(
task_id='print_date',
bash_command='date',
dag=dag)
t2 = BashOperator(
task_id='sleep',
bash_command='sleep 5',
retries=3,
dag=dag)
t3 = DockerOperator(api_version='1.19',
docker_url='tcp://localhost:2375', #Set your docker URL
command='/bin/sleep 30',
image='centos:latest',
network_mode='bridge',
task_id='docker_op_tester',
dag=dag)
t4 = BashOperator(
task_id='print_hello',
bash_command='echo "hello world!!!"',
dag=dag)
t1.set_downstream(t2)
t1.set_downstream(t3)
t3.set_downstream(t4)
"""
| apache-2.0 |
omakk/servo | tests/wpt/css-tests/tools/pytest/_pytest/assertion/__init__.py | 175 | 6457 | """
support for presenting detailed information in failing assertions.
"""
import py
import os
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--assert',
action="store",
dest="assertmode",
choices=("rewrite", "reinterp", "plain",),
default="rewrite",
metavar="MODE",
help="""control assertion debugging tools. 'plain'
performs no assertion debugging. 'reinterp'
reinterprets assert statements after they failed
to provide assertion expression information.
'rewrite' (the default) rewrites assert
statements in test modules on import to
provide assert expression information. """)
group.addoption('--no-assert',
action="store_true",
default=False,
dest="noassert",
help="DEPRECATED equivalent to --assert=plain")
group.addoption('--nomagic', '--no-magic',
action="store_true",
default=False,
help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
def pytest_configure(config):
mode = config.getvalue("assertmode")
if config.getvalue("noassert") or config.getvalue("nomagic"):
mode = "plain"
if mode == "rewrite":
try:
import ast # noqa
except ImportError:
mode = "reinterp"
else:
# Both Jython and CPython 2.6.0 have AST bugs that make the
# assertion rewriting hook malfunction.
if (sys.platform.startswith('java') or
sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
hook = rewrite.AssertionRewritingHook() # noqa
sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
config._assertstate.trace("configured with mode set to %r" % (mode,))
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
hook = session.config._assertstate.hook
if hook is not None:
hook.set_session(session)
def _running_on_ci():
"""Check if we're currently running on a CI system."""
env_vars = ['CI', 'BUILD_NUMBER']
return any(var in os.environ for var in env_vars)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are dropped unless -vv was used or
running on a CI.
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
for new_expl in hook_result:
if new_expl:
if (sum(len(p) for p in new_expl[1:]) > 80*8 and
item.config.option.verbose < 2 and
not _running_on_ci()):
show_max = 10
truncated_lines = len(new_expl) - show_max
new_expl[show_max:] = [py.builtin._totext(
'Detailed information truncated (%d more lines)'
', use "-vv" to show' % truncated_lines)]
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = py.builtin._totext("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
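# Illustrative sketch (not part of this plugin): a user-side hook that the
# callbinrepr wrapper above would pick up through pytest_assertrepr_compare.
# `Money` and its fields are hypothetical names used only for the example.
#
#     # conftest.py
#     def pytest_assertrepr_compare(config, op, left, right):
#         if isinstance(left, Money) and isinstance(right, Money) and op == "==":
#             return ["Comparing Money instances:",
#                     "   amounts: %s != %s" % (left.amount, right.amount)]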
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
hook = session.config._assertstate.hook
if hook is not None:
hook.session = None
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
assert False
except AssertionError:
pass
else:
if mode == "rewrite":
specifically = ("assertions which are not in test modules "
"will be ignored")
else:
specifically = "failing tests may report as passing"
sys.stderr.write("WARNING: " + specifically +
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
| mpl-2.0 |
barbuza/django | tests/messages_tests/base.py | 316 | 14243 | from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test import modify_settings, override_settings
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
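    # Illustrative sketch (assumption, not part of the original tests): with
    # ``_loaded_data`` preset, iteration can be exercised without touching the
    # backend's real retrieval logic, e.g.
    #
    #     storage = self.get_storage([Message(constants.INFO, 'preloaded')])
    #     assert [m.message for m in storage] == ['preloaded']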
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' is True.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags,
['info', '', 'debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause |
awagner83/PhotoStore | setup.py | 1 | 1328 | #------------------------------------------------------------------------#
# PhotoStore - Photo Collection Storage
# Copyright (C) 2010 Adam Wagner <awagner83@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------#
"""Package installer."""
from sys import version_info, stderr
from setuptools import setup
if version_info[:2] < (3, 1):
stderr.write("Python 3.1 or greater is required to install.\n")
exit(1)
setup(
name='PhotoStore',
version='0.1',
description='Photo Collection Storage.',
author='Adam Wagner',
author_email='awagner83@gmail.com',
packages=['photostore']
)
| gpl-3.0 |
swegener/micropython | tests/basics/exceptpoly.py | 65 | 1769 | try:
raise ArithmeticError
except Exception:
print("Caught ArithmeticError via Exception")
try:
raise ArithmeticError
except ArithmeticError:
print("Caught ArithmeticError")
try:
raise AssertionError
except Exception:
print("Caught AssertionError via Exception")
try:
raise AssertionError
except AssertionError:
print("Caught AssertionError")
try:
raise AttributeError
except Exception:
print("Caught AttributeError via Exception")
try:
raise AttributeError
except AttributeError:
print("Caught AttributeError")
try:
raise EOFError
except Exception:
print("Caught EOFError via Exception")
try:
raise EOFError
except EOFError:
print("Caught EOFError")
try:
raise Exception
except BaseException:
print("Caught Exception via BaseException")
try:
raise Exception
except Exception:
print("Caught Exception")
try:
raise ImportError
except Exception:
print("Caught ImportError via Exception")
try:
raise ImportError
except ImportError:
print("Caught ImportError")
try:
raise IndentationError
except SyntaxError:
print("Caught IndentationError via SyntaxError")
try:
raise IndentationError
except IndentationError:
print("Caught IndentationError")
try:
raise IndexError
except LookupError:
print("Caught IndexError via LookupError")
try:
raise IndexError
except IndexError:
print("Caught IndexError")
try:
raise KeyError
except LookupError:
print("Caught KeyError via LookupError")
try:
raise KeyError
except KeyError:
print("Caught KeyError")
try:
raise LookupError
except Exception:
print("Caught LookupError via Exception")
try:
raise LookupError
except LookupError:
print("Caught LookupError")
| mit |
matty-jones/MorphCT | morphct/code/device_KMC.py | 1 | 10032 | import os
import pickle
import sys
import numpy as np
import subprocess as sp
from morphct.definitions import SINGLE_RUN_DEVICE_KMC_FILE
from morphct.code import helper_functions as hf
class morphology_moiety:
def __init__(self, mol_morph_name, parameter_dict):
chromophore_list_location = os.path.join(
parameter_dict["output_morph_dir"],
mol_morph_name,
"code",
"".join([mol_morph_name, ".pickle"]),
)
pickle_data = hf.load_pickle(chromophore_list_location)
self.AA_morphology_dict = pickle_data[0]
self.parameter_dict = pickle_data[3]
self.chromophore_list = pickle_data[4]
self.carrier_type = self.get_carrier_type()
# Now add the occupation data to the chromophoreLists so that we can
# prevent double occupation in the simulations.
# The occupied property is a list that contains the device moiety
# coordinates where the chromophore is occupied.
for index, chromophore in enumerate(self.chromophore_list):
chromophore.occupied = []
def get_carrier_type(self):
species_present = []
for chromophore in self.chromophore_list:
species_present.append(chromophore.species)
if len(set(species_present)) == 1:
if species_present[0].lower() == "donor":
return "hole"
elif species_present[0].lower() == "acceptor":
return "electron"
else:
print("Error in chromophore:")
for key, val in chromophore.__dict__.items():
print(key, "=", val)
raise SystemError("Chromophore species is neither donor nor acceptor")
else:
return "both"
class chromophore_data_container:
# A helper class that contains all of the chromophore data for ease of
# access from anywhere
def __init__(self, device_array, moiety_dictionary, wrapxy):
self.device_array = device_array
self.moiety_dictionary = moiety_dictionary
self.wrapxy = wrapxy
def return_chromophore_list(self, device_position):
device_moiety_type = self.device_array[tuple(device_position)]
return self.moiety_dictionary[device_moiety_type].chromophore_list
def return_specific_chromophore(self, device_position, chromo_ID):
device_moiety_type = self.device_array[tuple(device_position)]
return self.moiety_dictionary[device_moiety_type].chromophore_list[chromo_ID]
def return_random_chromophore(self, device_position):
device_moiety_type = self.device_array[tuple(device_position)]
return np.random.choice(
self.moiety_dictionary[device_moiety_type].chromophore_list
)
def return_closest_chromophore_to_position(self, device_position, desired_position):
closest_chromo_ID = None
# Check that there is an eligible device position that exists at these
# coordinates (i.e. there is a hop taking place within the active layer)
# Find out which axis is out of index
for axis_no, val in enumerate(device_position):
if val >= self.device_array.shape[axis_no]:
if axis_no == 2:
# Leaving out of the top of the device
return "top"
elif self.wrapxy:
# Bring it in on the reverse side
device_position[axis_no] = 0
else:
return "out of bounds"
if val < 0:
if axis_no == 2:
# Leaving out of the bottom of the device
return "bottom"
elif self.wrapxy:
# Bring it in on the reverse side
device_position[axis_no] = self.device_array.shape[axis_no] - 1
else:
return "out of bounds"
device_moiety_type = self.device_array[tuple(device_position)]
positions = np.array(
[
chromo.posn
for chromo in self.moiety_dictionary[
device_moiety_type
].chromophore_list
]
)
distances = np.sqrt(
np.sum((positions - np.array(desired_position)) ** 2, axis=1)
)
closest_chromo_ID = np.argmin(distances)
return self.moiety_dictionary[device_moiety_type].chromophore_list[
closest_chromo_ID
]
class morphology_data_container:
# A helper class that contains all of the morphology data for ease of
# access from anywhere
def __init__(self, device_array, moiety_dictionary):
self.device_array = device_array
self.moiety_dictionary = moiety_dictionary
def return_AA_morphology(self, device_position):
device_moiety_type = self.device_array[tuple(device_position)]
return self.moiety_dictionary[device_moiety_type].AA_morphology_dict
def return_device_moiety_type(self, device_position):
return self.device_array[tuple(device_position)]
def load_device_morphology(parameter_dict):
device_dir = os.path.join(
parameter_dict["input_device_dir"], parameter_dict["device_morphology"]
)
y_slices = os.listdir(device_dir)
# Initialize the array of the correct size (assumes cubic morphology)
device_array = np.zeros([len(y_slices)] * 3, dtype=int)
for y_val, file_name in enumerate(y_slices):
# Load the ySlice as-presented in the input files
y_slice = np.loadtxt(os.path.join(device_dir, file_name), dtype=int)
if len(y_slice.shape) > 0:
# The z-origin is at the top, and we need it at the bottom, so turn
# the array upside down
y_slice = np.flipud(y_slice)
# Now populate the array
for z_val, z_row in enumerate(y_slice):
for x_val, datum in enumerate(z_row):
device_array[x_val, y_val, z_val] = datum
else:
# Can't flipud and iterate over a zero-length array (one number), so
# assign it this way instead.
device_array[0, y_val, 0] = int(y_slice)
moiety_dictionary = {}
for moiety_ID in np.unique(device_array):
moiety_dictionary[moiety_ID] = morphology_moiety(
parameter_dict["device_components"][moiety_ID], parameter_dict
)
return device_array, moiety_dictionary
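# Illustrative sketch of the expected input (the file contents below are
# hypothetical): the device directory holds one whitespace-delimited integer
# file per y-slice, each loaded with np.loadtxt and flipped so that z
# increases upwards. A 3x3 slice file containing
#
#   1 1 1
#   0 0 0
#   0 0 0
#
# would place moiety 1 in the top layer (z = 2) of that slice.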
def main(parameter_dict):
# Get the random seed now for all the child processes
if parameter_dict["random_seed_override"] is not None:
np.random.seed(parameter_dict["random_seed_override"])
# First job will be to load in the device morphology, when I work out what
# format I want it to be.
device_array, moiety_dictionary = load_device_morphology(parameter_dict)
# Initialise the helperClass to obtain all of the chromophoreData required,
# allowing it to be accessed globally
chromophore_data = chromophore_data_container(
device_array, moiety_dictionary, parameter_dict["wrap_device_xy"]
)
morphology_data = morphology_data_container(device_array, moiety_dictionary)
# Write these classes out to a pickle file so that they can be loaded by the
# child processes later
to_pickle = [device_array, chromophore_data, morphology_data, parameter_dict]
save_directory = os.path.join(
parameter_dict["output_device_dir"], parameter_dict["device_morphology"], "code"
)
if parameter_dict["overwrite_current_data"] is True:
with open(
os.path.join(save_directory, "device_data.pickle"), "wb+"
) as pickle_file:
pickle.dump(to_pickle, pickle_file)
voltages = []
for V in parameter_dict["voltage_sweep"]:
voltages.append(V)
proc_IDs = parameter_dict["proc_IDs"]
jobs_list = [
voltages[i : i + (int(np.ceil(len(voltages) / len(proc_IDs))))]
for i in range(
0, len(voltages), int(np.ceil(len(voltages) / float(len(proc_IDs))))
)
]
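# The slicing above splits the voltage list as evenly as possible across the
# available processors; for example (illustrative, assuming Python 3 division),
# 5 voltages over 2 procs gives [[V0, V1, V2], [V3, V4]].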
running_jobs = []
output_dir = os.path.join(
parameter_dict["output_device_dir"], parameter_dict["device_morphology"], "KMC"
)
print("Writing job pickles for each CPU...")
for proc_ID, jobs in enumerate(jobs_list):
pickle_name = os.path.join(output_dir, "KMC_data_{:02d}.pickle".format(proc_ID))
with open(pickle_name, "wb+") as pickle_file:
pickle.dump(jobs, pickle_file)
print(
"KMC jobs for proc_ID",
proc_ID,
"written to KMC_data_{:02d}.pickle".format(proc_ID),
)
# Open the required processes to execute the KMC jobs
# Random seeding is a little weird here. If we don't generate a random
# seed in the child process, it will just use the system time. So, we
# generate a seed here to get the same random number stream each time,
# and then feed the child process a new seed from the random number
# stream. This way, we ensure that each child process has a different
# random number stream to the other processes, but it's the same stream
# every time we run the program.
child_seed = np.random.randint(0, 2 ** 32)
# Previous run command:
run_command = [
"python",
SINGLE_RUN_DEVICE_KMC_FILE,
output_dir,
str(proc_ID),
str(child_seed),
]
print(run_command)
running_jobs.append(sp.Popen(run_command))
# Wait for all jobs to complete
[p.wait() for p in running_jobs]
print("All KMC jobs completed!")
# Combine results if required.
if __name__ == "__main__":
try:
pickle_file = sys.argv[1]
except IndexError:
print(
"Please specify the pickle file to load to continue the pipeline from"
" this point."
)
sys.exit(1)
_, _, _, parameter_dict, _ = hf.load_pickle(pickle_file)
main(parameter_dict)
| gpl-3.0 |
amisrs/one-eighty | venv2/lib/python2.7/site-packages/sqlalchemy/ext/horizontal_shard.py | 31 | 4814 | # ext/horizontal_shard.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Horizontal sharding support.
Defines a rudimental 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
from .. import util
from ..orm.session import Session
from ..orm.query import Query
__all__ = ['ShardedSession', 'ShardedQuery']
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
super(ShardedQuery, self).__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self._shard_id = None
def set_shard(self, shard_id):
"""return a new query, limited to a single shard ID.
all subsequent operations with the returned query will
be against the single shard regardless of other state.
"""
q = self._clone()
q._shard_id = shard_id
return q
def _execute_and_instances(self, context):
def iter_for_shard(shard_id):
context.attributes['shard_id'] = shard_id
result = self._connection_from_session(
mapper=self._mapper_zero(),
shard_id=shard_id).execute(
context.statement,
self._params)
return self.instances(result, context)
if self._shard_id is not None:
return iter_for_shard(self._shard_id)
else:
partial = []
for shard_id in self.query_chooser(self):
partial.extend(iter_for_shard(shard_id))
# if some kind of in memory 'sorting'
# were done, this is where it would happen
return iter(partial)
def get(self, ident, **kwargs):
if self._shard_id is not None:
return super(ShardedQuery, self).get(ident)
else:
ident = util.to_list(ident)
for shard_id in self.id_chooser(self, ident):
o = self.set_shard(shard_id).get(ident, **kwargs)
if o is not None:
return o
else:
return None
class ShardedSession(Session):
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
query_cls=ShardedQuery, **kwargs):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped
instance, and possibly a SQL clause, returns a shard ID. This id
may be based off of the attributes present within the object, or on
some round-robin scheme. If the scheme is based on a selection, it
should set whatever state on the instance to mark it in the future as
participating in that shard.
:param id_chooser: A callable, passed a query and a tuple of identity
values, which should return a list of shard ids where the ID might
reside. The databases will be queried in the order of this listing.
:param query_chooser: For a given Query, returns the list of shard_ids
where the query should be issued. Results from all shards returned
will be combined together into a single listing.
:param shards: A dictionary of string shard names
to :class:`~sqlalchemy.engine.Engine` objects.
"""
super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
self.query_chooser = query_chooser
self.__binds = {}
self.connection_callable = self.connection
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance)
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(
mapper,
shard_id=shard_id,
instance=instance
).contextual_connect(**kwargs)
def get_bind(self, mapper, shard_id=None,
instance=None, clause=None, **kw):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance, clause=clause)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
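# Minimal usage sketch (the shard names, engines and routing decisions below
# are illustrative only): the three callables decide where instances are
# persisted and which shards a query is sent to.
#
#   shards = {'shard1': engine1, 'shard2': engine2}
#
#   def shard_chooser(mapper, instance, clause=None):
#       return 'shard1'               # e.g. route all writes to one shard
#
#   def id_chooser(query, ident):
#       return ['shard1', 'shard2']   # try every shard for a get()
#
#   def query_chooser(query):
#       return ['shard1', 'shard2']   # fan the query out to all shards
#
#   session = ShardedSession(shard_chooser, id_chooser, query_chooser,
#                            shards=shards)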
| mit |
bonitadecker77/python-for-android | python-build/python-libs/gdata/src/atom/core.py | 137 | 20292 | #!/usr/bin/env python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import inspect
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
STRING_ENCODING = 'utf-8'
class XmlElement(object):
"""Represents an element node in an XML document.
The text member is a UTF-8 encoded str or unicode.
"""
_qname = None
_other_elements = None
_other_attributes = None
# The rule set contains mappings for XML qnames to child members and the
# appropriate member classes.
_rule_set = None
_members = None
text = None
def __init__(self, text=None, *args, **kwargs):
if ('_members' not in self.__class__.__dict__
or self.__class__._members is None):
self.__class__._members = tuple(self.__class__._list_xml_members())
for member_name, member_type in self.__class__._members:
if member_name in kwargs:
setattr(self, member_name, kwargs[member_name])
else:
if isinstance(member_type, list):
setattr(self, member_name, [])
else:
setattr(self, member_name, None)
self._other_elements = []
self._other_attributes = {}
if text is not None:
self.text = text
def _list_xml_members(cls):
"""Generator listing all members which are XML elements or attributes.
The following members would be considered XML members:
foo = 'abc' - indicates an XML attribute with the qname abc
foo = SomeElement - indicates an XML child element
foo = [AnElement] - indicates a repeating XML child element, each instance
will be stored in a list in this member
foo = ('att1', '{http://example.com/namespace}att2') - indicates an XML
attribute which has different parsing rules in different versions of
the protocol. Version 1 of the XML parsing rules will look for an
attribute with the qname 'att1' but version 2 of the parsing rules will
look for a namespaced attribute with the local name of 'att2' and an
XML namespace of 'http://example.com/namespace'.
"""
members = []
for pair in inspect.getmembers(cls):
if not pair[0].startswith('_') and pair[0] != 'text':
member_type = pair[1]
if (isinstance(member_type, tuple) or isinstance(member_type, list)
or isinstance(member_type, (str, unicode))
or (inspect.isclass(member_type)
and issubclass(member_type, XmlElement))):
members.append(pair)
return members
_list_xml_members = classmethod(_list_xml_members)
def _get_rules(cls, version):
"""Initializes the _rule_set for the class which is used when parsing XML.
This method is used internally for parsing and generating XML for an
XmlElement. It is not recommended that you call this method directly.
Returns:
A tuple containing the XML parsing rules for the appropriate version.
The tuple looks like:
(qname, {sub_element_qname: (member_name, member_class, repeating), ..},
{attribute_qname: member_name})
To give a couple of concrete examples, the atom.data.Control _get_rules
with version of 2 will return:
('{http://www.w3.org/2007/app}control',
{'{http://www.w3.org/2007/app}draft': ('draft',
<class 'atom.data.Draft'>,
False)},
{})
Calling _get_rules with version 1 on gdata.data.FeedLink will produce:
('{http://schemas.google.com/g/2005}feedLink',
{'{http://www.w3.org/2005/Atom}feed': ('feed',
<class 'gdata.data.GDFeed'>,
False)},
{'href': 'href', 'readOnly': 'read_only', 'countHint': 'count_hint',
'rel': 'rel'})
"""
# Initialize the _rule_set to make sure there is a slot available to store
# the parsing rules for this version of the XML schema.
# Look for rule set in the class __dict__ proxy so that only the
# _rule_set for this class will be found. By using the dict proxy
# we avoid finding rule_sets defined in superclasses.
# The four lines below provide support for any number of versions, but it
# runs a bit slower than hard coding slots for two versions, so I'm using
# the below two lines.
#if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
# cls._rule_set = []
#while len(cls.__dict__['_rule_set']) < version:
# cls._rule_set.append(None)
# If there is no rule set cache in the class, provide slots for two XML
# versions. If and when there is a version 3, this list will need to be
# expanded.
if '_rule_set' not in cls.__dict__ or cls._rule_set is None:
cls._rule_set = [None, None]
# If a version higher than 2 is requested, fall back to version 2 because
# 2 is currently the highest supported version.
if version > 2:
return cls._get_rules(2)
# Check the dict proxy for the rule set to avoid finding any rule sets
# which belong to the superclass. We only want rule sets for this class.
if cls._rule_set[version-1] is None:
# The rule set for each version consists of the qname for this element
# ('{namespace}tag'), a dictionary (elements) for looking up the
# corresponding class member when given a child element's qname, and a
# dictionary (attributes) for looking up the corresponding class member
# when given an XML attribute's qname.
elements = {}
attributes = {}
if ('_members' not in cls.__dict__ or cls._members is None):
cls._members = tuple(cls._list_xml_members())
for member_name, target in cls._members:
if isinstance(target, list):
# This member points to a repeating element.
elements[_get_qname(target[0], version)] = (member_name, target[0],
True)
elif isinstance(target, tuple):
# This member points to a versioned XML attribute.
if version <= len(target):
attributes[target[version-1]] = member_name
else:
attributes[target[-1]] = member_name
elif isinstance(target, (str, unicode)):
# This member points to an XML attribute.
attributes[target] = member_name
elif issubclass(target, XmlElement):
# This member points to a single occurrence element.
elements[_get_qname(target, version)] = (member_name, target, False)
version_rules = (_get_qname(cls, version), elements, attributes)
cls._rule_set[version-1] = version_rules
return version_rules
else:
return cls._rule_set[version-1]
_get_rules = classmethod(_get_rules)
def get_elements(self, tag=None, namespace=None, version=1):
"""Find all sub elements which match the tag and namespace.
To find all elements in this object, call get_elements with the tag and
namespace both set to None (the default). This method searches through
the object's members and the elements stored in _other_elements which
did not match any of the XML parsing rules for this class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching elements.
Returns:
A list of the matching XmlElements.
"""
matches = []
ignored1, elements, ignored2 = self.__class__._get_rules(version)
if elements:
for qname, element_def in elements.iteritems():
member = getattr(self, element_def[0])
if member:
if _qname_matches(tag, namespace, qname):
if element_def[2]:
# If this is a repeating element, copy all instances into the
# result list.
matches.extend(member)
else:
matches.append(member)
for element in self._other_elements:
if _qname_matches(tag, namespace, element._qname):
matches.append(element)
return matches
GetElements = get_elements
# FindExtensions and FindChildren are provided for backwards compatibility
# to the atom.AtomBase class.
# However, FindExtensions may return more results than the v1 atom.AtomBase
# method does, because get_elements searches both the expected children
# and the unexpected "other elements". The old AtomBase.FindExtensions
# method searched only "other elements" AKA extension_elements.
FindExtensions = get_elements
FindChildren = get_elements
def get_attributes(self, tag=None, namespace=None, version=1):
"""Find all attributes which match the tag and namespace.
To find all attributes in this object, call get_attributes with the tag
and namespace both set to None (the default). This method searches
through the object's members and the attributes stored in
_other_attributes which did not fit any of the XML parsing rules for this
class.
Args:
tag: str
namespace: str
version: int Specifies the version of the XML rules to be used when
searching for matching attributes.
Returns:
A list of XmlAttribute objects for the matching attributes.
"""
matches = []
ignored1, ignored2, attributes = self.__class__._get_rules(version)
if attributes:
for qname, attribute_def in attributes.iteritems():
member = getattr(self, attribute_def[0])
if member:
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, member))
for qname, value in self._other_attributes.iteritems():
if _qname_matches(tag, namespace, qname):
matches.append(XmlAttribute(qname, value))
return matches
GetAttributes = get_attributes
def _harvest_tree(self, tree, version=1):
"""Populates object members from the data in the tree Element."""
qname, elements, attributes = self.__class__._get_rules(version)
for element in tree:
if elements and element.tag in elements:
definition = elements[element.tag]
# If this is a repeating element, make sure the member is set to a
# list.
if definition[2]:
if getattr(self, definition[0]) is None:
setattr(self, definition[0], [])
getattr(self, definition[0]).append(_xml_element_from_tree(element,
definition[1], version))
else:
setattr(self, definition[0], _xml_element_from_tree(element,
definition[1], version))
else:
self._other_elements.append(_xml_element_from_tree(element, XmlElement,
version))
for attrib, value in tree.attrib.iteritems():
if attributes and attrib in attributes:
setattr(self, attributes[attrib], value)
else:
self._other_attributes[attrib] = value
if tree.text:
self.text = tree.text
def _to_tree(self, version=1, encoding=None):
new_tree = ElementTree.Element(_get_qname(self, version))
self._attach_members(new_tree, version, encoding)
return new_tree
def _attach_members(self, tree, version=1, encoding=None):
"""Convert members to XML elements/attributes and add them to the tree.
Args:
tree: An ElementTree.Element which will be modified. The members of
this object will be added as child elements or attributes
according to the rules described in _expected_elements and
_expected_attributes. The elements and attributes stored in
other_attributes and other_elements are also added as children
of this tree.
version: int Ignored in this method but used by VersionedElement.
encoding: str (optional)
"""
qname, elements, attributes = self.__class__._get_rules(version)
encoding = encoding or STRING_ENCODING
# Add the expected elements and attributes to the tree.
if elements:
for tag, element_def in elements.iteritems():
member = getattr(self, element_def[0])
# If this is a repeating element and there are members in the list.
if member and element_def[2]:
for instance in member:
instance._become_child(tree, version)
elif member:
member._become_child(tree, version)
if attributes:
for attribute_tag, member_name in attributes.iteritems():
value = getattr(self, member_name)
if value:
tree.attrib[attribute_tag] = value
# Add the unexpected (other) elements and attributes to the tree.
for element in self._other_elements:
element._become_child(tree, version)
for key, value in self._other_attributes.iteritems():
# I'm not sure if unicode can be used in the attribute name, so for now
# we assume the encoding is correct for the attribute name.
if not isinstance(value, unicode):
value = value.decode(encoding)
tree.attrib[key] = value
if self.text:
if isinstance(self.text, unicode):
tree.text = self.text
else:
tree.text = self.text.decode(encoding)
def to_string(self, version=1, encoding=None):
"""Converts this object to XML."""
return ElementTree.tostring(self._to_tree(version, encoding))
ToString = to_string
def __str__(self):
return self.to_string()
def _become_child(self, tree, version=1):
"""Adds a child element to tree with the XML data in self."""
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = _get_qname(self, version)
self._attach_members(new_child, version)
def __get_extension_elements(self):
return self._other_elements
def __set_extension_elements(self, elements):
self._other_elements = elements
extension_elements = property(__get_extension_elements,
__set_extension_elements,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def __get_extension_attributes(self):
return self._other_attributes
def __set_extension_attributes(self, attributes):
self._other_attributes = attributes
extension_attributes = property(__get_extension_attributes,
__set_extension_attributes,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
def _get_tag(self, version=1):
qname = _get_qname(self, version)
return qname[qname.find('}')+1:]
def _get_namespace(self, version=1):
qname = _get_qname(self, version)
if qname.startswith('{'):
return qname[1:qname.find('}')]
else:
return None
def _set_tag(self, tag):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if self._qname[0].startswith('{'):
self._qname[0] = '{%s}%s' % (self._get_namespace(1), tag)
else:
self._qname[0] = tag
else:
if self._qname.startswith('{'):
self._qname = '{%s}%s' % (self._get_namespace(), tag)
else:
self._qname = tag
def _set_namespace(self, namespace):
if isinstance(self._qname, tuple):
self._qname = self._qname.copy()
if namespace:
self._qname[0] = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname[0] = self._get_tag(1)
else:
if namespace:
self._qname = '{%s}%s' % (namespace, self._get_tag(1))
else:
self._qname = self._get_tag(1)
tag = property(_get_tag, _set_tag,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
namespace = property(_get_namespace, _set_namespace,
"""Provides backwards compatibility for v1 atom.AtomBase classes.""")
# Provided for backwards compatibility to atom.ExtensionElement
children = extension_elements
attributes = extension_attributes
def _get_qname(element, version):
if isinstance(element._qname, tuple):
if version <= len(element._qname):
return element._qname[version-1]
else:
return element._qname[-1]
else:
return element._qname
def _qname_matches(tag, namespace, qname):
"""Logic determines if a QName matches the desired local tag and namespace.
This is used in XmlElement.get_elements and XmlElement.get_attributes to
find matches in the element's members (among all expected-and-unexpected
elements-and-attributes).
Args:
tag: string
namespace: string
qname: string in the form '{xml_namespace}localtag' or 'tag' if there is
no namespace.
Returns:
boolean True if the member's tag and namespace fit the expected tag and
namespace.
"""
# If there is no expected namespace or tag, then everything will match.
if qname is None:
member_tag = None
member_namespace = None
else:
if qname.startswith('{'):
member_namespace = qname[1:qname.index('}')]
member_tag = qname[qname.index('}') + 1:]
else:
member_namespace = None
member_tag = qname
return ((tag is None and namespace is None)
# If there is a tag, but no namespace, see if the local tag matches.
or (namespace is None and member_tag == tag)
# There was no tag, but there was a namespace so see if the namespaces
# match.
or (tag is None and member_namespace == namespace)
# There was no tag, and the desired elements have no namespace, so check
# to see that the member's namespace is None.
or (tag is None and namespace == ''
and member_namespace is None)
# The tag and the namespace both match.
or (tag == member_tag
and namespace == member_namespace)
# The tag matches, and the expected namespace is the empty namespace,
# check to make sure the member's namespace is None.
or (tag == member_tag and namespace == ''
and member_namespace is None))
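# A few illustrative outcomes of the rules above (example calls, not part of
# the original module):
#   _qname_matches('feed', None, '{http://www.w3.org/2005/Atom}feed')  -> True
#   _qname_matches(None, 'http://www.w3.org/2005/Atom',
#                  '{http://www.w3.org/2005/Atom}feed')                -> True
#   _qname_matches('feed', '', '{http://www.w3.org/2005/Atom}feed')    -> False
#   _qname_matches(None, None, 'anything')                             -> True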
def parse(xml_string, target_class=None, version=1, encoding=None):
"""Parses the XML string according to the rules for the target_class.
Args:
xml_string: str or unicode
target_class: XmlElement or a subclass. If None is specified, the
XmlElement class is used.
version: int (optional) The version of the schema which should be used when
converting the XML into an object. The default is 1.
encoding: str (optional) The character encoding of the bytes in the
xml_string. Default is 'UTF-8'.
"""
if target_class is None:
target_class = XmlElement
if isinstance(xml_string, unicode):
if encoding is None:
xml_string = xml_string.encode(STRING_ENCODING)
else:
xml_string = xml_string.encode(encoding)
tree = ElementTree.fromstring(xml_string)
return _xml_element_from_tree(tree, target_class, version)
Parse = parse
xml_element_from_string = parse
XmlElementFromString = xml_element_from_string
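# Minimal illustrative use of parse() (the XML snippet is made up):
#
#   elem = parse('<outer><inner>some text</inner></outer>')
#   elem.tag                              # 'outer'
#   elem.get_elements('inner')[0].text    # 'some text'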
def _xml_element_from_tree(tree, target_class, version=1):
if target_class._qname is None:
instance = target_class()
instance._qname = tree.tag
instance._harvest_tree(tree, version)
return instance
# TODO handle the namespace-only case
# Namespace only will be used with Google Spreadsheets rows and
# Google Base item attributes.
elif tree.tag == _get_qname(target_class, version):
instance = target_class()
instance._harvest_tree(tree, version)
return instance
return None
class XmlAttribute(object):
def __init__(self, qname, value):
self._qname = qname
self.value = value
| apache-2.0 |
zhmz90/keras | keras/preprocessing/sequence.py | 76 | 4441 | from __future__ import absolute_import
# -*- coding: utf-8 -*-
import numpy as np
import random
from six.moves import range
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
"""
Pad each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen. Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
for idx, s in enumerate(sequences):
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError("Truncating type '%s' not understood" % padding)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError("Padding type '%s' not understood" % padding)
return x
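# Illustrative behaviour (example values, not part of the original module):
#
#   pad_sequences([[1, 2], [3, 4, 5]])
#   # -> [[0, 1, 2],
#   #     [3, 4, 5]]      (pre-padding, maxlen inferred as 3)
#
#   pad_sequences([[1, 2, 3, 4]], maxlen=2)
#   # -> [[3, 4]]         ('pre' truncating keeps the last maxlen entries)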
def make_sampling_table(size, sampling_factor=1e-5):
'''
This generates an array where the ith element
is the probability that a word of rank i would be sampled,
according to the sampling distribution used in word2vec.
The word2vec formula is:
p(word) = min(1, sqrt(word.frequency/sampling_factor) / (word.frequency/sampling_factor))
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
where gamma is the Euler-Mascheroni constant.
'''
gamma = 0.577
rank = np.array(list(range(size)))
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1./(12.*rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
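# Illustrative use (approximate values, not part of the original module):
#
#   table = make_sampling_table(int(1e5))
#   # table[1] is small (very frequent ranks are aggressively subsampled),
#   # while entries for large ranks approach 1 (rare words are nearly
#   # always kept).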
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None):
'''
Take a sequence (list of indexes of words),
returns couples of [word_index, other_word index] and labels (1s or 0s),
where label = 1 if 'other_word' belongs to the context of 'word',
and label=0 if 'other_word' is randomly sampled
@param vocabulary_size: int. maximum possible word index + 1
@param window_size: int. actually half-window. The window of a word wi will be [i-window_size, i+window_size+1]
@param negative_samples: float >= 0. 0 for no negative (=random) samples. 1 for same number as positive samples. etc.
@param categorical: bool. if False, labels will be integers (eg. [0, 1, 1 .. ]),
if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ]
Note: by convention, index 0 in the vocabulary is a non-word and will be skipped.
'''
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
window_start = max(0, i-window_size)
window_end = min(len(sequence), i+window_size+1)
for j in range(window_start, window_end):
if j != i:
wj = sequence[j]
if not wj:
continue
couples.append([wi, wj])
if categorical:
labels.append([0,1])
else:
labels.append(1)
if negative_samples > 0:
nb_negative_samples = int(len(labels) * negative_samples)
words = [c[0] for c in couples]
random.shuffle(words)
couples += [[words[i%len(words)], random.randint(1, vocabulary_size-1)] for i in range(nb_negative_samples)]
if categorical:
labels += [[1,0]]*nb_negative_samples
else:
labels += [0]*nb_negative_samples
if shuffle:
seed = random.randint(0,10e6)
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
random.shuffle(labels)
return couples, labels
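# Illustrative use (toy sequence, not part of the original module):
#
#   couples, labels = skipgrams([1, 2, 3], vocabulary_size=10, window_size=1,
#                               negative_samples=0., shuffle=False)
#   # couples -> [[1, 2], [2, 1], [2, 3], [3, 2]], labels -> [1, 1, 1, 1]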
| mit |
ColOfAbRiX/ansible | lib/ansible/modules/storage/netapp/netapp_e_lun_mapping.py | 47 | 12304 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: netapp_e_lun_mapping
author: Kevin Hulquest (@hulquest)
short_description: Create or Remove LUN Mappings
description:
- Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
version_added: "2.2"
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- "The storage system array identifier."
required: False
lun:
description:
- The LUN number you wish to give the mapping
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
required: False
default: 0
target:
description:
- The name of host or hostgroup you wish to assign to the mapping
- If omitted, the default hostgroup is used.
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
required: False
volume_name:
description:
- The name of the volume you wish to include in the mapping.
required: True
target_type:
description:
- Whether the target is a host or group.
- Required if supplying an explicit target.
required: False
choices: ["host", "group"]
state:
description:
- Present will ensure the mapping exists, absent will remove the mapping.
- All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
required: True
choices: ["present", "absent"]
api_url:
description:
- "The full API url. Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
---
- name: Lun Mapping Example
netapp_e_lun_mapping:
state: present
ssid: 1
lun: 12
target: Wilson
volume_name: Colby1
target_type: group
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
'''
RETURN = '''
msg: Mapping exists.
msg: Mapping removed.
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def get_host_and_group_map(module, ssid, api_url, user, pwd):
mapping = dict(host=dict(), group=dict())
hostgroups = 'storage-systems/%s/host-groups' % ssid
groups_url = api_url + hostgroups
try:
hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
except:
err = get_exception()
module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))
for group in hg_data:
mapping['group'][group['name']] = group['id']
hosts = 'storage-systems/%s/hosts' % ssid
hosts_url = api_url + hosts
try:
h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
except:
err = get_exception()
module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))
for host in h_data:
mapping['host'][host['name']] = host['id']
return mapping
def get_volume_id(module, data, ssid, name, api_url, user, pwd):
qty = 0
for volume in data:
if volume['name'] == name:
qty += 1
if qty > 1:
module.fail_json(msg="More than one volume with the name: %s was found, "
"please use the volume WWN instead" % name)
else:
wwn = volume['wwn']
try:
return wwn
except NameError:
module.fail_json(msg="No volume with the name: %s, was found" % (name))
def get_hostgroups(module, ssid, api_url, user, pwd):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return data
except Exception:
module.fail_json(msg="There was an issue with connecting, please check that your"
"endpoint is properly defined and your credentials are correct")
def get_volumes(module, ssid, api_url, user, pwd, mappable):
volumes = 'storage-systems/%s/%s' % (ssid, mappable)
url = api_url + volumes
try:
rc, data = request(url, url_username=user, url_password=pwd)
except Exception:
err = get_exception()
module.fail_json(
msg="Failed to mappable objects. Type[%s. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
return data
def get_lun_mappings(ssid, api_url, user, pwd, get_all=None):
mappings = 'storage-systems/%s/volume-mappings' % ssid
url = api_url + mappings
rc, data = request(url, url_username=user, url_password=pwd)
if not get_all:
remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id')
for key in remove_keys:
for mapping in data:
del mapping[key]
return data
def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd):
mappings = 'storage-systems/%s/volume-mappings' % ssid
url = api_url + mappings
post_body = json.dumps(dict(
mappableObjectId=lun_map['volumeRef'],
targetId=lun_map['mapRef'],
lun=lun_map['lun']
))
rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
ignore_errors=True)
if rc == 422:
data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd)
# module.fail_json(msg="The volume you specified '%s' is already "
# "part of a different LUN mapping. If you "
# "want to move it to a different host or "
# "hostgroup, then please use the "
# "netapp_e_move_lun module" % vol_name)
return data
def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd):
lun_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd)
move_lun = "storage-systems/%s/volume-mappings/%s/move" % (ssid, lun_id)
url = api_url + move_lun
post_body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun']))
rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS)
return data
def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd):
data = get_lun_mappings(ssid, api_url, user, pwd, get_all=True)
for lun_map in data:
if lun_map['volumeRef'] == lun_mapping['volumeRef']:
return lun_map['id']
# This shouldn't ever get called
module.fail_json(msg="No LUN map found.")
def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd):
lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd)
lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id)
url = api_url + lun_del
rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS)
return data
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
api_username=dict(type='str', required=True),
api_password=dict(type='str', required=True, no_log=True),
api_url=dict(type='str', required=True),
state=dict(required=True, choices=['present', 'absent']),
target=dict(required=False, default=None),
target_type=dict(required=False, choices=['host', 'group']),
lun=dict(required=False, type='int', default=0),
ssid=dict(required=False),
volume_name=dict(required=True),
))
module = AnsibleModule(argument_spec=argument_spec)
state = module.params['state']
target = module.params['target']
target_type = module.params['target_type']
lun = module.params['lun']
ssid = module.params['ssid']
vol_name = module.params['volume_name']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
if not api_url.endswith('/'):
api_url += '/'
volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes")
thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes")
volref = None
for vol in volume_map:
if vol['label'] == vol_name:
volref = vol['volumeRef']
if not volref:
for vol in thin_volume_map:
if vol['label'] == vol_name:
volref = vol['volumeRef']
if not volref:
module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name)
host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd)
desired_lun_mapping = dict(
mapRef=host_and_group_mapping[target_type][target],
lun=lun,
volumeRef=volref
)
lun_mappings = get_lun_mappings(ssid, api_url, user, pwd)
if state == 'present':
if desired_lun_mapping in lun_mappings:
module.exit_json(changed=False, msg="Mapping exists")
else:
result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd)
module.exit_json(changed=True, **result)
elif state == 'absent':
if desired_lun_mapping in lun_mappings:
result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd)
module.exit_json(changed=True, msg="Mapping removed")
else:
module.exit_json(changed=False, msg="Mapping absent")
if __name__ == '__main__':
main()
| gpl-3.0 |
MicroPasts/transcribingUr | createTasks.py | 1 | 7567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
import json
from optparse import OptionParser
import pbclient
import random
import logging
import time
from requests import exceptions
def contents(filename):
return file(filename).read()
def handle_arguments():
# Arguments for the application
usage = "usage: %prog [options]"
parser = OptionParser(usage)
# URL where PyBossa listens
parser.add_option("-s", "--server", dest="api_url",
help="PyBossa URL http://domain.com/", metavar="URL",
default="http://localhost:5000/")
# API-KEY
parser.add_option("-k", "--api-key", dest="api_key",
help="PyBossa User API-KEY to interact with PyBossa",
metavar="API-KEY")
# Create App
parser.add_option("-c", "--create-app", action="store_true",
dest="create_app",
help="Create the application",
metavar="CREATE-APP")
# Update template for tasks and long_description for app
parser.add_option("-t", "--update-template", action="store_true",
dest="update_template",
help="Update Tasks template",
metavar="UPDATE-TEMPLATE")
# Update tasks question
parser.add_option("-q", "--update-tasks",
type="int",
dest="update_tasks",
help="Update Tasks n_answers",
metavar="UPDATE-TASKS")
# Modify the number of TaskRuns per Task
parser.add_option("-n", "--number-answers",
type="int",
dest="n_answers",
help="Number of answers per task",
metavar="N-ANSWERS",
default=3)
parser.add_option("-a", "--application-config",
dest="app_config",
help="Application config file",
metavar="APP-CONFIG",
default="app.json")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if not options.create_app and not options.update_template\
and not options.update_tasks:
parser.error("Please check --help or -h for the available options")
if not options.api_key:
parser.error("You must supply an API-KEY to create an \
application and tasks in PyBossa")
return options
def get_configuration():
options = handle_arguments()
# Load app details
try:
with file(options.app_config) as app_json:
app_config = json.load(app_json)
except IOError:
print "application config file is missing! Please create a new one"
exit(1)
return (app_config, options)
def run(app_config, options):
def check_api_error(api_response):
"""Check if returned API response contains an error"""
if type(api_response) == dict and (api_response.get('status') == 'failed'):
raise exceptions.HTTPError
def format_error(module, error):
"""Format the error for the given module"""
logging.error(module)
# Beautify JSON error
if type(error) == list:
print "Application not found"
else:
print json.dumps(error, sort_keys=True, indent=4, separators=(',', ': '))
exit(1)
def find_app_by_short_name():
try:
response = pbclient.find_app(short_name=app_config['short_name'])
check_api_error(response)
return response[0]
except:
format_error("pbclient.find_app", response)
def setup_app():
app = find_app_by_short_name()
app.long_description = contents('long_description.html')
#Category 8 = near eastern archaeology
app.category_id = 8
#Set app to hidden for development
app.hidden = 1
app.info['task_presenter'] = contents('template.html')
app.info['thumbnail'] = app_config['thumbnail']
app.info['tutorial'] = contents('tutorial.html')
try:
response = pbclient.update_app(app)
check_api_error(response)
return app
except:
format_error("pbclient.update_app", response)
def create_photo_task(app, photo, question, priority=0):
# Data for the tasks
task_info = photo
try:
response = pbclient.create_task(app.id, task_info, priority_0=priority)
check_api_error(response)
except:
format_error("pbclient.create_task", response)
pbclient.set('api_key', options.api_key)
pbclient.set('endpoint', options.api_url)
if options.verbose:
        print('Running against PyBossa instance at: %s' % options.api_url)
print('Using API-KEY: %s' % options.api_key)
    if options.create_app:
        try:
            response = pbclient.create_app(app_config['name'],
                                           app_config['short_name'],
                                           app_config['description'])
            check_api_error(response)
            app = setup_app()
        except:
            format_error("pbclient.create_app", response)
    else:
        app = find_app_by_short_name()
if options.update_template:
print "Updating app template"
# discard return value
setup_app()
if options.update_tasks:
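        # Generator that pages through every task of the app, 100 at a time,
        # until the API returns an empty batch.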
def tasks(app):
offset = 0
limit = 100
while True:
try:
tasks = pbclient.get_tasks(app.id, offset=offset, limit=limit)
check_api_error(tasks)
if len(tasks) == 0:
break
for task in tasks:
yield task
offset += len(tasks)
except:
format_error("pbclient.get_tasks", response)
def update_task(task, count):
print "Updating task: %s" % task.id
if 'n_answers' in task.info:
del(task.info['n_answers'])
task.n_answers = options.update_tasks
try:
response = pbclient.update_task(task)
check_api_error(response)
count[0] += 1
except:
format_error("pbclient.update_task", response)
print "Updating task n_answers"
app = find_app_by_short_name()
n_tasks = [0]
[update_task(t, n_tasks) for t in tasks(app)]
print "%s Tasks have been updated!" % n_tasks[0]
if __name__ == "__main__":
app_config, options = get_configuration()
run(app_config, options) | agpl-3.0 |
LoLab-VU/pysb | pysb/export/pysb_flat.py | 3 | 2750 | """
A module containing a class that exports a PySB model to a single Python source
file that, when imported, will recreate the same model. This is intended for
saving a dynamically generated model so that it can be reused without re-running
the dynamic generation process. Note that any macro calls and other program
structure in the original model are "flattened" in the process.
For information on how to use the model exporters, see the documentation
for :py:mod:`pysb.export`.
Structure of the Python code
============================
The standalone Python code calls ``Model()``, then defines Monomers, Parameters,
Expressions (constant), Compartments, Observables, Expressions (dynamic), Rules
and initial conditions in that order. This can be considered a sort of "repr()"
for a full model.
If the output is saved as ``foo.py`` then one may load the model with the
following line::
from foo import model
"""
from pysb.export import Exporter
from io import StringIO
class PysbFlatExporter(Exporter):
"""A class for generating PySB "flat" model source code from a model.
Inherits from :py:class:`pysb.export.Exporter`, which implements
basic functionality for all exporters.
"""
def export(self):
"""Export PySB source code from a model.
Returns
-------
string
String containing the Python code.
"""
output = StringIO()
# Convenience function for writing out a componentset.
def write_cset(cset):
for c in cset:
output.write(repr(c))
output.write("\n")
if cset:
output.write("\n")
if self.docstring:
output.write('"""')
output.write(self.docstring)
output.write('"""\n\n')
output.write("# exported from PySB model '%s'\n" % self.model.name)
output.write("\n")
output.write("from pysb import Model, Monomer, Parameter, Expression, "
"Compartment, Rule, Observable, Initial, MatchOnce, "
"Annotation, MultiState, Tag, ANY, WILD\n")
output.write("\n")
output.write("Model()\n")
output.write("\n")
write_cset(self.model.monomers)
write_cset(self.model.parameters)
write_cset(self.model.expressions_constant())
write_cset(self.model.compartments)
write_cset(self.model.observables)
write_cset(self.model.tags)
write_cset(self.model.expressions_dynamic())
write_cset(self.model.rules)
for ic in self.model.initials:
output.write("%s\n" % ic)
output.write("\n")
write_cset(self.model.annotations)
return output.getvalue()
| bsd-2-clause |
JT5D/scikit-learn | benchmarks/bench_plot_svd.py | 322 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat, infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
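    # Time exact scipy SVD against scikit-learn's randomized_svd on synthetic
    # low-rank matrices of increasing size.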
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
foreni-packages/golismero | tools/sqlmap/plugins/dbms/sqlite/enumeration.py | 8 | 1893 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.data import logger
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.enumeration import Enumeration as GenericEnumeration
class Enumeration(GenericEnumeration):
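    # SQLite has no notion of users, privileges or multiple databases, so most
    # of the generic enumeration hooks are stubbed out with a warning.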
def __init__(self):
GenericEnumeration.__init__(self)
def getCurrentUser(self):
warnMsg = "on SQLite it is not possible to enumerate the current user"
logger.warn(warnMsg)
def getCurrentDb(self):
warnMsg = "on SQLite it is not possible to get name of the current database"
logger.warn(warnMsg)
def isDba(self):
warnMsg = "on SQLite the current user has all privileges"
logger.warn(warnMsg)
def getUsers(self):
warnMsg = "on SQLite it is not possible to enumerate the users"
logger.warn(warnMsg)
return []
def getPasswordHashes(self):
warnMsg = "on SQLite it is not possible to enumerate the user password hashes"
logger.warn(warnMsg)
return {}
def getPrivileges(self, *args):
warnMsg = "on SQLite it is not possible to enumerate the user privileges"
logger.warn(warnMsg)
return {}
def getDbs(self):
warnMsg = "on SQLite it is not possible to enumerate databases (use only '--tables')"
logger.warn(warnMsg)
return []
def searchDb(self):
warnMsg = "on SQLite it is not possible to search databases"
logger.warn(warnMsg)
return []
def searchColumn(self):
errMsg = "on SQLite it is not possible to search columns"
raise SqlmapUnsupportedFeatureException(errMsg)
def getHostname(self):
warnMsg = "on SQLite it is not possible to enumerate the hostname"
logger.warn(warnMsg)
| gpl-2.0 |
aiguofer/bokeh | examples/plotting/file/line_select.py | 3 | 1281 | """ Example demonstrating the picking of line objects.
"""
import numpy as np
from bokeh.models import TapTool, CustomJS, ColumnDataSource
from bokeh.plotting import output_file, show, figure
# The data is setup to have very different scales in x and y, to verify
# that picking happens in pixels. Different widths are used to test that
# you can click anywhere on the visible line.
#
# Note that the get_view() function used here is not documented and
# might change in future versions of Bokeh.
t = np.linspace(0, 0.1, 100)
code = """
d0 = cb_obj.selected["0d"];
if (d0.glyph) {
var color = d0.get_view().visuals.line.line_color.value();
var data = source.data;
data['text'] = ['Selected the ' + color + ' line'];
source.trigger('change');
}
"""
# use a source to easily update the text of the text-glyph
source = ColumnDataSource(data=dict(text=['no line selected']))
p = figure()
l1 = p.line(t, 100*np.sin(t*50), color='red', line_width=25)
l2 = p.line(t, 100*np.sin(t*50+1), color='green', line_width=5)
l3 = p.line(t, 100*np.sin(t*50+2), color='blue', line_width=1)
p.text(0, -100, source=source)
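# Tapping a line fires the CustomJS callback above, which updates the text
# glyph through the shared ColumnDataSource.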
p.add_tools(TapTool(callback=CustomJS(code=code, args=dict(source=source))))
output_file("line_select.html", title="line_select.py example")
show(p)
| bsd-3-clause |
YuepengGuo/zipline | tests/test_serialization.py | 12 | 2910 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose_parameterized import parameterized
from unittest import TestCase
from zipline.finance.trading import TradingEnvironment
from .serialization_cases import (
object_serialization_cases,
assert_dict_equal
)
from six import iteritems
def gather_bad_dicts(state):
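    # Recursively collect values that are dict subclasses rather than plain
    # dicts, since only bare dicts are supported in serialized state.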
bad = []
for k, v in iteritems(state):
if not isinstance(v, dict):
continue
if type(v) != dict:
bad.append((k, v))
bad.extend(gather_bad_dicts(v))
return bad
class SerializationTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
@parameterized.expand(object_serialization_cases())
def test_object_serialization(self,
_,
cls,
initargs,
di_vars,
comparison_method='dict'):
obj = cls(*initargs)
for k, v in di_vars.items():
setattr(obj, k, v)
state = obj.__getstate__()
bad_dicts = gather_bad_dicts(state)
bad_template = "type({0}) == {1}".format
bad_msgs = [bad_template(k, type(v)) for k, v in bad_dicts]
msg = "Only support bare dicts. " + ', '.join(bad_msgs)
self.assertEqual(len(bad_dicts), 0, msg)
# no state should have a dict subclass. Only regular PyDict
if hasattr(obj, '__getinitargs__'):
initargs = obj.__getinitargs__()
else:
initargs = None
if hasattr(obj, '__getnewargs__'):
newargs = obj.__getnewargs__()
else:
newargs = None
if newargs is not None:
obj2 = cls.__new__(cls, *newargs)
else:
obj2 = cls.__new__(cls)
if initargs is not None:
obj2.__init__(*initargs)
obj2.__setstate__(state)
for k, v in di_vars.items():
setattr(obj2, k, v)
if comparison_method == 'repr':
self.assertEqual(obj.__repr__(), obj2.__repr__())
elif comparison_method == 'to_dict':
assert_dict_equal(obj.to_dict(), obj2.to_dict())
else:
assert_dict_equal(obj.__dict__, obj2.__dict__)
| apache-2.0 |
karthikb351/ProntoUsage | clint/resources.py | 15 | 3808 | # -*- coding: utf-8 -*-
"""
clint.resources
~~~~~~~~~~~~~~~
This module contains all the application resource features of clint.
"""
from __future__ import absolute_import
from __future__ import with_statement
import errno
from os import remove, removedirs
from os.path import isfile, join as path_join
from .packages.appdirs import AppDirs, AppDirsError
from .utils import mkdir_p, is_collection
__all__ = (
'init', 'user', 'site', 'cache',
'log', 'NotConfigured'
)
class AppDir(object):
"""Application Directory object."""
def __init__(self, path=None):
self.path = path
self._exists = False
if path:
self._create()
def __repr__(self):
return '<app-dir: %s>' % (self.path)
def __getattribute__(self, name):
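        # Lazily create the directory on first access to any other attribute.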
if not name in ('_exists', 'path', '_create', '_raise_if_none'):
if not self._exists:
self._create()
return object.__getattribute__(self, name)
def _raise_if_none(self):
"""Raises if operations are carried out on an unconfigured AppDir."""
if not self.path:
raise NotConfigured()
def _create(self):
"""Creates current AppDir at AppDir.path."""
self._raise_if_none()
if not self._exists:
mkdir_p(self.path)
self._exists = True
def open(self, filename, mode='r'):
"""Returns file object from given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
return open(fn, mode)
def write(self, filename, content, binary=False):
"""Writes given content to given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'wb'
else:
flags = 'w'
with open(fn, flags) as f:
f.write(content)
def append(self, filename, content, binary=False):
"""Appends given content to given filename."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'ab'
else:
flags = 'a'
        with open(fn, flags) as f:
f.write(content)
return True
def delete(self, filename=''):
"""Deletes given file or directory. If no filename is passed, current
directory is removed.
"""
self._raise_if_none()
fn = path_join(self.path, filename)
try:
if isfile(fn):
remove(fn)
else:
removedirs(fn)
except OSError as why:
if why.errno == errno.ENOENT:
pass
else:
raise why
def read(self, filename, binary=False):
"""Returns contents of given file with AppDir.
If file doesn't exist, returns None."""
self._raise_if_none()
fn = path_join(self.path, filename)
if binary:
flags = 'br'
else:
flags = 'r'
try:
with open(fn, flags) as f:
return f.read()
except IOError:
return None
def sub(self, path):
"""Returns AppDir instance for given subdirectory name."""
if is_collection(path):
            path = path_join(*path)
return AppDir(path_join(self.path, path))
# Module locals
user = AppDir()
site = AppDir()
cache = AppDir()
log = AppDir()
def init(vendor, name):
global user, site, cache, log
ad = AppDirs(name, vendor)
user.path = ad.user_data_dir
site.path = ad.site_data_dir
cache.path = ad.user_cache_dir
log.path = ad.user_log_dir
class NotConfigured(IOError):
"""Application configuration required. Please run resources.init() first."""
| mit |
GiantSteps/essentia | test/src/unittest/standard/test_cubicspline.py | 10 | 4553 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.standard import *
from numpy import r_, sin, pi # r_ for decimal step ranges
class TestCubicSpline(TestCase):
def testBadParams(self):
self.assertConfigureFails(CubicSpline(), { 'leftBoundaryFlag': 3})
self.assertConfigureFails(CubicSpline(), { 'rightBoundaryFlag': -3})
# ascendant order
self.assertConfigureFails(CubicSpline(), { 'xPoints': [ 0, 10, 10, 20 ],
'yPoints': [ 0, 5, -23, 17 ] })
# xPoints.size != yPoints.size
self.assertConfigureFails(CubicSpline(), { 'xPoints': [ 0, 10, 10, 20, 30],
'yPoints': [ 0, 5, -23, 17 ] })
# even sizes for quadratic spline should fail
self.assertConfigureFails(CubicSpline(), { 'xPoints': [ 0, 10, 10, 20],
'yPoints': [ 0, 5, -23, 17 ] })
self.assertConfigureFails(CubicSpline(), { 'xPoints': [ 10, 0 ],
'yPoints': [ 0, 10 ] })
def runge(self, x): return 1.0/(1.0+25.0*float(x*x))
def dRunge(self, x) :
# Runge's first derivative at x
k = 1.0+25.0*float(x*x)
return -50.0*float(x)/(k*k)
def ddRunge(self, x):
# Runge's second derivative at x
xx = x*x
k = 1.0+25.0*float(xx)
return (-50.0+3750.0*float(xx))/(k*k*k)
def evaluateCubicSpline(self, expected, bc, plot=False):
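        # Sample Runge's function on an n-point grid, fit the spline with the
        # requested boundary conditions, then compare interpolated values
        # against the expected reference data.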
n = 11
x = [(float(n-i)*(-1.0)+float(i-1))/float(n-1) for i in range(n)]
y = [self.runge(i) for i in x]
# just for plotting
real=[];newx=[];found=[]
if not bc:
leftBound = 0.0 ;rightBound = 0.0
elif bc == 1:
leftBound = self.dRunge(x[0])
rightBound = self.dRunge(x[-1])
else: # bc == 2
leftBound = self.ddRunge(x[0])
rightBound = self.ddRunge(x[-1])
spline = CubicSpline(xPoints=x,yPoints=y,
leftBoundaryFlag = bc,
leftBoundaryValue = leftBound,
rightBoundaryFlag = bc,
rightBoundaryValue = rightBound)
xval = 0
k=0
for i in range(n+1):
if not i: jhi = 1
else: jhi = 2
for j in range(1, jhi+1):
if not i: xval = x[0] - 1.0
elif i<n:
xval = (float(jhi-j+1)*x[i-1]+float(j-1)*x[i])/float(jhi)
else:
if j==1: xval=x[n-1]
else: xval=x[n-1]+1.0
yval = spline(xval)[0]
self.assertAlmostEqual(expected[k], yval, 5e-6)
newx.append(xval)
found.append(yval)
real.append(self.runge(xval))
k+=1
if plot:
from pylab import plot, show, legend
plot(newx, found, label='found')
plot(newx, expected, label='expected')
plot(newx, real, label='real')
legend()
show()
def testCubicSplineBc0(self):
expected = readVector(join(filedir(), 'spline/cubicSpline_bc0.txt'))
self.evaluateCubicSpline(expected, 0)
def testCubicSplineBc1(self):
expected = readVector(join(filedir(), 'spline/cubicSpline_bc1.txt'))
self.evaluateCubicSpline(expected, 1)
def testCubicSplineBc2(self):
expected = readVector(join(filedir(), 'spline/cubicSpline_bc2.txt'))
self.evaluateCubicSpline(expected, 2)
suite = allTests(TestCubicSpline)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
gnowgi/gnowsys-studio | objectapp/tests/signals.py | 3 | 6309 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test cases for Objectapp's signals"""
from django.test import TestCase
from objectapp.models import Gbobject
from objectapp.managers import DRAFT
from objectapp.managers import PUBLISHED
from objectapp.signals import disable_for_loaddata
from objectapp.signals import ping_directories_handler
from objectapp.signals import ping_external_urls_handler
class SignalsTestCase(TestCase):
"""Test cases for signals"""
def test_disable_for_loaddata(self):
self.top = 0
@disable_for_loaddata
def make_top():
self.top += 1
def call():
return make_top()
call()
self.assertEquals(self.top, 1)
# Okay the command is executed
def test_ping_directories_handler(self):
# Set up a stub around DirectoryPinger
self.top = 0
def fake_pinger(*ka, **kw):
self.top += 1
import objectapp.ping
from objectapp import settings
self.original_pinger = objectapp.ping.DirectoryPinger
objectapp.ping.DirectoryPinger = fake_pinger
params = {'title': 'My gbobject',
'content': 'My content',
'status': PUBLISHED,
'slug': 'my-gbobject'}
gbobject = Gbobject.objects.create(**params)
self.assertEquals(gbobject.is_visible, True)
settings.PING_DIRECTORIES = ()
ping_directories_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 0)
settings.PING_DIRECTORIES = ('toto',)
settings.SAVE_PING_DIRECTORIES = True
ping_directories_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 1)
gbobject.status = DRAFT
ping_directories_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 1)
# Remove stub
objectapp.ping.DirectoryPinger = self.original_pinger
def test_ping_external_urls_handler(self):
# Set up a stub around ExternalUrlsPinger
self.top = 0
def fake_pinger(*ka, **kw):
self.top += 1
import objectapp.ping
from objectapp import settings
self.original_pinger = objectapp.ping.ExternalUrlsPinger
objectapp.ping.ExternalUrlsPinger = fake_pinger
params = {'title': 'My gbobject',
'content': 'My content',
'status': PUBLISHED,
'slug': 'my-gbobject'}
gbobject = Gbobject.objects.create(**params)
self.assertEquals(gbobject.is_visible, True)
settings.SAVE_PING_EXTERNAL_URLS = False
ping_external_urls_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 0)
settings.SAVE_PING_EXTERNAL_URLS = True
ping_external_urls_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 1)
gbobject.status = 0
ping_external_urls_handler('sender', **{'instance': gbobject})
self.assertEquals(self.top, 1)
# Remove stub
objectapp.ping.ExternalUrlsPinger = self.original_pinger
| agpl-3.0 |
simpeg/simpeg | docs/conf.py | 1 | 15473 | # -*- coding: utf-8 -*-
#
# SimPEG documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 30 18:42:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
from sphinx_gallery.sorting import FileNameSortKey
import glob
import SimPEG
import plotly.io as pio
import subprocess
import shutil
pio.renderers.default = "sphinx_gallery"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath("..{}".format(os.path.sep)))
sys.path.append(os.path.abspath(".{}_ext".format(os.path.sep)))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"numpydoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx_toolbox.collapse",
"sphinx_gallery.gen_gallery",
"sphinx.ext.todo",
"sphinx.ext.linkcode",
]
# Autosummary pages will be generated by sphinx-autogen instead of sphinx-build
autosummary_generate = True
numpydoc_attributes_as_param_list = False
# This has to be set to false in order to make the doc build in a
# reasonable amount of time.
numpydoc_show_inherited_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "SimPEG"
copyright = "2013 - 2020, SimPEG Team, http://simpeg.xyz"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.18.1"
# The full version, including alpha/beta/rc tags.
release = "0.18.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
linkcheck_ignore = [
r"https://github.com/simpeg/simpeg*",
"/content/examples/*",
"/content/tutorials/*",
r"https://www.pardiso-project.org",
r"https://docs.github.com/*",
# GJI refuses the connexion during the check
r"https://doi.org/10.1093/gji/*",
]
linkcheck_retries = 3
linkcheck_timeout = 500
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Edit on Github Extension ---------------------------------------------
edit_on_github_project = "simpeg/simpeg"
edit_on_github_branch = "main/docs"
check_meta = False
# source code links
link_github = True
# You can build old with link_github = False
if link_github:
import inspect
from os.path import relpath, dirname
extensions.append("sphinx.ext.linkcode")
def linkcode_resolve(domain, info):
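        # Resolve a documented object to its source file and line range inside
        # the SimPEG package so the docs can link back to GitHub.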
if domain != "py":
return None
modname = info["module"]
fullname = info["fullname"]
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split("."):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
try:
fn = relpath(fn, start=dirname(SimPEG.__file__))
except ValueError:
return None
return f"https://github.com/simpeg/simpeg/blob/main/SimPEG/{fn}{linespec}"
else:
extensions.append("sphinx.ext.viewcode")
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [("png", 100), "pdf"]
import math
phi = (math.sqrt(5) + 1) / 2
plot_rcparams = {
"font.size": 8,
"axes.titlesize": 8,
"axes.labelsize": 8,
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"legend.fontsize": 8,
"figure.figsize": (3 * phi, 3),
"figure.subplot.bottom": 0.2,
"figure.subplot.left": 0.2,
"figure.subplot.right": 0.9,
"figure.subplot.top": 0.85,
"figure.subplot.wspace": 0.4,
"text.usetex": False,
}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import pydata_sphinx_theme
html_theme = "pydata_sphinx_theme"
# If false, no module index is generated.
html_use_modindex = True
html_theme_options = {
"external_links": [
{"name": "SimPEG", "url": "https://simpeg.xyz"},
{"name": "Contact", "url": "http://slack.simpeg.xyz"},
],
"icon_links": [
{
"name": "GitHub",
"url": "https://github.com/simpeg/simpeg",
"icon": "fab fa-github",
},
{
"name": "Slack",
"url": "http://slack.simpeg.xyz/",
"icon": "fab fa-slack",
},
{
"name": "Discourse",
"url": "https://simpeg.discourse.group/",
"icon": "fab fa-discourse",
},
{
"name": "Youtube",
"url": "https://www.youtube.com/c/geoscixyz",
"icon": "fab fa-youtube",
},
{
"name": "Twitter",
"url": "https://twitter.com/simpegpy",
"icon": "fab fa-twitter",
},
],
"use_edit_page_button": False,
}
html_logo = "images/simpeg-logo.png"
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
html_context = {
"github_user": "simpeg",
"github_repo": "simpeg",
"github_version": "main",
"doc_path": "docs",
}
except Exception:
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.sep.join([".", "images", "logo-block.ico"])
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "SimPEGdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "SimPEG.tex", "SimPEG Documentation", "SimPEG Team", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "simpeg", "SimPEG Documentation", ["SimPEG Team"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# Intersphinx
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"matplotlib": ("http://matplotlib.org/stable/", None),
"properties": ("https://propertiespy.readthedocs.io/en/latest/", None),
"discretize": ("http://discretize.simpeg.xyz/en/main/", None),
}
numpydoc_xref_param_type = True
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"SimPEG",
"SimPEG Documentation",
"SimPEG Team",
"SimPEG",
"Simulation and parameter estimation in geophyiscs.",
"Miscellaneous",
),
]
tutorial_dirs = glob.glob("../tutorials/[!_]*")
tut_gallery_dirs = ["content/tutorials/" + os.path.basename(f) for f in tutorial_dirs]
# Scaping images to generate on website
from plotly.io._sg_scraper import plotly_sg_scraper
import pyvista
# Make sure off screen is set to true when building locally
pyvista.OFF_SCREEN = True
# necessary when building the sphinx gallery
pyvista.BUILDING_GALLERY = True
image_scrapers = ("matplotlib", plotly_sg_scraper, pyvista.Scraper())
# Sphinx Gallery
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": ["../examples"] + tutorial_dirs,
"gallery_dirs": ["content/examples"] + tut_gallery_dirs,
"within_subsection_order": FileNameSortKey,
"filename_pattern": "\.py",
"backreferences_dir": "content/api/generated/backreferences",
"doc_module": "SimPEG",
"show_memory": True,
"image_scrapers": image_scrapers,
}
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# graphviz_dot = shutil.which("dot")
# this must be png, because links on SVG are broken
# graphviz_output_format = "png"
autodoc_member_order = "bysource"
# def supress_nonlocal_image_warn():
# import sphinx.environment
# sphinx.environment.BuildEnvironment.warn_node = _supress_nonlocal_image_warn
# def _supress_nonlocal_image_warn(self, msg, node, **kwargs):
# from docutils.utils import get_source_line
# if not msg.startswith("nonlocal image URI found:"):
# self._warnfunc(msg, "{0!s}:{1!s}".format(*get_source_line(node)))
# supress_nonlocal_image_warn()
# http://stackoverflow.com/questions/11417221/sphinx-autodoc-gives-warning-pyclass-reference-target-not-found-type-warning
nitpick_ignore = [
("py:class", "discretize.base.base_mesh.BaseMesh"),
("py:class", "callable"),
("py:class", "properties.base.base.HasProperties"),
("py:class", "pymatsolver.direct.Pardiso"),
("py:class", "matplotlib.axes._axes.Axes"),
("py:class", "optional"),
("py:class", "builtins.float"),
("py:class", "builtins.complex"),
("py:meth", "__call__"),
]
| mit |
ilmanzo/scratch_extensions | venv/lib/python3.4/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py | 1226 | 2323 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
| mit |
mikalstill/nova | nova/api/openstack/compute/schemas/flavors.py | 3 | 1550 | # Copyright 2017 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
index_query = {
'type': 'object',
'properties': {
'limit': parameter_types.multi_params(
parameter_types.non_negative_integer),
'marker': parameter_types.multi_params({'type': 'string'}),
'is_public': parameter_types.multi_params({'type': 'string'}),
'minRam': parameter_types.multi_params({'type': 'string'}),
'minDisk': parameter_types.multi_params({'type': 'string'}),
'sort_key': parameter_types.multi_params({'type': 'string'}),
'sort_dir': parameter_types.multi_params({'type': 'string'})
},
# NOTE(gmann): This is kept True to keep backward compatibility.
# As of now Schema validation stripped out the additional parameters and
# does not raise 400. In the future, we may block the additional parameters
# by bump in Microversion.
'additionalProperties': True
}
| apache-2.0 |
srivassumit/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/no_wsh_at_the_end.py | 492 | 1839 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Correct signatures, wrong file name.
"""
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write(
'sub/no_wsh_at_the_end.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
titom73/junos-py-demo | scripts/03-Audit/l2-lldp-neighbor-search.py | 1 | 5305 | #---------------------------------------------------------------------------------------------------------------
# DESCRIPTION:
# This script asks you which lldp neighbor you are looking for.
# It searches for this lldp neighbor across the whole ip fabric.
# It connects to each device (it has the list of devices from a yaml file), gets the lldp neighbors and compares them with the one you are looking for.
# If it finds it, it indicates where it is.
# It also writes the same output to a file (junos-python-l2-lldp-search.log)
#
# AUTHOR: Khelil SATOR (ksator@juniper.net) / Thomas Grimonet (tgrimonet@juniper.net)
# FILE: l2-lldp-neighbor-search.py
# CREATED: 2015-11-11
# VERSION: 1.1
#
# USAGE:
# python l2-lldp-neighbor-search.py -u root -p **** -s myHostnameToFind
#
# --------------------------------------------------------------------
#
# HELP:
#usage: l2-lldp-neighbor-search.py [-h] [-u USERNAME] [-p PASSWORD] [-l LAB]
# [-s SEARCH]
#
# Python & Junos demo -- version 1.1
#
# optional arguments:
# -h, --help show this help message and exit
# -u USERNAME, --username USERNAME
# Username required to connect to devices
# -p PASSWORD, --password PASSWORD
# User password to connect to devices
# -l LAB, --lab LAB Files containing device IP address
# -s SEARCH, --search SEARCH
# Hostname to search in the lab
#
# --------------------------------------------------------------------
#
# # Output sample:
# python l2-lldp-neighbor-search.py -u root -p Juniper123 -s spine01
# * Start checking router 172.30.108.228
# * Found it on 172.30.108.228 / et-0/0/48
# * Start checking router 172.30.108.229
# * Found it on 172.30.108.229 / et-0/0/48
# * Start checking router 172.30.108.230
# * Found it on 172.30.108.230 / et-0/0/0
# * Start checking router 172.30.108.232
# * Start checking router 172.30.108.233
# * Start checking router 172.30.108.234
# * Start checking router 172.30.108.236
# * Found it on 172.30.108.236 / xe-2/0/0
#
# --------------------------------------------------------------------
import yaml
# Custom LLDP table located in the script/03-Audit/OP/ directory of this GIT repository
from op.lldp import LLDPNeighborTableJNPR
from jnpr.junos import Device
from datetime import datetime
import logging
import sys
import argparse
from optparse import OptionParser
from logging.handlers import RotatingFileHandler
### Function to connect to device and then collect data from PhyPort Table
def get_data(router, options ):
jdev = Device(host=router, user=options.username, password=options.password)
jdev.open()
data = LLDPNeighborTableJNPR(jdev).get()
return data
def main(options):
### Open list of devices
my_list_of_devices=open(options.lab).read()
my_list_of_routers=yaml.load(my_list_of_devices)
found = False # Help to remember if we found a connection or not
for router in my_list_of_routers:
print " * Start checking router "+ router
logger.info("Start checking router %s",router)
lldp_neighbors = get_data(router,options)
for item in lldp_neighbors:
if item.remote_sysname == options.search:
print " * Found it on " + router +" / "+item.local_int
logger.debug("Interface %s connected to %s (%s)" , item.local_int, item.remote_sysname.upper(), item.remote_port_desc)
found = True
if found is not True:
print " !! Device is not connected to your lab (or LLDP is not activated on it)"
logger.warning("Device is not connected to your lab (or LLDP is not activated on it)")
logger.info("End of analyzing router %s",router)
# ----------------------------------------------------------------- #
# MAIN Section
# ----------------------------------------------------------------- #
if __name__ == "__main__":
# Default Username and Password. Could be updated through CLI Parameters
version = "1.1"
gUser='root'
gPass='****'
gFile='../lab-poc.yml'
gSearch = "localhost"
### CLI Option parser:
parser = argparse.ArgumentParser(description="Python & Junos demo -- version "+version)
parser.add_argument('-u','--username' ,help='Username required to connect to devices',default=gUser)
parser.add_argument('-p','--password' ,help='User password to connect to devices',default=gPass)
parser.add_argument('-l','--lab' ,help='Files containing device IP address',default=gFile)
parser.add_argument('-s','--search' ,help='Hostname to search in the lab',default=gSearch)
options = parser.parse_args()
### Activate logging to keep trace in log file
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)s :: %(message)s')
### Display log with CRITICAL level and higher
steam_handler = logging.StreamHandler()
steam_handler.setLevel(logging.CRITICAL)
steam_handler.setFormatter(formatter)
logger.addHandler(steam_handler)
### Write log with DEBUG level and higher
file_handler = logging.FileHandler("junos-python-l2-lldp-search.log")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
### Add handler to logger
logger.addHandler(steam_handler)
logger.addHandler(file_handler)
logger.info('Start to analyze routers')
main(options) | apache-2.0 |
brainchildguru/brainchild-media-group | node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2720 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
| mit |
spacex/kernel-centos7 | scripts/rt-tester/rt-tester.py | 10998 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
aparo/pyes | tests/test_aggs.py | 1 | 4169 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
from pyes.aggs import MissingAgg, MinAgg, MaxAgg, NestedAgg, ReverseNestedAgg
from pyes.query import MatchAllQuery
import datetime
class AggsSearchTestCase(ESTestCase):
def setUp(self):
super(AggsSearchTestCase, self).setUp()
self.conn.indices.create_index(self.index_name)
self.conn.indices.put_mapping(self.document_type, {'properties': self.get_default_mapping()}, self.index_name)
self.conn.index({'name': 'Joe Tester',
'parsedtext': 'Joe Testere nice guy',
'uuid': '11111',
'position': 1,
'tag': 'foo',
'integer': 1,
'date': datetime.date(2011, 5, 16),
'resellers':[
{'name': 'name1', 'price': 100}, {'name': 'name1', 'price': 200}
]
},
self.index_name, self.document_type, 1)
self.conn.index({'name': ' Bill Baloney',
'parsedtext': 'Bill Testere nice guy',
'uuid': '22222',
'position': 2,
'integer': 2,
'tag': 'foo',
'resellers':[],
'date': datetime.date(2011, 4, 16)},
self.index_name, self.document_type, 2)
self.conn.index({'name': 'Bill Clinton',
'parsedtext': 'Bill is not nice guy',
'uuid': '33333',
'position': 3,
'tag': 'bar',
'resellers':[
{'name': 'name1', 'price': 1000}, {'name': 'name1', 'price': 2000}
],
'date': datetime.date(2011, 4, 28)},
self.index_name, self.document_type, 3)
self.conn.indices.refresh(self.index_name)
def test_missing_agg(self):
q = MatchAllQuery()
q = q.search()
missing = MissingAgg(name='missing', field='integer')
q.agg.add(missing)
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEqual(resultset.total, 3)
self.assertEqual(resultset.aggs.missing, {u'doc_count': 1})
def test_min_agg(self):
q = MatchAllQuery()
q = q.search()
min_agg = MinAgg(name='min', field='position')
q.agg.add(min_agg)
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEqual(resultset.total, 3)
self.assertEqual(resultset.aggs.min, {u'value': 1})
def test_max_agg(self):
q = MatchAllQuery()
q = q.search()
max_agg = MaxAgg(name='max', field='position')
q.agg.add(max_agg)
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEqual(resultset.total, 3)
self.assertEqual(resultset.aggs.max, {u'value': 3})
def test_nested_agg(self):
q = MatchAllQuery()
q = q.search()
nested = NestedAgg(name='nested', path='resellers')
q.agg.add(nested)
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEqual(resultset.total, 3)
self.assertEqual(resultset.aggs.nested, {u'doc_count': 4})
def test_reverse_nested_agg(self):
q = MatchAllQuery()
q = q.search()
reverse_nested = ReverseNestedAgg(name='reverse', field='id')
nested = NestedAgg(name='nested', path='resellers', sub_aggs=[reverse_nested])
q.agg.add(nested)
resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
self.assertEqual(resultset.total, 3)
self.assertEqual(resultset.aggs.nested['doc_count'], 4)
self.assertEqual(resultset.aggs.nested.reverse, {u'doc_count': 2})
| bsd-3-clause |
buske/silva | src/features/vienna/vienna.py | 1 | 4410 | #!/usr/bin/env python
"""
Input sequences are read, one per line, of the form: AAGA[C/G]TCG
"""
# Author: Orion Buske
# Date: 27 December 2011
from __future__ import division, with_statement
import os
import sys
import re
from subprocess import Popen, PIPE
from math import log10
assert os.getenv('SILVA_PATH') is not None, \
"Error: SILVA_PATH is unset."
sys.path.insert(0, os.path.expandvars("$SILVA_PATH/lib/python"))
from silva import maybe_gzip_open, print_args
BIN = os.path.expandvars("$SILVA_PATH/tools/vienna/Progs/RNAfold")
assert os.path.isfile(BIN), \
"Error: missing file: %s" % BIN
def score_sequence(*seqs):
"""Scores one or more sequences"""
p = Popen([BIN, '-p1', '-d2', '--noPS'],
stdin=PIPE, stdout=PIPE, close_fds=True)
input = '\n'.join(seqs)
output = p.communicate(input)[0]
output = output.splitlines()
#print '\n'.join(output)
re2 = re.compile(r'.*\[\s*([\d.-]+)\]')
re4 = re.compile(r'.*ensemble diversity ([\d.-]+)')
results = []
for i, line in enumerate(output):
if i % 5 == 4:
m = re4.match(line)
assert m
results.append(float(m.group(1)))
assert len(results) == len(seqs)
return results
def iter_sequences(filename, domain, **kwargs):
def get_mut_seqs(seq):
pre, post = seq.split('/')
pre, old = pre.split('[')
new, post = post.split(']')
pre_len = min(len(pre), domain)
post_len = min(len(post), domain)
        # If too close to one end of sequence, accommodate
if pre_len < domain:
post_len = min(len(post), 2*domain - pre_len)
if post_len < domain:
pre_len = min(len(pre), 2*domain - post_len)
pre = pre[-pre_len:]
post = post[:post_len]
assert len(pre) + len(post) == 2 * domain
return pre + old + post, pre + new + post
with maybe_gzip_open(filename) as ifp:
for line in ifp:
try:
seq = line.strip().upper()
premrna = seq.replace('|', '')
postmrna = ''.join(seq.split('|')[::2])
yield get_mut_seqs(premrna), get_mut_seqs(postmrna)
except (ValueError, AssertionError):
print >>sys.stderr, "Error parsing sequence: skipping"
yield None
def script(filename, quiet=False, domain=None, **kwargs):
fields = ['pvar_pre', 'pvar_post']
if domain is not None:
fields = ['%s_%d' % (field, domain) for field in fields]
print "#%s" % '\t'.join(fields)
seqs = []
for entry in iter_sequences(filename, domain=domain, **kwargs):
if entry is None:
print '\t'.join(['na'] * len(fields))
continue
seqs.extend(entry[0])
seqs.extend(entry[1])
scores = score_sequence(*seqs)
def safe_f(new, old):
if old == 0:
return 'na'
else:
return '%.4f' % -log10(new / old)
for pre_old, pre_new, post_old, post_new in \
zip(scores[::4], scores[1::4], scores[2::4], scores[3::4]):
print '\t'.join([safe_f(pre_new, pre_old), safe_f(post_new, post_old)])
def parse_args(args):
from optparse import OptionParser
usage = "usage: %prog [options] (SEQ|-)"
description = __doc__.strip()
parser = OptionParser(usage=usage,
description=description)
parser.add_option("-q", "--quiet", default=False,
dest="quiet", action='store_true',
help="Quiet output, suitable"
" for additional processing")
parser.add_option("-d", "--domain", metavar="WIDTH",
dest="domain", type="int", default=None,
help="Limit analysis to within WIDTH bases"
" on either side of the mutation [default: None]")
options, args = parser.parse_args()
if len(args) != 1:
parser.error("Inappropriate number of arguments")
elif options.domain is None:
parser.error("Must specify domain width")
return options, args
def main(args=sys.argv[1:]):
options, args = parse_args(args)
kwargs = dict(options.__dict__)
if not options.quiet:
print_args(args, kwargs)
script(*args, **kwargs)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
ampax/edx-platform | lms/djangoapps/course_blocks/__init__.py | 12 | 1370 | """
The Course Blocks app, built upon the Block Cache framework in
openedx.core.lib.block_structure, is a higher layer django app in LMS that
provides additional context of Courses and Users (via usage_info.py) with
implementations for Block Structure Transformers that are related to
block structure course access.
As described in the Block Cache framework's __init__ module, this
framework provides faster access to course blocks for performance
sensitive features, by caching all transformer-required data so no
modulestore access is necessary during block access.
It is expected that only Block Access related transformers reside in
this django app, as they are cross-cutting authorization transformers
required across other features. Other higher-level and feature-specific
transformers should be implemented in their own separate apps.
Note: Currently, some of the implementation is redundant with the
has_access code in courseware/access.py. However, we do have short-term
plans for refactoring the current has_access code to use Course Blocks
instead (https://openedx.atlassian.net/browse/MA-1019). We have
introduced this redundancy in the short-term as an incremental
implementation approach, reducing risk with initial release of this app.
"""
# Importing signals is necessary to activate the course publish/delete signal handlers.
from . import signals
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/redis/azure-mgmt-redis/azure/mgmt/redis/aio/operations/_private_link_resources_operations.py | 1 | 6756 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_link_resources_operations import build_list_by_redis_cache_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.redis.aio.RedisManagementClient`'s
:attr:`private_link_resources` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_redis_cache(
self, resource_group_name: str, cache_name: str, **kwargs: Any
) -> AsyncIterable["_models.PrivateLinkResource"]:
"""Gets the private link resources that need to be created for a redis cache.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param cache_name: The name of the Redis cache. Required.
:type cache_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResource or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.redis.models.PrivateLinkResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PrivateLinkResourceListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_redis_cache_request(
resource_group_name=resource_group_name,
cache_name=cache_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_redis_cache.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_redis_cache.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cache/redis/{cacheName}/privateLinkResources"} # type: ignore
| mit |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/ntvde.py | 64 | 3195 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
js_to_json,
parse_duration,
)
class NTVDeIE(InfoExtractor):
IE_NAME = 'n-tv.de'
_VALID_URL = r'https?://(?:www\.)?n-tv\.de/mediathek/videos/[^/?#]+/[^/?#]+-article(?P<id>.+)\.html'
_TESTS = [{
'url': 'http://www.n-tv.de/mediathek/videos/panorama/Schnee-und-Glaette-fuehren-zu-zahlreichen-Unfaellen-und-Staus-article14438086.html',
'md5': '6ef2514d4b1e8e03ca24b49e2f167153',
'info_dict': {
'id': '14438086',
'ext': 'mp4',
'thumbnail': r're:^https?://.*\.jpg$',
'title': 'Schnee und Glätte führen zu zahlreichen Unfällen und Staus',
'alt_title': 'Winterchaos auf deutschen Straßen',
'description': 'Schnee und Glätte sorgen deutschlandweit für einen chaotischen Start in die Woche: Auf den Straßen kommt es zu kilometerlangen Staus und Dutzenden Glätteunfällen. In Düsseldorf und München wirbelt der Schnee zudem den Flugplan durcheinander. Dutzende Flüge landen zu spät, einige fallen ganz aus.',
'duration': 4020,
'timestamp': 1422892797,
'upload_date': '20150202',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
info = self._parse_json(self._search_regex(
r'(?s)ntv\.pageInfo\.article\s*=\s*(\{.*?\});', webpage, 'info'),
video_id, transform_source=js_to_json)
timestamp = int_or_none(info.get('publishedDateAsUnixTimeStamp'))
vdata = self._parse_json(self._search_regex(
r'(?s)\$\(\s*"\#player"\s*\)\s*\.data\(\s*"player",\s*(\{.*?\})\);',
webpage, 'player data'), video_id,
transform_source=lambda s: js_to_json(re.sub(r'advertising:\s*{[^}]+},', '', s)))
duration = parse_duration(vdata.get('duration'))
formats = []
if vdata.get('video'):
formats.append({
'format_id': 'flash',
'url': 'rtmp://fms.n-tv.de/%s' % vdata['video'],
})
if vdata.get('videoMp4'):
formats.append({
'format_id': 'mobile',
'url': compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoMp4']),
'tbr': 400, # estimation
})
if vdata.get('videoM3u8'):
m3u8_url = compat_urlparse.urljoin('http://video.n-tv.de', vdata['videoM3u8'])
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, ext='mp4', entry_protocol='m3u8_native',
preference=0, m3u8_id='hls', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': info['headline'],
'description': info.get('intro'),
'alt_title': info.get('kicker'),
'timestamp': timestamp,
'thumbnail': vdata.get('html5VideoPoster'),
'duration': duration,
'formats': formats,
}
| mit |
Azure/azure-sdk-for-python | sdk/peering/azure-mgmt-peering/azure/mgmt/peering/operations/_prefixes_operations.py | 1 | 26672 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import PeeringManagementClientMixinABC, _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01")) # type: Literal["2022-10-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"peeringServiceName": _SERIALIZER.url("peering_service_name", peering_service_name, "str"),
"prefixName": _SERIALIZER.url("prefix_name", prefix_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, peering_service_name: str, prefix_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01")) # type: Literal["2022-10-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"peeringServiceName": _SERIALIZER.url("peering_service_name", peering_service_name, "str"),
"prefixName": _SERIALIZER.url("prefix_name", prefix_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, peering_service_name: str, prefix_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01")) # type: Literal["2022-10-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"peeringServiceName": _SERIALIZER.url("peering_service_name", peering_service_name, "str"),
"prefixName": _SERIALIZER.url("prefix_name", prefix_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_peering_service_request(
resource_group_name: str,
peering_service_name: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-10-01")) # type: Literal["2022-10-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"peeringServiceName": _SERIALIZER.url("peering_service_name", peering_service_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class PrefixesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.peering.PeeringManagementClient`'s
:attr:`prefixes` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> _models.PeeringServicePrefix:
"""Gets an existing prefix with the specified name under the given subscription, resource group
and peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param prefix_name: The name of the prefix. Required.
:type prefix_name: str
:param expand: The properties to be expanded. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PeeringServicePrefix]
request = build_get_request(
resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("PeeringServicePrefix", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}"} # type: ignore
@overload
def create_or_update(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
peering_service_prefix: _models.PeeringServicePrefix,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PeeringServicePrefix:
"""Creates a new prefix with the specified name under the given subscription, resource group and
peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param prefix_name: The name of the prefix. Required.
:type prefix_name: str
:param peering_service_prefix: The properties needed to create a prefix. Required.
:type peering_service_prefix: ~azure.mgmt.peering.models.PeeringServicePrefix
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def create_or_update(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
peering_service_prefix: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.PeeringServicePrefix:
"""Creates a new prefix with the specified name under the given subscription, resource group and
peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param prefix_name: The name of the prefix. Required.
:type prefix_name: str
:param peering_service_prefix: The properties needed to create a prefix. Required.
:type peering_service_prefix: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
peering_service_prefix: Union[_models.PeeringServicePrefix, IO],
**kwargs: Any
) -> _models.PeeringServicePrefix:
"""Creates a new prefix with the specified name under the given subscription, resource group and
peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param prefix_name: The name of the prefix. Required.
:type prefix_name: str
:param peering_service_prefix: The properties needed to create a prefix. Is either a model type
        or an IO type. Required.
:type peering_service_prefix: ~azure.mgmt.peering.models.PeeringServicePrefix or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PeeringServicePrefix]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(peering_service_prefix, (IO, bytes)):
_content = peering_service_prefix
else:
_json = self._serialize.body(peering_service_prefix, "PeeringServicePrefix")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("PeeringServicePrefix", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("PeeringServicePrefix", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, peering_service_name: str, prefix_name: str, **kwargs: Any
) -> None:
"""Deletes an existing prefix with the specified name under the given subscription, resource group
and peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param prefix_name: The name of the prefix. Required.
:type prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
prefix_name=prefix_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}"} # type: ignore
@distributed_trace
def list_by_peering_service(
self, resource_group_name: str, peering_service_name: str, expand: Optional[str] = None, **kwargs: Any
) -> Iterable["_models.PeeringServicePrefix"]:
"""Lists all prefixes under the given subscription, resource group and peering service.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param peering_service_name: The name of the peering service. Required.
:type peering_service_name: str
:param expand: The properties to be expanded. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeeringServicePrefix or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.peering.models.PeeringServicePrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-10-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.PeeringServicePrefixListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_peering_service_request(
resource_group_name=resource_group_name,
peering_service_name=peering_service_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list_by_peering_service.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PeeringServicePrefixListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_peering_service.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes"} # type: ignore
| mit |
lyft/incubator-airflow | scripts/ci/pre_commit/pre_commit_vendor_k8s_json_schema.py | 3 | 2603 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Iterator
import requests
K8S_DEFINITIONS = (
"https://raw.githubusercontent.com/yannh/kubernetes-json-schema"
"/master/v1.22.0-standalone-strict/_definitions.json"
)
VALUES_SCHEMA_FILE = "chart/values.schema.json"
with open(VALUES_SCHEMA_FILE) as f:
schema = json.load(f)
def find_refs(props: dict) -> Iterator[str]:
for value in props.values():
if "$ref" in value:
yield value["$ref"]
if "items" in value:
if "$ref" in value["items"]:
yield value["items"]["$ref"]
if "properties" in value:
yield from find_refs(value["properties"])
def get_remote_schema(url: str) -> dict:
req = requests.get(url)
req.raise_for_status()
return req.json()
# Create 'definitions' if it doesn't exist or reset the io.k8s defs
schema["definitions"] = {k: v for k, v in schema.get("definitions", {}).items() if not k.startswith("io.k8s")}
# Get the k8s defs
defs = get_remote_schema(K8S_DEFINITIONS)
# first find refs in our schema
refs = set(find_refs(schema["properties"]))
# now we look for refs in refs
i = 0
while True:
starting_refs = refs
for ref in refs:
ref_id = ref.split('/')[-1]
schema["definitions"][ref_id] = defs["definitions"][ref_id]
refs = set(find_refs(schema["definitions"]))
if refs == starting_refs:
break
# Make sure we don't have a runaway loop
i += 1
if i > 15:
raise SystemExit("Wasn't able to find all nested references in 15 cycles")
# and finally, sort them all!
schema["definitions"] = dict(sorted(schema["definitions"].items()))
# Then write out our schema
with open(VALUES_SCHEMA_FILE, 'w') as f:
json.dump(schema, f, indent=4)
f.write('\n') # with a newline!
| apache-2.0 |
CatoTH/OpenSlides | server/openslides/users/restrict.py | 2 | 2918 | from typing import Any, Dict
from ..utils.auth import async_has_perm
async def restrict_user(full_user: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns the restricted serialized data for the instance prepared
for the user. Removes several fields for non admins so that they do
not get the fields they should not get.
"""
from .serializers import (
USERCANSEEEXTRASERIALIZER_FIELDS,
USERCANSEESERIALIZER_FIELDS,
)
user_id = full_user["id"]
def filtered_data(full_user, whitelist):
"""
Returns a new dict like full_user but only with whitelisted keys.
"""
return {key: full_user[key] for key in whitelist}
# We have some sets of data to be sent:
# * full data i. e. all fields (including session_auth_hash),
# * all data i. e. all fields but not session_auth_hash,
# * many data i. e. all fields but not the default password and session_auth_hash,
# * little data i. e. all fields but not the default password, session_auth_hash,
# comments, gender, email, last_email_send, active status and auth_type
# * own data i. e. all little data fields plus email and gender. This is applied
# to the own user, if he just can see little or no data.
# * no data.
# Prepare field set for users with "all" data, "many" data and with "little" data.
all_data_fields = set(USERCANSEEEXTRASERIALIZER_FIELDS)
all_data_fields.add("groups_id")
all_data_fields.discard("groups")
all_data_fields.add("default_password")
many_data_fields = all_data_fields.copy()
many_data_fields.discard("default_password")
little_data_fields = set(USERCANSEESERIALIZER_FIELDS)
little_data_fields.add("groups_id")
little_data_fields.discard("groups")
own_data_fields = set(little_data_fields)
own_data_fields.add("email")
own_data_fields.add("gender")
own_data_fields.add("vote_delegated_to_id")
own_data_fields.add("vote_delegated_from_users_id")
# Check user permissions.
if await async_has_perm(user_id, "users.can_see_name"):
if await async_has_perm(user_id, "users.can_see_extra_data"):
if await async_has_perm(user_id, "users.can_manage"):
whitelist = all_data_fields
else:
whitelist = many_data_fields
else:
whitelist = own_data_fields
            # for managing {motion, assignment} polls the user needs to know
# the vote delegation structure.
if await async_has_perm(
user_id, "motion.can_manage_polls"
) or await async_has_perm(user_id, "assignments.can_manage"):
whitelist.add("vote_delegated_to_id")
whitelist.add("vote_delegated_from_users_id")
data = filtered_data(full_user, whitelist)
else:
# Parse data.
data = filtered_data(full_user, own_data_fields)
return data
| mit |
jandom/rdkit | rdkit/sping/PDF/pdfdoc.py | 1 | 15738 | # -*- coding: latin-1 -*-
#pdfdoc.py
"""
PDFgen is a library to generate PDF files containing text and graphics. It is the
foundation for a complete reporting solution in Python.
The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that
all objects are properly cross-referenced and indexed to the nearest byte. The
'inner structure' - the page descriptions - are presumed to be generated before
each page is saved.
pdfgen.py calls this and provides a 'canvas' object to handle page marking operators.
piddlePDF calls pdfgen and offers a high-level interface.
(C) Copyright Andy Robinson 1998-1999
"""
from __future__ import print_function
import os
import sys
import string
import time
import tempfile
from types import *
from math import sin, cos, pi, ceil
try:
import zlib
except ImportError:
print("zlib not available, page compression not available")
from .pdfgeom import bezierArc
from . import pdfutils
from .pdfutils import LINEEND # this constant needed in both
from . import pdfmetrics
##############################################################
#
# Constants and declarations
#
##############################################################
StandardEnglishFonts = [
'Courier', 'Courier-Bold', 'Courier-Oblique', 'Courier-BoldOblique', 'Helvetica',
'Helvetica-Bold', 'Helvetica-Oblique', 'Helvetica-BoldOblique', 'Times-Roman', 'Times-Bold',
'Times-Italic', 'Times-BoldItalic', 'Symbol', 'ZapfDingbats'
]
class PDFError(Exception):
  pass
AFMDIR = '.'
A4 = (595.27, 841.89) #default page size
class PDFDocument:
"""Responsible for linking and writing out the whole document.
Builds up a list of objects using add(key, object). Each of these
must inherit from PDFObject and be able to write itself into the file.
For cross-linking, it provides getPosition(key) which tells you where
another object is, or raises a KeyError if not found. The rule is that
  objects should only refer to ones previously written to the file.
"""
def __init__(self):
self.objects = []
self.objectPositions = {}
self.fonts = MakeType1Fonts()
#mapping of Postscriptfont names to internal ones;
#needs to be dynamically built once we start adding
#fonts in.
self.fontMapping = {}
for i in range(len(StandardEnglishFonts)):
psname = StandardEnglishFonts[i]
pdfname = '/F%d' % (i + 1)
self.fontMapping[psname] = pdfname
self.pages = []
self.pagepositions = []
# position 1
cat = PDFCatalog()
cat.RefPages = 3
cat.RefOutlines = 2
self.add('Catalog', cat)
# position 2 - outlines
outl = PDFOutline()
self.add('Outline', outl)
# position 3 - pages collection
self.PageCol = PDFPageCollection()
self.add('PagesTreeRoot', self.PageCol)
# positions 4-16 - fonts
fontstartpos = len(self.objects) + 1
for font in self.fonts:
self.add('Font.' + font.keyname, font)
self.fontdict = MakeFontDictionary(fontstartpos, len(self.fonts))
# position 17 - Info
self.info = PDFInfo() #hang onto it!
self.add('Info', self.info)
self.infopos = len(self.objects) #1-based, this gives its position
def add(self, key, obj):
self.objectPositions[key] = len(self.objects) # its position
self.objects.append(obj)
obj.doc = self
return len(self.objects) - 1 # give its position
def getPosition(self, key):
"""Tell you where the given object is in the file - used for
cross-linking; an object can call self.doc.getPosition("Page001")
to find out where the object keyed under "Page001" is stored."""
return self.objectPositions[key]
def setTitle(self, title):
"embeds in PDF file"
self.info.title = title
def setAuthor(self, author):
"embedded in PDF file"
self.info.author = author
def setSubject(self, subject):
"embeds in PDF file"
self.info.subject = subject
def printXref(self):
self.startxref = sys.stdout.tell()
print('xref')
print(0, len(self.objects) + 1)
print('0000000000 65535 f')
for pos in self.xref:
print('%0.10d 00000 n' % pos)
def writeXref(self, f):
self.startxref = f.tell()
f.write('xref' + LINEEND)
f.write('0 %d' % (len(self.objects) + 1) + LINEEND)
f.write('0000000000 65535 f' + LINEEND)
for pos in self.xref:
f.write('%0.10d 00000 n' % pos + LINEEND)
def printTrailer(self):
print('trailer')
print('<< /Size %d /Root %d 0 R /Info %d 0 R>>' % (len(self.objects) + 1, 1, self.infopos))
print('startxref')
print(self.startxref)
def writeTrailer(self, f):
f.write('trailer' + LINEEND)
f.write('<< /Size %d /Root %d 0 R /Info %d 0 R>>' % (len(self.objects) + 1, 1, self.infopos) +
LINEEND)
f.write('startxref' + LINEEND)
f.write(str(self.startxref) + LINEEND)
def SaveToFile(self, filename):
fileobj = open(filename, 'wb')
self.SaveToFileObject(fileobj)
fileobj.close()
def SaveToFileObject(self, fileobj):
"""Open a file, and ask each object in turn to write itself to
the file. Keep track of the file position at each point for
use in the index at the end"""
f = fileobj
i = 1
self.xref = []
f.write("%PDF-1.2" + LINEEND) # for CID support
f.write("%í춾" + LINEEND)
for obj in self.objects:
pos = f.tell()
self.xref.append(pos)
f.write(str(i) + ' 0 obj' + LINEEND)
obj.save(f)
f.write('endobj' + LINEEND)
i = i + 1
self.writeXref(f)
self.writeTrailer(f)
f.write('%%EOF') # no lineend needed on this one!
# with the Mac, we need to tag the file in a special
#way so the system knows it is a PDF file.
    #This was supplied by Joe Strout
if os.name == 'mac':
import macfs #@UnresolvedImport
try:
macfs.FSSpec(filename).SetCreatorType('CARO', 'PDF ')
except Exception:
pass
def printPDF(self):
"prints it to standard output. Logs positions for doing trailer"
print("%PDF-1.0")
print("%í춾")
i = 1
self.xref = []
for obj in self.objects:
pos = sys.stdout.tell()
self.xref.append(pos)
print(i, '0 obj')
obj.printPDF()
print('endobj')
i = i + 1
self.printXref()
self.printTrailer()
print("%%EOF", end='')
def addPage(self, page):
"""adds page and stream at end. Maintains pages list"""
#page.buildstream()
pos = len(self.objects) # work out where added
page.ParentPos = 3 #pages collection
page.info = {
'parentpos': 3,
'fontdict': self.fontdict,
'contentspos': pos + 2,
}
self.PageCol.PageList.append(pos + 1)
self.add('Page%06d' % len(self.PageCol.PageList), page)
#self.objects.append(page)
self.add('PageStream%06d' % len(self.PageCol.PageList), page.stream)
#self.objects.append(page.stream)
def hasFont(self, psfontname):
return self.fontMapping.has_key(psfontname)
def getInternalFontName(self, psfontname):
try:
return self.fontMapping[psfontname]
except KeyError:
raise PDFError("Font {0} not available in document".format(psfontname))
def getAvailableFonts(self):
fontnames = self.fontMapping.keys()
fontnames.sort()
return fontnames
##############################################################
#
# Utilities
#
##############################################################
class OutputGrabber:
"""At times we need to put something in the place of standard
output. This grabs stdout, keeps the data, and releases stdout
when done.
NOT working well enough!"""
def __init__(self):
self.oldoutput = sys.stdout
sys.stdout = self
self.closed = 0
self.data = []
def write(self, x):
if not self.closed:
self.data.append(x)
def getData(self):
return string.join(self.data)
def close(self):
sys.stdout = self.oldoutput
self.closed = 1
def __del__(self):
if not self.closed:
self.close()
def testOutputGrabber():
gr = OutputGrabber()
for i in range(10):
print('line', i)
data = gr.getData()
gr.close()
print('Data...', data)
##############################################################
#
# PDF Object Hierarchy
#
##############################################################
class PDFObject:
"""Base class for all PDF objects. In PDF, precise measurement
of file offsets is essential, so the usual trick of just printing
and redirecting output has proved to give different behaviour on
Mac and Windows. While it might be soluble, I'm taking charge
of line ends at the binary level and explicitly writing to a file.
The LINEEND constant lets me try CR, LF and CRLF easily to help
pin down the problem."""
def save(self, file):
"Save its content to an open file"
file.write('% base PDF object' + LINEEND)
def printPDF(self):
self.save(sys.stdout)
class PDFLiteral(PDFObject):
" a ready-made one you wish to quote"
def __init__(self, text):
self.text = text
def save(self, file):
file.write(self.text + LINEEND)
class PDFCatalog(PDFObject):
"requires RefPages and RefOutlines set"
def __init__(self):
self.template = string.join([
'<<', '/Type /Catalog', '/Pages %d 0 R', '/Outlines %d 0 R', '>>'
], LINEEND)
def save(self, file):
file.write(self.template % (self.RefPages, self.RefOutlines) + LINEEND)
class PDFInfo(PDFObject):
"""PDF documents can have basic information embedded, viewable from
File | Document Info in Acrobat Reader. If this is wrong, you get
Postscript errors while printing, even though it does not print."""
def __init__(self):
self.title = "untitled"
self.author = "anonymous"
self.subject = "unspecified"
now = time.localtime(time.time())
self.datestr = '%04d%02d%02d%02d%02d%02d' % tuple(now[0:6])
def save(self, file):
file.write(
string.join([
"<</Title (%s)", "/Author (%s)", "/CreationDate (D:%s)", "/Producer (PDFgen)",
"/Subject (%s)", ">>"
], LINEEND) % (pdfutils._escape(self.title), pdfutils._escape(self.author), self.datestr,
pdfutils._escape(self.subject)) + LINEEND)
class PDFOutline(PDFObject):
"null outline, does nothing yet"
def __init__(self):
self.template = string.join(['<<', '/Type /Outlines', '/Count 0', '>>'], LINEEND)
def save(self, file):
file.write(self.template + LINEEND)
class PDFPageCollection(PDFObject):
"presumes PageList attribute set (list of integers)"
def __init__(self):
self.PageList = []
def save(self, file):
lines = ['<<', '/Type /Pages', '/Count %d' % len(self.PageList), '/Kids [']
for page in self.PageList:
lines.append(str(page) + ' 0 R ')
lines.append(']')
lines.append('>>')
text = string.join(lines, LINEEND)
file.write(text + LINEEND)
class PDFPage(PDFObject):
"""The Bastard. Needs list of Resources etc. Use a standard one for now.
It manages a PDFStream object which must be added to the document's list
of objects as well."""
def __init__(self):
self.drawables = []
self.pagewidth = 595 #these are overridden by piddlePDF
self.pageheight = 842
self.stream = PDFStream()
self.hasImages = 0
self.pageTransitionString = '' # presentation effects
# editors on different systems may put different things in the line end
# without me noticing. No triple-quoted strings allowed!
self.template = string.join([
'<<',
'/Type /Page',
'/Parent %(parentpos)d 0 R',
'/Resources',
' <<',
' /Font %(fontdict)s',
' /ProcSet %(procsettext)s',
' >>',
'/MediaBox [0 0 %(pagewidth)d %(pageheight)d]', #A4 by default
'/Contents %(contentspos)d 0 R',
'%(transitionString)s',
'>>'
],
LINEEND)
def setCompression(self, onoff=0):
"Turns page compression on or off"
    assert onoff in [0, 1], "Page compression options are 1=on, 0=off"
self.stream.compression = onoff
def save(self, file):
self.info['pagewidth'] = self.pagewidth
self.info['pageheight'] = self.pageheight
# check for image support
if self.hasImages:
self.info['procsettext'] = '[/PDF /Text /ImageC]'
else:
self.info['procsettext'] = '[/PDF /Text]'
self.info['transitionString'] = self.pageTransitionString
file.write(self.template % self.info + LINEEND)
def clear(self):
self.drawables = []
def setStream(self, data):
if type(data) is ListType:
data = string.join(data, LINEEND)
self.stream.setStream(data)
TestStream = "BT /F6 24 Tf 80 672 Td 24 TL ( ) Tj T* ET"
class PDFStream(PDFObject):
"Used for the contents of a page"
def __init__(self):
self.data = None
self.compression = 0
def setStream(self, data):
self.data = data
def save(self, file):
#avoid crashes if they wrote nothing in the page
if self.data == None:
self.data = TestStream
if self.compression == 1:
comp = zlib.compress(self.data) #this bit is very fast...
base85 = pdfutils._AsciiBase85Encode(comp) #...sadly this isn't
wrapped = pdfutils._wrap(base85)
data_to_write = wrapped
else:
data_to_write = self.data
# the PDF length key should contain the length including
# any extra LF pairs added by Print on DOS.
#lines = len(string.split(self.data,'\n'))
#length = len(self.data) + lines # one extra LF each
length = len(data_to_write) + len(LINEEND) #AR 19980202
if self.compression:
file.write('<< /Length %d /Filter [/ASCII85Decode /FlateDecode]>>' % length + LINEEND)
else:
file.write('<< /Length %d >>' % length + LINEEND)
file.write('stream' + LINEEND)
file.write(data_to_write + LINEEND)
file.write('endstream' + LINEEND)
class PDFImage(PDFObject):
    # a sample image used while developing. Currently, images go in as literals
def save(self, file):
file.write(
string.join([
'<<', '/Type /XObject', '/Subtype /Image', '/Name /Im0', '/Width 24', '/Height 23',
'/BitsPerComponent 1', '/ColorSpace /DeviceGray', '/Filter /ASCIIHexDecode', '/Length 174',
'>>', 'stream', '003B00 002700 002480 0E4940 114920 14B220 3CB650',
'75FE88 17FF8C 175F14 1C07E2 3803C4 703182 F8EDFC',
'B2BBC2 BB6F84 31BFC2 18EA3C 0E3E00 07FC00 03F800', '1E1800 1FF800>', 'endstream', 'endobj'
], LINEEND) + LINEEND)
class PDFType1Font(PDFObject):
def __init__(self, key, font):
self.fontname = font
self.keyname = key
self.template = string.join([
'<<', '/Type /Font', '/Subtype /Type1', '/Name /%s', '/BaseFont /%s',
'/Encoding /MacRomanEncoding', '>>'
], LINEEND)
def save(self, file):
file.write(self.template % (self.keyname, self.fontname) + LINEEND)
##############################################################
#
# some helpers
#
##############################################################
def MakeType1Fonts():
"returns a list of all the standard font objects"
fonts = []
pos = 1
for fontname in StandardEnglishFonts:
font = PDFType1Font('F' + str(pos), fontname)
fonts.append(font)
pos = pos + 1
return fonts
def MakeFontDictionary(startpos, count):
"returns a font dictionary assuming they are all in the file from startpos"
dict = " <<" + LINEEND
pos = startpos
for i in range(count):
dict = dict + '\t\t/F%d %d 0 R ' % (i + 1, startpos + i) + LINEEND
dict = dict + "\t\t>>" + LINEEND
return dict
if __name__ == '__main__':
print('For test scripts, run test1.py to test6.py')
| bsd-3-clause |
lyft/incubator-airflow | airflow/providers/google/marketing_platform/example_dags/example_campaign_manager.py | 4 | 6029 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use CampaignManager.
"""
import os
import time
from datetime import datetime
from airflow import models
from airflow.providers.google.marketing_platform.operators.campaign_manager import (
GoogleCampaignManagerBatchInsertConversionsOperator,
GoogleCampaignManagerBatchUpdateConversionsOperator,
GoogleCampaignManagerDeleteReportOperator,
GoogleCampaignManagerDownloadReportOperator,
GoogleCampaignManagerInsertReportOperator,
GoogleCampaignManagerRunReportOperator,
)
from airflow.providers.google.marketing_platform.sensors.campaign_manager import (
GoogleCampaignManagerReportSensor,
)
PROFILE_ID = os.environ.get("MARKETING_PROFILE_ID", "123456789")
FLOODLIGHT_ACTIVITY_ID = int(os.environ.get("FLOODLIGHT_ACTIVITY_ID", 12345))
FLOODLIGHT_CONFIGURATION_ID = int(os.environ.get("FLOODLIGHT_CONFIGURATION_ID", 12345))
ENCRYPTION_ENTITY_ID = int(os.environ.get("ENCRYPTION_ENTITY_ID", 12345))
DEVICE_ID = os.environ.get("DEVICE_ID", "12345")
BUCKET = os.environ.get("MARKETING_BUCKET", "test-cm-bucket")
REPORT_NAME = "test-report"
REPORT = {
"type": "STANDARD",
"name": REPORT_NAME,
"criteria": {
"dateRange": {
"kind": "dfareporting#dateRange",
"relativeDateRange": "LAST_365_DAYS",
},
"dimensions": [{"kind": "dfareporting#sortedDimension", "name": "dfa:advertiser"}],
"metricNames": ["dfa:activeViewImpressionDistributionViewable"],
},
}
CONVERSION = {
"kind": "dfareporting#conversion",
"floodlightActivityId": FLOODLIGHT_ACTIVITY_ID,
"floodlightConfigurationId": FLOODLIGHT_CONFIGURATION_ID,
"mobileDeviceId": DEVICE_ID,
"ordinal": "0",
"quantity": 42,
"value": 123.4,
"timestampMicros": int(time.time()) * 1000000,
"customVariables": [
{
"kind": "dfareporting#customFloodlightVariable",
"type": "U4",
"value": "value",
}
],
}
CONVERSION_UPDATE = {
"kind": "dfareporting#conversion",
"floodlightActivityId": FLOODLIGHT_ACTIVITY_ID,
"floodlightConfigurationId": FLOODLIGHT_CONFIGURATION_ID,
"mobileDeviceId": DEVICE_ID,
"ordinal": "0",
"quantity": 42,
"value": 123.4,
}
with models.DAG(
"example_campaign_manager",
    schedule_interval='@once',  # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
# [START howto_campaign_manager_insert_report_operator]
create_report = GoogleCampaignManagerInsertReportOperator(
profile_id=PROFILE_ID, report=REPORT, task_id="create_report"
)
report_id = create_report.output["report_id"]
# [END howto_campaign_manager_insert_report_operator]
# [START howto_campaign_manager_run_report_operator]
run_report = GoogleCampaignManagerRunReportOperator(
profile_id=PROFILE_ID, report_id=report_id, task_id="run_report"
)
file_id = run_report.output["file_id"]
# [END howto_campaign_manager_run_report_operator]
# [START howto_campaign_manager_wait_for_operation]
wait_for_report = GoogleCampaignManagerReportSensor(
task_id="wait_for_report",
profile_id=PROFILE_ID,
report_id=report_id,
file_id=file_id,
)
# [END howto_campaign_manager_wait_for_operation]
# [START howto_campaign_manager_get_report_operator]
get_report = GoogleCampaignManagerDownloadReportOperator(
task_id="get_report",
profile_id=PROFILE_ID,
report_id=report_id,
file_id=file_id,
report_name="test_report.csv",
bucket_name=BUCKET,
)
# [END howto_campaign_manager_get_report_operator]
# [START howto_campaign_manager_delete_report_operator]
delete_report = GoogleCampaignManagerDeleteReportOperator(
profile_id=PROFILE_ID, report_name=REPORT_NAME, task_id="delete_report"
)
# [END howto_campaign_manager_delete_report_operator]
wait_for_report >> get_report >> delete_report
# Task dependencies created via `XComArgs`:
# create_report >> run_report
# create_report >> wait_for_report
# create_report >> get_report
# run_report >> get_report
# run_report >> wait_for_report
# [START howto_campaign_manager_insert_conversions]
insert_conversion = GoogleCampaignManagerBatchInsertConversionsOperator(
task_id="insert_conversion",
profile_id=PROFILE_ID,
conversions=[CONVERSION],
encryption_source="AD_SERVING",
encryption_entity_type="DCM_ADVERTISER",
encryption_entity_id=ENCRYPTION_ENTITY_ID,
)
# [END howto_campaign_manager_insert_conversions]
# [START howto_campaign_manager_update_conversions]
update_conversion = GoogleCampaignManagerBatchUpdateConversionsOperator(
task_id="update_conversion",
profile_id=PROFILE_ID,
conversions=[CONVERSION_UPDATE],
encryption_source="AD_SERVING",
encryption_entity_type="DCM_ADVERTISER",
encryption_entity_id=ENCRYPTION_ENTITY_ID,
max_failed_updates=1,
)
# [END howto_campaign_manager_update_conversions]
insert_conversion >> update_conversion
if __name__ == "__main__":
dag.clear()
dag.run()
| apache-2.0 |
juliantaylor/scipy | scipy/sparse/csgraph/__init__.py | 13 | 6939 | r"""
==============================================================
Compressed Sparse Graph Routines (:mod:`scipy.sparse.csgraph`)
==============================================================
.. currentmodule:: scipy.sparse.csgraph
Fast graph algorithms based on sparse matrix representations.
Contents
========
.. autosummary::
:toctree: generated/
connected_components -- determine connected components of a graph
laplacian -- compute the laplacian of a graph
shortest_path -- compute the shortest path between points on a positive graph
dijkstra -- use Dijkstra's algorithm for shortest path
floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
bellman_ford -- use the Bellman-Ford algorithm for shortest path
johnson -- use Johnson's algorithm for shortest path
breadth_first_order -- compute a breadth-first order of nodes
depth_first_order -- compute a depth-first order of nodes
breadth_first_tree -- construct the breadth-first tree from a given node
depth_first_tree -- construct a depth-first tree from a given node
minimum_spanning_tree -- construct the minimum spanning tree of a graph
Graph Representations
=====================
This module uses graphs which are stored in a matrix format. A
graph with N nodes can be represented by an (N x N) adjacency matrix G.
If there is a connection from node i to node j, then G[i, j] = w, where
w is the weight of the connection. For nodes i and j which are
not connected, the value depends on the representation:
- for dense array representations, non-edges are represented by
G[i, j] = 0, infinity, or NaN.
- for dense masked representations (of type np.ma.MaskedArray), non-edges
are represented by masked values. This can be useful when graphs with
zero-weight edges are desired.
- for sparse array representations, non-edges are represented by
non-entries in the matrix. This sort of sparse representation also
allows for edges with zero weights.
As a concrete example, imagine that you would like to represent the following
undirected graph::
G
(0)
/ \
1 2
/ \
(2) (1)
This graph has three nodes, where nodes 0 and 1 are connected by an edge of
weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
We can construct the dense, masked, and sparse representations as follows,
keeping in mind that an undirected graph is represented by a symmetric matrix::
>>> G_dense = np.array([[0, 2, 1],
... [2, 0, 0],
... [1, 0, 0]])
>>> G_masked = np.ma.masked_values(G_dense, 0)
>>> from scipy.sparse import csr_matrix
>>> G_sparse = csr_matrix(G_dense)
This becomes more difficult when zero edges are significant. For example,
consider the situation when we slightly modify the above graph::
G2
(0)
/ \
0 2
/ \
(2) (1)
This is identical to the previous graph, except nodes 0 and 2 are connected
by an edge of zero weight. In this case, the dense representation above
leads to ambiguities: how can non-edges be represented if zero is a meaningful
value? In this case, either a masked or sparse representation must be used
to eliminate the ambiguity::
>>> G2_data = np.array([[np.inf, 2, 0 ],
... [2, np.inf, np.inf],
... [0, np.inf, np.inf]])
>>> G2_masked = np.ma.masked_invalid(G2_data)
>>> from scipy.sparse.csgraph import csgraph_from_dense
>>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
>>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
>>> G2_sparse.data
array([ 2., 0., 2., 0.])
Here we have used a utility routine from the csgraph submodule in order to
convert the dense representation to a sparse representation which can be
understood by the algorithms in the submodule. By viewing the data array, we
can see that the zero values are explicitly encoded in the graph.
Directed vs. Undirected
-----------------------
Matrices may represent either directed or undirected graphs. This is
specified throughout the csgraph module by a boolean keyword. Graphs are
assumed to be directed by default. In a directed graph, traversal from node
i to node j can be accomplished over the edge G[i, j], but not the edge
G[j, i]. In a non-directed graph, traversal from node i to node j can be
accomplished over either G[i, j] or G[j, i]. If both edges are not null,
and the two have unequal weights, then the smaller of the two is used.
Note that a symmetric matrix will represent an undirected graph, regardless
of whether the 'directed' keyword is set to True or False. In this case,
using ``directed=True`` generally leads to more efficient computation.
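As a small illustration (reusing the ``np`` alias from the examples above), the
same matrix can yield different distances depending on the ``directed`` keyword::
    >>> from scipy.sparse.csgraph import shortest_path
    >>> G_directed = np.array([[0, 1, 0],
    ...                        [0, 0, 1],
    ...                        [0, 0, 0]])
    >>> shortest_path(G_directed, directed=True)[2, 0]   # no path against the arrows
    inf
    >>> shortest_path(G_directed, directed=False)[2, 0]  # edges usable in both directions
    2.0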
The routines in this module accept as input either scipy.sparse representations
(csr, csc, or lil format), masked representations, or dense representations
with non-edges indicated by zeros, infinities, and NaN entries.
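For example, continuing with the ``G_sparse`` matrix constructed above, a sparse
representation can be passed directly to any of the routines::
    >>> from scipy.sparse.csgraph import connected_components
    >>> n_components, labels = connected_components(G_sparse)
    >>> n_components
    1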
"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['cs_graph_components',
'connected_components',
'laplacian',
'shortest_path',
'floyd_warshall',
'dijkstra',
'bellman_ford',
'johnson',
'breadth_first_order',
'depth_first_order',
'breadth_first_tree',
'depth_first_tree',
'minimum_spanning_tree',
'construct_dist_matrix',
'reconstruct_path',
'csgraph_from_dense',
'csgraph_masked_from_dense',
'csgraph_to_dense',
'csgraph_to_masked',
'NegativeCycleError']
from ._components import cs_graph_components
from ._laplacian import laplacian
from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\
bellman_ford, johnson, NegativeCycleError
from ._traversal import breadth_first_order, depth_first_order, \
breadth_first_tree, depth_first_tree, connected_components
from ._min_spanning_tree import minimum_spanning_tree
from ._tools import construct_dist_matrix, reconstruct_path,\
csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense,\
csgraph_from_masked
from numpy import deprecate as _deprecate
cs_graph_components = _deprecate(cs_graph_components,
message=("In the future, use "
"csgraph.connected_components. Note "
"that this new function has a "
"slightly different interface: see "
"the docstring for more "
"information."))
from numpy.testing import Tester
test = Tester().test
| bsd-3-clause |
arthurfurlan/django-shortim | setup.py | 1 | 1075 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Strongly inspired (copied :D) from:
# http://jacobian.org/writing/django-apps-with-buildout/
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-shortim',
version='0.2',
url='http://github.com/valvim/django-shortim',
license='GPLv3',
description=('Django application for creating short URLs. '
'This code is currently running on http://va.mu.'),
author='Arthur Furlan',
long_description = read('README'),
author_email='afurlan@valvim.com',
packages=find_packages('src'),
package_dir={'': 'src'},
install_requires=['setuptools'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
)
| gpl-3.0 |