repo_name
stringlengths 5
100
| path
stringlengths 4
251
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 499
1.05M
| license
stringclasses 15
values |
---|---|---|---|---|---|
pasqualguerrero/django | django/core/management/commands/showmigrations.py | 438 | 4901 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
    help = "Shows all available migrations for the current project"

    def add_arguments(self, parser):
        """Register positional app labels, --database, and the mutually
        exclusive --list/--plan output-format options."""
        parser.add_argument(
            'app_labels', nargs='*',
            help='App labels of applications to limit the output to.')
        parser.add_argument(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to synchronize. Defaults to the "default" database.')
        formats = parser.add_mutually_exclusive_group()
        formats.add_argument(
            '--list', '-l', action='store_const', dest='format', const='list',
            help='Shows a list of all migrations and which are applied.')
        formats.add_argument(
            '--plan', '-p', action='store_const', dest='format', const='plan',
            help='Shows all migrations in the order they will be applied.')
        parser.set_defaults(format='list')

    def handle(self, *args, **options):
        """Dispatch to the list or plan renderer for the selected database."""
        self.verbosity = options.get('verbosity')
        # Resolve the connection for the database we are inspecting.
        connection = connections[options.get('database')]
        if options['format'] == "plan":
            return self.show_plan(connection)
        return self.show_list(connection, options['app_labels'])

    def show_list(self, connection, app_names=None):
        """Print migrations grouped per app, marking applied ones with [X].

        When *app_names* is empty or None, every migrated app is shown in
        alphabetical order; unknown app labels raise CommandError.
        """
        # Load migrations from disk/DB.
        loader = MigrationLoader(connection, ignore_no_migrations=True)
        graph = loader.graph
        if app_names:
            # Reject any requested label that has no migrations.
            invalid_apps = [app_name for app_name in app_names
                            if app_name not in loader.migrated_apps]
            if invalid_apps:
                raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
        else:
            app_names = sorted(loader.migrated_apps)
        # Walk each app's graph from its roots (oldest) to its leaves (newest).
        for app_name in app_names:
            self.stdout.write(app_name, self.style.MIGRATE_LABEL)
            shown = set()
            for leaf in graph.leaf_nodes(app_name):
                for plan_node in graph.forwards_plan(leaf):
                    if plan_node in shown or plan_node[0] != app_name:
                        continue
                    title = plan_node[1]
                    replaces = graph.nodes[plan_node].replaces
                    if replaces:
                        # Give squashed migrations a more descriptive title.
                        title += " (%s squashed migrations)" % len(replaces)
                    # Mark it as applied/unapplied.
                    marker = "[X]" if plan_node in loader.applied_migrations else "[ ]"
                    self.stdout.write(" %s %s" % (marker, title))
                    shown.add(plan_node)
            # If nothing was printed for this app, say so explicitly.
            if not shown:
                self.stdout.write(" (no migrations)", self.style.MIGRATE_FAILURE)

    def show_plan(self, connection):
        """Print every known migration in the order it would be applied."""
        # Load migrations from disk/DB.
        loader = MigrationLoader(connection)
        graph = loader.graph
        plan = []
        seen = set()
        # Deduplicated union of the forward plans of all leaf nodes.
        for target in graph.leaf_nodes():
            for migration_key in graph.forwards_plan(target):
                if migration_key not in seen:
                    seen.add(migration_key)
                    plan.append(graph.nodes[migration_key])

        def format_deps(migration):
            # Render dependencies, resolving "__first__" placeholders to the
            # dependency app's actual root migration when one exists.
            rendered = []
            for dep in migration.dependencies:
                if dep[1] == "__first__":
                    roots = graph.root_nodes(dep[0])
                    dep = roots[0] if roots else (dep[0], "__first__")
                rendered.append("%s.%s" % dep)
            if rendered:
                return " ... (%s)" % ", ".join(rendered)
            return ""

        for migration in plan:
            # Dependencies are only shown at verbosity 2 and above.
            deps = format_deps(migration) if self.verbosity >= 2 else ""
            applied = (migration.app_label, migration.name) in loader.applied_migrations
            marker = "[X]" if applied else "[ ]"
            self.stdout.write("%s %s%s" % (marker, migration, deps))
| bsd-3-clause |
RotorWidgets/base-station | base_station/races/migrations/0002_auto_20160324_0525.py | 1 | 1978 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-24 05:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9.4, 2016-03-24): adds timing and
    numbering fields to RaceHeat and updates HeatEvent relations/choices."""

    # Must be applied after the initial races migration.
    dependencies = [
        ('races', '0001_initial'),
    ]

    operations = [
        # Nullable timestamps: a heat may not have started or ended yet.
        migrations.AddField(
            model_name='raceheat',
            name='ended_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Heat ended time'),
        ),
        migrations.AddField(
            model_name='raceheat',
            name='number',
            field=models.PositiveSmallIntegerField(default=1, verbose_name='Heat number'),
        ),
        migrations.AddField(
            model_name='raceheat',
            name='started_time',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Heat started time'),
        ),
        # Heat events are deleted along with their heat; reverse accessor is
        # "triggered_events" on RaceHeat.
        migrations.AlterField(
            model_name='heatevent',
            name='heat',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='triggered_events', to='races.RaceHeat'),
        ),
        # Tracker link is optional but also cascades on delete.
        migrations.AlterField(
            model_name='heatevent',
            name='tracker',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='triggered_events', to='trackers.Tracker'),
        ),
        # Enumerated trigger types stored as small integers.
        migrations.AlterField(
            model_name='heatevent',
            name='trigger',
            field=models.PositiveSmallIntegerField(choices=[(0, 'Gate Trigger'), (1, 'Area Entered Trigger'), (2, 'Area Exit Trigger'), (3, 'Crash Trigger'), (4, 'Land Trigger'), (5, 'Takeoff Trigger'), (6, 'Arm Trigger'), (7, 'Disarm Trigger'), (8, 'Start Trigger'), (9, 'End Trigger')], verbose_name='trigger'),
        ),
        # Heat numbers are unique within an event.
        migrations.AlterUniqueTogether(
            name='raceheat',
            unique_together=set([('number', 'event')]),
        ),
    ]
| gpl-3.0 |
dtrodrigues/nifi-minifi-cpp | docker/test/integration/MiNiFi_integration_test_driver.py | 2 | 15679 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import uuid
from pydoc import locate
from minifi.core.InputPort import InputPort
from minifi.core.DockerTestCluster import DockerTestCluster
from minifi.validators.EmptyFilesOutPutValidator import EmptyFilesOutPutValidator
from minifi.validators.NoFileOutPutValidator import NoFileOutPutValidator
from minifi.validators.SingleFileOutputValidator import SingleFileOutputValidator
from minifi.validators.MultiFileOutputValidator import MultiFileOutputValidator
from minifi.validators.SingleOrMultiFileOutputValidator import SingleOrMultiFileOutputValidator
from minifi.validators.NoContentCheckFileNumberValidator import NoContentCheckFileNumberValidator
from minifi.validators.NumFileRangeValidator import NumFileRangeValidator
from minifi.validators.SingleJSONFileOutputValidator import SingleJSONFileOutputValidator
from minifi.core.utils import decode_escaped_str
class MiNiFi_integration_test:
    """Driver for a single MiNiFi docker integration test.

    Owns a :class:`DockerTestCluster`, tracks the flow's connectable nodes
    and remote process groups, and provides the assertion helpers used by
    the behave step implementations.
    """

    def __init__(self, context):
        self.test_id = context.test_id
        self.cluster = DockerTestCluster(context)
        self.connectable_nodes = []
        # Remote process groups are not connectables
        self.remote_process_groups = []
        self.file_system_observer = None
        self.docker_directory_bindings = context.directory_bindings
        self.cluster.set_directory_bindings(
            self.docker_directory_bindings.get_directory_bindings(self.test_id),
            self.docker_directory_bindings.get_data_directories(self.test_id))

    def __del__(self):
        self.cleanup()

    def cleanup(self):
        """Tear down all containers/resources owned by the cluster."""
        self.cluster.cleanup()

    def acquire_container(self, name, engine='minifi-cpp', command=None):
        return self.cluster.acquire_container(name, engine, command)

    def wait_for_container_startup_to_finish(self, container_name):
        """Block (up to 120s) for the container's startup log line.

        Returns True on success; on failure logs the full app output to
        aid debugging and returns False.
        """
        startup_success = self.cluster.wait_for_startup_log(container_name, 120)
        if not startup_success:
            logging.error("Cluster startup failed for %s", container_name)
            self.cluster.log_app_output()
        return startup_success

    # --- Helpers that start auxiliary service containers --------------------

    def start_kafka_broker(self):
        self.cluster.acquire_container('kafka-broker', 'kafka-broker')
        self.cluster.deploy('zookeeper')
        self.cluster.deploy('kafka-broker')
        assert self.wait_for_container_startup_to_finish('kafka-broker')

    def start_splunk(self):
        self.cluster.acquire_container('splunk', 'splunk')
        self.cluster.deploy('splunk')
        assert self.wait_for_container_startup_to_finish('splunk')
        assert self.cluster.enable_splunk_hec_indexer('splunk', 'splunk_hec_token')

    def start_elasticsearch(self):
        self.cluster.acquire_container('elasticsearch', 'elasticsearch')
        self.cluster.deploy('elasticsearch')
        assert self.wait_for_container_startup_to_finish('elasticsearch')

    def start_opensearch(self):
        self.cluster.acquire_container('opensearch', 'opensearch')
        self.cluster.deploy('opensearch')
        assert self.wait_for_container_startup_to_finish('opensearch')

    def start(self, container_name=None):
        """Deploy one named flow container, or the whole cluster if None."""
        if container_name is not None:
            logging.info("Starting container %s", container_name)
            self.cluster.deploy_flow(container_name)
            assert self.wait_for_container_startup_to_finish(container_name)
            return
        logging.info("MiNiFi_integration_test start")
        self.cluster.deploy_flow()
        for container_name in self.cluster.containers:
            assert self.wait_for_container_startup_to_finish(container_name)

    def stop(self, container_name):
        logging.info("Stopping container %s", container_name)
        self.cluster.stop_flow(container_name)

    def kill(self, container_name):
        logging.info("Killing container %s", container_name)
        self.cluster.kill_flow(container_name)

    def restart(self, container_name):
        logging.info("Restarting container %s", container_name)
        self.cluster.restart_flow(container_name)

    # --- Flow node / remote process group registry --------------------------

    def add_node(self, processor):
        """Register a processor; names must be unique within the test."""
        if processor.get_name() in (elem.get_name() for elem in self.connectable_nodes):
            raise Exception("Trying to register processor with an already registered name: \"%s\"" % processor.get_name())
        self.connectable_nodes.append(processor)

    def get_or_create_node_by_name(self, node_name):
        """Return the node registered under *node_name*, instantiating and
        registering a new processor of that type if none exists yet.

        Bug fix: previously delegated to get_node_by_name(), which raises
        for unknown names, making the "create" branch unreachable.
        """
        node = next((elem for elem in self.connectable_nodes
                     if elem.get_name() == node_name), None)
        if node is None:
            if node_name == "RemoteProcessGroup":
                raise Exception("Trying to register RemoteProcessGroup without an input port or address.")
            # Dynamically load minifi.processors.<Name>.<Name> by convention.
            node = locate("minifi.processors." + node_name + "." + node_name)()
            node.set_name(node_name)
            self.add_node(node)
        return node

    def get_node_by_name(self, name):
        """Return the registered node with the given name; raise if absent."""
        for node in self.connectable_nodes:
            if name == node.get_name():
                return node
        raise Exception("Trying to fetch unknown node: \"%s\"" % name)

    def add_remote_process_group(self, remote_process_group):
        """Register a remote process group; names must be unique."""
        if remote_process_group.get_name() in (elem.get_name() for elem in self.remote_process_groups):
            raise Exception("Trying to register remote_process_group with an already registered name: \"%s\"" % remote_process_group.get_name())
        self.remote_process_groups.append(remote_process_group)

    def get_remote_process_group_by_name(self, name):
        """Return the registered remote process group named *name*."""
        for node in self.remote_process_groups:
            if name == node.get_name():
                return node
        # Typo fix: message previously read "unknow node".
        raise Exception("Trying to fetch unknown node: \"%s\"" % name)

    @staticmethod
    def generate_input_port_for_remote_process_group(remote_process_group, name):
        input_port_node = InputPort(name, remote_process_group)
        # Generate an MD5 hash unique to the remote process group id
        input_port_node.set_uuid(uuid.uuid3(remote_process_group.get_uuid(), "input_port"))
        return input_port_node

    # --- Test data management ------------------------------------------------

    def add_test_data(self, path, test_data, file_name=None):
        """Write *test_data* (escape sequences decoded, UTF-8 encoded) into
        the docker directory binding at *path*; the file name defaults to a
        random UUID."""
        if file_name is None:
            file_name = str(uuid.uuid4())
        test_data = decode_escaped_str(test_data)
        self.docker_directory_bindings.put_file_to_docker_path(self.test_id, path, file_name, test_data.encode('utf-8'))

    def put_test_resource(self, file_name, contents):
        self.docker_directory_bindings.put_test_resource(self.test_id, file_name, contents)

    def rm_out_child(self):
        self.docker_directory_bindings.rm_out_child(self.test_id)

    def add_file_system_observer(self, file_system_observer):
        self.file_system_observer = file_system_observer

    # --- Output-file validation helpers --------------------------------------

    def check_for_no_files_generated(self, wait_time_in_seconds):
        output_validator = NoFileOutPutValidator()
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output_after_time_period(wait_time_in_seconds, output_validator)

    def check_for_single_file_with_content_generated(self, content, timeout_seconds):
        output_validator = SingleFileOutputValidator(decode_escaped_str(content))
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator, 1)

    def check_for_single_json_file_with_content_generated(self, content, timeout_seconds):
        output_validator = SingleJSONFileOutputValidator(content)
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator, 1)

    def check_for_multiple_files_generated(self, file_count, timeout_seconds, expected_content=None):
        # Fixed mutable default argument (was expected_content=[]); the old
        # default was only read, but None is the safe idiom and is
        # backward-compatible for all callers.
        expected_content = expected_content or []
        output_validator = MultiFileOutputValidator(file_count, [decode_escaped_str(content) for content in expected_content])
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator, file_count)

    def check_for_at_least_one_file_with_content_generated(self, content, timeout_seconds):
        output_validator = SingleOrMultiFileOutputValidator(decode_escaped_str(content))
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator)

    def check_for_num_files_generated(self, num_flowfiles, timeout_seconds):
        output_validator = NoContentCheckFileNumberValidator(num_flowfiles)
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator, max(1, num_flowfiles))

    def check_for_num_file_range_generated(self, min_files, max_files, wait_time_in_seconds):
        output_validator = NumFileRangeValidator(min_files, max_files)
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output_after_time_period(wait_time_in_seconds, output_validator)

    def check_for_an_empty_file_generated(self, timeout_seconds):
        output_validator = EmptyFilesOutPutValidator()
        output_validator.set_output_dir(self.file_system_observer.get_output_dir())
        self.__check_output(timeout_seconds, output_validator, 1)

    def __check_output_after_time_period(self, wait_time_in_seconds, output_validator):
        # Fixed wait: used for "nothing should have happened" assertions.
        time.sleep(wait_time_in_seconds)
        self.__validate(output_validator)

    def __check_output(self, timeout_seconds, output_validator, max_files=0):
        result = self.file_system_observer.validate_output(timeout_seconds, output_validator, max_files)
        self.cluster.log_app_output()
        assert not self.cluster.segfault_happened()
        assert result

    def __validate(self, validator):
        self.cluster.log_app_output()
        assert not self.cluster.segfault_happened()
        assert validator.validate()

    # --- Assertions delegated to the cluster ---------------------------------

    def check_s3_server_object_data(self, s3_container_name, object_data):
        assert self.cluster.check_s3_server_object_data(s3_container_name, object_data)

    def check_s3_server_object_metadata(self, s3_container_name, content_type):
        assert self.cluster.check_s3_server_object_metadata(s3_container_name, content_type)

    def check_empty_s3_bucket(self, s3_container_name):
        assert self.cluster.is_s3_bucket_empty(s3_container_name)

    def check_http_proxy_access(self, http_proxy_container_name, url):
        assert self.cluster.check_http_proxy_access(http_proxy_container_name, url)

    def check_azure_storage_server_data(self, azure_container_name, object_data):
        assert self.cluster.check_azure_storage_server_data(azure_container_name, object_data)

    def wait_for_kafka_consumer_to_be_registered(self, kafka_container_name):
        assert self.cluster.wait_for_kafka_consumer_to_be_registered(kafka_container_name)

    def check_splunk_event(self, splunk_container_name, query):
        assert self.cluster.check_splunk_event(splunk_container_name, query)

    def check_splunk_event_with_attributes(self, splunk_container_name, query, attributes):
        assert self.cluster.check_splunk_event_with_attributes(splunk_container_name, query, attributes)

    def check_google_cloud_storage(self, gcs_container_name, content):
        assert self.cluster.check_google_cloud_storage(gcs_container_name, content)

    def check_empty_gcs_bucket(self, gcs_container_name):
        assert self.cluster.is_gcs_bucket_empty(gcs_container_name)

    def check_empty_elastic(self, elastic_container_name):
        assert self.cluster.is_elasticsearch_empty(elastic_container_name)

    def elastic_generate_apikey(self, elastic_container_name):
        return self.cluster.elastic_generate_apikey(elastic_container_name)

    def create_doc_elasticsearch(self, elastic_container_name, index_name, doc_id):
        assert self.cluster.create_doc_elasticsearch(elastic_container_name, index_name, doc_id)

    def check_elastic_field_value(self, elastic_container_name, index_name, doc_id, field_name, field_value):
        assert self.cluster.check_elastic_field_value(elastic_container_name, index_name, doc_id, field_name, field_value)

    def add_elastic_user_to_opensearch(self, container_name):
        assert self.cluster.add_elastic_user_to_opensearch(container_name)

    # --- Log-content assertions ----------------------------------------------

    def check_minifi_log_contents(self, line, timeout_seconds=60, count=1):
        self.check_container_log_contents("minifi-cpp", line, timeout_seconds, count)

    def check_minifi_log_matches_regex(self, regex, timeout_seconds=60, count=1):
        """Assert at least one minifi-cpp container's log matches *regex*."""
        for container in self.cluster.containers.values():
            if container.get_engine() == "minifi-cpp":
                line_found = self.cluster.wait_for_app_logs_regex(container.get_name(), regex, timeout_seconds, count)
                if line_found:
                    return
        assert False

    def check_container_log_contents(self, container_engine, line, timeout_seconds=60, count=1):
        """Assert at least one container of *container_engine* logs *line*."""
        for container in self.cluster.containers.values():
            if container.get_engine() == container_engine:
                line_found = self.cluster.wait_for_app_logs(container.get_name(), line, timeout_seconds, count)
                if line_found:
                    return
        assert False

    def check_minifi_log_does_not_contain(self, line, wait_time_seconds):
        """Wait, then assert no minifi-cpp container has logged *line*."""
        time.sleep(wait_time_seconds)
        for container in self.cluster.containers.values():
            if container.get_engine() == "minifi-cpp":
                _, logs = self.cluster.get_app_log(container.get_name())
                if logs is not None and 1 <= logs.decode("utf-8").count(line):
                    assert False

    def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds):
        assert self.cluster.check_query_results(postgresql_container_name, query, number_of_rows, timeout_seconds)

    def check_container_log_matches_regex(self, container_name, log_pattern, timeout_seconds, count=1):
        assert self.cluster.wait_for_app_logs_regex(container_name, log_pattern, timeout_seconds, count)

    # --- Azure blob / Prometheus assertions ----------------------------------

    def add_test_blob(self, blob_name, content, with_snapshot):
        self.cluster.add_test_blob(blob_name, content, with_snapshot)

    def check_azure_blob_storage_is_empty(self, timeout_seconds):
        assert self.cluster.check_azure_blob_storage_is_empty(timeout_seconds)

    def check_azure_blob_and_snapshot_count(self, blob_and_snapshot_count, timeout_seconds):
        assert self.cluster.check_azure_blob_and_snapshot_count(blob_and_snapshot_count, timeout_seconds)

    def check_metric_class_on_prometheus(self, metric_class, timeout_seconds):
        assert self.cluster.wait_for_metric_class_on_prometheus(metric_class, timeout_seconds)

    def check_processor_metric_on_prometheus(self, metric_class, timeout_seconds, processor_name):
        assert self.cluster.wait_for_processor_metric_on_prometheus(metric_class, timeout_seconds, processor_name)
| apache-2.0 |
SunPower/pvfactors | pvfactors/geometry/pvground.py | 1 | 45147 | """Classes for implementation of ground geometry"""
from pvfactors import PVFactorsError
from pvfactors.config import (
MAX_X_GROUND, MIN_X_GROUND, Y_GROUND, DISTANCE_TOLERANCE, COLOR_DIC)
from pvfactors.geometry.base import (
BaseSide, PVSegment, ShadeCollection, PVSurface)
from pvfactors.geometry.timeseries import (
TsShadeCollection, TsLineCoords, TsPointCoords, TsSurface,
_get_params_at_idx)
from shapely.geometry import LineString
import numpy as np
from copy import deepcopy
class TsGround(object):
"""Timeseries ground class: this class is a vectorized version of the
PV ground geometry class, and it will store timeseries shaded ground
and illuminated ground elements, as well as pv row cut points."""
# TODO: this needs to be passed at initialization for flexibility
x_min = MIN_X_GROUND
x_max = MAX_X_GROUND
def __init__(self, shadow_elements, illum_elements, param_names=None,
flag_overlap=None, cut_point_coords=None, y_ground=None):
"""Initialize timeseries ground using list of timeseries surfaces
for the ground shadows
Parameters
----------
shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Timeseries shaded ground elements
illum_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Timeseries illuminated ground elements
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`, \
optional
List of cut point coordinates, as calculated for timeseries PV rows
(Default = None)
y_ground : float, optional
Y coordinate of flat ground [m] (Default=None)
"""
# Lists of timeseries ground elements
self.shadow_elements = shadow_elements
self.illum_elements = illum_elements
# Shade collections
list_shaded_surf = []
list_illum_surf = []
for shadow_el in shadow_elements:
list_shaded_surf += shadow_el.all_ts_surfaces
for illum_el in illum_elements:
list_illum_surf += illum_el.all_ts_surfaces
self.illum = TsShadeCollection(list_illum_surf, False)
self.shaded = TsShadeCollection(list_shaded_surf, True)
# Other ground attributes
self.param_names = [] if param_names is None else param_names
self.flag_overlap = flag_overlap
self.cut_point_coords = [] if cut_point_coords is None \
else cut_point_coords
self.y_ground = y_ground
self.shaded_params = dict.fromkeys(self.param_names)
self.illum_params = dict.fromkeys(self.param_names)
@classmethod
def from_ts_pvrows_and_angles(cls, list_ts_pvrows, alpha_vec, rotation_vec,
y_ground=Y_GROUND, flag_overlap=None,
param_names=None):
"""Create timeseries ground from list of timeseries PV rows, and
PV array and solar angles.
Parameters
----------
list_ts_pvrows : \
list of :py:class:`~pvfactors.geometry.pvrow.TsPVRow`
Timeseries PV rows to use to calculate timeseries ground shadows
alpha_vec : np.ndarray
Angle made by 2d solar vector and PV array x-axis [rad]
rotation_vec : np.ndarray
Timeseries rotation values of the PV row [deg]
y_ground : float, optional
Fixed y coordinate of flat ground [m] (Default = Y_GROUND constant)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
"""
rotation_vec = np.deg2rad(rotation_vec)
n_steps = len(rotation_vec)
# Calculate coords of ground shadows and cutting points
ground_shadow_coords = []
cut_point_coords = []
for ts_pvrow in list_ts_pvrows:
# Get pvrow coords
x1s_pvrow = ts_pvrow.full_pvrow_coords.b1.x
y1s_pvrow = ts_pvrow.full_pvrow_coords.b1.y
x2s_pvrow = ts_pvrow.full_pvrow_coords.b2.x
y2s_pvrow = ts_pvrow.full_pvrow_coords.b2.y
# --- Shadow coords calculation
# Calculate x coords of shadow
x1s_shadow = x1s_pvrow - (y1s_pvrow - y_ground) / np.tan(alpha_vec)
x2s_shadow = x2s_pvrow - (y2s_pvrow - y_ground) / np.tan(alpha_vec)
# Order x coords from left to right
x1s_on_left = x1s_shadow <= x2s_shadow
xs_left_shadow = np.where(x1s_on_left, x1s_shadow, x2s_shadow)
xs_right_shadow = np.where(x1s_on_left, x2s_shadow, x1s_shadow)
# Append shadow coords to list
ground_shadow_coords.append(
[[xs_left_shadow, y_ground * np.ones(n_steps)],
[xs_right_shadow, y_ground * np.ones(n_steps)]])
# --- Cutting points coords calculation
dx = (y1s_pvrow - y_ground) / np.tan(rotation_vec)
cut_point_coords.append(
TsPointCoords(x1s_pvrow - dx, y_ground * np.ones(n_steps)))
ground_shadow_coords = np.array(ground_shadow_coords)
return cls.from_ordered_shadows_coords(
ground_shadow_coords, flag_overlap=flag_overlap,
cut_point_coords=cut_point_coords, param_names=param_names,
y_ground=y_ground)
@classmethod
def from_ordered_shadows_coords(cls, shadow_coords, flag_overlap=None,
param_names=None, cut_point_coords=None,
y_ground=Y_GROUND):
"""Create timeseries ground from list of ground shadow coordinates.
Parameters
----------
shadow_coords : np.ndarray
List of ordered ground shadow coordinates (from left to right)
flag_overlap : list of bool, optional
Flags indicating if the ground shadows are overlapping, for all
time steps (Default=None). I.e. is there direct shading on pv rows?
param_names : list of str, optional
List of names of surface parameters to use when creating geometries
(Default = None)
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`, \
optional
List of cut point coordinates, as calculated for timeseries PV rows
(Default = None)
y_ground : float, optional
Fixed y coordinate of flat ground [m] (Default = Y_GROUND constant)
"""
# Get cut point coords if any
cut_point_coords = cut_point_coords or []
# Create shadow coordinate objects
list_shadow_coords = [TsLineCoords.from_array(coords)
for coords in shadow_coords]
# If the overlap flags were passed, make sure shadows don't overlap
if flag_overlap is not None:
if len(list_shadow_coords) > 1:
for idx, coords in enumerate(list_shadow_coords[:-1]):
coords.b2.x = np.where(flag_overlap,
list_shadow_coords[idx + 1].b1.x,
coords.b2.x)
# Create shaded ground elements
ts_shadows_elements = cls._shadow_elements_from_coords_and_cut_pts(
list_shadow_coords, cut_point_coords, param_names)
# Create illuminated ground elements
ts_illum_elements = cls._illum_elements_from_coords_and_cut_pts(
ts_shadows_elements, cut_point_coords, param_names, y_ground)
return cls(ts_shadows_elements, ts_illum_elements,
param_names=param_names, flag_overlap=flag_overlap,
cut_point_coords=cut_point_coords, y_ground=y_ground)
def at(self, idx, x_min_max=None, merge_if_flag_overlap=True,
with_cut_points=True):
"""Generate a PV ground geometry for the desired index. This will
only return non-point surfaces within the ground bounds, i.e.
surfaces that are not points, and which are within x_min and x_max.
Parameters
----------
idx : int
Index to use to generate PV ground geometry
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
merge_if_flag_overlap : bool, optional
Decide whether to merge all shadows if they overlap or not
(Default = True)
with_cut_points : bool, optional
Decide whether to include the saved cut points in the created
PV ground geometry (Default = True)
Returns
-------
pvground : :py:class:`~pvfactors.geometry.pvground.PVGround`
"""
# Get shadow elements that are not points at the given index
non_pt_shadow_elements = [
shadow_el for shadow_el in self.shadow_elements
if shadow_el.coords.length[idx] > DISTANCE_TOLERANCE]
if with_cut_points:
# We want the ground surfaces broken up at the cut points
if merge_if_flag_overlap:
# We want to merge the shadow surfaces when they overlap
list_shadow_surfaces = self._merge_shadow_surfaces(
idx, non_pt_shadow_elements)
else:
# No need to merge the shadow surfaces
list_shadow_surfaces = []
for shadow_el in non_pt_shadow_elements:
list_shadow_surfaces += \
shadow_el.non_point_surfaces_at(idx)
# Get the illuminated surfaces
list_illum_surfaces = []
for illum_el in self.illum_elements:
list_illum_surfaces += illum_el.non_point_surfaces_at(idx)
else:
# No need to break up the surfaces at the cut points
# We will need to build up new surfaces (since not done by classes)
# Get the parameters at the given index
illum_params = _get_params_at_idx(idx, self.illum_params)
shaded_params = _get_params_at_idx(idx, self.shaded_params)
if merge_if_flag_overlap and (self.flag_overlap is not None):
# We want to merge the shadow surfaces when they overlap
is_overlap = self.flag_overlap[idx]
if is_overlap and (len(non_pt_shadow_elements) > 1):
coords = [non_pt_shadow_elements[0].b1.at(idx),
non_pt_shadow_elements[-1].b2.at(idx)]
list_shadow_surfaces = [PVSurface(
coords, shaded=True, param_names=self.param_names,
params=shaded_params)]
else:
# No overlap for the given index or config
list_shadow_surfaces = [
PVSurface(shadow_el.coords.at(idx),
shaded=True, params=shaded_params,
param_names=self.param_names)
for shadow_el in non_pt_shadow_elements
if shadow_el.coords.length[idx]
> DISTANCE_TOLERANCE]
else:
# No need to merge the shadow surfaces
list_shadow_surfaces = [
PVSurface(shadow_el.coords.at(idx),
shaded=True, params=shaded_params,
param_names=self.param_names)
for shadow_el in non_pt_shadow_elements
if shadow_el.coords.length[idx]
> DISTANCE_TOLERANCE]
# Get the illuminated surfaces
list_illum_surfaces = [PVSurface(illum_el.coords.at(idx),
shaded=False, params=illum_params,
param_names=self.param_names)
for illum_el in self.illum_elements
if illum_el.coords.length[idx]
> DISTANCE_TOLERANCE]
# Pass the created lists to the PVGround builder
return PVGround.from_lists_surfaces(
list_shadow_surfaces, list_illum_surfaces,
param_names=self.param_names, y_ground=self.y_ground,
x_min_max=x_min_max)
def plot_at_idx(self, idx, ax, color_shaded=COLOR_DIC['pvrow_shaded'],
color_illum=COLOR_DIC['pvrow_illum'], x_min_max=None,
merge_if_flag_overlap=True, with_cut_points=True,
with_surface_index=False):
"""Plot timeseries ground at a certain index.
Parameters
----------
idx : int
Index to use to plot timeseries side
ax : :py:class:`matplotlib.pyplot.axes` object
Axes for plotting
color_shaded : str, optional
Color to use for plotting the shaded surfaces (Default =
COLOR_DIC['pvrow_shaded'])
color_shaded : str, optional
Color to use for plotting the illuminated surfaces (Default =
COLOR_DIC['pvrow_illum'])
x_min_max : tuple, optional
List of minimum and maximum x coordinates for the flat surface [m]
(Default = None)
merge_if_flag_overlap : bool, optional
Decide whether to merge all shadows if they overlap or not
(Default = True)
with_cut_points : bool, optional
Decide whether to include the saved cut points in the created
PV ground geometry (Default = True)
with_surface_index : bool, optional
Plot the surfaces with their index values (Default = False)
"""
pvground = self.at(idx, x_min_max=x_min_max,
merge_if_flag_overlap=merge_if_flag_overlap,
with_cut_points=with_cut_points)
pvground.plot(ax, color_shaded=color_shaded, color_illum=color_illum,
with_index=with_surface_index)
def update_params(self, new_dict):
"""Update the illuminated parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.update_illum_params(new_dict)
self.update_shaded_params(new_dict)
def update_illum_params(self, new_dict):
"""Update the illuminated parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.illum_params.update(new_dict)
for illum_el in self.illum_elements:
illum_el.params.update(new_dict)
for surf in illum_el.surface_list:
surf.params.update(new_dict)
def update_shaded_params(self, new_dict):
"""Update the shaded parameters with new ones, not only for the
timeseries ground, but also for its ground elements and the timeseries
surfaces of the ground elements, so that they are all synced.
Parameters
----------
new_dict : dict
New parameters
"""
self.shaded_params.update(new_dict)
for shaded_el in self.shadow_elements:
shaded_el.params.update(new_dict)
for surf in shaded_el.surface_list:
surf.params.update(new_dict)
def get_param_weighted(self, param):
"""Get timeseries parameter for the ts ground, after weighting by
surface length.
Parameters
----------
param : str
Name of parameter
Returns
-------
np.ndarray
Weighted parameter values
"""
return self.get_param_ww(param) / self.length
def get_param_ww(self, param):
"""Get timeseries parameter from the ground's surfaces with weight,
i.e. after multiplying by the surface lengths.
Parameters
----------
param: str
Surface parameter to return
Returns
-------
np.ndarray
Timeseries parameter values multiplied by weights
Raises
------
KeyError
if parameter name not in a surface parameters
"""
value = 0.
for shadow_el in self.shadow_elements:
value += shadow_el.get_param_ww(param)
for illum_el in self.illum_elements:
value += illum_el.get_param_ww(param)
return value
def shadow_coords_left_of_cut_point(self, idx_cut_pt):
"""Get coordinates of shadows located on the left side of the cut point
with given index. The coordinates of the shadows will be bounded
by the coordinates of the cut point and the default minimum
ground x values.
Parameters
----------
idx_cut_pt : int
Index of the cut point of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadows on the left side of the cut point
"""
cut_pt_coords = self.cut_point_coords[idx_cut_pt]
return [shadow_el._coords_left_of_cut_point(shadow_el.coords,
cut_pt_coords)
for shadow_el in self.shadow_elements]
def shadow_coords_right_of_cut_point(self, idx_cut_pt):
"""Get coordinates of shadows located on the right side of the cut
point with given index. The coordinates of the shadows will be bounded
by the coordinates of the cut point and the default maximum
ground x values.
Parameters
----------
idx_cut_pt : int
Index of the cut point of interest
Returns
-------
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
Coordinates of the shadows on the right side of the cut point
"""
cut_pt_coords = self.cut_point_coords[idx_cut_pt]
return [shadow_el._coords_right_of_cut_point(shadow_el.coords,
cut_pt_coords)
for shadow_el in self.shadow_elements]
def ts_surfaces_side_of_cut_point(self, side, idx_cut_pt):
"""Get a list of all the ts ground surfaces an a request side of
a cut point
Parameters
----------
side : str
Side of the cut point, either 'left' or 'right'
idx_cut_pt : int
Index of the cut point, on whose side we want to get the ground
surfaces
Returns
-------
list
List of timeseries ground surfaces on the side of the cut point
"""
list_ts_surfaces = []
for shadow_el in self.shadow_elements:
list_ts_surfaces += shadow_el.surface_dict[idx_cut_pt][side]
for illum_el in self.illum_elements:
list_ts_surfaces += illum_el.surface_dict[idx_cut_pt][side]
return list_ts_surfaces
@property
def n_ts_surfaces(self):
"""Number of timeseries surfaces in the ts ground"""
return self.n_ts_shaded_surfaces + self.n_ts_illum_surfaces
@property
def n_ts_shaded_surfaces(self):
"""Number of shaded timeseries surfaces in the ts ground"""
n_ts_surfaces = 0
for shadow_el in self.shadow_elements:
n_ts_surfaces += shadow_el.n_ts_surfaces
return n_ts_surfaces
@property
def n_ts_illum_surfaces(self):
"""Number of illuminated timeseries surfaces in the ts ground"""
n_ts_surfaces = 0
for illum_el in self.illum_elements:
n_ts_surfaces += illum_el.n_ts_surfaces
return n_ts_surfaces
@property
def all_ts_surfaces(self):
"""Number of timeseries surfaces in the ts ground"""
all_ts_surfaces = []
for shadow_el in self.shadow_elements:
all_ts_surfaces += shadow_el.all_ts_surfaces
for illum_el in self.illum_elements:
all_ts_surfaces += illum_el.all_ts_surfaces
return all_ts_surfaces
@property
def length(self):
"""Length of the timeseries ground"""
length = 0
for shadow_el in self.shadow_elements:
length += shadow_el.length
for illum_el in self.illum_elements:
length += illum_el.length
return length
@property
def shaded_length(self):
"""Length of the timeseries ground"""
length = 0
for shadow_el in self.shadow_elements:
length += shadow_el.length
return length
def non_point_shaded_surfaces_at(self, idx):
"""Return a list of shaded surfaces, that are not points
at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
list_surfaces = []
for shadow_el in self.shadow_elements:
list_surfaces += shadow_el.non_point_surfaces_at(0)
return list_surfaces
def non_point_illum_surfaces_at(self, idx):
"""Return a list of illuminated surfaces, that are not
points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
list_surfaces = []
for illum_el in self.illum_elements:
list_surfaces += illum_el.non_point_surfaces_at(0)
return list_surfaces
def non_point_surfaces_at(self, idx):
"""Return a list of all surfaces that are not
points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
list of :py:class:`~pvfactors.geometry.base.PVSurface`
"""
return self.non_point_illum_surfaces_at(idx) \
+ self.non_point_shaded_surfaces_at(idx)
def n_non_point_surfaces_at(self, idx):
"""Return the number of :py:class:`~pvfactors.geometry.base.PVSurface`
that are not points at given index
Parameters
----------
idx : int
Index at which we want the surfaces not to be points
Returns
-------
int
"""
return len(self.non_point_surfaces_at(idx))
@staticmethod
def _shadow_elements_from_coords_and_cut_pts(
list_shadow_coords, cut_point_coords, param_names):
"""Create ground shadow elements from a list of ordered shadow
coordinates (from left to right), and the ground cut point coordinates.
Notes
-----
This method will clip the shadow coords to the limit of ground,
i.e. the shadow coordinates shouldn't be outside of the range
[MIN_X_GROUND, MAX_X_GROUND].
Parameters
----------
list_shadow_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
List of ordered ground shadow coordinates (from left to right)
cut_point_coords : \
list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
List of cut point coordinates (from left to right)
param_names : list
List of parameter names for the ground elements
Returns
-------
list_shadow_elements : \
list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
Ordered list of shadow elements (from left to right)
"""
list_shadow_elements = []
# FIXME: x_min and x_max should be passed as inputs
for shadow_coords in list_shadow_coords:
shadow_coords.b1.x = np.clip(shadow_coords.b1.x, MIN_X_GROUND,
MAX_X_GROUND)
shadow_coords.b2.x = np.clip(shadow_coords.b2.x, MIN_X_GROUND,
MAX_X_GROUND)
list_shadow_elements.append(
TsGroundElement(shadow_coords,
list_ordered_cut_pts_coords=cut_point_coords,
param_names=param_names, shaded=True))
return list_shadow_elements
    @staticmethod
    def _illum_elements_from_coords_and_cut_pts(
            list_shadow_elements, cut_pt_coords, param_names, y_ground):
        """Create ground illuminated elements from a list of ordered shadow
        elements (from left to right), and the ground cut point coordinates.
        This method will make sure that the illuminated ground elements are
        all within the ground limits [MIN_X_GROUND, MAX_X_GROUND].

        Parameters
        ----------
        list_shadow_elements : \
            list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
            Ordered list of shadow elements (from left to right)
        cut_pt_coords : \
            list of :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            List of cut point coordinates (from left to right)
        param_names : list
            List of parameter names for the ground elements
        y_ground : float
            Fixed y coordinate of the flat ground [m]

        Returns
        -------
        list_illum_elements : \
            list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
            Ordered list of illuminated elements (from left to right)

        Raises
        ------
        PVFactorsError
            If there is no shadow element at all on the ground
        """
        list_illum_elements = []
        if len(list_shadow_elements) == 0:
            msg = """There must be at least one shadow element on the ground,
            otherwise it probably means that no PV rows were created, so
            there's no point in running a simulation..."""
            raise PVFactorsError(msg)
        # Number of timesteps, taken from the first shadow's boundary coords
        n_steps = len(list_shadow_elements[0].coords.b1.x)
        y_ground_vec = y_ground * np.ones(n_steps)
        # FIXME: x_min and x_max should be passed as inputs
        next_x = MIN_X_GROUND * np.ones(n_steps)
        # Build the ground elements from left to right, starting at x_min
        # and covering the ground with illuminated elements where there's no
        # shadow
        for shadow_element in list_shadow_elements:
            # Illuminated gap spans from the end of the previous shadow
            # (or x_min) to the left edge of the current shadow
            x1 = next_x
            x2 = shadow_element.coords.b1.x
            coords = TsLineCoords.from_array(
                np.array([[x1, y_ground_vec], [x2, y_ground_vec]]))
            list_illum_elements.append(TsGroundElement(
                coords, list_ordered_cut_pts_coords=cut_pt_coords,
                param_names=param_names, shaded=False))
            next_x = shadow_element.coords.b2.x
        # Add the last illuminated element to the list
        coords = TsLineCoords.from_array(
            np.array([[next_x, y_ground_vec],
                      [MAX_X_GROUND * np.ones(n_steps), y_ground_vec]]))
        list_illum_elements.append(TsGroundElement(
            coords, list_ordered_cut_pts_coords=cut_pt_coords,
            param_names=param_names, shaded=False))
        return list_illum_elements
    def _merge_shadow_surfaces(self, idx, non_pt_shadow_elements):
        """Merge the shadow surfaces in a list of shadow elements
        at the shadow boundaries only, at a given index, but keep the shadow
        surfaces broken up at the cut points.

        Parameters
        ----------
        idx : int
            Index at which we want to merge the surfaces
        non_pt_shadow_elements : \
            list of :py:class:`~pvfactors.geometry.pvground.TsGroundElement`
            List of non point shadow elements

        Returns
        -------
        list_shadow_surfaces : \
            list of :py:class:`~pvfactors.geometry.base.PVSurface`
            List of shadow surfaces at a given index
            (ordered from left to right)
        """
        # TODO: check if it would be faster to merge the ground elements first,
        # and then break it down with the cut points
        # Decide whether to merge all shadows or not
        list_shadow_surfaces = []
        if self.flag_overlap is not None:
            # Get the overlap flags
            is_overlap = self.flag_overlap[idx]
            n_shadow_elements = len(non_pt_shadow_elements)
            if is_overlap and (n_shadow_elements > 1):
                # If there's only one shadow, not point in going through this
                # Now go from left to right and merge shadow surfaces
                # ``surface_to_merge`` carries the last surface of the
                # previous element, still waiting to be merged/appended
                surface_to_merge = None
                for i_el, shadow_el in enumerate(non_pt_shadow_elements):
                    surfaces = shadow_el.non_point_surfaces_at(idx)
                    n_surf = len(surfaces)
                    for i_surf, surface in enumerate(surfaces):
                        if i_surf == n_surf - 1:
                            # last surface, could also be first
                            if i_surf == 0:
                                # Need to merge with preceding if exists
                                if surface_to_merge is not None:
                                    # New surface spans from the left edge of
                                    # the pending surface to this one's right
                                    coords = [surface_to_merge.boundary[0],
                                              surface.boundary[1]]
                                    surface = PVSurface(
                                        coords, shaded=True,
                                        param_names=self.param_names,
                                        params=surface.params,
                                        index=surface.index)
                            if i_el == n_shadow_elements - 1:
                                # last surface of last shadow element
                                list_shadow_surfaces.append(surface)
                            else:
                                # keep for merging with next element
                                surface_to_merge = surface
                        elif i_surf == 0:
                            # first surface but definitely not last either
                            if surface_to_merge is not None:
                                coords = [surface_to_merge.boundary[0],
                                          surface.boundary[1]]
                                list_shadow_surfaces.append(
                                    PVSurface(coords, shaded=True,
                                              param_names=self.param_names,
                                              params=surface.params,
                                              index=surface.index))
                            else:
                                list_shadow_surfaces.append(surface)
                        else:
                            # not first nor last surface
                            list_shadow_surfaces.append(surface)
            else:
                # There's no need to merge anything
                for shadow_el in non_pt_shadow_elements:
                    list_shadow_surfaces += \
                        shadow_el.non_point_surfaces_at(idx)
        else:
            # There's no need to merge anything
            for shadow_el in non_pt_shadow_elements:
                list_shadow_surfaces += shadow_el.non_point_surfaces_at(idx)
        return list_shadow_surfaces
class TsGroundElement(object):
    """Special class for timeseries ground elements: a ground element has known
    timeseries coordinate boundaries, but it will also have a break down of
    its area into n+1 timeseries surfaces located in the n+1 ground zones
    defined by the n ground cutting points.
    This is crucial to calculate view factors in a vectorized way."""
    def __init__(self, coords, list_ordered_cut_pts_coords=None,
                 param_names=None, shaded=False):
        """Initialize the timeseries ground element using its timeseries
        line coordinates, and build the timeseries surfaces for all the
        cut point zones.

        Parameters
        ----------
        coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Timeseries line coordinates of the ground element
        list_ordered_cut_pts_coords : list, optional
            List of all the cut point timeseries coordinates
            (Default = [])
        param_names : list of str, optional
            List of names of surface parameters to use when creating geometries
            (Default = None)
        shaded : bool, optional
            Flag specifying is element is a shadow or not (Default = False)
        """
        self.coords = coords
        self.param_names = param_names or []
        # Initialize all declared parameters to None
        self.params = dict.fromkeys(self.param_names)
        self.shaded = shaded
        # surface_dict maps cut point index -> {'left': [...], 'right': [...]}
        self.surface_dict = None  # will be necessary for view factor calcs
        self.surface_list = []  # will be necessary for vf matrix formation
        list_ordered_cut_pts_coords = list_ordered_cut_pts_coords or []
        if len(list_ordered_cut_pts_coords) > 0:
            # Break the element down into per-zone timeseries surfaces
            self._create_all_ts_surfaces(list_ordered_cut_pts_coords)
        self.n_ts_surfaces = len(self.surface_list)
    @property
    def b1(self):
        """Timeseries coordinates of first boundary point"""
        return self.coords.b1
    @property
    def b2(self):
        """Timeseries coordinates of second boundary point"""
        return self.coords.b2
    @property
    def centroid(self):
        """Timeseries point coordinates of the element's centroid"""
        return self.coords.centroid
    @property
    def length(self):
        """Timeseries length of the ground element"""
        return self.coords.length
    @property
    def all_ts_surfaces(self):
        """List of all ts surfaces making up the ts ground element"""
        return self.surface_list
    def surfaces_at(self, idx):
        """Return list of surfaces (from left to right) at given index that
        make up the ground element.

        Parameters
        ----------
        idx : int
            Index of interest

        Returns
        -------
        list of :py:class:`~pvfactors.geometry.base.PVSurface`
        """
        return [surface.at(idx)
                for surface in self.surface_list]
    def non_point_surfaces_at(self, idx):
        """Return list of non-point surfaces (from left to right) at given
        index that make up the ground element.

        Parameters
        ----------
        idx : int
            Index of interest

        Returns
        -------
        list of :py:class:`~pvfactors.geometry.base.PVSurface`
        """
        # Surfaces shorter than the tolerance are considered points
        return [surface.at(idx)
                for surface in self.surface_list
                if surface.length[idx] > DISTANCE_TOLERANCE]
    def get_param_weighted(self, param):
        """Get timeseries parameter for the ground element, after weighting by
        surface length.

        Parameters
        ----------
        param : str
            Name of parameter

        Returns
        -------
        np.ndarray
            Weighted parameter values
        """
        return self.get_param_ww(param) / self.length
    def get_param_ww(self, param):
        """Get timeseries parameter from the ground element with weight,
        i.e. after multiplying by the surface lengths.

        Parameters
        ----------
        param: str
            Surface parameter to return

        Returns
        -------
        np.ndarray
            Timeseries parameter values multiplied by weights

        Raises
        ------
        KeyError
            if parameter name not in a surface parameters
        """
        value = 0.
        for ts_surf in self.surface_list:
            value += ts_surf.length * ts_surf.get_param(param)
        return value
    def _create_all_ts_surfaces(self, list_ordered_cut_pts):
        """Create all the n+1 timeseries surfaces that make up the timeseries
        ground element, and which are located in the n+1 zones defined by
        the n cut points.

        Parameters
        ----------
        list_ordered_cut_pts : list of :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            List of timeseries coordinates of all cut points, ordered from
            left to right
        """
        # Initialize dict: one entry per cut point, each holding the surfaces
        # located on its left and on its right
        self.surface_dict = {i: {'right': [], 'left': []}
                             for i in range(len(list_ordered_cut_pts))}
        n_cut_pts = len(list_ordered_cut_pts)
        # Sweep from left to right, peeling off the portion left of each
        # cut point; next_coords holds the remaining (right) portion
        next_coords = self.coords
        for idx_pt, cut_pt_coords in enumerate(list_ordered_cut_pts):
            # Get coords on left of cut pt
            coords_left = self._coords_left_of_cut_point(next_coords,
                                                         cut_pt_coords)
            # Save that surface in the required structures
            surface_left = TsSurface(coords_left, param_names=self.param_names,
                                     shaded=self.shaded)
            self.surface_list.append(surface_left)
            # This surface is left of the current and all later cut points...
            for i in range(idx_pt, n_cut_pts):
                self.surface_dict[i]['left'].append(surface_left)
            # ...and right of all earlier cut points
            for j in range(0, idx_pt):
                self.surface_dict[j]['right'].append(surface_left)
            next_coords = self._coords_right_of_cut_point(next_coords,
                                                          cut_pt_coords)
        # Save the right most portion
        next_surface = TsSurface(next_coords, param_names=self.param_names,
                                 shaded=self.shaded)
        self.surface_list.append(next_surface)
        # The rightmost surface is right of every cut point
        for j in range(0, n_cut_pts):
            self.surface_dict[j]['right'].append(next_surface)
    @staticmethod
    def _coords_right_of_cut_point(coords, cut_pt_coords):
        """Calculate timeseries line coordinates that are right of the given
        cut point coordinates, but still within the ground area

        Parameters
        ----------
        coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Original timeseries coordinates
        cut_pt_coords :
            :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Timeseries coordinates of cut point

        Returns
        -------
        :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Timeseries line coordinates that are located right of the cut
            point
        """
        # Deep copy so the original coordinates are not mutated
        coords = deepcopy(coords)
        # FIXME: should be using x_min x_max inputs instead of global constant
        # Clamp both boundary x values into [cut point x, MAX_X_GROUND]
        coords.b1.x = np.maximum(coords.b1.x, cut_pt_coords.x)
        coords.b1.x = np.minimum(coords.b1.x, MAX_X_GROUND)
        coords.b2.x = np.maximum(coords.b2.x, cut_pt_coords.x)
        coords.b2.x = np.minimum(coords.b2.x, MAX_X_GROUND)
        return coords
    @staticmethod
    def _coords_left_of_cut_point(coords, cut_pt_coords):
        """Calculate timeseries line coordinates that are left of the given
        cut point coordinates, but still within the ground area

        Parameters
        ----------
        coords : :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Original timeseries coordinates
        cut_pt_coords :
            :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Timeseries coordinates of cut point

        Returns
        -------
        :py:class:`~pvfactors.geometry.timeseries.TsLineCoords`
            Timeseries line coordinates that are located left of the cut
            point
        """
        # Deep copy so the original coordinates are not mutated
        coords = deepcopy(coords)
        # FIXME: should be using x_min x_max inputs instead of global constant
        # Clamp both boundary x values into [MIN_X_GROUND, cut point x]
        coords.b1.x = np.minimum(coords.b1.x, cut_pt_coords.x)
        coords.b1.x = np.maximum(coords.b1.x, MIN_X_GROUND)
        coords.b2.x = np.minimum(coords.b2.x, cut_pt_coords.x)
        coords.b2.x = np.maximum(coords.b2.x, MIN_X_GROUND)
        return coords
class PVGround(BaseSide):
    """Class that defines the ground geometry in PV arrays."""
    def __init__(self, list_segments=None, original_linestring=None):
        """Initialize PV ground geometry.

        Parameters
        ----------
        list_segments : list of :py:class:`~pvfactors.geometry.base.PVSegment`, optional
            List of PV segments that will constitute the ground (Default = [])
        original_linestring : :py:class:`shapely.geometry.LineString`, optional
            Full continuous linestring that the ground will be made of
            (Default = None)
        """
        list_segments = list_segments or []
        self.original_linestring = original_linestring
        super(PVGround, self).__init__(list_segments)
    @classmethod
    def as_flat(cls, x_min_max=None, shaded=False, y_ground=Y_GROUND,
                param_names=None):
        """Build a horizontal flat ground surface, made of 1 PV segment.

        Parameters
        ----------
        x_min_max : tuple, optional
            List of minimum and maximum x coordinates for the flat surface [m]
            (Default = None)
        shaded : bool, optional
            Shaded status of the created PV surfaces (Default = False)
        y_ground : float, optional
            Location of flat ground on y axis in [m] (Default = Y_GROUND)
        param_names : list of str, optional
            Names of the surface parameters, eg reflectivity, total incident
            irradiance, temperature, etc. (Default = [])

        Returns
        -------
        PVGround object
        """
        param_names = param_names or []
        # Get ground boundaries
        if x_min_max is None:
            x_min, x_max = MIN_X_GROUND, MAX_X_GROUND
        else:
            x_min, x_max = x_min_max
        # Create PV segment for flat ground
        coords = [(x_min, y_ground), (x_max, y_ground)]
        # Normal vector [0, 1] points up, away from the ground
        seg = PVSegment.from_linestring_coords(coords, shaded=shaded,
                                               normal_vector=[0., 1.],
                                               param_names=param_names)
        return cls(list_segments=[seg], original_linestring=LineString(coords))
    @classmethod
    def from_lists_surfaces(
            cls, list_shaded_surfaces, list_illum_surfaces, x_min_max=None,
            y_ground=Y_GROUND, param_names=None):
        """Create ground from lists of shaded and illuminated PV surfaces.

        Parameters
        ----------
        list_shaded_surfaces : \
            list of :py:class:`~pvfactors.geometry.base.PVSurface`
            List of shaded ground PV surfaces
        list_illum_surfaces : \
            list of :py:class:`~pvfactors.geometry.base.PVSurface`
            List of illuminated ground PV surfaces
        x_min_max : tuple, optional
            List of minimum and maximum x coordinates for the flat surface [m]
            (Default = None)
        y_ground : float, optional
            Location of flat ground on y axis in [m] (Default = Y_GROUND)
        param_names : list of str, optional
            Names of the surface parameters, eg reflectivity, total incident
            irradiance, temperature, etc. (Default = [])

        Returns
        -------
        PVGround object
        """
        param_names = param_names or []
        # Get ground boundaries
        if x_min_max is None:
            x_min, x_max = MIN_X_GROUND, MAX_X_GROUND
        else:
            x_min, x_max = x_min_max
        full_extent_coords = [(x_min, y_ground), (x_max, y_ground)]
        # Create the shade collections
        shaded_collection = ShadeCollection(
            list_surfaces=list_shaded_surfaces, shaded=True,
            param_names=param_names)
        illum_collection = ShadeCollection(
            list_surfaces=list_illum_surfaces, shaded=False,
            param_names=param_names)
        # Create the ground segment
        segment = PVSegment(illum_collection=illum_collection,
                            shaded_collection=shaded_collection)
        return cls(list_segments=[segment],
                   original_linestring=LineString(full_extent_coords))
    @property
    def boundary(self):
        """Boundaries of the ground's original linestring."""
        return self.original_linestring.boundary
| bsd-3-clause |
pasqualguerrero/django | django/contrib/auth/tokens.py | 429 | 2803 | from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
    """
    Strategy object used to generate and check tokens for the password
    reset mechanism.

    Tokens have the form "<days-base36>-<truncated-hmac>" and are
    invalidated by any change to the user's password or last_login, or by
    the expiry of PASSWORD_RESET_TIMEOUT_DAYS.
    """
    key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
    def make_token(self, user):
        """
        Returns a token that can be used once to do a password reset
        for the given user.
        """
        return self._make_token_with_timestamp(user, self._num_days(self._today()))
    def check_token(self, user, token):
        """
        Check that a password reset token is correct for a given user.
        Returns True if valid, False otherwise (never raises on bad input).
        """
        # Parse the token
        try:
            ts_b36, hash = token.split("-")
        except ValueError:
            return False
        try:
            ts = base36_to_int(ts_b36)
        except ValueError:
            return False
        # Check that the timestamp/uid has not been tampered with
        # (constant-time comparison avoids leaking info via timing)
        if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
            return False
        # Check the timestamp is within limit
        if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
            return False
        return True
    def _make_token_with_timestamp(self, user, timestamp):
        # timestamp is number of days since 2001-1-1. Converted to
        # base 36, this gives us a 3 digit string until about 2121
        ts_b36 = int_to_base36(timestamp)
        # By hashing on the internal state of the user and using state
        # that is sure to change (the password salt will change as soon as
        # the password is set, at least for current Django auth, and
        # last_login will also change), we produce a hash that will be
        # invalid as soon as it is used.
        # We limit the hash to 20 chars to keep URL short
        hash = salted_hmac(
            self.key_salt,
            self._make_hash_value(user, timestamp),
        ).hexdigest()[::2]
        return "%s-%s" % (ts_b36, hash)
    def _make_hash_value(self, user, timestamp):
        # Ensure results are consistent across DB backends
        # (strip microseconds and timezone info from last_login)
        login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
        return (
            six.text_type(user.pk) + user.password +
            six.text_type(login_timestamp) + six.text_type(timestamp)
        )
    def _num_days(self, dt):
        # Days elapsed since the 2001-1-1 epoch used by the token scheme
        return (dt - date(2001, 1, 1)).days
    def _today(self):
        # Used for mocking in tests
        return date.today()
default_token_generator = PasswordResetTokenGenerator()
| bsd-3-clause |
MauHernandez/cyclope | cyclope/migrations/0026_auto__chg_field_sitesettings_font_size.py | 2 | 13275 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: change 'SiteSettings.font_size' from a
        positive small integer to a decimal field (4 digits, 2 decimals)."""
        # Changing field 'SiteSettings.font_size'
        db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
    def backwards(self, orm):
        """Reverse the migration: restore 'SiteSettings.font_size' to a
        positive small integer field (may lose fractional values)."""
        # Changing field 'SiteSettings.font_size'
        db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
models = {
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'})
},
'cyclope.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.menu': {
'Meta': {'object_name': 'Menu'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cyclope.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_entries'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'custom_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'menu_items'", 'to': "orm['cyclope.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cyclope.MenuItem']"}),
'persistent_layout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site_home': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"})
},
'cyclope.regionview': {
'Meta': {'object_name': 'RegionView'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'region_views'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.sitesettings': {
'Meta': {'object_name': 'SiteSettings'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'YES'", 'max_length': '4'}),
'body_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'body_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'default_layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enable_abuse_reports': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_comments_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_follow_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_ratings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_share_buttons': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'font_size': ('django.db.models.fields.DecimalField', [], {'default': '12', 'max_digits': '4', 'decimal_places': '2'}),
'global_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'head_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hide_content_icons': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'moderate_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'newsletter_collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['collections.Collection']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'rss_content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'symmetrical': 'False'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'AUTHOR'", 'max_length': '6'}),
'show_head_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'social_follow_services': ('jsonfield.fields.JSONField', [], {'default': '\'[["twitter","USERNAME"],["facebook","USERNAME"],["google","USERNAME"],["flickr","USERNAME"],["linkedin","USERNAME"],["vimeo","USERNAME"],["youtube","USERNAME"]]\''}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'titles_custom_font': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'titles_font': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cyclope'] | gpl-3.0 |
ghickman/django | tests/serializers/test_natural.py | 13 | 9129 | from django.core import serializers
from django.db import connection
from django.test import TestCase
from .models import (
Child, FKAsPKNoNaturalKey, FKDataNaturalKey, NaturalKeyAnchor,
NaturalKeyThing, NaturalPKWithDefault,
)
from .tests import register_tests
class NaturalKeySerializerTests(TestCase):
    # Intentionally empty: test methods are attached dynamically by the
    # register_tests() calls at the bottom of this module, one per
    # installed serializer format.
    pass
def natural_key_serializer_test(self, format):
    """Round-trip objects that use natural foreign keys through *format*."""
    # Create the fixtures; constraint checks are disabled so the FK row can
    # be inserted in the same batch as its target.
    with connection.constraint_checks_disabled():
        test_objs = [
            NaturalKeyAnchor.objects.create(id=1100, data="Natural Key Anghor"),
            FKDataNaturalKey.objects.create(id=1101, data_id=1100),
            FKDataNaturalKey.objects.create(id=1102, data_id=None),
        ]
    # Serialize, then immediately deserialize and re-save everything.
    payload = serializers.serialize(format, test_objs, indent=2, use_natural_foreign_keys=True)
    for deserialized in serializers.deserialize(format, payload):
        deserialized.save()
    # Each round-tripped row must match its source object.
    for source in test_objs:
        stored = source.__class__.objects.get(id=source.pk)
        self.assertEqual(
            source.data, stored.data,
            "Objects with PK=%d not equal; expected '%s' (%s), got '%s' (%s)" % (
                source.pk, source.data, type(source.data), stored, type(stored.data),
            )
        )
def natural_key_test(self, format):
    """Deserializing with natural primary keys reattaches to existing rows."""
    book1 = {
        'data': '978-1590597255',
        'title': 'The Definitive Guide to Django: Web Development Done Right',
    }
    book2 = {'data': '978-1590599969', 'title': 'Practical Django Projects'}
    # Create the books.
    first_book = NaturalKeyAnchor.objects.create(**book1)
    second_book = NaturalKeyAnchor.objects.create(**book2)
    # Serialize the books with natural keys for both PKs and FKs.
    payload = serializers.serialize(
        format, NaturalKeyAnchor.objects.all(), indent=2,
        use_natural_foreign_keys=True, use_natural_primary_keys=True,
    )
    # Delete one book (to prove that the natural key generation will only
    # restore the primary keys of books found in the database via the
    # get_natural_key manager method).
    second_book.delete()
    # Deserialize and test.
    restored = list(serializers.deserialize(format, payload))
    self.assertEqual(len(restored), 2)
    self.assertEqual(restored[0].object.title, book1['title'])
    self.assertEqual(restored[0].object.pk, first_book.pk)
    self.assertEqual(restored[1].object.title, book2['title'])
    self.assertIsNone(restored[1].object.pk)
def natural_pk_mti_test(self, format):
    """
    If serializing objects in a multi-table inheritance relationship using
    natural primary keys, the natural foreign key for the parent is output in
    the fields of the child so it's possible to relate the child to the parent
    when deserializing.
    """
    first_child = Child.objects.create(parent_data='1', child_data='1')
    second_child = Child.objects.create(parent_data='2', child_data='2')
    payload = serializers.serialize(
        format,
        [first_child.parent_ptr, second_child.parent_ptr, second_child, first_child],
        use_natural_foreign_keys=True, use_natural_primary_keys=True,
    )
    first_child.delete()
    second_child.delete()
    for deserialized in serializers.deserialize(format, payload):
        deserialized.save()
    restored_children = Child.objects.all()
    self.assertEqual(len(restored_children), 2)
    for child in restored_children:
        # If it's possible to find the superclass from the subclass and it's
        # the correct superclass, it's working.
        self.assertEqual(child.child_data, child.parent_data)
def forward_ref_fk_test(self, format):
    """Forward FK references resolve once both objects have been loaded."""
    thing_a = NaturalKeyThing.objects.create(key='t1')
    thing_b = NaturalKeyThing.objects.create(key='t2', other_thing=thing_a)
    thing_a.other_thing = thing_b
    thing_a.save()
    payload = serializers.serialize(
        format, [thing_a, thing_b], use_natural_primary_keys=True,
        use_natural_foreign_keys=True,
    )
    NaturalKeyThing.objects.all().delete()
    # First pass: save everything, collecting objects whose references could
    # not yet be resolved.
    deferred = []
    for deserialized in serializers.deserialize(format, payload, handle_forward_references=True):
        deserialized.save()
        if deserialized.deferred_fields:
            deferred.append(deserialized)
    # Second pass: fill in the deferred references.
    for deserialized in deferred:
        deserialized.save_deferred_fields()
    thing_a = NaturalKeyThing.objects.get(key='t1')
    thing_b = NaturalKeyThing.objects.get(key='t2')
    self.assertEqual(thing_a.other_thing, thing_b)
    self.assertEqual(thing_b.other_thing, thing_a)
def forward_ref_fk_with_error_test(self, format):
    """An unresolvable forward FK reference raises DeserializationError."""
    thing_a = NaturalKeyThing.objects.create(key='t1')
    thing_b = NaturalKeyThing.objects.create(key='t2', other_thing=thing_a)
    thing_a.other_thing = thing_b
    thing_a.save()
    # Serialize only t1, so its reference to t2 can never be resolved.
    payload = serializers.serialize(
        format, [thing_a], use_natural_primary_keys=True,
        use_natural_foreign_keys=True,
    )
    NaturalKeyThing.objects.all().delete()
    deferred = []
    for deserialized in serializers.deserialize(format, payload, handle_forward_references=True):
        deserialized.save()
        if deserialized.deferred_fields:
            deferred.append(deserialized)
    pending = deferred[0]
    msg = 'NaturalKeyThing matching query does not exist'
    with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
        pending.save_deferred_fields()
def forward_ref_m2m_test(self, format):
    """Forward M2M references resolve on the deferred second pass."""
    thing_a = NaturalKeyThing.objects.create(key='t1')
    thing_b = NaturalKeyThing.objects.create(key='t2')
    thing_c = NaturalKeyThing.objects.create(key='t3')
    thing_a.other_things.set([thing_b, thing_c])
    payload = serializers.serialize(
        format, [thing_a, thing_b, thing_c], use_natural_primary_keys=True,
        use_natural_foreign_keys=True,
    )
    NaturalKeyThing.objects.all().delete()
    deferred = []
    for deserialized in serializers.deserialize(format, payload, handle_forward_references=True):
        deserialized.save()
        if deserialized.deferred_fields:
            deferred.append(deserialized)
    for deserialized in deferred:
        deserialized.save_deferred_fields()
    thing_a = NaturalKeyThing.objects.get(key='t1')
    thing_b = NaturalKeyThing.objects.get(key='t2')
    thing_c = NaturalKeyThing.objects.get(key='t3')
    self.assertCountEqual(thing_a.other_things.all(), [thing_b, thing_c])
def forward_ref_m2m_with_error_test(self, format):
    """An unresolvable forward M2M reference raises DeserializationError."""
    thing_a = NaturalKeyThing.objects.create(key='t1')
    thing_b = NaturalKeyThing.objects.create(key='t2')
    thing_c = NaturalKeyThing.objects.create(key='t3')
    thing_a.other_things.set([thing_b, thing_c])
    thing_a.save()
    # t3 is left out of the payload, so t1's M2M can never fully resolve.
    payload = serializers.serialize(
        format, [thing_a, thing_b], use_natural_primary_keys=True,
        use_natural_foreign_keys=True,
    )
    NaturalKeyThing.objects.all().delete()
    deferred = []
    for deserialized in serializers.deserialize(format, payload, handle_forward_references=True):
        deserialized.save()
        if deserialized.deferred_fields:
            deferred.append(deserialized)
    pending = deferred[0]
    msg = 'NaturalKeyThing matching query does not exist'
    with self.assertRaisesMessage(serializers.base.DeserializationError, msg):
        pending.save_deferred_fields()
def pk_with_default(self, format):
    """
    The deserializer works with natural keys when the primary key has a default
    value.
    """
    created = NaturalPKWithDefault.objects.create(name='name')
    payload = serializers.serialize(
        format, NaturalPKWithDefault.objects.all(), use_natural_foreign_keys=True,
        use_natural_primary_keys=True,
    )
    restored = list(serializers.deserialize(format, payload))
    self.assertEqual(len(restored), 1)
    self.assertEqual(restored[0].object.pk, created.pk)
def fk_as_pk_natural_key_not_called(self, format):
    """
    The deserializer doesn't rely on natural keys when a model has a custom
    primary key that is a ForeignKey.
    """
    anchor = NaturalKeyAnchor.objects.create(data='978-1590599969')
    dependent = FKAsPKNoNaturalKey.objects.create(pk_fk=anchor)
    payload = serializers.serialize(format, [anchor, dependent])
    restored = list(serializers.deserialize(format, payload))
    self.assertEqual(len(restored), 2)
    for deserialized in restored:
        self.assertEqual(deserialized.object.pk, anchor.pk)
# Dynamically register tests for each serializer: one test method per
# (name pattern, implementation) pair, expanded per installed format.
_NATURAL_KEY_TESTS = (
    ('test_%s_natural_key_serializer', natural_key_serializer_test),
    ('test_%s_serializer_natural_keys', natural_key_test),
    ('test_%s_serializer_natural_pks_mti', natural_pk_mti_test),
    ('test_%s_forward_references_fks', forward_ref_fk_test),
    ('test_%s_forward_references_fk_errors', forward_ref_fk_with_error_test),
    ('test_%s_forward_references_m2ms', forward_ref_m2m_test),
    ('test_%s_forward_references_m2m_errors', forward_ref_m2m_with_error_test),
    ('test_%s_pk_with_default', pk_with_default),
    ('test_%s_fk_as_pk_natural_key_not_called', fk_as_pk_natural_key_not_called),
)
for _name_pattern, _test_func in _NATURAL_KEY_TESTS:
    register_tests(NaturalKeySerializerTests, _name_pattern, _test_func)
| bsd-3-clause |
KevinXuxuxu/datahub_lsems | src/service/handler.py | 3 | 5474 | import hashlib
from account.manager import *
from core.db.connection import DataHubConnection
from core.db.manager import DataHubManager
from datahub import DataHub
from datahub.constants import *
from datahub.account.constants import *
'''
@author: anant bhardwaj
@date: Oct 9, 2013
DataHub Handler
'''
def construct_result_set(res):
    """Convert a DataHubManager result dict into a thrift ResultSet.

    ``res`` is expected to carry 'status', 'row_count', 'tuples' and
    'fields' keys (the shape produced by DataHubManager -- TODO confirm).
    """
    # Every cell is coerced with bytes() (an alias of str on Python 2)
    # because the thrift structs carry opaque byte strings, not typed values.
    tuples = [Tuple(
        cells=[bytes(val) for val in t]) for t in res['tuples']]
    field_names = [bytes(field['name']) for field in res['fields']]
    field_types = [bytes(field['type']) for field in res['fields']]
    return ResultSet(status=res['status'],
        num_tuples=res['row_count'],
        num_more_tuples=0,
        tuples=tuples,
        field_names=field_names,
        field_types=field_types)
class DataHubHandler:
    """Thrift request handler implementing the DataHub service API.

    Each RPC delegates to DataHubManager and converts the resulting dict
    into a thrift ResultSet via construct_result_set(). Database failures
    are re-raised as DBException; account failures as AccountException.

    Fix: use ``except ... as e`` (valid on Python 2.6+ and required on
    Python 3) instead of the Python-2-only ``except Exception, e`` form.
    """

    def __init__(self):
        # Reserved for future per-connection session state; currently unused.
        self.sessions = {}

    def get_version(self):
        """Return the service API version constant."""
        return VERSION

    def open_connection(self, con_params):
        """Authenticate with user or app credentials and return a Connection.

        Raises DBException if authentication (or anything else) fails.
        """
        try:
            # Default the repo base to the user name unless explicitly given.
            repo_base = con_params.user
            if con_params.repo_base and con_params.repo_base != '':
                repo_base = con_params.repo_base
            user = ''
            is_app = False
            # SECURITY NOTE: credentials are hashed with unsalted SHA-1 to
            # match the stored hash; a salted KDF (bcrypt/PBKDF2) would be
            # preferable if the storage format can be migrated.
            if con_params.user:
                user = con_params.user
                DataHubConnection(
                    user=con_params.user,
                    password=hashlib.sha1(con_params.password).hexdigest(),
                    repo_base=repo_base)
            else:
                user = con_params.app_id
                is_app = True
                DataHubConnection(
                    user=con_params.app_id,
                    password=hashlib.sha1(con_params.app_token).hexdigest(),
                    repo_base=repo_base)
            # NOTE: privilege check deliberately disabled (kept for reference).
            '''
            res = DataHubManager.has_base_privilege(user, repo_base, 'CONNECT')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            '''
            con = Connection(
                user=user,
                is_app=is_app,
                repo_base=repo_base)
            return con
        except Exception as e:
            raise DBException(message=str(e))

    def create_repo(self, con, repo_name):
        """Create repo *repo_name* in the connection's repo base."""
        try:
            # NOTE: privilege check deliberately disabled (kept for reference).
            '''
            res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CREATE')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            '''
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.create_repo(repo=repo_name)
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def list_repos(self, con):
        """List all repos in the connection's repo base."""
        try:
            # NOTE: privilege check deliberately disabled (kept for reference).
            '''
            res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CONNECT')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            '''
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.list_repos()
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def delete_repo(self, con, repo_name, force_if_non_empty):
        """Delete repo *repo_name*; drop contents when *force_if_non_empty*."""
        try:
            # NOTE: privilege checks deliberately disabled (kept for reference).
            '''
            res = DataHubManager.has_base_privilege(con.user, con.repo_base, 'CREATE')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            res = DataHubManager.has_repo_privilege(con.user, con.repo_base, repo_name, 'CREATE')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            '''
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.delete_repo(repo=repo_name, force=force_if_non_empty)
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def list_tables(self, con, repo_name):
        """List the tables contained in repo *repo_name*."""
        try:
            # NOTE: privilege check deliberately disabled (kept for reference).
            '''
            res = DataHubManager.has_repo_privilege(con.user, con.repo_base, repo_name, 'USAGE')
            if not (res and res['tuples'][0][0]):
                raise Exception('Access denied. Missing required privileges.')
            '''
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.list_tables(repo=repo_name)
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def get_schema(self, con, table_name):
        """Return the column schema of *table_name*."""
        try:
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.get_schema(table=table_name)
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def execute_sql(self, con, query, query_params=None):
        """Execute a raw SQL *query* (with optional params) and return results."""
        try:
            manager = DataHubManager(user=con.repo_base, repo_base=con.repo_base)
            res = manager.execute_sql(query=query, params=query_params)
            return construct_result_set(res)
        except Exception as e:
            raise DBException(message=str(e))

    def create_account(
            self, username, email, password, repo_name, app_id=None, app_token=None):
        """Register a new account; raises AccountException on failure."""
        try:
            account_register(
                username=username,
                email=email,
                password=password,
                repo_name=repo_name,
                app_id=app_id,
                app_token=app_token)
            return True
        except Exception as e:
            raise AccountException(message=str(e))

    def remove_account(self, username, app_id=None, app_token=None):
        """Delete an existing account; raises AccountException on failure."""
        try:
            account_remove(
                username=username,
                app_id=app_id,
                app_token=app_token)
            return True
        except Exception as e:
            raise AccountException(message=str(e))
| mit |
ghickman/django | django/db/models/options.py | 13 | 36791 | import bisect
import copy
import inspect
from collections import defaultdict
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured
from django.db import connections
from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint
from django.db.models.query_utils import PathInfo
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.text import camel_case_to_spaces, format_lazy
from django.utils.translation import override
# Sentinel/empty values shared by the field-lookup machinery later in this
# module (usage is outside this excerpt -- see _get_fields/_relation_tree).
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = ()
# Message attached to the ImmutableList instances produced by
# make_immutable_fields_list(); %s is the property name.
IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)
# Option names recognized on a model's inner ``class Meta``; anything else
# makes contribute_to_class() raise TypeError.
DEFAULT_NAMES = (
    'verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
    'unique_together', 'permissions', 'get_latest_by', 'order_with_respect_to',
    'app_label', 'db_tablespace', 'abstract', 'managed', 'proxy', 'swappable',
    'auto_created', 'index_together', 'apps', 'default_permissions',
    'select_on_save', 'default_related_name', 'required_db_features',
    'required_db_vendor', 'base_manager_name', 'default_manager_name',
    'indexes', 'constraints',
)
def normalize_together(option_together):
    """
    option_together can be either a tuple of tuples, or a single
    tuple of two strings. Normalize it to a tuple of tuples, so that
    calling code can uniformly expect that.

    Invalid values are returned unchanged so the check framework can
    report them later.
    """
    try:
        if not option_together:
            return ()
        if not isinstance(option_together, (tuple, list)):
            raise TypeError
        head = option_together[0]
        # A flat ("a", "b") pair is promoted to the nested form.
        pairs = option_together if isinstance(head, (tuple, list)) else (option_together,)
        return tuple(tuple(pair) for pair in pairs)
    except TypeError:
        # Malformed input: hand it back verbatim for the checks to flag.
        return option_together
def make_immutable_fields_list(name, data):
    # Wrap *data* in an ImmutableList so accidental mutation of a cached
    # field list raises a warning that names the offending property.
    return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
class Options:
    """Model metadata container; attached to each model class as ``_meta``
    by contribute_to_class()."""
    # Cached property names derived from forward fields; cleared by
    # _expire_cache(reverse=False) when a field or manager is added.
    FORWARD_PROPERTIES = {
        'fields', 'many_to_many', 'concrete_fields', 'local_concrete_fields',
        '_forward_fields_map', 'managers', 'managers_map', 'base_manager',
        'default_manager',
    }
    # Cached property names derived from reverse relations.
    REVERSE_PROPERTIES = {'related_objects', 'fields_map', '_relation_tree'}
    # Default app registry; instances may override self.apps via Meta.
    default_apps = apps
    def __init__(self, meta, app_label=None):
        """Initialize every option to its default; real values are applied
        later by contribute_to_class() and _prepare()."""
        # Per-arguments cache for _get_fields().
        self._get_fields_cache = {}
        self.local_fields = []
        self.local_many_to_many = []
        self.private_fields = []
        self.local_managers = []
        self.base_manager_name = None
        self.default_manager_name = None
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.indexes = []
        self.constraints = []
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete', 'view')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        # The raw inner ``class Meta`` (or None); consumed and then deleted
        # by contribute_to_class().
        self.meta = meta
        self.pk = None
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = {}
        self.auto_created = False
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = self.default_apps
        self.default_related_name = None
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
    @property
    def app_config(self):
        """Return the AppConfig registered for this model's app, or None."""
        # Don't go through get_app_config to avoid triggering imports.
        return self.apps.app_configs.get(self.app_label)
    @property
    def installed(self):
        # A model counts as installed when its app has a registered AppConfig.
        return self.app_config is not None
    def contribute_to_class(self, cls, name):
        """Attach this Options instance to model class *cls* as ``_meta`` and
        apply any overrides declared on the model's inner ``class Meta``.

        Raises TypeError if Meta carries attributes outside DEFAULT_NAMES.
        """
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                # Pop recognized names out of meta_attrs; whatever remains
                # afterwards is an invalid Meta attribute.
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    # Inherited Meta attributes live on the class, not in
                    # __dict__, so fall back to hasattr/getattr.
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # App label/class name interpolation for names of constraints and
            # indexes.
            if not getattr(cls._meta, 'abstract', False):
                for attr_name in {'constraints', 'indexes'}:
                    objs = getattr(self, attr_name, [])
                    setattr(self, attr_name, self._format_names_with_class(cls, objs))
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs))
        else:
            self.verbose_name_plural = format_lazy('{}s', self.verbose_name)
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _format_names_with_class(self, cls, objs):
"""App label/class name interpolation for object names."""
new_objs = []
for obj in objs:
obj = obj.clone()
obj.name = obj.name % {
'app_label': cls._meta.app_label.lower(),
'class': cls.__name__.lower(),
}
new_objs.append(obj)
return new_objs
    def _get_default_pk_class(self):
        """Resolve and validate the AutoField subclass used for implicit
        primary keys, honoring a per-app override of DEFAULT_AUTO_FIELD."""
        pk_class_path = getattr(
            self.app_config,
            'default_auto_field',
            settings.DEFAULT_AUTO_FIELD,
        )
        # Name the setting's origin so error messages point at the right place.
        if self.app_config and self.app_config._is_default_auto_field_overridden:
            app_config_class = type(self.app_config)
            source = (
                f'{app_config_class.__module__}.'
                f'{app_config_class.__qualname__}.default_auto_field'
            )
        else:
            source = 'DEFAULT_AUTO_FIELD'
        if not pk_class_path:
            raise ImproperlyConfigured(f'{source} must not be empty.')
        try:
            pk_class = import_string(pk_class_path)
        except ImportError as e:
            msg = (
                f"{source} refers to the module '{pk_class_path}' that could "
                f"not be imported."
            )
            raise ImproperlyConfigured(msg) from e
        if not issubclass(pk_class, AutoField):
            raise ValueError(
                f"Primary key '{pk_class_path}' referred by {source} must "
                f"subclass AutoField."
            )
        return pk_class
    def _prepare(self, model):
        """Finalize options once all of *model*'s fields are known: resolve
        order_with_respect_to and guarantee the model has a primary key."""
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, query))
            self.ordering = ('_order',)
            # Add the hidden ordering column unless it already exists.
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(iter(self.parents.values()))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                # No parent to promote: synthesize the default auto pk.
                pk_class = self._get_default_pk_class()
                auto = pk_class(verbose_name='ID', primary_key=True, auto_created=True)
                model.add_to_class('id', auto)
    def add_manager(self, manager):
        """Register *manager* on this model and drop cached manager lists."""
        self.local_managers.append(manager)
        self._expire_cache()
    def add_field(self, field, private=False):
        """File *field* into the appropriate per-kind list and invalidate the
        caches that depend on the field set."""
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if private:
            self.private_fields.append(field)
        elif field.is_relation and field.many_to_many:
            bisect.insort(self.local_many_to_many, field)
        else:
            bisect.insort(self.local_fields, field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                # The remote side may still be a string reference without
                # _meta at this point; nothing to expire yet.
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
    def setup_proxy(self, target):
        """
        Do the internal setup so that the current model is a proxy for
        "target".
        """
        # A proxy shares its target's primary key and database table.
        self.pk = target._meta.pk
        self.proxy_for_model = target
        self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
    def __str__(self):
        # The lower-cased "app_label.model_name" label doubles as str().
        return self.label_lower
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, str):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
    @property
    def verbose_name_raw(self):
        """Return the untranslated verbose name."""
        # Deactivating translation makes a lazy verbose_name render its
        # source-language text.
        with override(None):
            return str(self.verbose_name)
    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                # A model swapped for itself is not considered swapped.
                if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
                    return swapped_for
        return None
    @cached_property
    def managers(self):
        """Return all managers of this model and its ancestors, ordered by
        MRO depth then creation counter; a name seen on a closer class
        shadows the same name on a more distant ancestor."""
        managers = []
        seen_managers = set()
        bases = (b for b in self.model.mro() if hasattr(b, '_meta'))
        for depth, base in enumerate(bases):
            for manager in base._meta.local_managers:
                if manager.name in seen_managers:
                    continue
                # Copy so the manager bound to this model does not mutate the
                # instance declared on the ancestor class.
                manager = copy.copy(manager)
                manager.model = self.model
                seen_managers.add(manager.name)
                managers.append((depth, manager.creation_counter, manager))
        return make_immutable_fields_list(
            "managers",
            (m[2] for m in sorted(managers)),
        )
    @cached_property
    def managers_map(self):
        """Map manager names to manager instances for O(1) lookup by name."""
        return {manager.name: manager for manager in self.managers}
    @cached_property
    def base_manager(self):
        """Return the manager configured via Meta.base_manager_name (possibly
        inherited), or an auto-created plain Manager as a fallback."""
        base_manager_name = self.base_manager_name
        if not base_manager_name:
            # Get the first parent's base_manager_name if there's one.
            for parent in self.model.mro()[1:]:
                if hasattr(parent, '_meta'):
                    if parent._base_manager.name != '_base_manager':
                        base_manager_name = parent._base_manager.name
                    break
        if base_manager_name:
            try:
                return self.managers_map[base_manager_name]
            except KeyError:
                raise ValueError(
                    "%s has no manager named %r" % (
                        self.object_name,
                        base_manager_name,
                    )
                )
        # Nothing configured anywhere: synthesize a plain Manager.
        manager = Manager()
        manager.name = '_base_manager'
        manager.model = self.model
        manager.auto_created = True
        return manager
    @cached_property
    def default_manager(self):
        """Return the manager configured via Meta.default_manager_name
        (possibly inherited), else the first declared manager, else None."""
        default_manager_name = self.default_manager_name
        if not default_manager_name and not self.local_managers:
            # Get the first parent's default_manager_name if there's one.
            for parent in self.model.mro()[1:]:
                if hasattr(parent, '_meta'):
                    default_manager_name = parent._meta.default_manager_name
                    break
        if default_manager_name:
            try:
                return self.managers_map[default_manager_name]
            except KeyError:
                raise ValueError(
                    "%s has no manager named %r" % (
                        self.object_name,
                        default_manager_name,
                    )
                )
        # Implicitly returns None when no managers exist at all.
        if self.managers:
            return self.managers[0]
    @cached_property
    def fields(self):
        """
        Return a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not private or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third filter is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        def is_not_an_m2m_field(f):
            return not (f.is_relation and f.many_to_many)
        def is_not_a_generic_relation(f):
            return not (f.is_relation and f.one_to_many)
        def is_not_a_generic_foreign_key(f):
            # Excludes many-to-one relations whose remote side has no model
            # attribute set (the shape of a generic foreign key).
            return not (
                f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
            )
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False)
             if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f))
        )
@cached_property
def concrete_fields(self):
"""
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@cached_property
def many_to_many(self):
"""
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many)
)
    @cached_property
    def related_objects(self):
        """
        Return all related objects pointing to the current model. The related
        objects can come from a one-to-one, one-to-many, or many-to-many field
        relation type.

        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        # Hidden relations are filtered out, except that the reverse side of
        # a many-to-many field is always kept even when hidden.
        return make_immutable_fields_list(
            "related_objects",
            (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many)
        )
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name):
"""
Return a field instance given the name of a forward or reverse field.
"""
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
return self._forward_fields_map[field_name]
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named '%s'. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
    def get_ancestor_link(self, ancestor):
        """
        Return the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.

        Return None (implicitly) if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent
                # links
                # (self.parents[parent] is None for proxies, hence the `or`).
                return self.parents[parent] or parent_link
    def get_path_to_parent(self, parent):
        """
        Return a list of PathInfos containing the path from the current
        model to the parent model, or an empty list if parent is not a
        parent of the current model.
        """
        if self.model is parent:
            return []
        # Skip the chain of proxy to the concrete proxied model.
        proxied_model = self.concrete_model
        path = []
        opts = self
        for int_model in self.get_base_chain(parent):
            if int_model is proxied_model:
                # Proxy hop: no join needed, just switch to its options.
                opts = int_model._meta
            else:
                # Concrete (multi-table) hop: join via the parent link field.
                final_field = opts.parents[int_model]
                targets = (final_field.remote_field.get_related_field(),)
                opts = int_model._meta
                path.append(PathInfo(
                    from_opts=final_field.model._meta,
                    to_opts=opts,
                    target_fields=targets,
                    join_field=final_field,
                    m2m=False,
                    direct=True,
                    filtered_relation=None,
                ))
        return path
    def get_path_from_parent(self, parent):
        """
        Return a list of PathInfos containing the path from the parent
        model to the current model, or an empty list if parent is not a
        parent of the current model.
        """
        if self.model is parent:
            return []
        model = self.concrete_model
        # Get a reversed base chain including both the current and parent
        # models.
        chain = model._meta.get_base_chain(parent)
        chain.reverse()
        chain.append(model)
        # Construct a list of the PathInfos between models in chain,
        # reversing each ancestor link to walk parent -> child.
        path = []
        for i, ancestor in enumerate(chain[:-1]):
            child = chain[i + 1]
            link = child._meta.get_ancestor_link(ancestor)
            path.extend(link.get_reverse_path_info())
        return path
    def _populate_directed_relation_graph(self):
        """
        This method is used by each model to find its reverse objects. As this
        method is very expensive and is accessed frequently (it looks up every
        field in a model, in every app), it is computed on first access and then
        is set as a property on every model.
        """
        related_objects_graph = defaultdict(list)

        all_models = self.apps.get_models(include_auto_created=True)
        for model in all_models:
            opts = model._meta
            # Abstract model's fields are copied to child models, hence we will
            # see the fields from the child models.
            if opts.abstract:
                continue
            fields_with_relations = (
                f for f in opts._get_fields(reverse=False, include_parents=False)
                if f.is_relation and f.related_model is not None
            )
            for f in fields_with_relations:
                # Skip string (lazy) references; only resolved models can be
                # keyed by their concrete label.
                if not isinstance(f.remote_field.model, str):
                    remote_label = f.remote_field.model._meta.concrete_model._meta.label
                    related_objects_graph[remote_label].append(f)
        for model in all_models:
            # Set the relation_tree using the internal __dict__. In this way
            # we avoid calling the cached property. In attribute lookup,
            # __dict__ takes precedence over a data descriptor (such as
            # @cached_property). This means that the _meta._relation_tree is
            # only called if related_objects is not in __dict__.
            related_objects = related_objects_graph[model._meta.concrete_model._meta.label]
            model._meta.__dict__['_relation_tree'] = related_objects
        # It seems it is possible that self is not in all_models, so guard
        # against that with default for get().
        return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
    @cached_property
    def _relation_tree(self):
        # Lazily computed list of fields on other models that point at this
        # model; see _populate_directed_relation_graph() for how it is built
        # (and cached directly into __dict__ for every model at once).
        return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
if forward:
for cache_key in self.FORWARD_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
if reverse and not self.abstract:
for cache_key in self.REVERSE_PROPERTIES:
if cache_key in self.__dict__:
delattr(self, cache_key)
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Return a list of fields associated to the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
    def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                    seen_models=None):
        """
        Internal helper function to return fields of the model.

        * If forward=True, then fields defined on this model are returned.
        * If reverse=True, then relations pointing to this model are returned.
        * If include_hidden=True, then fields with is_hidden=True are returned.
        * The include_parents argument toggles if fields from parent models
          should be included. It has three values: True, False, and
          PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
          fields defined for the current model or any of its parents in the
          parent chain to the model's concrete model.
        """
        if include_parents not in (True, False, PROXY_PARENTS):
            raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
        # This helper function is used to allow recursion in ``get_fields()``
        # implementation and to provide a fast way for Django's internals to
        # access specific subsets of fields.

        # We must keep track of which models we have already seen. Otherwise we
        # could include the same field multiple times from different models.
        topmost_call = seen_models is None
        if topmost_call:
            seen_models = set()
        seen_models.add(self.model)

        # Creates a cache key composed of all arguments (topmost_call matters
        # because private fields are only appended at the topmost level).
        cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)

        try:
            # In order to avoid list manipulation. Always return a shallow copy
            # of the results.
            return self._get_fields_cache[cache_key]
        except KeyError:
            pass

        fields = []
        # Recursively call _get_fields() on each parent, with the same
        # options provided in this call.
        if include_parents is not False:
            for parent in self.parents:
                # In diamond inheritance it is possible that we see the same
                # model from two different routes. In that case, avoid adding
                # fields from the same parent again.
                if parent in seen_models:
                    continue
                # PROXY_PARENTS: only follow parents within the proxy chain.
                if (parent._meta.concrete_model != self.concrete_model and
                        include_parents == PROXY_PARENTS):
                    continue
                for obj in parent._meta._get_fields(
                        forward=forward, reverse=reverse, include_parents=include_parents,
                        include_hidden=include_hidden, seen_models=seen_models):
                    # Drop inherited parent-link fields that don't belong to
                    # this concrete model.
                    if not getattr(obj, 'parent_link', False) or obj.model == self.concrete_model:
                        fields.append(obj)
        if reverse and not self.proxy:
            # Tree is computed once and cached until the app cache is expired.
            # It is composed of a list of fields pointing to the current model
            # from other models.
            all_fields = self._relation_tree
            for field in all_fields:
                # If hidden fields should be included or the relation is not
                # intentionally hidden, add to the fields dict.
                if include_hidden or not field.remote_field.hidden:
                    fields.append(field.remote_field)

        if forward:
            fields += self.local_fields
            fields += self.local_many_to_many
            # Private fields are recopied to each child model, and they get a
            # different model as field.model in each child. Hence we have to
            # add the private fields separately from the topmost call. If we
            # did this recursively similar to local_fields, we would get field
            # instances with field.model != self.model.
            if topmost_call:
                fields += self.private_fields

        # In order to avoid list manipulation. Always
        # return a shallow copy of the results
        fields = make_immutable_fields_list("get_fields()", fields)

        # Store result into cache for later access
        self._get_fields_cache[cache_key] = fields
        return fields
@cached_property
def total_unique_constraints(self):
"""
Return a list of total unique constraints. Useful for determining set
of fields guaranteed to be unique for all rows.
"""
return [
constraint
for constraint in self.constraints
if isinstance(constraint, UniqueConstraint) and constraint.condition is None
]
@cached_property
def _property_names(self):
"""Return a set of the names of the properties defined on the model."""
names = []
for name in dir(self.model):
attr = inspect.getattr_static(self.model, name)
if isinstance(attr, property):
names.append(name)
return frozenset(names)
    @cached_property
    def db_returning_fields(self):
        """
        Private API intended only to be used by Django itself.
        Fields to be returned after a database insert.
        """
        # PROXY_PARENTS: include fields from the proxy chain to the concrete
        # model, but not from concrete (multi-table) parents.
        return [
            field for field in self._get_fields(forward=True, reverse=False, include_parents=PROXY_PARENTS)
            if getattr(field, 'db_returning', False)
        ]
| bsd-3-clause |
omnirom/android_kernel_lge_x3 | tools/perf/scripts/python/netdev-times.py | 11266 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # every tracepoint event recorded by the callbacks below
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive events
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options (set by trace_begin() from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple (common prefix of every recorded event)
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert a nanosecond interval [src, dst] to milliseconds (float).
def diff_msec(src, dst):
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet.
# Columns: device, length, enqueue time, qdisc latency, driver latency.
def print_transmit(hunk):
    # Respect the dev= option: skip hunks for other devices.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing; the *JOINT strings draw the
# tree-like connectors between event lines in print_receive().
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq. `hunk` is one entry of receive_hunk_list, built in
# handle_irq_softirq_exit().
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']  # all offsets are relative to this
    # check if this hunk should be showed (dev= option filters by irq name)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First the hardware interrupts that raised the softirq...
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # ...then the NET_RX softirq itself and the events it processed.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' means the skb was copied to userspace; 'handle' means
            # it was freed (kfree_skb) or consumed (consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                            event['comm_t'])
            print PF_JOINT
def trace_begin():
    # Parse script options from sys.argv into the module-level flags.
    global show_tx
    global show_rx
    global dev
    global debug

    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # With no explicit direction requested, show both charts.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    # order all events in time
    # NOTE(review): cmp-style sort is Python 2 only, like the print
    # statements below; this script targets the py2 perf bindings.
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each to its handler, which incrementally
    # builds the rx/tx matching state.
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # debug option: report how full the matching buffers got.
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Callbacks invoked by perf for each matching tracepoint sample. They only
# record the event; all analysis happens later, in trace_end().
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Only NET_RX softirqs are relevant to this script.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): the *entry* event's symbol table is used to decode
    # `vec`; the vec symbols appear to be shared across softirq events.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): the *entry* event's symbol table is used to decode
    # `vec`; the vec symbols appear to be shared across softirq events.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    # Record a hardware-interrupt entry sample.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    # Record a hardware-interrupt exit sample (ret is the handler's result).
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    # Record a napi poll sample together with the device it serviced.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    # Record delivery of an skb into the protocol stack.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    # Record an skb queued for later processing via netif_rx().
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    # Record an skb being handed to the qdisc layer.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    # Record the driver transmit attempt (rc is the NETDEV_TX_* result).
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    # Record an skb being dropped/freed, with the freeing call site.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm,
         skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    # Record a successfully transmitted skb being released.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    # Record an skb being copied to a userspace receiver.
    all_event_list.append(
        (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    # Start tracking a hardware interrupt on its CPU's irq stack.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    irq_dic.setdefault(cpu, []).append(
        {'irq': irq, 'name': irq_name, 'cpu': cpu, 'irq_ent_t': time})
def handle_irq_handler_exit(event_info):
    # Close the irq record most recently opened on this CPU.
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched exit (nested/lost events): discard the popped record.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    # Attach a NET_RX raise marker to the interrupt currently open on
    # this CPU, if any.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if not irq_dic.get(cpu):
        return
    irq_record = irq_dic[cpu].pop()
    events = irq_record.get('event_list', [])
    events.append({'time': time, 'event': 'sirq_raise'})
    irq_record['event_list'] = events
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    # A NET_RX softirq began: open a fresh receive context for this CPU,
    # replacing any stale one.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    # The NET_RX softirq finished: combine the pending irq records and the
    # softirq's event list into one "receive hunk" for display.
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Only record a hunk when both halves (irqs and softirq events) exist.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    # Log a napi_poll event into the open NET_RX context, if any.
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic:
        net_rx_dic[cpu]['event_list'].append(
            {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    # Attach a netif_rx event to the interrupt currently open on this CPU.
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
        or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    # Record a received skb in the open NET_RX context and remember it in
    # rx_skb_list so later copy/free events can be matched to it.
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Enforce the buffer budget: drop the oldest entry and count it.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    # A packet entered the qdisc layer; remember it for later matching
    # against net_dev_xmit / free events.
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    queued_skb = {'dev': dev_name, 'skbaddr': skbaddr,
                  'len': skblen, 'queue_t': time}
    tx_queue_list.insert(0, queued_skb)
    # Enforce the buffer budget: drop the oldest entry and count it.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    # On a successful transmit, move the matching packet from the qdisc
    # list to the xmit list and stamp its xmit time.
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Enforce the budget on the destination list as well.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    # An skb was freed: match it, in order, against queued-tx packets
    # (dropped before xmit), transmitted packets (completed), and received
    # packets (freed without a userspace copy).
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                            'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    # A transmitted skb was consumed (freed): close out its lifetime by
    # moving it from the xmit list to the free list.
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for index, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[index]
            return
def handle_skb_copy_datagram_iovec(event_info):
    # Userspace copied this received skb: record who received it and when,
    # then stop tracking it.
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for index, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[index]
            return
| gpl-2.0 |
all-of-us/raw-data-repository | rdr_service/alembic/versions/f3fdb9d05ab3_bq_sync_pk_fix.py | 1 | 3626 | """bq_sync_pk_fix
Revision ID: f3fdb9d05ab3
Revises: 7d63fbc6d9ca
Create Date: 2019-08-14 12:10:16.423602
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
# down_revision links this migration to its parent in the version graph.
revision = 'f3fdb9d05ab3'
down_revision = '7d63fbc6d9ca'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Rekey bigquery_sync from participant_id to a generic (pk_id, project_id,
    dataset_id, table_id) tuple. Destructive: the table is truncated first."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute('TRUNCATE TABLE bigquery_sync') # We want to start over now, these are some big changes.
    op.add_column('bigquery_sync', sa.Column('pk_id', sa.Integer(), nullable=False))
    # Raw ALTER so the new column can be positioned right after `modified`.
    op.execute('ALTER TABLE bigquery_sync ADD COLUMN `project_id` VARCHAR(80) NOT NULL AFTER modified')
    # op.add_column('bigquery_sync', sa.Column('project_id', sa.String(length=80), nullable=True))
    # Drop the FK and index that referenced participant_id before renaming.
    op.drop_constraint(u'bigquery_sync_ibfk_1', 'bigquery_sync', type_='foreignkey')
    op.drop_index('ix_participant_ds_table', table_name='bigquery_sync')
    # Rename reserved-word-ish columns to *_id equivalents.
    op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `dataset` `dataset_id` VARCHAR(80) NOT NULL')
    op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `table` `table_id` VARCHAR(80) NOT NULL')
    op.create_index('ix_participant_ds_table', 'bigquery_sync', ['pk_id', 'project_id', 'dataset_id', 'table_id'], unique=False)
    op.drop_column('bigquery_sync', 'participant_id')
    # ### end Alembic commands ###
    pass
def downgrade_rdr():
    """Reverse upgrade_rdr: restore participant_id keying. Destructive: the
    table is truncated first, so synced rows are not recovered."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute('TRUNCATE TABLE bigquery_sync') # We want to start over now, these are some big changes.
    op.drop_index('ix_participant_ds_table', table_name='bigquery_sync')
    # Undo the *_id column renames.
    op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `dataset_id` `dataset` VARCHAR(80) NOT NULL')
    op.execute('ALTER TABLE bigquery_sync CHANGE COLUMN `table_id` `table` VARCHAR(80) NOT NULL')
    op.add_column('bigquery_sync',
                  sa.Column('participant_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False))
    # Restore the FK to participant and the original index definition.
    op.create_foreign_key(u'bigquery_sync_ibfk_1', 'bigquery_sync', 'participant', ['participant_id'],
                          ['participant_id'])
    op.create_index('ix_participant_ds_table', 'bigquery_sync', ['participant_id', 'dataset', 'table'], unique=False)
    op.drop_column('bigquery_sync', 'pk_id')
    op.drop_column('bigquery_sync', 'project_id')
    # ### end Alembic commands ###
    pass
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    pass
def downgrade_metrics():
    """No schema changes to undo for the metrics database in this revision."""
    pass
| bsd-3-clause |
keflavich/spectral-cube | spectral_cube/tests/test_analysis_functions.py | 3 | 13651 |
import pytest
import warnings
import numpy as np
import astropy.units as u
# from astropy.modeling import models, fitting
from ..analysis_utilities import stack_spectra, fourier_shift, stack_cube
from .utilities import generate_gaussian_cube, gaussian
from ..utils import BadVelocitiesWarning
def test_shift():
    """fourier_shift by an integer offset must match np.roll, both for a
    fully-sampled spectrum and for one containing NaN-masked channels."""
    amp = 1
    v0 = 0 * u.m / u.s
    sigma = 8
    spectral_axis = np.arange(-50, 51) * u.m / u.s

    true_spectrum = gaussian(spectral_axis.value,
                             amp, v0.value, sigma)

    # Shift is an integer, so rolling is equivalent
    rolled_spectrum = np.roll(true_spectrum, 10)

    shift_spectrum = fourier_shift(true_spectrum, 10)

    np.testing.assert_allclose(shift_spectrum,
                               rolled_spectrum,
                               rtol=1e-4)

    # With part masked
    masked_spectrum = true_spectrum.copy()
    mask = np.abs(spectral_axis.value) <= 30
    # np.nan, not np.NaN: the upper-case alias was removed in NumPy 2.0.
    masked_spectrum[~mask] = np.nan

    rolled_mask = np.roll(mask, 10)
    rolled_masked_spectrum = rolled_spectrum.copy()
    rolled_masked_spectrum[~rolled_mask] = np.nan

    shift_spectrum = fourier_shift(masked_spectrum, 10)

    np.testing.assert_allclose(shift_spectrum,
                               rolled_masked_spectrum,
                               rtol=1e-4)
def test_stacking(use_dask):
    """
    Stack a cube of identical, randomly offset Gaussian profiles and check
    that the result reproduces the noiseless input spectrum.
    """
    amp = 1.
    v0 = 0. * u.km / u.s
    sigma = 8.
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                        shape=shape, use_dask=use_dask)

    model_spectrum = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    # Shift-and-average every spectrum in the cube.
    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False, pad_edges=False)

    # The stack should match the noiseless model to high accuracy.
    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3

    # Now fit a Gaussian to the mean stacked profile.
    # fit_vals = fit_gaussian(stacked.spectral_axis.value, stacked.value)[0]
    # np.testing.assert_allclose(fit_vals, np.array([amp, v0.value, sigma]),
    #                            atol=1e-3)

    # The stacked spectrum should share the input cube's spectral axis.
    np.testing.assert_allclose(stacked.spectral_axis.value,
                               cube.spectral_axis.value)
def test_cube_stacking(use_dask):
    """
    Average a list of cubes with stack_cube.

    Both calls below stack copies of the same data, so the result must equal
    the matching spectral slab of the original cube.  Cubes with genuinely
    different frequency supports would make a more thorough test.
    """
    shape = (100, 25, 25)
    cube, _ = generate_gaussian_cube(amp=1., sigma=8., noise=None,
                                     shape=shape, use_dask=use_dask)

    cube_1ghz = cube.with_spectral_unit(u.GHz, rest_value=1*u.GHz, velocity_convention='radio')
    cube_2ghz = cube.with_spectral_unit(u.GHz, rest_value=2*u.GHz, velocity_convention='radio')

    vmin = -10*u.km/u.s
    vmax = 10*u.km/u.s

    # Two cubes, one line each.
    stacked = stack_cube([cube_1ghz, cube_2ghz], linelist=[1.,2.]*u.GHz,
                         vmin=vmin, vmax=vmax, average=np.nanmean,
                         convolve_beam=None, return_cutouts=False)

    np.testing.assert_allclose(stacked.filled_data[:],
                               cube.spectral_slab(vmin, vmax).filled_data[:])

    # One cube with two candidate lines; the 2 GHz one falls out of band.
    stacked = stack_cube(cube_1ghz, linelist=[1.,2.]*u.GHz,
                         vmin=vmin, vmax=vmax, average=np.nanmean,
                         convolve_beam=None, return_cutouts=False)

    np.testing.assert_allclose(stacked.filled_data[:],
                               cube.spectral_slab(vmin, vmax).filled_data[:])

    # TODO: add tests of multiple lines in the same cube
    # (this requires a different test cube setup)
def test_stacking_badvels(use_dask):
    """
    Regression test for #493: spectra at velocities outside the allowed
    range must be excluded from the stack (with a warning) rather than
    corrupting the result.
    """
    amp = 1.
    v0 = 0. * u.km / u.s
    sigma = 8.
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                        shape=shape, use_dask=use_dask)

    model_spectrum = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    # Poison a single pixel with an absurd velocity.
    vels[12,11] = 500*u.km/u.s

    with pytest.warns(BadVelocitiesWarning,
                      match='Some velocities are outside the allowed range and will be'):
        stacked = stack_spectra(cube, vels, v0=v0,
                                stack_function=np.nanmean,
                                xy_posns=None, num_cores=1,
                                chunk_size=-1,
                                progressbar=False, pad_edges=False)

    # The lone bad pixel must not have perturbed the stack.
    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3
def test_stacking_reversed_specaxis(use_dask):
    """
    Same as test_stacking, but with a decreasing spectral axis
    (spec_scale = -1 km/s) to check that stacking is direction-agnostic.
    """
    amp = 1.
    v0 = 0. * u.km / u.s
    sigma = 8.
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                        shape=shape, spec_scale=-1. * u.km / u.s,
                                        use_dask=use_dask)

    model_spectrum = gaussian(cube.spectral_axis.value, amp, v0.value, sigma)

    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False, pad_edges=False)

    # The stack must reproduce the noiseless model...
    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3

    # ...on the same (reversed) spectral axis as the input cube.
    np.testing.assert_allclose(stacked.spectral_axis.value,
                               cube.spectral_axis.value)
def test_stacking_wpadding(use_dask):
    """
    Stack with pad_edges=True and verify both the recovered profile and
    the padded length of the output spectral axis.
    """
    amp = 1.
    sigma = 8.
    v0 = 0. * u.km / u.s
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma,
                                        noise=None, use_dask=use_dask)

    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model_spectrum = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    # The stacked profile should match the noiseless model.
    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3

    # Now fit a Gaussian to the mean stacked profile.
    # fit_vals = fit_gaussian(stacked.spectral_axis.value, stacked.value)[0]
    # np.testing.assert_allclose(fit_vals, np.array([amp, 0.0, sigma]),
    #                            atol=1e-3)

    # Padding adds ~25% per side; rounding allows the size to be off by one.
    expected_size = int(cube.shape[0] * 1.5)
    assert abs(stacked.size - expected_size) <= 1
def test_padding_direction(use_dask):
    """
    With an all-non-negative velocity surface, padding should extend the
    spectral axis toward negative velocities only.
    """
    amp = 1.
    sigma = 8.
    v0 = 0. * u.km / u.s
    shape = (100, 2, 2)
    vel_surface = np.array([[0, 5], [5, 10]])

    cube, vels = generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma,
                                        noise=None, vel_surface=vel_surface,
                                        use_dask=use_dask)

    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model_spectrum = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    # All shifts are negative, so only the low-velocity end grows:
    # vmin moves from -50 to -60 km/s while vmax stays at 49 km/s.
    assert stacked.spectral_axis.min() == -60*u.km/u.s
    assert stacked.spectral_axis.max() == 49*u.km/u.s

    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3
def test_stacking_woffset(use_dask):
    """
    Stack profiles centred at +100 km/s to confirm that an absolute
    velocity offset does not affect the stacking machinery.
    """
    amp = 1.
    sigma = 8.
    v0 = 100. * u.km / u.s
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(shape=shape, amp=amp, sigma=sigma,
                                        noise=None, v0=v0.value,
                                        use_dask=use_dask)

    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False, pad_edges=True)

    model_spectrum = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    assert np.std(np.abs(stacked.value - model_spectrum)) <= 1e-3

    # The spectral axis is padded by ~25% per side (rounded, so +/- 1).
    expected_size = int(cube.shape[0] * 1.5)
    assert abs(stacked.size - expected_size) <= 1
def test_stacking_shape_failure(use_dask):
    """
    Regression test for #466: malformed velocity surfaces must raise
    informative ValueErrors.
    """
    amp = 1.
    v0 = 0. * u.km / u.s
    sigma = 8.
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(amp=amp, sigma=sigma, noise=None,
                                        shape=shape, use_dask=use_dask)

    # A velocity surface whose shape does not match the cube...
    bad_vels = vels[:-1, :-1]

    with pytest.raises(ValueError) as exc:
        stack_spectra(cube, bad_vels, v0=v0,
                      stack_function=np.nanmean,
                      xy_posns=None, num_cores=1,
                      chunk_size=-1,
                      progressbar=False, pad_edges=False)
    assert 'Velocity surface map does not match' in exc.value.args[0]

    # ...and one that contains no finite values at all.
    nan_vels = np.ones(shape[1:], dtype='float') + np.nan

    with pytest.raises(ValueError) as exc:
        stack_spectra(cube, nan_vels, v0=v0,
                      stack_function=np.nanmean,
                      xy_posns=None, num_cores=1,
                      chunk_size=-1,
                      progressbar=False, pad_edges=False)
    assert "velocity_surface contains no finite values" in exc.value.args[0]
def test_stacking_noisy(use_dask):
    """
    Stack spectra with S/N ~ 0.2.  Knowing the true peak velocities is
    cheating, but it confirms that stacking beats the noise down by
    sqrt(number of averaged pixels).
    """
    amp = 1.
    sigma = 8.
    v0 = 0 * u.km / u.s
    noise = 5.0
    shape = (100, 25, 25)

    cube, vels = generate_gaussian_cube(amp=amp, sigma=sigma, noise=noise,
                                        shape=shape, use_dask=use_dask)

    stacked = stack_spectra(cube, vels, v0=v0,
                            stack_function=np.nanmean,
                            xy_posns=None, num_cores=1,
                            chunk_size=-1,
                            progressbar=False,
                            pad_edges=True)

    model_spectrum = gaussian(stacked.spectral_axis.value, amp, v0.value, sigma)

    # Residual scatter should drop as sqrt of the number of averaged pixels.
    assert np.std(np.abs(stacked.value - model_spectrum)) <= noise / np.sqrt(shape[1] * shape[2])

    # Now fit a Gaussian to the mean stacked profile.
    # fit_vals, fit_errs = fit_gaussian(stacked.spectral_axis.value,
    #                                   stacked.value)

    # Check that the fit is consistent with the true values within 1-sigma err
    # for fit_val, fit_err, true_val in zip(fit_vals, fit_errs,
    #                                       [amp, v0.value, sigma]):
    #     np.testing.assert_allclose(fit_val, true_val,
    #                                atol=fit_err)
# def fit_gaussian(vels, data):
# g_init = models.Gaussian1D()
# fit_g = fitting.LevMarLSQFitter()
# g_fit = fit_g(g_init, vels, data)
# cov = fit_g.fit_info['param_cov']
# if cov is None:
# cov = np.zeros((3, 3)) * np.NaN
# parvals = g_fit.parameters
# parerrs = np.sqrt(np.diag(cov))
# return parvals, parerrs
| bsd-3-clause |
rob356/SickRage | lib/imdb/parser/sql/dbschema.py | 117 | 20506 | #-*- encoding: utf-8 -*-
"""
parser.sql.dbschema module (imdb.parser.sql package).
This module provides the schema used to describe the layout of the
database used by the imdb.parser.sql package; functions to create/drop
tables and indexes are also provided.
Copyright 2005-2012 Davide Alberani <da@erlug.linux.it>
2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
_dbschema_logger = logging.getLogger('imdbpy.parser.sql.dbschema')
# Placeholders for column types; DBCol.kind holds one of these values.
INTCOL = 1
UNICODECOL = 2
STRINGCOL = 3
# Reverse map, used by DBCol.__str__/__repr__ to print the kind's name.
_strMap = {1: 'INTCOL', 2: 'UNICODECOL', 3: 'STRINGCOL'}
class DBCol(object):
    """Define column objects.

    A DBCol describes one column of a table in the schema: its name, its
    kind (one of the INTCOL/UNICODECOL/STRINGCOL placeholders) and any
    extra options (length, default, notNone, alternateID, ...).
    """
    def __init__(self, name, kind, **params):
        # name: column name; kind: INTCOL/UNICODECOL/STRINGCOL placeholder.
        self.name = name
        self.kind = kind
        self.index = None
        self.indexLen = None
        # If not None, two notations are accepted: 'TableName'
        # and 'TableName.ColName'; in the first case, 'id' is assumed
        # as the name of the pointed column.
        self.foreignKey = None
        # 'index', 'indexLen' and 'foreignKey' are extracted from params;
        # everything else is stored verbatim in self.params.
        if 'index' in params:
            self.index = params['index']
            del params['index']
        if 'indexLen' in params:
            self.indexLen = params['indexLen']
            del params['indexLen']
        if 'foreignKey' in params:
            self.foreignKey = params['foreignKey']
            del params['foreignKey']
        self.params = params

    def __str__(self):
        """Class representation (compact, human-readable form)."""
        s = '<DBCol %s %s' % (self.name, _strMap[self.kind])
        if self.index:
            s += ' INDEX'
            if self.indexLen:
                s += '[:%d]' % self.indexLen
        if self.foreignKey:
            s += ' FOREIGN'
        if 'default' in self.params:
            val = self.params['default']
            if val is not None:
                val = '"%s"' % val
            s += ' DEFAULT=%s' % val
        # Remaining options are shown by name only (uppercased).
        for param in self.params:
            if param == 'default': continue
            s += ' %s' % param.upper()
        s += '>'
        return s

    def __repr__(self):
        """Class representation (constructor-like form, with values)."""
        s = '<DBCol(name="%s", %s' % (self.name, _strMap[self.kind])
        if self.index:
            s += ', index="%s"' % self.index
        if self.indexLen:
            s += ', indexLen=%d' % self.indexLen
        if self.foreignKey:
            s += ', foreignKey="%s"' % self.foreignKey
        for param in self.params:
            val = self.params[param]
            # NOTE: `unicode` is the Python 2 builtin; this module targets
            # Python 2 (see the `except Exception, e` syntax further down).
            if isinstance(val, (unicode, str)):
                val = u'"%s"' % val
            s += ', %s=%s' % (param, val)
        s += ')>'
        return s
class DBTable(object):
    """Describe a database table: a name, a tuple of DBCol objects, and an
    optional mapping of default values to pre-populate the table with."""

    def __init__(self, name, *cols, **kwds):
        self.name = name
        self.cols = cols
        # Default values, as {'column': (sequence, of, values)}.
        self.values = kwds.get('values', {})

    def __str__(self):
        """Compact, human-readable representation."""
        nr_values = sum(len(v) for v in self.values.values())
        return '<DBTable %s (%d cols, %d values)>' % (self.name,
                                                      len(self.cols), nr_values)

    def __repr__(self):
        """Constructor-like representation, including contained columns."""
        parts = ['<DBTable(name="%s"' % self.name]
        col_s = ', '.join(repr(col).rstrip('>').lstrip('<')
                          for col in self.cols)
        if col_s:
            parts.append(', %s' % col_s)
        if self.values:
            parts.append(', values=%s' % self.values)
        parts.append(')>')
        return ''.join(parts)
# Default values to insert in some tables: {'column': (list, of, values, ...)}
kindTypeDefs = {'kind': ('movie', 'tv series', 'tv movie', 'video movie',
'tv mini series', 'video game', 'episode')}
companyTypeDefs = {'kind': ('distributors', 'production companies',
'special effects companies', 'miscellaneous companies')}
infoTypeDefs = {'info': ('runtimes', 'color info', 'genres', 'languages',
'certificates', 'sound mix', 'tech info', 'countries', 'taglines',
'keywords', 'alternate versions', 'crazy credits', 'goofs',
'soundtrack', 'quotes', 'release dates', 'trivia', 'locations',
'mini biography', 'birth notes', 'birth date', 'height',
'death date', 'spouse', 'other works', 'birth name',
'salary history', 'nick names', 'books', 'agent address',
'biographical movies', 'portrayed in', 'where now', 'trade mark',
'interviews', 'article', 'magazine cover photo', 'pictorial',
'death notes', 'LD disc format', 'LD year', 'LD digital sound',
'LD official retail price', 'LD frequency response', 'LD pressing plant',
'LD length', 'LD language', 'LD review', 'LD spaciality', 'LD release date',
'LD production country', 'LD contrast', 'LD color rendition',
'LD picture format', 'LD video noise', 'LD video artifacts',
'LD release country', 'LD sharpness', 'LD dynamic range',
'LD audio noise', 'LD color information', 'LD group genre',
'LD quality program', 'LD close captions-teletext-ld-g',
'LD category', 'LD analog left', 'LD certification',
'LD audio quality', 'LD video quality', 'LD aspect ratio',
'LD analog right', 'LD additional information',
'LD number of chapter stops', 'LD dialogue intellegibility',
'LD disc size', 'LD master format', 'LD subtitles',
'LD status of availablility', 'LD quality of source',
'LD number of sides', 'LD video standard', 'LD supplement',
'LD original title', 'LD sound encoding', 'LD number', 'LD label',
'LD catalog number', 'LD laserdisc title', 'screenplay-teleplay',
'novel', 'adaption', 'book', 'production process protocol',
'printed media reviews', 'essays', 'other literature', 'mpaa',
'plot', 'votes distribution', 'votes', 'rating',
'production dates', 'copyright holder', 'filming dates', 'budget',
'weekend gross', 'gross', 'opening weekend', 'rentals',
'admissions', 'studios', 'top 250 rank', 'bottom 10 rank')}
compCastTypeDefs = {'kind': ('cast', 'crew', 'complete', 'complete+verified')}
linkTypeDefs = {'link': ('follows', 'followed by', 'remake of', 'remade as',
'references', 'referenced in', 'spoofs', 'spoofed in',
'features', 'featured in', 'spin off from', 'spin off',
'version of', 'similar to', 'edited into',
'edited from', 'alternate language version of',
'unknown link')}
roleTypeDefs = {'role': ('actor', 'actress', 'producer', 'writer',
'cinematographer', 'composer', 'costume designer',
'director', 'editor', 'miscellaneous crew',
'production designer', 'guest')}
# Schema of tables in our database.
# XXX: Foreign keys can be used to create constrains between tables,
# but they create indexes in the database, and this
# means poor performances at insert-time.
DB_SCHEMA = [
DBTable('Name',
# namePcodeCf is the soundex of the name in the canonical format.
# namePcodeNf is the soundex of the name in the normal format, if
# different from namePcodeCf.
# surnamePcode is the soundex of the surname, if different from the
# other two values.
# The 'id' column is simply skipped by SQLObject (it's a default);
# the alternateID attribute here will be ignored by SQLAlchemy.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None, index='idx_imdb_id'),
DBCol('gender', STRINGCOL, length=1, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CharName',
# namePcodeNf is the soundex of the name in the normal format.
# surnamePcode is the soundex of the surname, if different
# from namePcodeNf.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyName',
# namePcodeNf is the soundex of the name in the normal format.
# namePcodeSf is the soundex of the name plus the country code.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('countryCode', UNICODECOL, length=255, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('namePcodeSf', STRINGCOL, length=5, default=None,
index='idx_pcodesf'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('KindType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=15, default=None, alternateID=True),
values=kindTypeDefs
),
DBTable('Title',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('title', UNICODECOL, notNone=True,
index='idx_title', indexLen=10),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('imdbID', INTCOL, default=None, index="idx_imdb_id"),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='Title'),
DBCol('seasonNr', INTCOL, default=None, index="idx_season_nr"),
DBCol('episodeNr', INTCOL, default=None, index="idx_episode_nr"),
# Maximum observed length is 44; 49 can store 5 comma-separated
# year-year pairs.
DBCol('seriesYears', STRINGCOL, length=49, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, default=None, alternateID=True),
values=companyTypeDefs
),
DBTable('AkaName',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_person',
foreignKey='Name'),
DBCol('name', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('AkaTitle',
# XXX: It's safer to set notNone to False, here.
# alias for akas are stored completely in the AkaTitle table;
# this means that episodes will set also a "tv series" alias name.
# Reading the aka-title.list file it looks like there are
# episode titles with aliases to different titles for both
# the episode and the series title, while for just the series
# there are no aliases.
# E.g.:
# aka title original title
# "Series, The" (2005) {The Episode} "Other Title" (2005) {Other Title}
# But there is no:
# "Series, The" (2005) "Other Title" (2005)
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_movieid',
foreignKey='Title'),
DBCol('title', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='AkaTitle'),
DBCol('seasonNr', INTCOL, default=None),
DBCol('episodeNr', INTCOL, default=None),
DBCol('note', UNICODECOL, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('RoleType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('role', STRINGCOL, length=32, notNone=True, alternateID=True),
values=roleTypeDefs
),
DBTable('CastInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('personRoleID', INTCOL, default=None, index='idx_cid',
foreignKey='CharName'),
DBCol('note', UNICODECOL, default=None),
DBCol('nrOrder', INTCOL, default=None),
DBCol('roleID', INTCOL, notNone=True, foreignKey='RoleType')
),
DBTable('CompCastType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, notNone=True, alternateID=True),
values=compCastTypeDefs
),
DBTable('CompleteCast',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, index='idx_mid', foreignKey='Title'),
DBCol('subjectID', INTCOL, notNone=True, foreignKey='CompCastType'),
DBCol('statusID', INTCOL, notNone=True, foreignKey='CompCastType')
),
DBTable('InfoType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('info', STRINGCOL, length=32, notNone=True, alternateID=True),
values=infoTypeDefs
),
DBTable('LinkType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('link', STRINGCOL, length=32, notNone=True, alternateID=True),
values=linkTypeDefs
),
DBTable('Keyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
# XXX: can't use alternateID=True, because it would create
# a UNIQUE index; unfortunately (at least with a common
# collation like utf8_unicode_ci) MySQL will consider
# some different keywords identical - like
# "fiancée" and "fiancee".
DBCol('keyword', UNICODECOL, notNone=True,
index='idx_keyword', indexLen=5),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode')
),
DBTable('MovieKeyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('keywordID', INTCOL, notNone=True, index='idx_keywordid',
foreignKey='Keyword')
),
DBTable('MovieLink',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('linkedMovieID', INTCOL, notNone=True, foreignKey='Title'),
DBCol('linkTypeID', INTCOL, notNone=True, foreignKey='LinkType')
),
DBTable('MovieInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
),
# This table is identical to MovieInfo, except that both 'infoTypeID'
# and 'info' are indexed.
DBTable('MovieInfoIdx',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, index='idx_infotypeid',
foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True, index='idx_info', indexLen=10),
DBCol('note', UNICODECOL, default=None)
),
DBTable('MovieCompanies',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('companyID', INTCOL, notNone=True, index='idx_cid',
foreignKey='CompanyName'),
DBCol('companyTypeID', INTCOL, notNone=True, foreignKey='CompanyType'),
DBCol('note', UNICODECOL, default=None)
),
DBTable('PersonInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
)
]
# Functions to manage tables.
def dropTables(tables, ifExists=True):
    """Drop every table in `tables`, in reverse definition order (useful
    to avoid errors about foreign keys referencing not-yet-dropped
    tables)."""
    for table in reversed(tables):
        _dbschema_logger.info('dropping table %s', table._imdbpyName)
        table.dropTable(ifExists)
def createTables(tables, ifNotExists=True):
    """Create every table, then populate it with its default values (if
    the table's schema declares any)."""
    for table in tables:
        _dbschema_logger.info('creating table %s', table._imdbpyName)
        table.createTable(ifNotExists)
        # Insert default values, if any.
        values = table._imdbpySchema.values
        if values:
            _dbschema_logger.info('inserting values into table %s',
                                  table._imdbpyName)
            for key in values:
                for value in values[key]:
                    table(**{key: unicode(value)})
def createIndexes(tables, ifNotExists=True):
    """Create the indexes in the database.

    Return a list of the exceptions raised while adding indexes (an empty
    list means everything succeeded); a failure on one table does not stop
    processing of the remaining tables.
    """
    errors = []
    for table in tables:
        _dbschema_logger.info('creating indexes for table %s',
                              table._imdbpyName)
        try:
            table.addIndexes(ifNotExists)
        # `except ... as ...` (valid since Python 2.6) instead of the old
        # `except Exception, e` form, which is a syntax error on Python 3.
        except Exception as e:
            errors.append(e)
            continue
    return errors
def createForeignKeys(tables, ifNotExists=True):
    """Create Foreign Keys.

    Return a list of the exceptions raised while adding foreign keys (an
    empty list means everything succeeded); a failure on one table does
    not stop processing of the remaining tables.
    """
    errors = []
    # Map table names to table objects, so each table can resolve the
    # targets of its foreign keys.
    mapTables = {}
    for table in tables:
        mapTables[table._imdbpyName] = table
    for table in tables:
        _dbschema_logger.info('creating foreign keys for table %s',
                              table._imdbpyName)
        try:
            table.addForeignKeys(mapTables, ifNotExists)
        # `except ... as ...` (valid since Python 2.6) instead of the old
        # `except Exception, e` form, which is a syntax error on Python 3.
        except Exception as e:
            errors.append(e)
            continue
    return errors
| gpl-3.0 |
bracket/rasterizer | handsome/TileCache.py | 2 | 1374 | from .Coordinate import Coordinate
from .Pixel import Pixel
from .Tile import Tile
class TileCache:
    """Lazily allocate and look up Tile objects on a fixed grid.

    Tiles are keyed by their origin, i.e. the coordinate snapped down to
    the nearest multiple of the tile shape.
    """

    def __init__(self, tile_shape, sample_rate=1, dtype=Pixel):
        self.tiles = { }
        self.tile_shape = tile_shape
        self.sample_rate = sample_rate
        self.dtype = dtype

    def tile_origin_for_coordinate(self, coordinate):
        """Snap `coordinate` down to the origin of the tile containing it."""
        width, height = self.tile_shape
        x, y = coordinate[0], coordinate[1]
        return (int(x // width * width), int(y // height * height))

    def get_tile(self, coordinate):
        """Return the tile containing `coordinate`, creating it on demand."""
        origin = self.tile_origin_for_coordinate(coordinate)
        try:
            return self.tiles[origin]
        except KeyError:
            tile = Tile(origin, self.tile_shape, self.sample_rate, self.dtype)
            self.tiles[origin] = tile
            return tile

    def get_tiles_for_bounds(self, bounds):
        """Yield every tile overlapping `bounds`, allocating as needed."""
        width, height = self.tile_shape
        left, bottom = self.tile_origin_for_coordinate((bounds.left, bounds.bottom))
        # Offset the far corner by one tile so the end tiles are included.
        right, top = self.tile_origin_for_coordinate(
            (bounds.right + width, bounds.top + height))
        for x in range(left, right, width):
            for y in range(bottom, top, height):
                yield self.get_tile((x, y))

    def composite_into(self, target):
        """Composite every cached tile into `target`."""
        for source in self.tiles.values():
            target.composite_from(source)
| bsd-2-clause |
End of preview. Expand
in Dataset Viewer.
No dataset card yet
New: Create and edit this dataset card directly on the website!
Contribute a Dataset Card- Downloads last month
- 30