| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M |
# Find the name with the maximum number of unique letters (ignoring spaces and duplicate letters); if there is a tie, choose the alphabetically first name.
# import sys
# text = "".join(sys.stdin.readlines())
# name_list = text.split("\n")
inputList = ["kylan charles", "raymond strickland", "julissa shepard", "andrea meza", "destiny alvarado"]
inputList2 = ["maria garcia", "smith hernandez", "hernandez smith", "mary martinez", "james johnson"]
inputList3 = ["Sheldon Cooper", "Howord Wolowitz", "Amy Farrah Fowler", "Leonard Hofstadter", "Bernadette R"]
name_store = {}
for name in inputList3:
name_store[name] = len(set(name.lower().replace(" ", ""))) # Remove spaces using replace and remove duplicates using set
res = []
maxLen = -float("inf")
for name in name_store.keys():
if name_store.get(name) > maxLen:
res.clear()
res.append(name)
maxLen = name_store.get(name)
elif name_store.get(name) == maxLen:
res.append(name)
res.sort()
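# With inputList3, the unique-letter counts are: "Sheldon Cooper" = 10,
# "Howord Wolowitz" = 9, "Amy Farrah Fowler" = 10, "Leonard Hofstadter" = 11,
# "Bernadette R" = 7, so the script prints "Leonard Hofstadter".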
print(res[0])
| saisankargochhayat/algo_quest | Company-Based/SAP/social_sabatical_name.py | Python | apache-2.0 | 938 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Console Proxy Service."""
import socket
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import rpc
from nova import utils
console_manager_opts = [
cfg.StrOpt('console_driver',
default='nova.console.xvp.XVPConsoleProxy',
help='Driver to use for the console proxy'),
cfg.BoolOpt('stub_compute',
default=False,
help='Stub calls to compute worker for tests'),
cfg.StrOpt('console_public_hostname',
default=socket.gethostname(),
help='Publicly visible name for this console host'),
]
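# These options are registered below on the global FLAGS object, so they would
# typically be tuned in nova.conf; the values here are purely illustrative:
#
#   [DEFAULT]
#   console_driver = nova.console.xvp.XVPConsoleProxy
#   stub_compute = True
#   console_public_hostname = console1.example.com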
FLAGS = flags.FLAGS
FLAGS.register_opts(console_manager_opts)
LOG = logging.getLogger(__name__)
class ConsoleProxyManager(manager.Manager):
"""Sets up and tears down any console proxy connections.
Needed for accessing instance consoles securely.
"""
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
console_driver = FLAGS.console_driver
self.driver = importutils.import_object(console_driver)
super(ConsoleProxyManager, self).__init__(*args, **kwargs)
self.driver.host = self.host
def init_host(self):
self.driver.init_host()
@exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
host = instance['host']
name = instance['name']
pool = self.get_pool_for_instance_host(context, host)
try:
console = self.db.console_get_by_pool_instance(context,
pool['id'],
instance_id)
except exception.NotFound:
LOG.debug(_('Adding console'), instance=instance)
if not password:
password = utils.generate_password(8)
if not port:
port = self.driver.get_port(context)
console_data = {'instance_name': name,
'instance_id': instance_id,
'password': password,
'pool_id': pool['id']}
if port:
console_data['port'] = port
console = self.db.console_create(context, console_data)
self.driver.setup_console(context, console)
return console['id']
@exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
except exception.NotFound:
LOG.debug(_('Tried to remove non-existent console '
'%(console_id)s.') %
{'console_id': console_id})
return
self.db.console_delete(context, console_id)
self.driver.teardown_console(context, console)
def get_pool_for_instance_host(self, context, instance_host):
context = context.elevated()
console_type = self.driver.console_type
try:
pool = self.db.console_pool_get_by_host_type(context,
instance_host,
self.host,
console_type)
except exception.NotFound:
#NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
# xenserver. Thus we need to ask.
if FLAGS.stub_compute:
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass'}
else:
pool_info = rpc.call(context,
self.db.queue_get_for(context,
FLAGS.compute_topic,
instance_host),
{'method': 'get_console_pool_info',
'args': {'console_type': console_type}})
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
pool_info['public_hostname'] = FLAGS.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
| usc-isi/extra-specs | nova/console/manager.py | Python | apache-2.0 | 5,515 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import pandas as pd
import os, os.path
from sklearn.base import BaseEstimator
try:
from sklearn.feature_selection._base import SelectorMixin
except ImportError:
from sklearn.feature_selection.base import SelectorMixin
class FeatureSetSelector(BaseEstimator, SelectorMixin):
"""Select predefined feature subsets."""
@property
def __name__(self):
"""Instance name is the same as the class name."""
return self.__class__.__name__
def __init__(self, subset_list, sel_subset):
"""Create a FeatureSetSelector object.
Parameters
----------
subset_list: string, required
Path to a file that indicates all the subset lists. Currently,
this file needs to be a .csv with one header row.
The table should have 3 columns: subset names (Subset),
number of features (Size) and features in the subset (Features).
The feature names or indices of the input features
should be separated by ';' in the 3rd column of the file.
The feature names in the files must match those in the (training and
testing) dataset.
sel_subset: int or string or list or tuple
int: index of subset in subset file
string: subset name of subset
list or tuple: list of int or string for indices or subset names
Returns
-------
None
"""
self.subset_list = subset_list
self.sel_subset = sel_subset
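# A minimal usage sketch (hypothetical file and feature names, only to
# illustrate the subset-file layout described in the docstring above):
#
#   subsets.csv:
#       Subset,Size,Features
#       group_1,3,feat_a;feat_b;feat_c
#       group_2,2,feat_b;feat_d
#
#   selector = FeatureSetSelector(subset_list="subsets.csv",
#                                 sel_subset="group_1")
#   X_sel = selector.fit_transform(X, y)  # keeps only feat_a, feat_b, feat_c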
def fit(self, X, y=None):
"""Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns self, the fitted estimator
"""
subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)
if isinstance(self.sel_subset, int):
self.sel_subset_name = subset_df.index[self.sel_subset]
elif isinstance(self.sel_subset, str):
self.sel_subset_name = self.sel_subset
else: # list or tuple
self.sel_subset_name = []
for s in self.sel_subset:
if isinstance(s, int):
self.sel_subset_name.append(subset_df.index[s])
else:
self.sel_subset_name.append(s)
sel_features = subset_df.loc[self.sel_subset_name, 'Features']
if not isinstance(sel_features, str):
sel_features = ";".join(sel_features.tolist())
sel_uniq_features = set(sel_features.split(';'))
if isinstance(X, pd.DataFrame): # use columns' names
self.feature_names = list(X.columns.values)
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]
elif isinstance(X, np.ndarray): # use index
self.feature_names = list(range(X.shape[1]))
sel_uniq_features = [int(val) for val in sel_uniq_features]
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = self.feat_list
if not len(self.feat_list):
raise ValueError('No feature is found on the subset list!')
return self
def transform(self, X):
"""Make subset after fit
Parameters
----------
X: numpy ndarray, {n_samples, n_features}
New data, where n_samples is the number of samples and n_features is the number of features.
Returns
-------
X_transformed: array-like, shape (n_samples, n_selected_features)
The transformed feature set.
"""
if isinstance(X, pd.DataFrame):
X_transformed = X[self.feat_list].values
elif isinstance(X, np.ndarray):
X_transformed = X[:, self.feat_list_idx]
return X_transformed.astype(np.float64)
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
n_features = len(self.feature_names)
mask = np.zeros(n_features, dtype=bool)
mask[np.asarray(self.feat_list_idx)] = True
return mask
| weixuanfu/tpot | tpot/builtins/feature_set_selector.py | Python | lgpl-3.0 | 5,788 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateArtifact
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync]
from google.cloud import aiplatform_v1
def sample_create_artifact():
# Create a client
client = aiplatform_v1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateArtifactRequest(
parent="parent_value",
)
# Make the request
response = client.create_artifact(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_MetadataService_CreateArtifact_sync]
| googleapis/python-aiplatform | samples/generated_samples/aiplatform_generated_aiplatform_v1_metadata_service_create_artifact_sync.py | Python | apache-2.0 | 1,496 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
Functions in this module are imported into the nova.db namespace. Call these
functions from the nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
"""
from eventlet import tpool
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova.openstack.common.db import api as db_api
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
db_opts = [
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
tpool_opts = [
cfg.BoolOpt('use_tpool',
default=False,
deprecated_name='dbapi_use_tpool',
deprecated_group='DEFAULT',
help='Enable the experimental use of thread pooling for '
'all DB API calls'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(tpool_opts, 'database')
CONF.import_opt('backend', 'nova.openstack.common.db.options',
group='database')
_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
class NovaDBAPI(object):
"""Nova's DB API wrapper class.
This wraps the oslo DB API with an option to be able to use eventlet's
thread pooling. Since the CONF variable may not be loaded at the time
this class is instantiated, we must look at it on the first DB API call.
"""
def __init__(self):
self.__db_api = None
@property
def _db_api(self):
if not self.__db_api:
nova_db_api = db_api.DBAPI(CONF.database.backend,
backend_mapping=_BACKEND_MAPPING)
if CONF.database.use_tpool:
self.__db_api = tpool.Proxy(nova_db_api)
else:
self.__db_api = nova_db_api
return self.__db_api
def __getattr__(self, key):
return getattr(self._db_api, key)
IMPL = NovaDBAPI()
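# The lazy-loading pattern used by NovaDBAPI can be reduced to the standalone
# sketch below (hypothetical names, not used by nova): the real backend is
# built only on the first attribute access, so configuration that is loaded
# after import time is still honoured before the backend is created.
class _LazyBackendProxySketch(object):
    def __init__(self, loader):
        self._loader = loader  # callable that builds the real backend
        self._backend = None
    def __getattr__(self, name):
        if self._backend is None:
            # First access: build (and possibly tpool-wrap) the backend.
            self._backend = self._loader()
        return getattr(self._backend, name)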
LOG = logging.getLogger(__name__)
# The maximum value a signed INT type may have
MAX_INT = 0x7FFFFFFF
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
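# Illustrative use of the three helpers above (attribute and state names are
# examples only): a destroy can be guarded so it only proceeds while the
# instance is in one of the expected states, e.g.
#
#   guard = constraint(vm_state=equal_any('error', 'deleting'),
#                      task_state=not_equal('migrating'))
#   instance_destroy(context, instance_uuid, constraint=guard)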
###################
def service_destroy(context, service_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, service_id)
def service_get(context, service_id, with_compute_node=False):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id,
with_compute_node=with_compute_node)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_by_compute_host(context, host):
"""Get the service entry for a given compute host.
Returns the service entry joined with the compute_node entry.
"""
return IMPL.service_get_by_compute_host(context, host)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
"""Get a compute node by its id.
:param context: The security context
:param compute_id: ID of the compute node
:returns: Dictionary-like object containing properties of the compute node,
including its corresponding service
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_by_service_id(context, service_id):
"""Get a compute node by its associated service id.
:param context: The security context
:param service_id: ID of the associated service
:returns: Dictionary-like object containing properties of the compute node,
including its corresponding service and statistics
Raises ServiceNotFound if service with the given ID doesn't exist.
"""
return IMPL.compute_node_get_by_service_id(context, service_id)
def compute_node_get_all(context, no_date_fields=False):
"""Get all computeNodes.
:param context: The security context
:param no_date_fields: If set to True, excludes 'created_at', 'updated_at',
'deleted_at' and 'deleted' fields from the output,
thus significantly reducing its size.
Set to False by default
:returns: List of dictionaries each containing compute node properties,
including corresponding service
"""
return IMPL.compute_node_get_all(context, no_date_fields)
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get compute nodes by hypervisor hostname.
:param context: The security context
:param hypervisor_match: The hypervisor hostname
:returns: List of dictionary-like objects each containing compute node
properties, including corresponding service
"""
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
"""Create a compute node from the values dictionary.
:param context: The security context
:param values: Dictionary containing compute node properties
:returns: Dictionary-like object containing the properties of the created
node, including its corresponding service and statistics
"""
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values):
"""Set the given properties on a compute node and update it.
:param context: The security context
:param compute_id: ID of the compute node
:param values: Dictionary containing compute node properties to be updated
:returns: Dictionary-like object containing the properties of the updated
compute node, including its corresponding service and statistics
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_update(context, compute_id, values)
def compute_node_delete(context, compute_id):
"""Delete a compute node from the database.
:param context: The security context
:param compute_id: ID of the compute node
Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
"""
return IMPL.compute_node_delete(context, compute_id)
def compute_node_statistics(context):
"""Get aggregate statistics over all compute nodes.
:param context: The security context
:returns: Dictionary containing compute node characteristics summed up
over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc.
"""
return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
"""Returns a list of floating ip pools."""
return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
"""Allocate free floating ip from specified pool and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, project_id, pool,
auto_assigned)
def floating_ip_bulk_create(context, ips):
"""Create a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_bulk_destroy(context, ips):
"""Destroy a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_destroy(context, ips)
def floating_ip_create(context, values):
"""Create a floating ip from the values dictionary."""
return IMPL.floating_ip_create(context, values)
def floating_ip_deallocate(context, address):
"""Deallocate a floating ip by address."""
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
"""Destroy the floating_ip or raise if it does not exist."""
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
:returns: the fixed ip record joined to the network record, or None
if the floating ip was not associated with a fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address.
:returns: the fixed ip record joined to the network record, or None
if the floating ip was already associated with the fixed ip.
"""
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
"""Get all floating ips."""
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
"""Get all floating ips by host."""
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
"""Get all floating ips by project."""
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
"""Get a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
"""Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
"""Set auto_assigned flag to floating ip."""
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
"""Get a list of all zones in our database, public and private."""
return IMPL.dnsdomain_list(context)
def dnsdomain_get_all(context):
"""Get a list of all dnsdomains in our database."""
return IMPL.dnsdomain_get_all(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
"""Associated a DNS domain with an availability zone."""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
"""Associated a DNS domain with a project id."""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
"""Purge associations for the specified DNS zone."""
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
"""Get the db record for the specified domain."""
return IMPL.dnsdomain_get(context, fqdomain)
####################
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
def migration_create(context, values):
"""Create a migration record."""
return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
"""Finds a migration by the id."""
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
"""Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute, use_slave=use_slave)
def migration_get_in_progress_by_host_and_node(context, host, node):
"""Finds all migrations for the given host + node that are not yet
confirmed or reverted.
"""
return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
def migration_get_all_by_filters(context, filters):
"""Finds all migrations in progress."""
return IMPL.migration_get_all_by_filters(context, filters)
####################
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
"""
return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
"""Find free ip in network and associate it to instance or host.
Raises if one is not available.
"""
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
"""Create a lot of fixed ips from the values dictionary."""
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
"""Disassociate a fixed ip from an instance by address."""
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
"""Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id, get_network=False):
"""Get fixed ip by id or raise if it does not exist.
If get_network is true, also return the associated network.
"""
return IMPL.fixed_ip_get(context, id, get_network)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address, columns_to_join=None):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def fixed_ip_get_by_address_detailed(context, address):
"""Get detailed fixed ip info by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address_detailed(context, address)
def fixed_ip_get_by_floating_address(context, floating_address):
"""Get a fixed ip by a floating address."""
return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_host(context, host):
"""Get fixed ips by compute host."""
return IMPL.fixed_ip_get_by_host(context, host)
def fixed_ip_get_by_network_host(context, network_uuid, host):
"""Get fixed ip for a host in a network."""
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table."""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table filtering on vif uuid."""
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id,
use_slave=use_slave)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
def instance_destroy(context, instance_uuid, constraint=None,
update_cells=True):
"""Destroy the instance or raise if it does not exist."""
rv = IMPL.instance_destroy(context, instance_uuid, constraint)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance destroy"))
return rv
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid,
columns_to_join, use_slave=use_slave)
def instance_get(context, instance_id, columns_to_join=None):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id,
columns_to_join=columns_to_join)
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
columns_to_join=None, use_slave=False):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
use_slave=use_slave)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_host(context, host,
columns_to_join=None, use_slave=False):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host,
columns_to_join,
use_slave=use_slave)
def instance_get_all_by_host_and_node(context, host, node):
"""Get all instances belonging to a node."""
return IMPL.instance_get_all_by_host_and_node(context, host, node)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
def instance_floating_address_get_all(context, instance_uuid):
"""Get all floating ip addresses of an instance."""
return IMPL.instance_floating_address_get_all(context, instance_uuid)
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_update(context, instance_uuid, values, update_cells=True):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update(context, instance_uuid, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
# FIXME(comstud): 'update_cells' is temporary as we transition to using
# objects. When everything is using Instance.save(), we can remove the
# argument and the RPC to nova-cells.
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance id or uuid
:param values: = dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
rv = IMPL.instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=columns_to_join)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
except Exception:
LOG.exception(_("Failed to notify cells of instance update"))
return rv
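# Illustrative call of the helper above (the values dict is an example only):
#
#   old_ref, new_ref = instance_update_and_get_original(
#       context, instance_uuid, {'task_state': None})
#   # old_ref keeps the pre-update field values, new_ref the saved ones.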
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
"""Disassociate the given security group from the given instance."""
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
####################
def instance_group_create(context, values, policies=None, metadata=None,
members=None):
"""Create a new group with metadata.
Each group will receive a unique uuid. This will be used for access to the
group.
"""
return IMPL.instance_group_create(context, values, policies, metadata,
members)
def instance_group_get(context, group_uuid):
"""Get a specific group by id."""
return IMPL.instance_group_get(context, group_uuid)
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group."""
return IMPL.instance_group_update(context, group_uuid, values)
def instance_group_delete(context, group_uuid):
"""Delete an group."""
return IMPL.instance_group_delete(context, group_uuid)
def instance_group_get_all(context):
"""Get all groups."""
return IMPL.instance_group_get_all(context)
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups for a specific project_id."""
return IMPL.instance_group_get_all_by_project_id(context, project_id)
def instance_group_metadata_add(context, group_uuid, metadata,
set_delete=False):
"""Add metadata to the group."""
return IMPL.instance_group_metadata_add(context, group_uuid, metadata,
set_delete)
def instance_group_metadata_delete(context, group_uuid, key):
"""Delete metadata from the group."""
return IMPL.instance_group_metadata_delete(context, group_uuid, key)
def instance_group_metadata_get(context, group_uuid):
"""Get the metadata from the group."""
return IMPL.instance_group_metadata_get(context, group_uuid)
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
"""Add members to the group."""
return IMPL.instance_group_members_add(context, group_uuid, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
"""Delete a specific member from the group."""
return IMPL.instance_group_member_delete(context, group_uuid, instance_id)
def instance_group_members_get(context, group_uuid):
"""Get the members from the group."""
return IMPL.instance_group_members_get(context, group_uuid)
def instance_group_policies_add(context, group_uuid, policies,
set_delete=False):
"""Add policies to the group."""
return IMPL.instance_group_policies_add(context, group_uuid, policies,
set_delete=set_delete)
def instance_group_policy_delete(context, group_uuid, policy):
"""Delete a specific policy from the group."""
return IMPL.instance_group_policy_delete(context, group_uuid, policy)
def instance_group_policies_get(context, group_uuid):
"""Get the policies from the group."""
return IMPL.instance_group_policies_get(context, group_uuid)
###################
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
return IMPL.instance_info_cache_update(context, instance_uuid, values)
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
def key_pair_create(context, values):
"""Create a key_pair from the values dictionary."""
return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
"""Destroy the key_pair or raise if it does not exist."""
return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
"""Get all key_pairs by user."""
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
"""Count number of key pairs for the given user ID."""
return IMPL.key_pair_count_by_user(context, user_id)
####################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id, network_id, force)
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
"""Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
constraints because the network already exists, no exception is raised.
"""
return IMPL.network_create_safe(context, values)
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_disassociate(context, network_id, disassociate_host=True,
disassociate_project=True):
"""Disassociate the network from project or host
Raises if it does not exist.
"""
return IMPL.network_disassociate(context, network_id, disassociate_host,
disassociate_project)
def network_get(context, network_id, project_only="allow_none"):
"""Get a network or raise if it does not exist."""
return IMPL.network_get(context, network_id, project_only=project_only)
def network_get_all(context, project_only="allow_none"):
"""Return all defined networks."""
return IMPL.network_get_all(context, project_only)
def network_get_all_by_uuids(context, network_uuids,
project_only="allow_none"):
"""Return networks by ids."""
return IMPL.network_get_all_by_uuids(context, network_uuids,
project_only=project_only)
# pylint: disable=C0103
def network_in_use_on_host(context, network_id, host=None):
"""Indicates if a network is currently in use on host."""
return IMPL.network_in_use_on_host(context, network_id, host)
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist."""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
"""Set the given properties on a network and update it.
Raises NotFound if network does not exist.
"""
return IMPL.network_update(context, network_id, values)
###############
def quota_create(context, project_id, resource, limit, user_id=None):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit,
user_id=user_id)
def quota_get(context, project_id, resource, user_id=None):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource, user_id=user_id)
def quota_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all quotas associated with a given project and user."""
return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_get_all(context, project_id):
"""Retrieve all user quotas associated with a given project."""
return IMPL.quota_get_all(context, project_id)
def quota_update(context, project_id, resource, limit, user_id=None):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit,
user_id=user_id)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
"""Retrieve all default quotas."""
return IMPL.quota_class_get_default(context)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
###################
def quota_usage_get(context, project_id, resource, user_id=None):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id)
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project_and_user(context,
project_id, user_id)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, user_id, resource,
**kwargs)
###################
def quota_reserve(context, resources, quotas, user_quotas, deltas, expire,
until_refresh, max_age, project_id=None, user_id=None):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age,
project_id=project_id, user_id=user_id)
def reservation_commit(context, reservations, project_id=None, user_id=None):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations,
project_id=project_id,
user_id=user_id)
def reservation_rollback(context, reservations, project_id=None, user_id=None):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations,
project_id=project_id,
user_id=user_id)
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
"""Destroy all quotas associated with a given project and user."""
return IMPL.quota_destroy_all_by_project_and_user(context,
project_id, user_id)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
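# Typical reservation life-cycle built from the calls above (sketched in
# comments only; the resource names and deltas are illustrative):
#
#   reservations = quota_reserve(context, resources, quotas, user_quotas,
#                                deltas={'instances': 1, 'cores': 2},
#                                expire=expire, until_refresh=None, max_age=0,
#                                project_id=project_id, user_id=user_id)
#   try:
#       ...  # do the work that actually consumes the quota
#       reservation_commit(context, reservations, project_id=project_id,
#                          user_id=user_id)
#   except Exception:
#       reservation_rollback(context, reservations, project_id=project_id,
#                            user_id=user_id)
#
# reservation_expire() is meant to run periodically so that reservations whose
# expire time has passed are rolled back.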
###################
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
return IMPL.ec2_volume_create(context, volume_id, forced_id)
def ec2_volume_get_by_id(context, volume_id):
return IMPL.ec2_volume_get_by_id(context, volume_id)
def ec2_volume_get_by_uuid(context, volume_uuid):
return IMPL.ec2_volume_get_by_uuid(context, volume_uuid)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
def block_device_mapping_create(context, values, legacy=True):
"""Create an entry of block device mapping."""
return IMPL.block_device_mapping_create(context, values, legacy)
def block_device_mapping_update(context, bdm_id, values, legacy=True):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
def block_device_mapping_update_or_create(context, values, legacy=True):
"""Update an entry of block device mapping.
If it does not exist, create a new entry
"""
return IMPL.block_device_mapping_update_or_create(context, values, legacy)
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
"""Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid,
use_slave)
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
"""Get block device mapping for a given volume."""
return IMPL.block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id, columns_to_join=None):
"""Get security group by its id."""
return IMPL.security_group_get(context, security_group_id,
columns_to_join)
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
"""Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name,
columns_to_join=columns_to_join)
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_uuid):
"""Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_uuid)
def security_group_in_use(context, group_id):
"""Indicates if a security group is currently in use."""
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
"""Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_update(context, security_group_id, values,
columns_to_join=None):
"""Update a security group."""
return IMPL.security_group_update(context, security_group_id, values,
columns_to_join=columns_to_join)
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id.
Returns a tuple with the first element being a bool indicating
if the default security group previously existed. Second
element is the dict used to create the default security group.
"""
return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
####################
def security_group_rule_create(context, values):
"""Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
"""Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(
context, security_group_id, columns_to_join=columns_to_join)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
"""Get all rules that grant access to the given security group."""
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
"""Gets a security group rule."""
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
"""Count rules in a given security group."""
return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
def security_group_default_rule_get(context, security_group_rule_default_id):
return IMPL.security_group_default_rule_get(context,
security_group_rule_default_id)
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
return IMPL.security_group_default_rule_destroy(
context, security_group_rule_default_id)
def security_group_default_rule_create(context, values):
return IMPL.security_group_default_rule_create(context, values)
def security_group_default_rule_list(context):
return IMPL.security_group_default_rule_list(context)
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
###################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
console_type):
"""Fetch a console pool for a given proxy host, compute host, and type."""
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
"""Fetch all pools for given proxy host and type."""
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
"""Create a console."""
return IMPL.console_create(context, values)
def console_delete(context, console_id):
"""Delete a console."""
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_uuid,
columns_to_join)
def console_get(context, console_id, instance_uuid=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_uuid)
##################
def flavor_create(context, values, projects=None):
"""Create a new instance type."""
return IMPL.flavor_create(context, values, projects=projects)
def flavor_get_all(context, inactive=False, filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
"""Get all instance flavors."""
return IMPL.flavor_get_all(
context, inactive=inactive, filters=filters, sort_key=sort_key,
sort_dir=sort_dir, limit=limit, marker=marker)
def flavor_get(context, id):
"""Get instance type by id."""
return IMPL.flavor_get(context, id)
def flavor_get_by_name(context, name):
"""Get instance type by name."""
return IMPL.flavor_get_by_name(context, name)
def flavor_get_by_flavor_id(context, id, read_deleted=None):
"""Get instance type by flavor id."""
return IMPL.flavor_get_by_flavor_id(context, id, read_deleted)
def flavor_destroy(context, name):
"""Delete an instance type."""
return IMPL.flavor_destroy(context, name)
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access by flavor id."""
return IMPL.flavor_access_get_by_flavor_id(context, flavor_id)
def flavor_access_add(context, flavor_id, project_id):
"""Add flavor access for project."""
return IMPL.flavor_access_add(context, flavor_id, project_id)
def flavor_access_remove(context, flavor_id, project_id):
"""Remove flavor access for project."""
return IMPL.flavor_access_remove(context, flavor_id, project_id)
def flavor_extra_specs_get(context, flavor_id):
"""Get all extra specs for an instance type."""
return IMPL.flavor_extra_specs_get(context, flavor_id)
def flavor_extra_specs_get_item(context, flavor_id, key):
"""Get extra specs by key and flavor_id."""
return IMPL.flavor_extra_specs_get_item(context, flavor_id, key)
def flavor_extra_specs_delete(context, flavor_id, key):
"""Delete the given extra specs item."""
IMPL.flavor_extra_specs_delete(context, flavor_id, key)
def flavor_extra_specs_update_or_create(context, flavor_id,
extra_specs):
"""Create or update instance type extra specs.
This adds or modifies the key/value pairs specified in the
    extra specs dict argument.
"""
IMPL.flavor_extra_specs_update_or_create(context, flavor_id,
extra_specs)
####################
def pci_device_get_by_addr(context, node_id, dev_addr):
"""Get PCI device by address."""
return IMPL.pci_device_get_by_addr(context, node_id, dev_addr)
def pci_device_get_by_id(context, id):
"""Get PCI device by id."""
return IMPL.pci_device_get_by_id(context, id)
def pci_device_get_all_by_node(context, node_id):
"""Get all PCI devices for one host."""
return IMPL.pci_device_get_all_by_node(context, node_id)
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
"""Get PCI devices allocated to instance."""
return IMPL.pci_device_get_all_by_instance_uuid(context, instance_uuid)
def pci_device_destroy(context, node_id, address):
"""Delete a PCI device record."""
return IMPL.pci_device_destroy(context, node_id, address)
def pci_device_update(context, node_id, address, value):
"""Update a pci device."""
return IMPL.pci_device_update(context, node_id, address, value)
###################
def cell_create(context, values):
"""Create a new child Cell entry."""
return IMPL.cell_create(context, values)
def cell_update(context, cell_name, values):
"""Update a child Cell entry."""
return IMPL.cell_update(context, cell_name, values)
def cell_delete(context, cell_name):
"""Delete a child Cell."""
return IMPL.cell_delete(context, cell_name)
def cell_get(context, cell_name):
"""Get a specific child Cell."""
return IMPL.cell_get(context, cell_name)
def cell_get_all(context):
"""Get all child Cells."""
return IMPL.cell_get_all(context)
####################
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
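# Usage sketch (illustrative only; ``ctxt`` and the UUID below are hypothetical):
#   instance_metadata_update(ctxt, 'some-instance-uuid', {'purpose': 'build'}, False)
# Passing delete=True is expected to also drop keys absent from the supplied dict.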
####################
def instance_system_metadata_get(context, instance_uuid):
"""Get all system metadata for an instance."""
return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
####################
def agent_build_create(context, values):
"""Create a new agent build entry."""
return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
"""Get agent build by hypervisor/OS/architecture triple."""
return IMPL.agent_build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context, hypervisor=None):
"""Get all agent builds."""
return IMPL.agent_build_get_all(context, hypervisor)
def agent_build_destroy(context, agent_update_id):
"""Destroy agent build entry."""
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
"""Return bw usage for instance and mac in a given audit period."""
return IMPL.bw_usage_get(context, uuid, start_period, mac)
def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None,
update_cells=True):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
if update_cells:
try:
cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed)
except Exception:
LOG.exception(_("Failed to notify cells of bw_usage update"))
return rv
###################
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return IMPL.vol_get_usage_by_time(context, begin)
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
"""Update cached volume usage for a volume
Creates new record if needed.
"""
return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
wr_bytes, instance_id, project_id, user_id,
availability_zone,
update_totals=update_totals)
###################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
return IMPL.s3_image_create(context, image_uuid)
####################
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
"""Get a specific aggregate by id."""
return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
"""Get a list of aggregates that host belongs to."""
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
"""Get metadata for all aggregates that host belongs to.
    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    An optional key filter may be supplied.
"""
return IMPL.aggregate_metadata_get_by_host(context, host, key)
def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
"""Get metadata for an aggregate by metadata key."""
return IMPL.aggregate_metadata_get_by_metadata_key(context, aggregate_id,
key)
def aggregate_host_get_by_metadata_key(context, key):
"""Get hosts with a specific metadata key metadata for all aggregates.
Returns a dictionary where each key is a hostname and each value is a set
of the key values
return value: {machine: set( az1, az2 )}
"""
return IMPL.aggregate_host_get_by_metadata_key(context, key)
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates.
If values contains a metadata key, it updates the aggregate metadata too.
"""
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
"""Delete an aggregate."""
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
"""Get all aggregates."""
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
"""Add/update metadata. If set_delete=True, it adds only."""
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
"""Get metadata for the specified aggregate."""
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
"""Delete the given metadata key."""
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
"""Add host to the aggregate."""
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
"""Get hosts for the specified aggregate."""
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
"""Delete the given host from the aggregate."""
IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values, update_cells=True):
"""Create a new Instance Fault."""
rv = IMPL.instance_fault_create(context, values)
if update_cells:
try:
cells_rpcapi.CellsAPI().instance_fault_create_at_top(context, rv)
except Exception:
LOG.exception(_("Failed to notify cells of instance fault"))
return rv
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def action_start(context, values):
"""Start an action for an instance."""
return IMPL.action_start(context, values)
def action_finish(context, values):
"""Finish an action for an instance."""
return IMPL.action_finish(context, values)
def actions_get(context, uuid):
"""Get all instance actions for the provided instance."""
return IMPL.actions_get(context, uuid)
def action_get_by_request_id(context, uuid, request_id):
"""Get the action by request_id and given instance."""
return IMPL.action_get_by_request_id(context, uuid, request_id)
def action_event_start(context, values):
"""Start an event on an instance action."""
return IMPL.action_event_start(context, values)
def action_event_finish(context, values):
"""Finish an event on an instance action."""
return IMPL.action_event_finish(context, values)
def action_events_get(context, action_id):
"""Get the events by action id."""
return IMPL.action_events_get(context, action_id)
def action_event_get_by_id(context, action_id, event_id):
    """Get an action event by action id and event id."""
    return IMPL.action_event_get_by_id(context, action_id, event_id)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table."""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
"""Get uuid through ec2 id from instance_id_mappings table."""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_uuid, id=None):
"""Create the ec2 id to instance uuid mapping on demand."""
return IMPL.ec2_instance_create(context, instance_uuid, id)
####################
def task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message=None):
"""Mark a task as complete for a given host/time period."""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None):
"""Mark a task as started for a given host/time period."""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message)
def task_log_get_all(context, task_name, period_beginning,
                     period_ending, host=None, state=None):
    """Get all task logs for the given task name and time period,
    optionally filtered by host and state.
    """
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state)
def task_log_get(context, task_name, period_beginning,
                 period_ending, host, state=None):
    """Get the task log for the given task name, time period, and host,
    optionally filtered by state.
    """
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state)
####################
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to corresponding shadow
tables.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows(context, max_rows=max_rows)
def archive_deleted_rows_for_table(context, tablename, max_rows=None):
"""Move up to max_rows rows from tablename to corresponding shadow
table.
:returns: number of rows archived.
"""
return IMPL.archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows)
| CiscoSystems/nova | nova/db/api.py | Python | apache-2.0 | 66,221 |
"""Module containing a preprocessor that executes the code cells
and updates outputs"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from textwrap import dedent
try:
from queue import Empty # Py 3
except ImportError:
from Queue import Empty # Py 2
from traitlets import List, Unicode, Bool, Enum, Any, Type
from nbformat.v4 import output_from_msg
from .base import Preprocessor
from ..utils.exceptions import ConversionException
from traitlets import Integer
from jupyter_client.manager import KernelManager
class CellExecutionError(ConversionException):
"""
Custom exception to propagate exceptions that are raised during
notebook execution to the caller. This is mostly useful when
    using nbconvert as a library, since it allows the caller to deal
    with failures gracefully.
"""
def __init__(self, traceback):
self.traceback = traceback
def __str__(self):
s = self.__unicode__()
if not isinstance(s, str):
s = s.encode('utf8', 'replace')
return s
def __unicode__(self):
return self.traceback
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
timeout = Integer(30, allow_none=True,
help=dedent(
"""
The time to wait (in seconds) for output from executions.
If a cell execution takes longer, an exception (TimeoutError
on python 3+, RuntimeError on python 2) is raised.
`None` or `-1` will disable the timeout. If `timeout_func` is set,
it overrides `timeout`.
"""
)
).tag(config=True)
timeout_func = Any(
default_value=None,
allow_none=True,
help=dedent(
"""
A callable which, when given the cell source as input,
returns the time to wait (in seconds) for output from cell
executions. If a cell execution takes longer, an exception
(TimeoutError on python 3+, RuntimeError on python 2) is
raised.
Returning `None` or `-1` will disable the timeout for the cell.
Not setting `timeout_func` will cause the preprocessor to
default to using the `timeout` trait for all cells. The
`timeout_func` trait overrides `timeout` if it is not `None`.
"""
)
).tag(config=True)
interrupt_on_timeout = Bool(False,
help=dedent(
"""
If execution of a cell times out, interrupt the kernel and
continue executing other cells rather than throwing an error and
stopping.
"""
)
).tag(config=True)
allow_errors = Bool(False,
help=dedent(
"""
If `False` (default), when a cell raises an error the
execution is stopped and a `CellExecutionError`
is raised.
If `True`, execution errors are ignored and the execution
is continued until the end of the notebook. Output from
exceptions is included in the cell output in both cases.
"""
)
).tag(config=True)
extra_arguments = List(Unicode())
kernel_name = Unicode('',
help=dedent(
"""
Name of kernel to use to execute the cells.
If not set, use the kernel_spec embedded in the notebook.
"""
)
).tag(config=True)
raise_on_iopub_timeout = Bool(False,
help=dedent(
"""
If `False` (default), then the kernel will continue waiting for
iopub messages until it receives a kernel idle message, or until a
timeout occurs, at which point the currently executing cell will be
skipped. If `True`, then an error will be raised after the first
timeout. This option generally does not need to be used, but may be
useful in contexts where there is the possibility of executing
notebooks with memory-consuming infinite loops.
"""
)
).tag(config=True)
shutdown_kernel = Enum(['graceful', 'immediate'],
default_value='graceful',
help=dedent(
"""
If `graceful` (default), then the kernel is given time to clean
up after executing all cells, e.g., to execute its `atexit` hooks.
If `immediate`, then the kernel is signaled to immediately
terminate.
"""
)
).tag(config=True)
kernel_manager_class = Type(
default_value=KernelManager,
config=True,
help='The kernel manager class to use.'
)
def preprocess(self, nb, resources):
"""
Preprocess notebook executing each code cell.
The input argument `nb` is modified in-place.
Parameters
----------
nb : NotebookNode
Notebook being executed.
resources : dictionary
Additional resources used in the conversion process. For example,
passing ``{'metadata': {'path': run_path}}`` sets the
execution path to ``run_path``.
Returns
-------
nb : NotebookNode
The executed notebook.
resources : dictionary
Additional resources used in the conversion process.
"""
path = resources.get('metadata', {}).get('path', '')
if path == '':
path = None
# from jupyter_client.manager import start_new_kernel
def start_new_kernel(startup_timeout=60, kernel_name='python',
**kwargs):
km = self.kernel_manager_class(kernel_name=kernel_name)
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
if self.kernel_name:
kernel_name = self.kernel_name
self.log.info("Executing notebook with kernel: %s" % kernel_name)
self.km, self.kc = start_new_kernel(
kernel_name=kernel_name,
extra_arguments=self.extra_arguments,
cwd=path)
self.kc.allow_stdin = False
try:
nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
finally:
self.kc.stop_channels()
self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Executes a single code cell. See base.py for details.
To execute all cells see :meth:`preprocess`.
"""
if cell.cell_type != 'code':
return cell, resources
outputs = self.run_cell(cell)
cell.outputs = outputs
if not self.allow_errors:
for out in outputs:
if out.output_type == 'error':
pattern = u"""\
An error occurred while executing the following cell:
------------------
{cell.source}
------------------
{out.ename}: {out.evalue}
"""
msg = dedent(pattern).format(out=out, cell=cell)
raise CellExecutionError(msg)
return cell, resources
def run_cell(self, cell):
msg_id = self.kc.execute(cell.source)
self.log.debug("Executing cell:\n%s", cell.source)
# wait for finish, with timeout
while True:
try:
if self.timeout_func is not None:
timeout = self.timeout_func(cell)
else:
timeout = self.timeout
if not timeout or timeout < 0:
timeout = None
msg = self.kc.shell_channel.get_msg(timeout=timeout)
except Empty:
self.log.error(
"Timeout waiting for execute reply (%is)." % self.timeout)
if self.interrupt_on_timeout:
self.log.error("Interrupting kernel")
self.km.interrupt_kernel()
break
else:
try:
exception = TimeoutError
except NameError:
exception = RuntimeError
raise exception("Cell execution timed out")
if msg['parent_header'].get('msg_id') == msg_id:
break
else:
# not our reply
continue
outs = []
while True:
try:
# We've already waited for execute_reply, so all output
# should already be waiting. However, on slow networks, like
# in certain CI systems, waiting < 1 second might miss messages.
# So long as the kernel sends a status:idle message when it
# finishes, we won't actually have to wait this long, anyway.
msg = self.kc.iopub_channel.get_msg(timeout=4)
except Empty:
self.log.warn("Timeout waiting for IOPub output")
if self.raise_on_iopub_timeout:
raise RuntimeError("Timeout waiting for IOPub output")
else:
break
if msg['parent_header'].get('msg_id') != msg_id:
# not an output from our execution
continue
msg_type = msg['msg_type']
self.log.debug("output: %s", msg_type)
content = msg['content']
# set the prompt number for the input and the output
if 'execution_count' in content:
cell['execution_count'] = content['execution_count']
if msg_type == 'status':
if content['execution_state'] == 'idle':
break
else:
continue
elif msg_type == 'execute_input':
continue
elif msg_type == 'clear_output':
outs = []
continue
elif msg_type.startswith('comm'):
continue
try:
out = output_from_msg(msg)
except ValueError:
self.log.error("unhandled iopub msg: " + msg_type)
else:
outs.append(out)
return outs
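# A minimal usage sketch (not part of this module; the file names and the
# "python3" kernel name are assumptions for illustration):
#
#   import nbformat
#   from nbconvert.preprocessors import ExecutePreprocessor
#
#   nb = nbformat.read("notebook.ipynb", as_version=4)
#   ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
#   nb, resources = ep.preprocess(nb, {"metadata": {"path": "."}})
#   nbformat.write(nb, "executed.ipynb")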
| lancezlin/ml_template_py | lib/python2.7/site-packages/nbconvert/preprocessors/execute.py | Python | mit | 10,816 |
import Gnuplot
class TreePlot:
def __init__(self, filename, title = None, datatitle = None,
debug = 0):
self.gp = Gnuplot.Gnuplot(debug = debug)
fp = open(filename, "r")
line = fp.readline()
while line:
line = line.strip()
if line.find('"') >= 0:
data = line.split(" ")
label = line[line.find('"')+2:-1]
self.gp('set label "%s" at %f,%f' %
(label, float(data[0]), float(data[1])))
line = fp.readline()
fp.close()
self.file = Gnuplot.File(filename)
self.gp('set data style lines')
self.gp.title(title)
self.file.set_option(title = datatitle)
def plot(self):
self.gp.plot(self.file)
def replot(self):
self.gp.replot()
def hardcopy(self, output):
self.gp.hardcopy(output)
if __name__ == '__main__':
tree = TreePlot("data.tree", title = "Sample Tree Data")
tree.plot()
raw_input()
tree.file.set_option(title = "Data Title")
tree.replot()
tree.hardcopy("/tmp/output.ps")
raw_input()
| emilydolson/forestcat | pyrobot/tools/cluster/treeplot.py | Python | agpl-3.0 | 1,148 |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
Automatic injection of bibliography entries for skimage module
"""
from ..entries import Doi, BibTeX
# If defined, would determine from which to which version of the corresponding
# module to care about
min_version = None
max_version = None
def inject(injector):
#http://scikit-image.org
injector.add('skimage', None, Doi('10.7717/peerj.453'),
description='scikit-image: Image processing in Python.',
tags=['implementation'])
| jgors/duecredit | duecredit/injections/mod_skimage.py | Python | bsd-2-clause | 874 |
""" This is a test of using PilotManagerClient
In order to run this test we need the following DBs installed:
- PilotAgentsDB
And the following services should also be on:
- Pilots
this is pytest!
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.Client.PilotManagerClient import PilotManagerClient
gLogger.setLevel('VERBOSE')
def test_PilotsDB():
pilots = PilotManagerClient()
res = pilots.addPilotTQReference(['aPilot'], 1, '/a/ownerDN', 'a/owner/Group')
assert res['OK'] is True
res = pilots.getCurrentPilotCounters({})
assert res['OK'] is True
assert res['Value'] == {'Submitted': 1}
res = pilots.deletePilots('aPilot')
assert res['OK'] is True
res = pilots.getCurrentPilotCounters({})
assert res['OK'] is True
assert res['Value'] == {}
res = pilots.addPilotTQReference(['anotherPilot'], 1, '/a/ownerDN', 'a/owner/Group')
assert res['OK'] is True
res = pilots.storePilotOutput('anotherPilot', 'This is an output', 'this is an error')
assert res['OK'] is True
res = pilots.getPilotOutput('anotherPilot')
assert res['OK'] is True
assert res['Value'] == {'OwnerDN': '/a/ownerDN',
'OwnerGroup': 'a/owner/Group',
'StdErr': 'this is an error',
'FileList': [],
'StdOut': 'This is an output'}
res = pilots.getPilotInfo('anotherPilot')
assert res['OK'] is True
assert res['Value']['anotherPilot']['AccountingSent'] == 'False'
assert res['Value']['anotherPilot']['PilotJobReference'] == 'anotherPilot'
res = pilots.selectPilots({})
assert res['OK'] is True
res = pilots.getPilotSummary('', '')
assert res['OK'] is True
assert res['Value']['Total']['Submitted'] == 1
res = pilots.getPilotMonitorWeb({}, [], 0, 100)
assert res['OK'] is True
assert res['Value']['TotalRecords'] == 1
res = pilots.getPilotMonitorSelectors()
assert res['OK'] is True
assert res['Value'] == {'GridType': ['DIRAC'],
'OwnerGroup': ['a/owner/Group'],
'DestinationSite': ['NotAssigned'],
'Broker': ['Unknown'], 'Status': ['Submitted'],
'OwnerDN': ['/a/ownerDN'],
'GridSite': ['Unknown'],
'Owner': []}
res = pilots.getPilotSummaryWeb({}, [], 0, 100)
assert res['OK'] is True
assert res['Value']['TotalRecords'] == 1
res = pilots.setAccountingFlag('anotherPilot', 'True')
assert res['OK'] is True
res = pilots.setPilotStatus('anotherPilot', 'Running')
assert res['OK'] is True
res = pilots.getPilotInfo('anotherPilot')
assert res['OK'] is True
assert res['Value']['anotherPilot']['AccountingSent'] == 'True'
assert res['Value']['anotherPilot']['Status'] == 'Running'
res = pilots.setJobForPilot(123, 'anotherPilot')
assert res['OK'] is True
res = pilots.setPilotBenchmark('anotherPilot', 12.3)
assert res['OK'] is True
res = pilots.countPilots({})
assert res['OK'] is True
# res = pilots.getCounters()
# # getPilotStatistics
res = pilots.deletePilots('anotherPilot')
assert res['OK'] is True
res = pilots.getCurrentPilotCounters({})
assert res['OK'] is True
assert res['Value'] == {}
| andresailer/DIRAC | tests/Integration/WorkloadManagementSystem/Test_PilotsClient.py | Python | gpl-3.0 | 3,395 |
"""
Django settings for example project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from distutils.util import strtobool
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kn^t*y!9lz5@p%3bcqeq8k+9irtme4hh9%!kzr&r9wual1o4%s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = strtobool(os.environ.get('DEBUG', '1'))
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'countries'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config() or {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
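# dj_database_url.config() reads the DATABASE_URL environment variable, e.g.
# (illustrative value only):
#   DATABASE_URL=postgres://user:password@localhost:5432/example
# and the expression above falls back to the local SQLite file when it is unset.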
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'example.renderers.PaginatedCSVRenderer',
),
'PAGE_SIZE': 10
}
| marcgibbons/drf_signed_auth | example/example/settings.py | Python | bsd-2-clause | 4,062 |
from __future__ import absolute_import, unicode_literals
from celery.canvas import chain as celery_chain
from celery_growthmonitor import settings
from celery_growthmonitor.models import JobHolder
from celery_growthmonitor.tasks import remove_old_jobs, start, stop
def pre(job_holder: JobHolder, *tasks):
flow = (start.s(job_holder),)
if tasks:
flow += tasks
return flow
def post(*tasks):
flow = ()
if tasks:
flow += tasks
flow += (stop.s(),)
if settings.TTL.seconds > 0:
flow += (remove_old_jobs.s(),)
return flow
def chain(job_holder: JobHolder, *tasks):
"""
Build a chain of tasks, adding monitoring and maintenance tasks at the beginning and end of the chain
Parameters
----------
job_holder : JobHolder
tasks : celery.shared_task
Returns
-------
celery.canvas.chain
"""
flow = pre(job_holder, *tasks) + post()
return celery_chain(*flow)
def chain_pre(job_holder: JobHolder, *tasks):
flow = pre(job_holder, *tasks)
return celery_chain(*flow)
def chain_post(*tasks):
flow = post(*tasks)
return celery_chain(*flow)
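# A minimal usage sketch (illustrative only; ``my_job_holder`` and the task
# signatures below are assumptions, not part of this module):
#
#   flow = chain(my_job_holder, my_app_task.s(), my_other_task.s())
#   result = flow.apply_async()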
| mbourqui/django-celery-growthmonitor | celery_growthmonitor/canvas.py | Python | gpl-3.0 | 1,153 |
#!/usr/bin/env python
import mirheo as mir
import numpy as np
import argparse
from mpi4py import MPI
parser = argparse.ArgumentParser()
parser.add_argument("--restart", action='store_true', default=False)
parser.add_argument("--ranks", type=int, nargs=3)
args = parser.parse_args()
ranks = args.ranks
domain = (4, 6, 8)
dt = 0
comm = MPI.COMM_WORLD
u = mir.Mirheo(ranks, domain, comm_ptr = MPI._addressof(comm),
debug_level=3, log_filename='log', no_splash=True,
checkpoint_every = (0 if args.restart else 5))
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
if args.restart:
ic = mir.InitialConditions.Restart("restart/")
else:
ic = mir.InitialConditions.Uniform(number_density=2)
u.registerParticleVector(pv, ic)
u.run(7, dt=dt)
rank = comm.Get_rank()
if args.restart and pv:
color = 1
else:
color = 0
comm = comm.Split(color, rank)
if args.restart and pv:
ids = pv.get_indices()
pos = pv.getCoordinates()
vel = pv.getVelocities()
data = np.hstack((np.atleast_2d(ids).T, pos, vel))
data = comm.gather(data, root=0)
if comm.Get_rank() == 0:
data = np.concatenate(data)
np.savetxt("parts.txt", data)
# TEST: restart.particle_vector
# cd restart
# rm -rf restart parts.out.txt parts.txt
# mir.run --runargs "-n 1" ./particle_vector.py --ranks 1 1 1
# mir.run --runargs "-n 1" ./particle_vector.py --ranks 1 1 1 --restart
# cat parts.txt | LC_ALL=en_US.utf8 sort > parts.out.txt
# TEST: restart.particle_vector.mpi
# cd restart
# rm -rf restart parts.out.txt parts.txt
# mir.run --runargs "-n 4" ./particle_vector.py --ranks 1 2 2
# mir.run --runargs "-n 4" ./particle_vector.py --ranks 1 2 2 --restart
# cat parts.txt | LC_ALL=en_US.utf8 sort > parts.out.txt
| dimaleks/uDeviceX | tests/restart/particle_vector.py | Python | gpl-3.0 | 1,775 |
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from lxml import etree as ET
class project_logical_framework_project(osv.Model):
_inherit = 'project.project'
_columns = {
'logical_framework': fields.one2many(
'project_logical_framework.logical_framework',
'project_id',
'Logical Framework'),
}
class project_logical_framework_logical_framework(osv.Model):
_name = 'project_logical_framework.logical_framework'
_order = "type"
def _logic_title(self, cr, uid, ids, field_name, arg, context):
res = {}
record = self.browse(cr, uid, ids, context=context)
for data in record:
res_str = dict(
self.pool.get('project_logical_framework.logical_framework').
fields_get(cr, uid, allfields=['type'], context=context)
['type']['selection'])[data.type]
res_str += "\n" + str(data.logic)
res[data.id] = res_str
return res
_columns = {
'project_id' : fields.many2one(
'project.project',
'logical_framework',
'Project'),
'type': fields.selection((
('1','Global Objectives:'),
('2','Specific Objectives:'),
('3','Results:'),
('4','Activities:')),
'Type', required="true"),
'logic': fields.text('Logic'),
'logic_title': fields.function(_logic_title, type="text"),
'intervention': fields.text('Intervention'),
'indicators': fields.text('Indicators'),
'verification': fields.text('Verification source'),
'hypothesis': fields.text('Hypothesis'),
} | stephane-/project_logical_framework | project_logical_framework.py | Python | mit | 1,730 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules."""
import numpy as np
import tensorflow.compat.v1 as tf
SECS_TO_DAYS = 60 * 60 * 24
def positional_encoding(dim, sentence_length, dtype=tf.float32):
"""Positional encoding."""
encoded_vec = np.array([
pos / np.power(10000, 2 * i / dim) # pylint: disable=g-complex-comprehension
for pos in range(sentence_length)
for i in range(dim)
])
encoded_vec[::2] = np.sin(encoded_vec[::2])
encoded_vec[1::2] = np.cos(encoded_vec[1::2])
return tf.convert_to_tensor(
encoded_vec.reshape([sentence_length, dim]), dtype=dtype)
def normalize(inputs, epsilon=1e-8, scope="ln", reuse=None):
"""Applies layer normalization.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`.
epsilon: A floating number. A very small number for preventing
ZeroDivision Error.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A tensor with the same shape and data dtype as `inputs`.
"""
with tf.variable_scope(scope, reuse=reuse):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta = tf.Variable(tf.zeros(params_shape))
gamma = tf.Variable(tf.ones(params_shape))
normalized = (inputs - mean) / ((variance + epsilon)**(.5))
outputs = gamma * normalized + beta
return outputs
def embedding(inputs,
vocab_size,
num_units,
zero_pad=True,
scale=True,
l2_reg=0.0,
scope="embedding",
with_t=False,
reuse=None):
"""Embeds a given tensor.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids to be
looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
    zero_pad: A boolean. If True, all the values of the first row (id 0) should
be constant zeros.
scale: A boolean. If True. the outputs is multiplied by sqrt num_units.
l2_reg: L2 regularization weight.
scope: Optional scope for `variable_scope`.
with_t: If True, return the embedding table.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A `Tensor` with one more rank than inputs's. The last dimensionality
should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
"""
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable(
"lookup_table",
dtype=tf.float32,
shape=[vocab_size, num_units],
# initializer=tf.contrib.layers.xavier_initializer(),
regularizer=tf.keras.regularizers.l2(l2_reg))
if zero_pad:
lookup_table = tf.concat(
(tf.zeros(shape=[1, num_units]), lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units**0.5)
if with_t:
return outputs, lookup_table
else:
return outputs
def multihead_attention(queries,
keys,
times=None,
num_units=None,
num_heads=1,
dropout_rate=0,
is_training=True,
use_prior="none",
causality=True,
scope="multihead_attention",
residual=False,
time_exp_base=None,
overlapping_chunks=None,
reuse=None,
with_qk=False):
"""Applies multihead attention.
Args:
queries: A 3d tensor with shape of [N, T_q, C_q].
keys: A 3d tensor with shape of [N, T_k, C_k].
times: A 3d tensor with shape of [N, T_q, T_k].
num_units: A scalar. Attention size.
num_heads: An int. Number of heads.
dropout_rate: A floating point number.
is_training: Boolean. Controller of mechanism for dropout.
use_prior: String. Whether to use prior for attention heads. Supported
values include: none, position.
causality: Boolean. If true, units that reference the future are masked.
scope: Optional scope for `variable_scope`.
residual: Boolean. Whether to use residual connection.
time_exp_base: A scalar. Base for exponential time intervals. Only used for
the case where use_prior='time'.
    overlapping_chunks: Boolean. Whether to use overlapping (rather than
      non-overlapping) chunks for the case where use_prior='time'.
    reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.
    with_qk: If True, return the query and key projections (Q, K) instead of
      the attention output.
  Returns:
    Output of multihead attention, a 3d tensor with shape of (N, T_q, C).
"""
tf.logging.info(
"Computing attention with prior: {} and num of heads: {}".format(
use_prior, num_heads))
with tf.variable_scope(scope, reuse=reuse):
# Set the fall back option for num_units
if num_units is None:
num_units = queries.get_shape().as_list[-1]
# pylint: disable=invalid-name
# Linear projections
# Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
# K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
# V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
Q = tf.layers.dense(queries, num_units, activation=None) # (N, T_q, C)
K = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
V = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
# pylint: enable=invalid-name
# Multiplication
outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
# Scale
outputs = outputs / (K_.get_shape().as_list()[-1]**0.5)
# Key Masking
key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
key_masks = tf.tile(
tf.expand_dims(key_masks, 1),
[1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(outputs) * (-2**32 + 1)
outputs = tf.where(tf.equal(key_masks, 0), paddings,
outputs) # (h*N, T_q, T_k)
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
tril = tf.linalg.LinearOperatorLowerTriangular(
diag_vals).to_dense() # (T_q, T_k)
masks = tf.tile(tf.expand_dims(tril, 0),
[tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(masks) * (-2**32 + 1)
outputs = tf.where(tf.equal(masks, 0), paddings,
outputs) # (h*N, T_q, T_k)
# Position/Time prior is only used in multi-head case.
if num_heads > 1:
# Scaling head weights with position prior.
if use_prior == "position":
# Each head focuses on a window of items whose size is computed below.
attn_size = int(outputs.get_shape().as_list()[-1] / num_heads)
outputs = tf.concat(
_compute_head_weights_with_position_prior(outputs, masks, paddings,
num_heads, attn_size),
axis=0) # (H*N, T_q, T_k)
tf.logging.info("After position-wise sliding window attention.")
tf.logging.info(outputs.shape)
# Scaling head weights with time prior.
elif use_prior == "time":
# Convert time deltas from seconds to days.
if times is None:
raise ValueError("Times tensor is needed.")
time_deltas = _compute_time_deltas(times) / SECS_TO_DAYS
outputs = tf.concat(_compute_head_weights_with_time_prior(
outputs, paddings, time_deltas, num_heads, time_exp_base,
overlapping_chunks), axis=0) # (H*N, T_q, T_k)
# Activation
outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
# Query Masking
query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
query_masks = tf.tile(
tf.expand_dims(query_masks, -1),
[1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
outputs *= query_masks # broadcasting. (h*N, T_q, C)
# Dropouts
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Weighted sum
outputs = tf.matmul(outputs, V_) # (h*N, T_q, C/h)
# Restore shape
outputs = tf.concat(
tf.split(outputs, num_heads, axis=0), axis=2) # (N, T_q, C)
# Residual connection
if residual:
outputs += queries
if with_qk:
return Q, K
else:
return outputs
def _compute_head_weights_with_position_prior(weights, masks, paddings,
num_heads, attn_size):
"""Computes head-specific attention weights with position prior.
This function simply masks out the weights for items if they don't belong to a
certain chunk, using a sliding window technique. I.e., head i only focuses on
ith recent "chunk_size" items with respect to the query. Note that chunks are
non-overlapping, meaning, sliding window stride is also set to attn_size.
Args:
weights: A 3d tensor with shape of [h*N, T_q, T_k].
masks: A 3d tensor with shape of [h*N, T_q, T_k].
paddings: A 3d tensor with shape of [h*N, T_q, T_k].
num_heads: An integer denoting number of chunks.
attn_size: An integer denoting the size of the sliding window.
Returns:
A list of h tensors (each shaped [N, T_q, T_k]) where tensors correspond to
chunk specific weights.
"""
# Masks is a lower triangular tensor with ones in the bottom and zeros in the
# upper section. Since chunks are allocated with respect to query position, we
# first need to count the available items prior to each query. argmin function
# would work for this, except the last query because it returns the smallest
# index in the case of ties. To make sure we have the accurate count for the
# last query, we first append a zero tensor and call the argmin function.
max_idxs = tf.argmin(tf.concat([masks, tf.zeros_like(masks)], axis=-1),
2) # (h*N, T_q)
# Split for heads.
max_idxs_split = tf.split(max_idxs, num_heads, axis=0) # (h x (N, T_q))
weights_split = tf.split(weights, num_heads, axis=0) # (h x (N, T_q, T_k))
paddings_split = tf.split(paddings, num_heads, axis=0) # (h x (N, T_q, T_k))
# Collects output weights per chunk.
chunk_outputs_list = []
for i in range(num_heads):
mask_left = tf.sequence_mask(
tf.maximum(max_idxs_split[i] - (attn_size * (i + 1)), 0),
tf.shape(weights_split[i])[2]) # (N, T_q, T_k)
mask_right = tf.sequence_mask(
tf.maximum(max_idxs_split[i] - (attn_size * i), 0),
tf.shape(weights_split[i])[2]) # (N, T_q, T_k)
mask = tf.logical_and(tf.logical_not(mask_left),
mask_right) # (N, T_q, T_k)
# Adjust weights for chunk i.
output = tf.where(mask, weights_split[i],
paddings_split[i]) # (N, T_q, T_k)
chunk_outputs_list.append(output)
return chunk_outputs_list # (h x (N, T_q, T_k))
def _compute_head_weights_with_time_prior(weights, paddings, time_deltas,
num_heads, time_exp_base,
overlapping_chunks):
"""Computes head-specific attention weights with time prior.
This function simply masks out the weights for items if they don't belong to a
certain chunk. Here, chunks are allocated based on time information. We use
exponential function--pow(time_exp_base,i)--to allocate segment boundaries.
Note that time delta values represent number of days.
Example 1: Let overlapping_chunks=False, time_exp_base=3 and num_heads=3.
1st head focuses on the items within time interval [0, pow(3,0)],
2nd head focuses on the items within time interval (pow(3,0), pow(3,1)],
3rd (last) head focuses on the items within time interval (pow(3,1), inf]
Example 2: Let overlapping_chunks=True, time_exp_base=3 and num_heads=3.
1st head focuses on the items within time interval [0, pow(3,0)],
2nd head focuses on the items within time interval [0, pow(3,1)],
3rd (last) head focuses on the items within time interval [0, inf]
Args:
weights: A 3d tensor with shape of [h*N, T_q, T_k].
paddings: A 3d tensor with shape of [h*N, T_q, T_k].
time_deltas: A 3d tensor with shape of [N, T_q, T_k].
num_heads: An integer denoting number of chunks.
time_exp_base: A scalar. Base for exponential time intervals.
overlapping_chunks: Boolean. Whether to use overlapping chunks.
Returns:
A list of h tensors (each shaped [N, T_q, T_k]) where tensors correspond to
chunk specific weights.
"""
tf.logging.info(
"Computing with time_exp_base:{} and overlapping_chunks:{}".format(
time_exp_base, overlapping_chunks))
chunk_outputs_list = []
weights_split = tf.split(weights, num_heads, axis=0)
paddings_split = tf.split(paddings, num_heads, axis=0)
ones_tensor = tf.ones_like(time_deltas) # (N, T_q, T_k)
# False in previous items and True in future items.
mask_previous_head = time_deltas < 0 # (N, T_q, T_k)
for i in range(num_heads):
if i == (num_heads - 1): # Last chunk considers all the remaining items.
# All True.
mask_next_head = tf.ones_like(time_deltas, dtype=bool) # (N, T_q, T_k)
else:
mask_next_head = tf.math.less_equal(
time_deltas, (time_exp_base**i) * ones_tensor) # (N, T_q, T_k)
mask = tf.logical_and(tf.logical_not(mask_previous_head),
mask_next_head) # (N, T_q, T_k)
output = tf.where(mask, weights_split[i],
paddings_split[i]) # (N, T_q, T_k)
chunk_outputs_list.append(output)
# Update previous mask for non-overlapping chunks.
if not overlapping_chunks:
mask_previous_head = mask_next_head
return chunk_outputs_list
def _compute_time_deltas(times):
"""This function computes time deltas between items.
It is important to note that given timestamps are for queries. Hence, we need
to consider that while calculating the time deltas between queries and items.
Example: For items: [<PAD>, 1, 2, 3] and queries: [q1, q2, q3, q4], the times
vector is [t1, t2, t3, t4]. Then, the time deltas will be:
[
[t1, 0, t1-t2, t1-t3], # time deltas for query 1
[t2, t2-t1, 0, t2-t3], # time deltas for query 2
[t3, t3-t1, t3-t2, 0], # time deltas for query 3
[t4, t4-t1, t4-t2, t4-t3] # time deltas for query 4
]
Args:
times: A 2d tensor with shape of [N, T_q].
Returns:
A 3d tensor with shape of [N, T_q, T_q].
"""
t1 = tf.tile(tf.expand_dims(times, 2), [1, 1, tf.shape(times)[1]])
t2 = tf.tile(tf.expand_dims(times, 1), [1, tf.shape(times)[1], 1])
time_deltas = t1 - t2 # (N, T_q, T_q)
time_deltas = tf.concat([tf.expand_dims(times, 2), time_deltas],
2) # (N, T_q, 1+T_q)
time_deltas = time_deltas[:, :, :-1] # (N, T_q, T_q)
return time_deltas
# pylint: disable=dangerous-default-value
def feedforward(inputs,
num_units=[2048, 512],
scope="multihead_attention",
dropout_rate=0.2,
is_training=True,
reuse=None):
"""Point-wise feed forward net.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
dropout_rate: Dropout rate.
is_training: Whether to run in training mode.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
"""
with tf.variable_scope(scope, reuse=reuse):
# Inner layer
params = {
"inputs": inputs,
"filters": num_units[0],
"kernel_size": 1,
"activation": tf.nn.relu,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Readout layer
params = {
"inputs": outputs,
"filters": num_units[1],
"kernel_size": 1,
"activation": None,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Residual connection
outputs += inputs
# Normalize
# outputs = normalize(outputs)
return outputs
# pylint: disable=dangerous-default-value
def query_feedforward(inputs,
num_units,
scope="item_and_query_combined_embedding",
dropout_rate=0,
is_training=True,
residual=False,
reuse=None):
"""Point-wise feed forward net for query-item encoder.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
dropout_rate: Dropout rate.
is_training: Whether to run in training mode.
residual: Whether to use residual connections.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
"""
with tf.variable_scope(scope, reuse=reuse):
outputs = tf.nn.relu(inputs)
for units in num_units:
params = {
"inputs": outputs,
"filters": units,
"kernel_size": 1,
"activation": None,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs,
rate=dropout_rate,
training=tf.convert_to_tensor(is_training))
# Residual connection
if residual:
outputs += inputs
return outputs
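# A minimal usage sketch (illustrative only; the sequence length, vocabulary
# size, and placeholder below are assumptions, not part of this module):
#
#   item_ids = tf.placeholder(tf.int32, [None, 50])             # (N, T)
#   seq = embedding(item_ids, vocab_size=10000, num_units=64)   # (N, T, 64)
#   seq = multihead_attention(queries=normalize(seq), keys=seq,
#                             num_units=64, num_heads=1,
#                             causality=True, is_training=True)
#   seq = feedforward(normalize(seq), num_units=[64, 64])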
| google-research/google-research | multi_resolution_rec/modules.py | Python | apache-2.0 | 19,899 |
#!/usr/bin/env python
"""Gets the IP for a given hostname."""
import sys
import argparse
import socket
# ==============================================================================
__version__ = "0.1"
__copyright__ = "Copyright 2017, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = ' \
# Copyright 2014-2017 devops.center llc \
# \
# Licensed under the Apache License, Version 2.0 (the "License"); \
# you may not use this file except in compliance with the License. \
# You may obtain a copy of the License at \
# \
# http://www.apache.org/licenses/LICENSE-2.0 \
# \
# Unless required by applicable law or agreed to in writing, software \
# distributed under the License is distributed on an "AS IS" BASIS, \
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \
# See the License for the specific language governing permissions and \
# limitations under the License. \
# '
__status__ = "Development"
# ==============================================================================
def checkArgs():
"""Check the command line arguments."""
parser = argparse.ArgumentParser(
description=('Gets the IP for the given hostname'))
parser.add_argument('-n', '--nameOfHost', help='The fully qualified '
'name and domain of the host that you want the '
'IP for.',
required=True)
args = parser.parse_args()
retHostname = None
if args.nameOfHost:
retHostname = args.nameOfHost
return(retHostname)
def main(argv):
"""Main code goes here."""
theHostname = checkArgs()
try:
ipReturned = socket.gethostbyname(theHostname)
print(ipReturned)
except socket.gaierror:
print("ERROR")
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| devopscenter/dcUtils | checkDNSforIP.py | Python | apache-2.0 | 2,325 |
#
# Copyright (c) 2018 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
default_app_config = 'pdc.apps.unreleasedvariant.apps.UnreleasedVariantConfig'
| release-engineering/product-definition-center | pdc/apps/unreleasedvariant/__init__.py | Python | mit | 188 |
def func():
value = "not-none"
# pylint: disable=unused-argument1
if value is not None:
print("Not none")
# pylint: disable=unused-argument2
else:
print("None") | siosio/intellij-community | python/testData/intentions/PyInvertIfConditionIntentionTest/commentsPylintBoth_after.py | Python | apache-2.0 | 197 |
from .gmm_bayes import GMMBayes
| astroML/astroML | astroML/classification/__init__.py | Python | bsd-2-clause | 32 |
idade = 12
if idade < 4:
preco = 0
elif idade < 18:
preco = 5
elif idade < 65:
preco = 10
else:
preco = 5
print('Seu custo de admissão e R$' + str(preco) + '.')
'''
One more elif block was added for ages below 65.
If the age is greater than 18 and less than 65, that
block is executed; if the age is above 65, the final
else block is executed.
''' | zirou30/python_student | 82.py | Python | gpl-3.0 | 387 |
# Notes on using python requests to post .NET forms:
# http://stackoverflow.com/questions/24975955/sending-an-asp-net-post-with-pythons-requests
import BeautifulSoup
import re
import requests
NOT_FOUND_MESSAGE = 'No Voter Registration information could be found for the data provided.'
URL = 'https://www.pavoterservices.state.pa.us/Pages/voterregistrationstatus.aspx'
STATUS_REGEX = re.compile(r'^(.+?)\(Date of Birth: (\d+/\d+/\d+)\) is registered to vote in.+?Status :(.+?)Party :(.+?)If you wish')
WARD_REGEX = re.compile(r'^Polling Place Address for (.+?) WD (\d+) DIV (\d+)$')
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2342.2 Safari/537.36',
'Accept': '*/*',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8'
}
def get_registration(county, first_name, middle_name, last_name, dob):
session = requests.Session()
frm_response = session.get(URL, headers=HEADERS)
soup = BeautifulSoup.BeautifulSoup(frm_response.content)
# magic .NET hidden fields
viewstate = soup.findAll(attrs={'name':'__VIEWSTATE'})
valid = soup.findAll(attrs={'name':'__EVENTVALIDATION'})
gen = soup.findAll(attrs={'name':'__VIEWSTATEGENERATOR'})
# drop-down with counties
county_dropdown = soup.find(id='ctl00_ContentPlaceHolder1_CountyCombo')
opts = county_dropdown.findAll('option')
# first option is not a county; loop through the rest and map county name to dropdown val
# (going to submit dropdown val with form)
opts_rng = range(1, len(opts))
counties = {opts[i].text: int(opts[i]['value']) for i in opts_rng}
county_val = counties.get(county.upper())
# actual form data to submit
frm = {'ctl00$ContentPlaceHolder1$CountyCombo': county_val,
'ctl00$ContentPlaceHolder1$SuffixCombo': None,
'ctl00$ContentPlaceHolder1$btnContinue': 'Continue',
'ctl00$ContentPlaceHolder1$txtVRSOpt2Item2': first_name,
'ctl00$ContentPlaceHolder1$txtVRSOpt2Item3': last_name,
'ctl00$ContentPlaceHolder1$txtVRSOpt2Item4': dob,
'ctl00$ContentPlaceHolder1$txtVRSOpt2Item5': middle_name,
'__EVENTTARGET': None,
'__EVENTARGUMENT': None,
'__VIEWSTATE': viewstate[0]['value'],
'__EVENTVALIDATION': valid[0]['value'],
'__VIEWSTATEGENERATOR': gen[0]['value']
}
reg_response = session.post(url=URL, data=frm, headers={'Referer': frm_response.url})
reg = BeautifulSoup.BeautifulSoup(reg_response.content)
# span with name, dob, party, and registration status
status_span = reg.find(id='ctl00_ContentPlaceHolder1_regstatus')
if not status_span:
# check for span saying no info found
not_found_span = reg.find(id='ctl00_ContentPlaceHolder1_lblNotFound')
if not_found_span:
return {'notFound': NOT_FOUND_MESSAGE}
else:
return {}
status = STATUS_REGEX.search(status_span.text)
found_name, found_dob, found_status, found_party = status.groups()
# section with county name, ward, and division ID
ward_section = reg.find(id='ctl00_ContentPlaceHolder1_PollingPlaceAddressLabel')
ward_match = WARD_REGEX.match(ward_section.text)
county_abbr, ward, div = ward_match.groups()
# polling place info
place_name_section = reg.find(id='ctl00_ContentPlaceHolder1_DescriptionRowCell')
polling_place_name = place_name_section.text
polling_addr_section = reg.find(id='ctl00_ContentPlaceHolder1_addRow1Cell1')
polling_place_addr = polling_addr_section.text
polling_city_section = reg.find(id='ctl00_ContentPlaceHolder1_PollingPlaceCityLabel')
polling_place_city = polling_city_section.text
polling_state_section = reg.find(id='ctl00_ContentPlaceHolder1_PollingPlaceStateLabel')
polling_place_state = polling_state_section.text
# accessibility (get alt text from image)
acc = reg.find(id='ctl00_ContentPlaceHolder1_AccessibilityImage')
access_text = acc.attrMap['alt']
# object to return with the scraped data on it
response = {
'name': found_name,
'dob': found_dob,
'status': found_status,
'party': found_party,
'county': county_abbr,
'ward': ward,
'division': div,
'polling_place': {
'name': polling_place_name,
'address': {
'street': polling_place_addr,
'city': polling_place_city,
'state': polling_place_state
},
'accessibility': access_text
}
}
return response
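# Illustrative usage sketch (not part of the original scraper): the county and
# personal details below are made-up placeholders, and a live request to the PA
# voter services site is needed before this returns real data.
if __name__ == '__main__':
    example = get_registration('PHILADELPHIA', 'Jane', '', 'Doe', '01/01/1980')
    print(example)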
| flibbertigibbet/pavoterservices-passthrough | scrape_voter_registration.py | Python | gpl-3.0 | 4,620 |
import os
import tempfile
import mimetypes
import sys
from hashlib import sha224
from .Ressource import Ressource, Metadata
from .Document import Document
from .Text import Text
from .Video import Video
from .Audio import Audio
from .Image import Image
from .Directory import Directory
sys.setrecursionlimit( 10000 )
mimetypes.init()
DEFAULT_CONTENT_TYPE = "application/octet-stream"
map_contentTypes2Ressource={
"application/javascript" : Text,
"application/ogg" : Video,
"application/xhtml+xml" : Text,
"application/json" : Text,
"application/xml" : Text,
"application/x-directory" : Directory,
"inode/directory" : Directory,
"text/directory" : Directory
}
def contentType2Ressource(contentType):
if contentType in map_contentTypes2Ressource :
return map_contentTypes2Ressource[ contentType ]()
token = contentType.split("/")[0]
if( token == "audio"):
return Audio()
elif( token == "image"):
return Image()
elif( token == "text"):
return Text()
elif( token == "video"):
return Video()
return Document()
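# For illustration, the dispatch above yields e.g.:
#   contentType2Ressource("text/plain")      -> Text instance
#   contentType2Ressource("inode/directory") -> Directory instance
#   contentType2Ressource("application/pdf") -> Document instance (generic fallback)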
def hashfile_aux(afile, hasher, blocksize=65536):
afile.seek(0)
buff = afile.read(blocksize)
while buff :
hasher.update( buff )
buff = afile.read(blocksize)
afile.seek(0)
def hashfile(afile, hasher, blocksize=65536):
hashfile_aux(afile, hasher, blocksize)
return hasher.hexdigest()
## os.scandir for python 3.4
def hashdir(path, hasher, blocksize=65536):
	for dirpath, dirs, files in os.walk(path):
		for filename in files:
			with open(os.path.join(dirpath, filename), "rb") as afile:
				hashfile_aux(afile, hasher, blocksize)
	return hasher.hexdigest()
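# For example, hashfile(open(__file__, "rb"), sha224()) returns the hex digest of
# this module's source, while hashdir(".", sha224()) folds every file under the
# current directory into a single digest.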
def filesize(afile):
current = afile.tell()
afile.seek(0, os.SEEK_END)
size = afile.tell()
afile.seek(current, os.SEEK_SET)
return size
def dirsize(path):
	size = 0
	# os.walk already descends into every subdirectory, so summing the files it
	# yields is enough; recursing on dirs here as well would double-count them.
	for dirpath, dirs, files in os.walk(path):
		for filename in files:
			size += os.path.getsize( os.path.join(dirpath, filename) )
	return size
#def dirsize(path):
#size = 0
#for entry in os.scandir(path):
#if entry.is_file():
#size += os.path.getsize( os.path.join(path, entry.name) )
#elif entry.is_dir() and entry.name != "." and entry.name != "..":
#size += dirsize( os.path.join(path, entry.name) )
#return size
def build(tmp, contentType):
"""
	tmp : tempfile.TemporaryDirectory or str => treated as a directory
	      file object (e.g. tempfile.TemporaryFile or open(..., "rb")) => treated as a file
	contentType : MIME type string, optionally followed by ";charset=..."
"""
if type( tmp ) == tempfile.TemporaryDirectory or type( tmp ) == str :
if type( tmp ) == str :
h_sha224 = hashdir(tmp, sha224())
size = dirsize(tmp)
else:
h_sha224 = hashdir(tmp.name, sha224())
size = dirsize(tmp.name)
else :
h_sha224 = hashfile(tmp, sha224())
size = filesize(tmp)
ct_list = contentType.split(";")
contentType = ct_list[0].strip()
charset = ( ct_list[1].strip() if len(ct_list)>1 else "")
ressource = contentType2Ressource( contentType )
ressource._tmp = tmp
metadata = Metadata( ressource )
metadata["contentType"] = contentType
metadata["sha224"] = h_sha224
metadata["size"] = size
if type( tmp ) == tempfile.TemporaryDirectory or type( tmp ) == str :
path = (tmp if type(tmp) == str else tmp.name)
		# recursive construction for directories; will crash if the directory depth exceeds the recursion limit (raised to 10000 above)
local_path, local_dirs, local_files = os.walk(path , topdown=True).__next__() #only the first layer
for local_dir in local_dirs:
l_ressource = build( os.path.join(local_path, local_dir),
"inode/directory")
ressource.add_child( l_ressource )
for local_file in local_files:
location = os.path.join(local_path, local_file)
local_ct = mimetypes.guess_type(location, strict=False)[0]
if not local_ct :
local_ct = "application/octet-stream"
l_ressource = build( open(location, "rb"), local_ct)
ressource.add_child( l_ressource )
return ressource
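# Minimal usage sketch (kept as an uncalled helper because this module is meant
# to be imported as part of a package): builds a Ressource for an arbitrary
# readable file path, guessing its content type and falling back to the generic
# default used above. The helper name is illustrative, not part of the API.
def example_build_from_path(path):
	guessed = mimetypes.guess_type(path, strict=False)[0] or DEFAULT_CONTENT_TYPE
	return build(open(path, "rb"), guessed)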
| athena-project/Hermes | src/RessourceFactory.py | Python | gpl-2.0 | 4,024 |
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except (ImportError):
pass # It's ok to not import because this is only necessary to upload results to BQ.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollsig', 'poll', 'poll-cv'],
# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool
'mac': ['poll'],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None, environ={}, cpu_cost=1.0, flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
flake_retries=5 if flaky or args.allow_flakes else 0,
timeout_retries=3 if args.allow_flakes else 0)
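# Illustrative sketch (not executed here): the language classes below build their
# JobSpecs through a Config picked from _CONFIGS, roughly like
#   spec = _CONFIGS['opt'].job_spec(['bins/opt/some_test'], shortname='some_test')
# Note that job_spec() consults the global `args` for retry settings, so it is
# only meaningful after the command-line parsing further down in this file.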
def get_c_tests(travis, test_lang) :
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [tgt
for tgt in js
if tgt['language'] == test_lang and
platform_string() in tgt[platforms_str] and
not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple(
'_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_python_pattern_function(major=major, minor=minor, bits=bits)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name,
config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
_pypy_pattern_function(major=major)] + [
name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
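# For illustration, on non-Windows hosts _python_pattern_function(major='2',
# minor='7', bits='64') yields 'python2.7', and _pypy_pattern_function(major='3')
# yields 'pypy3'; on Windows the result is a /c/PythonXY-style path instead.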
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
if self.args.compiler == 'cmake':
_check_arch(self.args.arch, ['default'])
self._use_cmake = True
self._docker_distro = 'jessie'
self._make_options = []
elif self.platform == 'windows':
self._use_cmake = False
self._make_options = [_windows_toolset_option(self.args.compiler),
_windows_arch_option(self.args.arch)]
else:
self._use_cmake = False
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
if target.get('uses_polling', True)
else ['all'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY': polling_strategy,
'GRPC_VERBOSITY': 'DEBUG'}
        resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
timeout_scaling = 1
if polling_strategy == 'poll-cv':
timeout_scaling *= 5
if polling_strategy in target.get('excluded_poll_engines', []):
continue
# Scale overall test timeout if running under various sanitizers.
config = self.args.config
if ('asan' in config
or config == 'msan'
or config == 'tsan'
or config == 'ubsan'
or config == 'helgrind'
or config == 'memcheck'):
timeout_scaling *= 20
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
if self._use_cmake:
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
else:
binary = 'vsprojects/%s%s/%s.exe' % (
'x64/' if self.args.arch == 'x64' else '',
_MSBUILD_CONFIG[self.config.build_config],
target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s %s' % (' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
out.append(self.config.job_spec(cmdline,
shortname=' '.join(
pipes.quote(arg)
for arg in cmdline) +
shortname_ext,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
'check_epollexclusive']
def make_options(self):
    return self._make_options
def pre_build_steps(self):
if self._use_cmake:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
else:
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_c.bat']]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_make_options(self, version_suffix=''):
return ['CC=clang%s' % version_suffix,
'CXX=clang++%s' % version_suffix,
'LD=clang%s' % version_suffix,
'LDXX=clang++%s' % version_suffix]
def _gcc_make_options(self, version_suffix):
return ['CC=gcc%s' % version_suffix,
'CXX=g++%s' % version_suffix,
'LD=gcc%s' % version_suffix,
'LDXX=g++%s' % version_suffix]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and make options to use for given compiler."""
if not use_docker and not _is_use_docker_child():
_check_compiler(compiler, ['default'])
if compiler == 'gcc4.9' or compiler == 'default':
return ('jessie', [])
elif compiler == 'gcc4.8':
return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.4':
# on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
return ('ubuntu1404', self._clang_make_options())
elif compiler == 'clang3.5':
return ('jessie', self._clang_make_options(version_suffix='-3.5'))
elif compiler == 'clang3.6':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
class NodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6',
'node7', 'node8',
'electron1.3', 'electron1.6'])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
if self.platform == 'windows':
return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
else:
run_script = 'run_node'
if self.runtime == 'electron':
run_script += '_electron'
return [self.config.job_spec(['tools/run_tests/helper_scripts/{}.sh'.format(run_script),
self.node_version],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
build_script = 'pre_build_node'
if self.runtime == 'electron':
build_script += '_electron'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
      if self.config.build_config == 'dbg':
config_flag = '--debug'
else:
config_flag = '--release'
return [['tools\\run_tests\\helper_scripts\\build_node.bat',
config_flag]]
else:
build_script = 'build_node'
if self.runtime == 'electron':
build_script += '_electron'
# building for electron requires a patch version
self.node_version += '.0'
return [['tools/run_tests/helper_scripts/{}.sh'.format(build_script),
self.node_version]]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
'name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
return [self.config.job_spec(
config.run,
timeout_seconds=5*60,
environ=dict(list(environment.items()) +
[('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
shortname='%s.test.%s' % (config.name, suite_name),)
for suite_name in tests_json
for config in self.pythons]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
    if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
def python_manager_name(self):
if self.args.compiler in ['python3.5', 'python3.6']:
return 'pyenv'
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'jessie'
def _get_pythons(self, args):
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
venv_relative_python, toolchain, runner)
python27_config = _python_config_generator(name='py27', major='2',
minor='7', bits=bits,
config_vars=config_vars)
python34_config = _python_config_generator(name='py34', major='3',
minor='4', bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35', major='3',
minor='5', bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36', major='3',
minor='6', bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy', major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3', major='3',
config_vars=config_vars)
if args.compiler == 'default':
if os.name == 'nt':
return (python27_config,)
else:
return (python27_config, python34_config,)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.4':
return (python34_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
timeout_seconds=10*60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['coreclr', 'default'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
self._make_options = []
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'jessie'
if self.platform == 'mac':
# TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
self._make_options = ['EMBED_OPENSSL=true']
if self.args.compiler != 'coreclr':
# On Mac, official distribution of mono is 32bit.
self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
else:
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp1.0'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
assembly_subdir,
assembly,
assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
specs.append(self.config.job_spec(cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file,
'-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*',
'-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' % assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
    return self._make_options
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
_docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
return [
self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
timeout_seconds=60*60,
shortname='objc-tests',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
self.config.job_spec(['src/objective-c/tests/build_example_test.sh'],
timeout_seconds=30*60,
shortname='objc-examples-build',
environ=_FORCE_ENVIRON_FOR_WRAPPERS),
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['interop_server']
def make_options(self):
return []
def build_steps(self):
return [['src/objective-c/tests/build_tests.sh']]
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ={'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
return [self.config.job_spec(cmd['script'].split(),
timeout_seconds=30*60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
class NodeExpressLanguage(object):
"""Dummy Node express test target to enable running express performance
benchmarks"""
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default', 'node0.12',
'node4', 'node5', 'node6'])
if self.args.compiler == 'default':
self.node_version = '4'
else:
# Take off the word "node"
self.node_version = self.args.compiler[4:]
def test_specs(self):
return []
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_node.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_node.sh', self.node_version]]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
def __str__(self):
return 'node_express'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'node': NodeLanguage(),
'node_express': NodeExpressLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc' : ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print('Architecture %s does not match current runtime architecture.' % arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' % args.arch)
sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxilary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
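# Examples of the parsing above: runs_per_test_type('3') returns 3,
# runs_per_test_type('inf') returns 0 (meaning "run forever"), and inputs such
# as '0' or 'abc' raise argparse.ArgumentTypeError.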
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
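# e.g. isclose(100.0, 100.0) and isclose(100.0, 100.0 + 1e-10) are True, while
# isclose(99.0, 100.0) is False; this is used below to detect --sample_percent
# values that effectively mean "run everything".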
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c', '--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f', '--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES.keys()),
nargs='+',
default=['all'])
argp.add_argument('-S', '--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
choices=['default', 'x86', 'x64'],
default='default',
help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
choices=['default',
'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
'vs2013', 'vs2015',
'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6',
'coreclr',
'cmake'],
default='default',
help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--iomgr_platform',
choices=['native', 'uv'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
help='Update some submodules before building. If any are updated, also run generate_projects. ' +
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--quiet_success',
default=False,
action='store_const',
const=True,
help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
'Useful when running many iterations of each test (argument -n).')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
args = argp.parse_args()
if args.force_default_poller:
_POLLING_STRATEGIES = {}
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print('WARNING: may need to regenerate projects, but since we are not on')
print(' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
if 'all' in args.language:
lang_list = _LANGUAGES.keys()
else:
lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
for bad in ['objc', 'sanity']:
if bad in lang_list:
lang_list.remove(bad)
languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
l.configure(run_config, args)
language_make_options=[]
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print('languages with custom make options cannot be built simultaneously with other languages')
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
if 'gcov' in args.config:
dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
print ('Using multilang_jessie_x64 docker image for code coverage for '
'all languages.')
else:
print ('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
if makefile.startswith('cmake/build/'):
return [jobset.JobSpec(['cmake', '--build', '.',
'--target', '%s' % target,
'--config', _MSBUILD_CONFIG[cfg]],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets]
extra_args = []
# better do parallel compilation
# empirically /m:2 gives the best performance/price and should prevent
# overloading the windows workers.
extra_args.extend(['/m:2'])
# disable PDB generation: it's broken, and we don't need it during CI
extra_args.extend(['/p:Jenkins=true'])
return [
jobset.JobSpec([_windows_build_bat(args.compiler),
'vsprojects\\%s.sln' % target,
'/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
extra_args +
language_make_options,
shell=True, timeout_seconds=None)
for target in targets]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-j', '%d' % args.jobs] +
targets,
cwd='cmake/build',
timeout_seconds=None)]
if targets:
return [jobset.JobSpec([os.getenv('MAKE', 'make'),
'-f', makefile,
'-j', '%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
'CONFIG=%s' % cfg,
'Q='] +
language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) +
targets,
timeout_seconds=None)]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
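# e.g. build_step_environ('dbg') -> {'CONFIG': 'dbg', 'MSBUILD_CONFIG': 'Debug'},
# while a config with no MSBuild mapping (such as 'asan') only carries CONFIG.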
build_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(urllib.request.urlopen(
'http://localhost:%d/version_number' % legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen(
'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
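# For instance, two JobResults where one of them retried twice and recorded one
# failure yield (num_runs, num_failures) == (4, 1): 2 base runs + 2 retries.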
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
try:
subprocess.check_call('bins/%s/check_epollexclusive' % args.config)
return True
except subprocess.CalledProcessError, e:
return False
except OSError, e:
# For languages other than C and Windows the binary won't exist
return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
check_cancelled, newline_on_success, xml_report=None, build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(
build_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(
spec
for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
    # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent/100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs, check_cancelled, newline_on_success=newline_on_success,
travis=args.travis, maxjobs=args.jobs,
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success, max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message(
'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
if xml_report and resultset:
report_utils.render_junit_xml_report(resultset, xml_report,
suite_name=args.report_suite_name)
number_failures, _ = jobset.run(
post_tests_steps, maxjobs=1, stop_on_failure=True,
newline_on_success=newline_on_success, travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only)
    success = not errors
    if not previous_success and success:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
| vsco/grpc | tools/run_tests/run_tests.py | Python | bsd-3-clause | 56,812 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-21 06:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Pentagram', '0007_auto_20160720_0929'),
]
operations = [
migrations.RenameModel(
old_name='Comment',
new_name='Comments',
),
migrations.AlterField(
model_name='photo',
name='photo',
field=models.ImageField(default=2, upload_to=''),
preserve_default=False,
),
]
| danielburz/pentagram | practicap5/Pentagram/migrations/0008_auto_20160721_0946.py | Python | gpl-3.0 | 605 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - mailtranslators script
@copyright: 2004-2007 MoinMoin:ThomasWaldmann
@license: GPL, see COPYING for details
"""
import sys
from MoinMoin import i18n
from MoinMoin.mail.sendmail import sendmail
from MoinMoin.script import MoinScript
class PluginScript(MoinScript):
"""\
Purpose:
========
This tool allows you to have a message read in from standard input, and
then sent to all translators via email. If you use %(lang)s in the message
it will be replaced with the appropriate language code for the translator.
Detailed Instructions:
======================
General syntax: moin [options] maint mailtranslators [mailtranslators-options]
[options] usually should be:
--config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
[mailtranslators-options] see below:
    0. To send an email to all translators, from john@smith.com, and with a subject
of 'Please update your translations!' and a body of 'Please update your language,
%(lang)s'
moin ... maint mailtranslators --from-address john@smith.com --subject 'Please update your translations!'
Please update your language, %(lang)s
^D
"""
def __init__(self, argv, def_values):
MoinScript.__init__(self, argv, def_values)
self.parser.add_option(
"-f", "--from-address", dest="from_address",
help="use as from: for email."
)
self.parser.add_option(
"-s", "--subject", dest="subject",
help="use as subject: for email."
)
def mainloop(self):
self.init_request()
request = self.request
from_address = unicode(self.options.from_address or "tw-public@gmx.de")
subject = unicode(self.options.subject or "MoinMoin i18n notification")
text_template = unicode(sys.stdin.read())
languages = i18n.wikiLanguages()
langs = languages.keys()
langs.remove('en') # nothing to do for english, so remove it
#langs = ['de', ] # for testing
if len(text_template) > 10: # do not send mails w/o real content
for lang in langs:
to_address = languages[lang]['last-translator']
rc = None
if to_address and '***vacant***' not in to_address:
text = text_template % locals()
rc = sendmail(request, [to_address], subject, text, mail_from=from_address)
print lang, repr(from_address), repr(to_address), subject, repr(rc)
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/script/maint/mailtranslators.py | Python | mit | 2,609 |
# -*- coding: utf-8 -*-
import sqlite3
class User:
def login(self, email, password):
db = sqlite3.connect("news.db")
with db:
cursor = db.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY,email TEXT, username TEXT, password TEXT, tags TEXT);")
cursor.execute("SELECT * IN users WHERE email = ?, password = ?",(email,password))
user_info = cursor.featchone()
if user_info:
self.id = user_info[0]
self.email = user_info[1]
self.name = user_info[2]
self.tags = user_info[4].split(";")
self.exist = True
def info(self, user_id):
db = sqlite3.connect("news.db")
with db:
cursor = db.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY,email TEXT, username TEXT, password TEXT, tags TEXT);")
cursor.execute("SELECT * IN users WHERE id = ?",(str(user_id),))
user_info = cursor.featchone()
if user_info:
self.id = user_info[0]
self.email = user_info[1]
self.name = user_info[2]
self.tags = user_info[4].split(";")
self.exist = False
def register(self, user_id):
db = sqlite3.connect("news.db")
with db:
cursor = db.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY,email TEXT, username TEXT, password TEXT, tags TEXT);")
cursor.execute("SELECT * IN users WHERE id = ?",(str(user_id),))
user_info = cursor.featchone()
if user_info:
self.id = user_info[0]
self.email = user_info[1]
self.name = user_info[2]
self.tags = user_info[4].split(";")
self.exist = False
user = {
"id":1,
"nickname": "Alxmamaev",
"avatar": "https://pp.vk.me/c631117/v631117884/397fc/7RA23642ai8.jpg",
"description": "Питонист, люблю смотреть в монитор :D",
"tags": ["Технологии","Политика","всякое","Программирование","Кино","Стартапы"]
}
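# Illustrative usage (hypothetical credentials; expects a local news.db whose
# users table contains a matching row):
if __name__ == "__main__":
    u = User()
    u.login("alxmamaev@example.com", "secret")
    if getattr(u, "exist", False):
        print(u.name)
        print(u.tags)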
| alxmamaev/microblog | app/models.py | Python | mit | 2,307 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coral Compatibility Test Suite.
The Coral Compatibility Test Suite is intended to validate that partner hardware
operates as expected. It runs a variety of long-running tests and benchmarks.
The output is provided on stdout as well as to a file, passed in with the
--output argument (by default cts.txt).
In order to be approved for Coral branding, all tests must pass.
"""
import argparse
import os
import platform
import subprocess
import ssl
import sys
from shutil import unpack_archive
from tempfile import NamedTemporaryFile
from urllib.request import urlopen
CTS_HEADER = """#####################################################
Coral Compatibility Test Suite
#####################################################\n\n"""
SECTION_HEADER = "-----------------------------------------------------\n"
class TestSuite():
"""Helper class for running tests and storing results.
Attributes:
results: A dictionary of tests and their results.
file_path: Location of file (absolute path).
file: File handle for test output.
tpus: Number of TPUs detected on the system.
pci: At least one TPU is connected over PCIe.
usb: At least one TPU is connected over USB.
thermals: A dictionary of tests and the max recorded temperature.
"""
def __init__(self, file_path):
self.results = dict()
self.file_path = file_path
self.file = open(file_path, "w")
self.tpus = 0
self.pci = False
self.usb = False
self.thermals = dict()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self._write_summary()
def _file_print(self, message):
"""Helper function that prints to both the console and a file.
Args:
message: A string containing the message.
"""
self.file.write(message)
self.file.flush()
sys.stdout.write(message)
def _run_linux_system_command(self, header, command, output_parse=""):
"""Helper function that runs a linux command.
Args:
header: Header string for the section
command: Command and args to be passed to Popen
output_parse: String used to parse output to useful data.
"""
self._file_print("\n***** " + header + " *****\n")
try:
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
for line in proc.stdout:
if not output_parse or output_parse in line:
self._file_print(line)
proc.wait()
except Exception as e:
self._file_print(str(e) + "\n")
def _write_thermal_summary(self):
self._file_print("\n" + SECTION_HEADER)
self._file_print("Temperatures During Tests" + "\n")
self._file_print(SECTION_HEADER)
for test in self.thermals:
self._file_print(test + ": " + str(self.thermals[test]) + "\n")
self._file_print("\n")
def _write_summary(self):
"""Writes the summary.
Generates a table of the complete results of the test, and provides
a final overall Pass/Fail. The summary is printed on the console and
added to the beginning of the output file.
"""
if self.thermals:
self._write_thermal_summary()
# In order to prepend the summary, closes the file, creates a new
# temporary file with the header, and copies in the old contents.
self.file.close()
temp_path = self.file_path + ".tmp"
summary = CTS_HEADER
with open(self.file_path, "r") as file:
with open(temp_path, "w") as temp_file:
overall_passed = True
for test in self.results:
summary += (test + ": " +
("Passed" if self.results[test] is True else "Failed") + "\n")
overall_passed = overall_passed and self.results[test]
if self.thermals:
max_temp = max(self.thermals.values())
summary += "\nMax Temperature (C): " + str(max_temp)
summary += "\nOverall Compatibility: " + \
("Passed" if overall_passed is True else "Failed") + "\n"
print("\n" + summary)
temp_file.write(summary)
temp_file.write("\n\n")
temp_file.write(file.read())
os.rename(temp_path, self.file_path)
def _read_temperatures(self, current_test):
command = ["cat /sys/class/apex/apex_*/temp"]
temperatures = []
try:
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
for line in proc.stdout:
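# The apex sysfs nodes appear to report millidegrees Celsius; convert to degrees C.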
temperatures.append(float(line) / 1000.0)
proc.wait()
except Exception as e:
pass
if temperatures:
self.thermals[current_test] = max(temperatures)
def run_test(self, test):
"""Runs a given test.
Runs a given test, providing output to the console and output file. The
results are stored in a dictionary with the key being the test name and
the value as a Boolean indicating if the test passed.
Args:
test: The name / relative path of the test.
"""
current_test = test[0]
self._file_print("\n" + SECTION_HEADER)
self._file_print(current_test + "\n")
self._file_print(SECTION_HEADER)
test[0] = os.getcwd() + "/" + current_test
try:
proc = subprocess.Popen(
test, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
for line in proc.stdout:
self._file_print(line)
proc.wait()
except Exception as e:
self._file_print(str(e) + "\n")
self.results[current_test] = False
return
if self.pci and sys.platform == "linux":
self._read_temperatures(current_test)
if proc.returncode:
self.results[current_test] = False
else:
self.results[current_test] = True
def detect_tpus(self):
"""Detects number of TPUs.
Runs lstpu, which outputs the paths of TPUs on the system.
Returns:
An integer that indicates the number of TPUs detected.
"""
test = "lstpu"
test_path = os.path.join(os.getcwd(), test)
self._file_print("\n" + SECTION_HEADER)
self._file_print("Detected TPUs (lstpu)\n")
self._file_print(SECTION_HEADER)
try:
proc = subprocess.Popen(
test_path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
for line in proc.stdout:
if not self.pci and "PCI" in line:
self.pci = True
if not self.usb and "USB" in line:
self.usb = True
self.tpus += 1
self._file_print(line)
proc.wait()
except Exception as e:
self._file_print(str(e) + "\n")
if self.tpus:
self.results[test] = True
else:
self.results[test] = False
self._file_print("No TPUs detected\n")
return self.tpus
def print_system_info(self):
"""Prints system info.
Runs various commands (currently Linux only) to print information about
the system, including PCIe and USB when relevant.
"""
self._file_print(
"\n-----------------------------------------------------\n")
self._file_print("System Info\n")
self._file_print(
"-----------------------------------------------------\n")
self._file_print(platform.platform() + "\n")
if sys.platform == "linux":
if self.pci: # For PCIe, displays apex kernel messages and lspci output.
self._run_linux_system_command(
"TPU Kernel Messages", ["dmesg"], "apex")
self._run_linux_system_command(
"PCI Info", ["lspci", "-vvv", "-d 1ac1:089a"])
elif self.usb: # For USB, the device can be in DFU mode or TPU mode.
self._run_linux_system_command(
"USB Devices", ["lsusb"])
self._run_linux_system_command("USB Tree", ["lsusb", "-t"])
self._run_linux_system_command(
"USB Detailed Info (TPU in standard mode)", ["lsusb", "-v", "-d 18d1:9302"])
self._run_linux_system_command(
"USB Detailed Info (TPU in DFU mode)", ["lsusb", "-v", "-d 1a6e:089a"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--output', default="cts.txt")
args = parser.parse_args()
# Gets the complete path of file.
output_file = os.path.join(os.getcwd(), args.output)
# Checks for and downloads/extracts test data.
TEST_DATA_COMMIT = "c21de4450f88a20ac5968628d375787745932a5a"
if not os.path.isdir(os.path.join(os.getcwd(), "test_data")):
print("Test data not found, downloading...")
context = ssl._create_unverified_context()
with urlopen("https://github.com/google-coral/test_data/archive/" + TEST_DATA_COMMIT + ".zip", context=context) as zipresp, NamedTemporaryFile() as tfile:
tfile.write(zipresp.read())
tfile.seek(0)
print("Download complete, extracting...")
unpack_archive(tfile.name, os.getcwd(), format='zip')
os.rename(os.path.join(os.getcwd(), "test_data-" + TEST_DATA_COMMIT),
os.path.join(os.getcwd(), "test_data"))
with TestSuite(output_file) as cts:
# Verifies TPU(s) are attached
tpus = cts.detect_tpus()
if not tpus:
return
cts.print_system_info()
# Iterates through tests, outputting results to file and storing results.
cts.run_test(test=["tflite_utils_test"])
cts.run_test(test=["inference_stress_test",
"--stress_test_runs=10000", "--stress_with_sleep_test_runs=200"])
cts.run_test(test=["model_loading_stress_test",
"--stress_test_runs=50"])
cts.run_test(test=["inference_repeatability_test",
"--stress_test_runs=1000", "--gtest_repeat=20"])
# For classification test, omit TF2 ResNet50 - which fails on some platforms.
cts.run_test(test=["classification_models_test", "--gtest_repeat=10",
"--gtest_filter=-*tfhub_tf2_resnet_50_imagenet_ptq*"])
cts.run_test(test=["detection_models_test", "--gtest_repeat=100"])
cts.run_test(test=["segmentation_models_test", "--gtest_repeat=100"])
# If more than 1 TPU is attached, runs multi-TPU tests.
if tpus > 1:
cts.run_test(
test=["multiple_tpus_inference_stress_test", "--num_inferences=5000"])
# Runs benchmarks, which just report results but don't compare them against a baseline.
# Also note CPU scaling is not disabled (as it would require root).
cts.run_test(test=["models_benchmark", "--benchmark_color=false"])
if __name__ == "__main__":
main()
| google-coral/cts | coral_cts.py | Python | apache-2.0 | 12,056 |
from __future__ import print_function
import json
import matplotlib.pyplot as plt
import numpy as np
with open("../data/results/scores.json", 'r') as f:
scores = json.load(f)
with open("../data/results/distances.json", 'r') as f:
distances = json.load(f)
with open("../data/results/times.json", 'r') as f:
times = json.load(f)
print(scores)
print(distances)
print(times)
def scorelines():
''' line plot of learned classifier's scores (x = ns, y = accuracy) '''
ns = [10, 100, 1000, 10000]
fig, ax = plt.subplots()
ax.plot(ns, scores["basic"], marker='o', linestyle='-', color='r',
label='Basic')
ax.plot(ns, scores["tree"], marker='s', linestyle='-', color='b',
label='Tree')
ax.plot(ns, scores["trained"], marker='^', linestyle='-', color='g',
label='Learned Thresholds')
ax.set_xlabel('Size of Training Set')
ax.set_ylabel('Average Accuracy over Test Set')
title = 'Learning-based Classifier Accuracy by Size of Training Set'
ax.set_title(title)
ax.set_xscale('log')
ax.set_xlim(7, 14000)
ax.set_ylim(0.0, 1.0)
ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.legend(loc=2)
plt.tight_layout()
plt.savefig("../output/linechart_scores.png")
def scorebars():
''' bar chart of classifier's scores by classifier type (y = accuracy) '''
scorelist = [scores["lexical"], scores["basic"][-1], scores["tree"][-1],
scores["trained"][-1]]
N = 4
offset = 0.125
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars
fig, ax = plt.subplots()
ax.bar(ind+offset, scorelist, width, alpha=0.40, color='r')
# add some text for labels, title and axes ticks
ax.set_ylabel('Average Accuracy')
ax.set_title('Classification Accuracy by Classifier Type')
ax.set_xticks(ind+width/2+offset)
ax.set_xticklabels(('Lexical Matcher',
'Basic Classifier',
'Tree Classifier',
'Learned Thresholds'))
ax.set_ylim(0.0, 1.0)
ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.tight_layout()
plt.savefig("../output/barchart_scores.png")
def distancebars():
''' bar chart of classifier's distances by classifier type (y = distance) '''
distancelist = [distances["basic"], distances["tree"],
distances["trained"]]
N = 3
offset = 0.125
ind = np.arange(N) # the x locations for the groups
width = 0.75 # the width of the bars
fig, ax = plt.subplots()
ax.bar(ind+offset, distancelist, width, alpha=0.40, color='b')
# add some text for labels, title and axes ticks
ax.set_ylabel('Average Distance')
ax.set_title('Average Distance of Predictions by Classifier Type')
ax.set_xticks(ind+width/2+offset)
ax.set_xticklabels(('Basic Classifier', 'Tree Classifier',
'Tree w/ Learned Thresholds'))
ax.set_ylim(0.0, 1.0)
# ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.tight_layout()
plt.savefig("../output/barchart_distances.png")
def timelines():
''' line plot of learned classifier's times (x = ns, y = ms) '''
ns = [10, 100, 1000, 10000]
fig, ax = plt.subplots()
ax.errorbar(ns, times["lexical"]["avgs"], yerr=times["lexical"]["stddevs"],
marker='*', linestyle='-', color='y', label='Lexical')
ax.errorbar(ns, times["basic"]["avgs"], yerr=times["basic"]["stddevs"],
marker='o', linestyle='-', color='r', label='Basic')
ax.errorbar(ns, times["tree"]["avgs"], yerr=times["tree"]["stddevs"],
marker='s', linestyle='-', color='b', label='Tree')
ax.errorbar(ns, times["trained"]["avgs"], yerr=times["trained"]["stddevs"],
marker='^', linestyle='-', color='g',
label='Learned Thresholds')
ax.set_xlabel('Size of Test Set')
ax.set_ylabel('Time to Classify Test Set (ms)')
ax.set_title('Classifier Execution Times (ms) by Size of Test Set')
ax.set_xscale('log')
#ax.set_yscale('log')
ax.set_xlim(7, 14000)
# ax.set_ylim(0.0, 1.0)
# ax.set_yticklabels(["0%", "20%", "40%", "60%", "80%", "100%"])
plt.legend(loc=2)
plt.tight_layout()
plt.show()
#plt.savefig("../output/linechart_times.png")
if __name__ == "__main__":
#scorelines()
#scorebars()
#distancebars()
timelines()
| yarbroughw/JMDE | JMDE/scripts/graph.py | Python | mit | 4,491 |
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
This module implements the low-level API for dealing with fulltext files.
- All the files associated to a I{record} (identified by a I{recid}) can be
managed via an instance of the C{BibRecDocs} class.
- A C{BibRecDocs} is a wrapper of the list of I{documents} attached to the
record.
- Each document is represented by an instance of the C{BibDoc} class.
- A document is identified by a C{docid} and name (C{docname}). The docname
must be unique within the record. A document is the set of all the
formats and revisions of a piece of information.
- A document has a type called C{doctype} and can have a restriction.
- Each physical file, i.e. the concretization of a document into a
particular I{version} and I{format} is represented by an instance of the
C{BibDocFile} class.
- The format is in fact the extension of the physical file.
- A comment, a description and other information can be associated with a
BibDocFile.
- A C{bibdoc} is a synonym for a document, while a C{bibdocfile} is a
synonym for a physical file.
@group Main classes: BibRecDocs,BibDoc,BibDocFile
@group Other classes: BibDocMoreInfo,Md5Folder,InvenioBibDocFileError
@group Main functions: decompose_file,stream_file,bibdocfile_*,download_url
@group Configuration Variables: CFG_*
"""
__revision__ = "$Id$"
import os
import re
import shutil
import filecmp
import time
import random
import socket
import urllib2
import urllib
import tempfile
from six.moves import cPickle
import base64
import binascii
import cgi
import sys
try:
import magic
if hasattr(magic, "open"):
CFG_HAS_MAGIC = 1
if not hasattr(magic, "MAGIC_MIME_TYPE"):
## Patching RHEL6/CentOS6 version
magic.MAGIC_MIME_TYPE = 16
elif hasattr(magic, "Magic"):
CFG_HAS_MAGIC = 2
except ImportError:
CFG_HAS_MAGIC = 0
from flask import current_app
from datetime import datetime
from mimetypes import MimeTypes
from thread import get_ident
from six import iteritems
from invenio.utils import apache
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.utils.shell import escape_shell_arg, run_shell_command
from invenio.legacy.dbquery import run_sql, DatabaseError
from invenio.ext.logging import register_exception
from invenio.legacy.bibrecord import record_get_field_instances, \
field_get_subfield_values, field_get_subfield_instances, \
encode_for_xml
from invenio.utils.url import create_url, make_user_agent_string
from invenio.utils.text import nice_size
from invenio.modules.access.engine import acc_authorize_action
from invenio.modules.access.control import acc_is_user_in_role, acc_get_role_id
from invenio.modules.access.firerole import compile_role_definition, acc_firerole_check_user
from invenio.modules.access.local_config import SUPERADMINROLE, CFG_WEBACCESS_WARNING_MSGS
from invenio.config import CFG_SITE_URL, \
CFG_WEBDIR, CFG_BIBDOCFILE_FILEDIR,\
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS, \
CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT, CFG_SITE_SECURE_URL, \
CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS, \
CFG_TMPDIR, CFG_TMPSHAREDDIR, CFG_PATH_MD5SUM, \
CFG_WEBSUBMIT_STORAGEDIR, \
CFG_BIBDOCFILE_USE_XSENDFILE, \
CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY, \
CFG_SITE_RECORD, CFG_PYLIBDIR, \
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS, \
CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE, \
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES, \
CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING, \
CFG_BIBCATALOG_SYSTEM
from invenio.legacy.bibcatalog.api import BIBCATALOG_SYSTEM
from invenio.legacy.bibdocfile.config import CFG_BIBDOCFILE_ICON_SUBFORMAT_RE, \
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT
from invenio.utils.hash import md5
from invenio.legacy.bibdocfile.registry import plugins
import invenio.legacy.template
def _plugin_bldr(plugin_code):
"""Preparing the plugin dictionary structure."""
if not plugin_code.__name__.split('.')[-1].startswith('bom_'):
return
ret = {}
ret['create_instance'] = getattr(plugin_code, "create_instance", None)
ret['supports'] = getattr(plugin_code, "supports", None)
return ret
_CFG_BIBDOC_PLUGINS = None
def get_plugins():
"""Lazy loading of plugins."""
global _CFG_BIBDOC_PLUGINS
if _CFG_BIBDOC_PLUGINS is None:
_CFG_BIBDOC_PLUGINS = filter(None, map(
_plugin_bldr,
plugins))
return _CFG_BIBDOC_PLUGINS
bibdocfile_templates = invenio.legacy.template.load('bibdocfile')
## The following flag controls whether HTTP range requests are supported or not
## when serving static files via Python. This is disabled by default as
## it currently breaks support for opening PDF files on Windows platforms
## using the Acrobat Reader browser plugin.
CFG_ENABLE_HTTP_RANGE_REQUESTS = False
#: block size when performing I/O.
CFG_BIBDOCFILE_BLOCK_SIZE = 1024 * 8
#: threshold used to decide when to use the Python MD5 or the CLI MD5 algorithm.
CFG_BIBDOCFILE_MD5_THRESHOLD = 256 * 1024
#: chunks loaded by the Python MD5 algorithm.
CFG_BIBDOCFILE_MD5_BUFFER = 1024 * 1024
#: whether to normalize e.g. ".JPEG" and ".jpg" into .jpeg.
CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION = False
#: flags that can be associated to files.
CFG_BIBDOCFILE_AVAILABLE_FLAGS = (
'PDF/A',
'STAMPED',
'PDFOPT',
'HIDDEN',
'CONVERTED',
'PERFORM_HIDE_PREVIOUS',
'OCRED'
)
DBG_LOG_QUERIES = False
#: constant used in FFT correct mode, with the obvious meaning.
KEEP_OLD_VALUE = 'KEEP-OLD-VALUE'
_CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS = [(re.compile(_regex), _headers)
for _regex, _headers in CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS]
_mimes = MimeTypes(strict=False)
_mimes.suffix_map.update({'.tbz2' : '.tar.bz2'})
_mimes.encodings_map.update({'.bz2' : 'bzip2'})
if CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES:
for key, value in iteritems(CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES):
_mimes.add_type(key, value)
del key, value
_magic_cookies = {}
if CFG_HAS_MAGIC == 1:
def _get_magic_cookies():
"""
@return: a tuple of magic object.
@rtype: (MAGIC_NONE, MAGIC_COMPRESS, MAGIC_MIME, MAGIC_COMPRESS + MAGIC_MIME)
@note: ... not real magic. Just see: man file(1)
"""
thread_id = get_ident()
if thread_id not in _magic_cookies:
_magic_cookies[thread_id] = {
magic.MAGIC_NONE: magic.open(magic.MAGIC_NONE),
magic.MAGIC_COMPRESS: magic.open(magic.MAGIC_COMPRESS),
magic.MAGIC_MIME: magic.open(magic.MAGIC_MIME),
magic.MAGIC_COMPRESS + magic.MAGIC_MIME: magic.open(magic.MAGIC_COMPRESS + magic.MAGIC_MIME),
magic.MAGIC_MIME_TYPE: magic.open(magic.MAGIC_MIME_TYPE),
}
for key in _magic_cookies[thread_id].keys():
_magic_cookies[thread_id][key].load()
return _magic_cookies[thread_id]
elif CFG_HAS_MAGIC == 2:
def _magic_wrapper(local_path, mime=True, mime_encoding=False):
thread_id = get_ident()
if (thread_id, mime, mime_encoding) not in _magic_cookies:
magic_object = _magic_cookies[thread_id, mime, mime_encoding] = magic.Magic(mime=mime, mime_encoding=mime_encoding)
else:
magic_object = _magic_cookies[thread_id, mime, mime_encoding]
return magic_object.from_file(local_path) # pylint: disable=E1103
def _generate_extensions():
"""
Generate the regular expression to match all the known extensions.
@return: the regular expression.
@rtype: regular expression object
"""
_tmp_extensions = _mimes.encodings_map.keys() + \
_mimes.suffix_map.keys() + \
_mimes.types_map[1].keys() + \
CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS
extensions = []
for ext in _tmp_extensions:
if ext.startswith('.'):
extensions.append(ext)
else:
extensions.append('.' + ext)
extensions.sort()
extensions.reverse()
extensions = set([ext.lower() for ext in extensions])
extensions = '\\' + '$|\\'.join(extensions) + '$'
extensions = extensions.replace('+', '\\+')
return re.compile(extensions, re.I)
#: Regular expression to recognized extensions.
_extensions = _generate_extensions()
class InvenioBibDocFileError(Exception):
"""
Exception raised in case of errors related to fulltext files.
"""
pass
class InvenioBibdocfileUnauthorizedURL(InvenioBibDocFileError):
"""
Exception raised in case of errors related to fulltext files.
"""
## NOTE: this is a legacy Exception
pass
def _val_or_null(val, eq_name = None, q_str = None, q_args = None):
"""
Auxiliary function helpful while building WHERE clauses of SQL queries
that should contain field=val or field is val
If the optional parameters q_str and q_args are provided, those lists are updated.
If val is None, a statement of the form "eq_name is NULL" is returned;
otherwise the function returns a parametrised comparison
"eq_name=%s" with val added to the query args list.
Using parametrised queries diminishes the likelihood of
SQL injection.
@param val Value to compare with
@type val
@param eq_name The name of the database column
@type eq_name string
@param q_str Query string builder - list of clauses
that should be connected by AND operator
@type q_str list
@param q_args Query arguments list. This list will be applied as
a second argument of run_sql command
@type q_args list
@result string of a single part of WHERE clause
@rtype string
"""
res = ""
if eq_name != None:
res += eq_name
if val == None:
if eq_name != None:
res += " is "
res += "NULL"
if q_str != None:
q_str.append(res)
return res
else:
if eq_name != None:
res += "="
res += "%s"
if q_str != None:
q_str.append(res)
if q_args != None:
q_args.append(str(val))
return res
def _sql_generate_conjunctive_where(to_process):
"""Generating WHERE clause of a SQL statement, consisting of conjunction
of declared terms. Terms are defined by the to_process argument.
The method creates different entries depending on whether the value
should be NULL (None in the list) or a concrete argument.
In the second case, a parametrised query is generated, decreasing the
chance of an SQL injection.
@param to_process List of tuples (value, database_column)
@type to_process list"""
q_str = []
q_args = []
for entry in to_process:
q_str.append(_val_or_null(entry[0], eq_name = entry[1], q_args = q_args))
return (" AND ".join(q_str), q_args)
def file_strip_ext(afile, skip_version=False, only_known_extensions=False, allow_subformat=True):
"""
Strip the extension from a filename in the best possible way.
>>> file_strip_ext("foo.tar.gz")
'foo'
>>> file_strip_ext("foo.buz.gz")
'foo.buz'
>>> file_strip_ext("foo.buz")
'foo'
>>> file_strip_ext("foo.buz", only_known_extensions=True)
'foo.buz'
>>> file_strip_ext("foo.buz;1", skip_version=False,
... only_known_extensions=True)
'foo.buz;1'
>>> file_strip_ext("foo.gif;icon")
'foo'
>>> file_strip_ext("foo.gif:icon", allow_subformat=False)
'foo.gif:icon'
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the name/path without the extension (and version).
@rtype: string
"""
if skip_version or allow_subformat:
afile = afile.split(';')[0]
nextfile = _extensions.sub('', afile)
if nextfile == afile and not only_known_extensions:
nextfile = os.path.splitext(afile)[0]
while nextfile != afile:
afile = nextfile
nextfile = _extensions.sub('', afile)
return nextfile
def normalize_format(docformat, allow_subformat=True):
"""
Normalize the format, e.g. by adding a dot in front.
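Examples (illustrative; assuming CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION is False):
>>> normalize_format('pdf')
'.pdf'
>>> normalize_format('pdf;icon')
'.pdf;icon'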
@param format: the format/extension to be normalized.
@type format: string
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the normalized format.
@rtype: string
"""
if not docformat:
return ''
if allow_subformat:
subformat = docformat[docformat.rfind(';'):]
docformat = docformat[:docformat.rfind(';')]
else:
subformat = ''
if docformat and docformat[0] != '.':
docformat = '.' + docformat
if CFG_BIBDOCFILE_STRONG_FORMAT_NORMALIZATION:
if docformat not in ('.Z', '.H', '.C', '.CC'):
docformat = docformat.lower()
docformat = {
'.jpg' : '.jpeg',
'.htm' : '.html',
'.tif' : '.tiff'
}.get(docformat, docformat)
return docformat + subformat
def guess_format_from_url(url):
"""
Given a URL, tries to guess its extension.
Different methods will be used, including an HTTP HEAD query,
downloading the resource and using mime/magic detection.
@param url: the URL for which the extension should be guessed.
@type url: string
@return: the recognized extension or '.bin' if it's impossible to
recognize it.
@rtype: string
"""
def guess_via_magic(local_path):
try:
if CFG_HAS_MAGIC == 1:
magic_cookie = _get_magic_cookies()[magic.MAGIC_MIME_TYPE]
mimetype = magic_cookie.file(local_path)
elif CFG_HAS_MAGIC == 2:
mimetype = _magic_wrapper(local_path, mime=True, mime_encoding=False)
if CFG_HAS_MAGIC:
if mimetype in CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING:
return normalize_format(CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING[mimetype])
else:
return normalize_format(_mimes.guess_extension(mimetype))
except Exception:
pass
## Let's try to guess the extension by considering the URL as a filename
ext = decompose_file(url, skip_version=True, only_known_extensions=True)[2]
if ext.startswith('.'):
return ext
if is_url_a_local_file(url):
## The URL corresponds to a local file, so we can safely consider
## traditional extensions after the dot.
ext = decompose_file(url, skip_version=True, only_known_extensions=False)[2]
if ext.startswith('.'):
return ext
## No extensions? Let's use Magic.
ext = guess_via_magic(url)
if ext:
return ext
else:
## Since the URL is remote, let's try to perform a HEAD request
## and see the corresponding headers
try:
response = open_url(url, head_request=True)
except (InvenioBibdocfileUnauthorizedURL, urllib2.URLError):
return ".bin"
ext = get_format_from_http_response(response)
if ext:
return ext
if CFG_HAS_MAGIC:
## Last solution: let's download the remote resource
## and use the Python magic library to guess the extension
filename = ""
try:
try:
filename = download_url(url, docformat='')
ext = guess_via_magic(filename)
if ext:
return ext
except Exception:
pass
finally:
if os.path.exists(filename):
## Let's free space
os.remove(filename)
return ".bin"
_docname_re = re.compile(r'[^-\w.]*')
def normalize_docname(docname):
"""
Normalize the docname.
At the moment the normalization is just returning the same string.
@param docname: the docname to be normalized.
@type docname: string
@return: the normalized docname.
@rtype: string
"""
#return _docname_re.sub('', docname)
return docname
def normalize_version(version):
"""
Normalize the version.
The version can be either an integer or the keyword 'all'. Any other
value will be transformed into the empty string.
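Examples (illustrative):
>>> normalize_version(2)
'2'
>>> normalize_version('all')
'all'
>>> normalize_version('foo')
''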
@param version: the version (either a number or 'all').
@type version: integer or string
@return: the normalized version.
@rtype: string
"""
try:
int(version)
except ValueError:
if version.lower().strip() == 'all':
return 'all'
else:
return ''
return str(version)
def compose_file(dirname, extension, subformat=None, version=None, storagename=None):
"""
Construct back a fullpath given the separate components.
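Example (illustrative; the default storagename is "content"):
>>> compose_file('/tmp', 'pdf', subformat='icon', version=2)
'/tmp/content.pdf;icon;2'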
@param dirname: the directory in which the file is stored.
@param extension: the file extension (a leading dot is added if missing).
@param subformat: the optional subformat.
@param version: the optional version number.
@param storagename: name under which the file should be stored in the filesystem
(defaults to "content").
@type storagename: string
@return: a fullpath to the file
@rtype: string
"""
if version:
version = ";%i" % int(version)
else:
version = ""
if subformat:
if not subformat.startswith(";"):
subformat = ";%s" % subformat
else:
subformat = ""
if extension and not extension.startswith("."):
extension = ".%s" % extension
if not storagename:
storagename = "content"
return os.path.join(dirname, storagename + extension + subformat + version)
def compose_format(extension, subformat=None):
"""
Construct the format string
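Example (illustrative):
>>> compose_format('pdf', 'icon')
'.pdf;icon'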
"""
if not extension.startswith("."):
extension = ".%s" % extension
if subformat:
if not subformat.startswith(";"):
subformat = ";%s" % subformat
else:
subformat = ""
return extension + subformat
def decompose_file(afile, skip_version=False, only_known_extensions=False,
allow_subformat=True):
"""
Decompose a file/path into its components dirname, basename and extension.
>>> decompose_file('/tmp/foo.tar.gz')
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('/tmp/foo.tar.gz;1', skip_version=True)
('/tmp', 'foo', '.tar.gz')
>>> decompose_file('http://www.google.com/index.html')
('http://www.google.com', 'index', '.html')
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: a tuple with the directory name, the basename and extension.
@rtype: (dirname, basename, extension)
@note: if a URL is provided, the scheme will be part of the dirname.
@see: L{file_strip_ext} for the algorithm used to retrieve the extension.
"""
if skip_version:
version = afile.split(';')[-1]
try:
int(version)
afile = afile[:-len(version)-1]
except ValueError:
pass
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(
basename,
only_known_extensions=only_known_extensions,
allow_subformat=allow_subformat)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension)
def decompose_file_with_version(afile):
"""
Decompose a file into dirname, basename, extension and version.
>>> decompose_file_with_version('/tmp/foo.tar.gz;1')
('/tmp', 'foo', '.tar.gz', 1)
@param afile: the path/name of a file.
@type afile: string
@return: a tuple with the directory name, the basename, extension and
version.
@rtype: (dirname, basename, extension, version)
@raise ValueError: in case the version does not exist.
@note: if a URL is provided, the scheme will be part of the dirname.
"""
version_str = afile.split(';')[-1]
version = int(version_str)
afile = afile[:-len(version_str)-1]
basename = os.path.basename(afile)
dirname = afile[:-len(basename)-1]
base = file_strip_ext(basename)
extension = basename[len(base) + 1:]
if extension:
extension = '.' + extension
return (dirname, base, extension, version)
def get_subformat_from_format(docformat):
"""
@return the subformat if any.
@rtype: string
>>> get_subformat_from_format('foo;bar')
'bar'
>>> get_subformat_from_format('foo')
''
"""
try:
return docformat[docformat.rindex(';') + 1:]
except ValueError:
return ''
def get_superformat_from_format(docformat):
"""
@return the superformat if any.
@rtype: string
>>> get_superformat_from_format('foo;bar')
'foo'
>>> get_superformat_from_format('foo')
'foo'
"""
try:
return docformat[:docformat.rindex(';')]
except ValueError:
return docformat
def propose_next_docname(docname):
"""
Given a I{docname}, suggest a new I{docname} (useful when trying to generate
a unique I{docname}).
>>> propose_next_docname('foo')
'foo_1'
>>> propose_next_docname('foo_1')
'foo_2'
>>> propose_next_docname('foo_10')
'foo_11'
@param docname: the base docname.
@type docname: string
@return: the next possible docname based on the given one.
@rtype: string
"""
if '_' in docname:
split_docname = docname.split('_')
try:
split_docname[-1] = str(int(split_docname[-1]) + 1)
docname = '_'.join(split_docname)
except ValueError:
docname += '_1'
else:
docname += '_1'
return docname
class BibRecDocs(object):
"""
This class represents all the files attached to one record.
@param recid: the record identifier.
@type recid: integer
@param deleted_too: whether to consider deleted documents as normal
documents (useful when trying to recover deleted information).
@type deleted_too: bool
@param human_readable: whether numbers should be printed in human readable
format (e.g. 2048 bytes -> 2Kb)
@ivar id: the record identifier as passed to the constructor.
@type id: integer
@ivar human_readable: the human_readable flag as passed to the constructor.
@type human_readable: bool
@ivar deleted_too: the deleted_too flag as passed to the constructor.
@type deleted_too: bool
@ivar bibdocs: the list of documents attached to the record.
@type bibdocs: list of BibDoc
"""
def __init__(self, recid, deleted_too=False, human_readable=False):
try:
self.id = int(recid)
except ValueError:
raise ValueError("BibRecDocs: recid is %s but must be an integer." % repr(recid))
self.human_readable = human_readable
self.deleted_too = deleted_too
self.attachment_types = {} # dictionary docname->attachment type
self._bibdocs = []
self.dirty = True
@property
def bibdocs(self):
if self.dirty:
self.build_bibdoc_list()
return self._bibdocs
def __repr__(self):
"""
@return: the canonical string representation of the C{BibRecDocs}.
@rtype: string
"""
return 'BibRecDocs(%s%s%s)' % (self.id,
self.deleted_too and ', True' or '',
self.human_readable and ', True' or ''
)
def __str__(self):
"""
@return: an easy to be I{grepped} string representation of the
whole C{BibRecDocs} content.
@rtype: string
"""
out = '%i::::total bibdocs attached=%i\n' % (self.id, len(self.bibdocs))
out += '%i::::total size latest version=%s\n' % (self.id, nice_size(self.get_total_size_latest_version()))
out += '%i::::total size all files=%s\n' % (self.id, nice_size(self.get_total_size()))
for (docname, (bibdoc, dummy)) in self.bibdocs.items():
out += str(docname) + ":" + str(bibdoc)
return out
def empty_p(self):
"""
@return: True when the record has no attached documents.
@rtype: bool
"""
return len(self.bibdocs) == 0
def deleted_p(self):
"""
@return: True if the corresponding record has been deleted.
@rtype: bool
"""
from invenio.legacy.search_engine import record_exists
return record_exists(self.id) == -1
def get_xml_8564(self):
"""
Return a snippet of I{MARCXML} representing the I{8564} fields
corresponding to the current state.
@return: the MARCXML representation.
@rtype: string
"""
from invenio.legacy.search_engine import get_record
out = ''
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
for field in fields:
urls = field_get_subfield_values(field, 'u')
if urls and not bibdocfile_url_p(urls[0]):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
for subfield, value in field_get_subfield_instances(field):
out += '\t\t<subfield code="%s">%s</subfield>\n' % (subfield, encode_for_xml(value))
out += '\t</datafield>\n'
for afile in self.list_latest_files(list_hidden=False):
out += '\t<datafield tag="856" ind1="4" ind2=" ">\n'
url = afile.get_url()
description = afile.get_description()
comment = afile.get_comment()
if url:
out += '\t\t<subfield code="u">%s</subfield>\n' % encode_for_xml(url)
if description:
out += '\t\t<subfield code="y">%s</subfield>\n' % encode_for_xml(description)
if comment:
out += '\t\t<subfield code="z">%s</subfield>\n' % encode_for_xml(comment)
out += '\t</datafield>\n'
return out
def get_total_size_latest_version(self):
"""
Returns the total size used on disk by all the files belonging
to this record and corresponding to the latest version.
@return: the total size.
@rtype: integer
"""
size = 0
for (bibdoc, _) in self.bibdocs.values():
size += bibdoc.get_total_size_latest_version()
return size
def get_total_size(self):
"""
Return the total size used on disk of all the files belonging
to this record of any version (not only the last as in
L{get_total_size_latest_version}).
@return: the total size.
@rtype: integer
"""
size = 0
for (bibdoc, _) in self.bibdocs.values():
size += bibdoc.get_total_size()
return size
def build_bibdoc_list(self):
"""
This method must be called every time a I{bibdoc} is added, removed or
modified.
"""
self._bibdocs = {}
if self.deleted_too:
res = run_sql("""SELECT brbd.id_bibdoc, brbd.docname, brbd.type FROM bibrec_bibdoc as brbd JOIN
bibdoc as bd ON bd.id=brbd.id_bibdoc WHERE brbd.id_bibrec=%s
ORDER BY brbd.docname ASC""", (self.id,))
else:
res = run_sql("""SELECT brbd.id_bibdoc, brbd.docname, brbd.type FROM bibrec_bibdoc as brbd JOIN
bibdoc as bd ON bd.id=brbd.id_bibdoc WHERE brbd.id_bibrec=%s AND
bd.status<>'DELETED' ORDER BY brbd.docname ASC""", (self.id,))
for row in res:
cur_doc = BibDoc.create_instance(docid=row[0], recid=self.id,
human_readable=self.human_readable)
self._bibdocs[row[1]] = (cur_doc, row[2])
self.dirty = False
def list_bibdocs_by_names(self, doctype=None):
"""
Returns the dictionary of all bibdoc objects belonging to a recid.
Keys in the dictionary are names of documents and values are BibDoc objects.
If C{doctype} is set, it returns just the bibdocs of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the dictionary of bibdocs.
@rtype: dictionary of docname -> BibDoc
"""
if not doctype:
return dict((k, v) for (k, (v, _)) in iteritems(self.bibdocs))
res = {}
for (docname, (doc, attachmenttype)) in iteritems(self.bibdocs):
if attachmenttype == doctype:
res[docname] = doc
return res
def list_bibdocs(self, doctype=None, rel_type=None):
"""
Returns the list of all bibdoc objects belonging to a recid.
If C{doctype} is set, it returns just the bibdocs of that doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of bibdocs.
@rtype: list of BibDoc
"""
return [bibdoc for (bibdoc, rtype) in self.bibdocs.values()
if (not doctype or doctype == bibdoc.doctype) and
(rel_type is None or rel_type == rtype)]
def get_bibdoc_names(self, doctype=None):
"""
Returns all the names of the documents associated with the bibrec.
If C{doctype} is set, restrict the result to all the matching doctype.
@param doctype: the optional doctype.
@type doctype: string
@return: the list of document names.
@rtype: list of string
"""
return [docname for (docname, dummy) in self.list_bibdocs_by_names(doctype).items()]
def check_file_exists(self, path, f_format):
"""
Check if a file with the same content as the file pointed to by C{path}
is already attached to this record.
@param path: the file to be checked against.
@type path: string
@return: True if a file with the requested content is already attached
to the record.
@rtype: bool
"""
size = os.path.getsize(path)
# Let's consider all the latest files
files = self.list_latest_files()
# Let's consider all the latest files with same size
potential = [afile for afile in files if afile.get_size() == size and afile.format == f_format]
if potential:
checksum = calculate_md5(path)
# Let's consider all the latest files with the same size and the
# same checksum
potential = [afile for afile in potential if afile.get_checksum() == checksum]
if potential:
potential = [afile for afile in potential if
filecmp.cmp(afile.get_full_path(), path)]
if potential:
return True
else:
# Gosh! How unlucky, same size, same checksum but not same
# content!
pass
return False
def propose_unique_docname(self, docname):
"""
Given C{docname}, return a new docname that is not already attached to
the record.
@param docname: the reference docname.
@type docname: string
@return: a docname not already attached.
@rtype: string
"""
docname = normalize_docname(docname)
goodname = docname
i = 1
while goodname in self.get_bibdoc_names():
i += 1
goodname = "%s_%s" % (docname, i)
return goodname
def merge_bibdocs(self, docname1, docname2):
"""
This method merges C{docname2} into C{docname1}.
1. Given all the formats of the latest version of the files
attached to C{docname2}, these files are added as new formats
into C{docname1}.
2. C{docname2} is marked as deleted.
@raise InvenioBibDocFileError: if at least one format in C{docname2}
already exists in C{docname1}. (In this case the two bibdocs are
preserved)
@note: comments and descriptions are also copied.
@note: if C{docname2} has a I{restriction}(i.e. if the I{status} is
set) and C{docname1} doesn't, the restriction is imported.
"""
bibdoc1 = self.get_bibdoc(docname1)
bibdoc2 = self.get_bibdoc(docname2)
## Check for possibility
for bibdocfile in bibdoc2.list_latest_files():
docformat = bibdocfile.get_format()
if bibdoc1.format_already_exists_p(docformat):
raise InvenioBibDocFileError('Format %s already exists in bibdoc %s of record %s. It\'s impossible to merge bibdoc %s into it.' % (docformat, docname1, self.id, docname2))
## Importing restriction if needed.
restriction1 = bibdoc1.get_status()
restriction2 = bibdoc2.get_status()
if restriction2 and not restriction1:
bibdoc1.set_status(restriction2)
## Importing formats
for bibdocfile in bibdoc2.list_latest_files():
docformat = bibdocfile.get_format()
comment = bibdocfile.get_comment()
description = bibdocfile.get_description()
bibdoc1.add_file_new_format(bibdocfile.get_full_path(),
description=description,
comment=comment, docformat=docformat)
## Finally deleting old bibdoc2
bibdoc2.delete()
self.dirty = True
def get_docid(self, docname):
"""
@param docname: the document name.
@type docname: string
@return: the identifier corresponding to the given C{docname}.
@rtype: integer
@raise InvenioBibDocFileError: if the C{docname} does not
correspond to a document attached to this record.
"""
if docname in self.bibdocs:
return self.bibdocs[docname][0].id
raise InvenioBibDocFileError, "Recid '%s' is not connected with a " \
"docname '%s'" % (self.id, docname)
def get_docname(self, docid):
"""
@param docid: the document identifier.
@type docid: integer
@return: the name of the document corresponding to the given document
identifier.
@rtype: string
@raise InvenioBibDocFileError: if the C{docid} does not
correspond to a document attached to this record.
"""
for (docname, (bibdoc, _)) in self.bibdocs.items():
if bibdoc.id == docid:
return docname
raise InvenioBibDocFileError, "Recid '%s' is not connected with a " \
"docid '%s'" % (self.id, docid)
def change_name(self, newname, oldname=None, docid=None):
"""
Renames a document with a given name.
@param newname: the new name.
@type newname: string
@raise InvenioBibDocFileError: if the new name corresponds to
a document already attached to the record owning this document.
"""
if not oldname and not docid:
raise StandardError("Trying to rename unspecified document")
if not oldname:
oldname = self.get_docname(docid)
if not docid:
docid = self.get_docid(oldname)
doc, atttype = self.bibdocs[oldname]
newname = normalize_docname(newname)
res = run_sql("SELECT id_bibdoc FROM bibrec_bibdoc WHERE id_bibrec=%s AND docname=%s", (self.id, newname))
if res:
raise InvenioBibDocFileError, "A bibdoc called %s already exists for recid %s" % (newname, self.id)
doc.change_name(self.id, newname)
# updating the record structure
del self._bibdocs[oldname]
self._bibdocs[newname] = (doc, atttype)
def has_docname_p(self, docname):
"""
@param docname: the document name,
@type docname: string
@return: True if a document with the given name is attached to this
record.
@rtype: bool
"""
return docname in self.bibdocs.keys()
def get_bibdoc(self, docname):
"""
@return: the bibdoc with a particular docname associated with
this recid"""
if docname in self.bibdocs:
return self.bibdocs[docname][0]
raise InvenioBibDocFileError, "Recid '%s' is not connected with " \
" docname '%s'" % (self.id, docname)
def delete_bibdoc(self, docname):
"""
Deletes the document with the specified I{docname}.
@param docname: the document name.
@type docname: string
"""
if docname in self.bibdocs:
self.bibdocs[docname][0].delete()
self.dirty = True
def add_bibdoc(self, doctype="Main", docname='file', never_fail=False):
"""
Add a new empty document object (a I{bibdoc}) to the list of
documents of this record.
@param doctype: the document type.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@return: the newly created document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of any error.
"""
try:
docname = normalize_docname(docname)
if never_fail:
docname = self.propose_unique_docname(docname)
if docname in self.get_bibdoc_names():
raise InvenioBibDocFileError, \
"%s has already a bibdoc with docname %s" % (self.id, docname)
else:
bibdoc = BibDoc.create_instance(recid=self.id, doctype=doctype,
docname=docname,
human_readable=self.human_readable)
self.dirty = True
return bibdoc
except Exception as e:
register_exception()
raise InvenioBibDocFileError(str(e))
def add_new_file(self, fullpath, doctype="Main", docname=None,
never_fail=False, description=None, comment=None,
docformat=None, flags=None, modification_date=None):
"""
Directly add a new file to this record.
Adds a new file with the following policy:
- if the C{docname} is not set it is retrieved from the name of the
file.
- If a bibdoc with the given docname doesn't already exist, it is
created and the file is added to it.
- If it exists but doesn't contain the format that is being
added, the new format is added.
- If the format already exists then if C{never_fail} is True a new
bibdoc is created with a similar name but with a progressive
number as a suffix and the file is added to it (see
L{propose_unique_docname}).
@param fullpath: the filesystem path of the document to be added.
@type fullpath: string
@param doctype: the type of the document.
@type doctype: string
@param docname: the document name.
@type docname: string
@param never_fail: if True, this procedure will not fail, even if
a document with the given name is already attached to this
record. In this case a new name will be generated (see
L{propose_unique_docname}).
@type never_fail: bool
@param description: an optional description of the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of error.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
docname = normalize_docname(docname)
try:
bibdoc = self.get_bibdoc(docname)
except InvenioBibDocFileError:
# bibdoc doesn't already exists!
bibdoc = self.add_bibdoc(doctype, docname, False)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
else:
try:
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
except InvenioBibDocFileError as dummy:
# Format already exist!
if never_fail:
bibdoc = self.add_bibdoc(doctype, docname, True)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
else:
raise
return bibdoc
def add_new_version(self, fullpath, docname=None, description=None, comment=None, docformat=None, flags=None):
"""
Adds a new file to an already existent document object as a new
version.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case of error.
@note: previous files associated with the same document will be
considered obsolete.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_version(fullpath, description=description, comment=comment, docformat=docformat, flags=flags)
return bibdoc
def add_new_format(self, fullpath, docname=None, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Adds a new file to an already existent document object as a new
format.
@param fullpath: the filesystem path of the file to be added.
@type fullpath: string
@param docname: the document name. If not specified it will be
extracted from C{fullpath} (see L{decompose_file}).
@type docname: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be guessed (see L{guess_format_from_url}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@return: the elaborated document object.
@rtype: BibDoc
@raise InvenioBibDocFileError: in case the same format already
exists.
"""
if docname is None:
docname = decompose_file(fullpath)[1]
if docformat is None:
docformat = decompose_file(fullpath)[2]
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
bibdoc = self.get_bibdoc(docname=docname)
bibdoc.add_file_new_format(fullpath, description=description, comment=comment, docformat=docformat, flags=flags, modification_date=modification_date)
return bibdoc
def list_latest_files(self, doctype=None, list_hidden=True):
"""
Returns a list of the latest files.
@param doctype: if set, only document of the given type will be listed.
@type doctype: string
@param list_hidden: if True, will list also files with the C{HIDDEN}
flag being set.
@type list_hidden: bool
@return: the list of latest files.
@rtype: list of BibDocFile
"""
docfiles = []
for bibdoc in self.list_bibdocs(doctype):
docfiles += bibdoc.list_latest_files(list_hidden=list_hidden)
return docfiles
def fix(self, docname):
"""
Algorithm that transforms a broken/old bibdoc into a coherent one.
Think of it as being the fsck of BibDocs.
- All the files in the bibdoc directory will be renamed according
to the document name. Proper .recid, .type, .md5 files will be
created/updated.
- In case of more than one file with the same format and version, a new
bibdoc will be created in order to hold those files.
@param docname: the document name that need to be fixed.
@type docname: string
@return: the list of newly created bibdocs if any.
@rtype: list of BibDoc
@raise InvenioBibDocFileError: in case of issues that can not be
fixed automatically.
"""
bibdoc = self.get_bibdoc(docname)
versions = {}
res = []
new_bibdocs = [] # List of files with the same version/format as an
# existing file, which need a new bibdoc.
counter = 0
zero_version_bug = False
if os.path.exists(bibdoc.basedir):
from invenio.config import CFG_CERN_SITE, CFG_INSPIRE_SITE, CFG_BIBDOCFILE_AFS_VOLUME_PATTERN, CFG_BIBDOCFILE_AFS_VOLUME_QUOTA
if os.path.realpath(bibdoc.basedir).startswith('/afs') and (CFG_CERN_SITE or CFG_INSPIRE_SITE):
## We are on AFS at CERN! Let's allocate directories the CERN/AFS way. E.g.
## $ afs_admin create -q 1000000 /afs/cern.ch/project/cds/files/g40 p.cds.g40
## NOTE: This might be extended to use low-level OpenAFS CLI tools
## so that this technique could be extended to other AFS users outside CERN.
mount_point = os.path.dirname(os.path.realpath(bibdoc.basedir))
if not os.path.exists(mount_point):
volume = CFG_BIBDOCFILE_AFS_VOLUME_PATTERN % os.path.basename(mount_point)
quota = str(CFG_BIBDOCFILE_AFS_VOLUME_QUOTA)
exit_code, stdout, stderr = run_shell_command("afs_admin create -q %s %s %s", (quota, mount_point, volume))
if exit_code or stderr:
raise IOError("Error in creating AFS mount point %s with quota %s and volume %s: exit_code=%s. Captured stdout:\n: %s\nCaptured stderr:\n: %s" % (mount_point, quota, volume, exit_code, stdout, stderr))
for filename in os.listdir(bibdoc.basedir):
if filename[0] != '.' and ';' in filename:
name, version = filename.rsplit(';', 1)
try:
version = int(version)
except ValueError:
# Strange name
register_exception()
raise InvenioBibDocFileError, "A file called %s exists under %s. This is not a valid name. After the ';' there must be an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
if version == 0:
zero_version_bug = True
docformat = name[len(file_strip_ext(name)):]
docformat = normalize_format(docformat)
if version not in versions:
versions[version] = {}
new_name = 'FIXING-%s-%s' % (str(counter), name)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, new_name), e)
if docformat in versions[version]:
new_bibdocs.append((new_name, version))
else:
versions[version][docformat] = new_name
counter += 1
elif filename[0] != '.':
# Strange name
register_exception()
raise InvenioBibDocFileError, "A file called %s exists under %s. This is not a valid name. There should be a ';' followed by an integer representing the file version. Please, manually fix this file either by renaming or by deleting it." % (filename, bibdoc.basedir)
else:
# we create the corresponding storage directory
old_umask = os.umask(0o022)
os.makedirs(bibdoc.basedir)
# and save the father record id if it exists
try:
if self.id != "":
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
if bibdoc.doctype != "":
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, e
os.umask(old_umask)
if not versions:
bibdoc.delete()
self.dirty = True
else:
for version, formats in iteritems(versions):
if zero_version_bug:
version += 1
for docformat, filename in iteritems(formats):
destination = '%s%s;%i' % (docname, docformat, version)
try:
shutil.move('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in renaming '%s' to '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), '%s/%s' % (bibdoc.basedir, destination), e)
try:
recid_fd = open("%s/.recid" % bibdoc.basedir, "w")
recid_fd.write(str(self.id))
recid_fd.close()
type_fd = open("%s/.type" % bibdoc.basedir, "w")
type_fd.write(str(bibdoc.doctype))
type_fd.close()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in creating .recid and .type file for '%s' folder: '%s'" % (bibdoc.basedir, e)
res = []
for (filename, version) in new_bibdocs:
if zero_version_bug:
version += 1
new_bibdoc = self.add_bibdoc(doctype=bibdoc.doctype, docname=docname, never_fail=True)
new_bibdoc.add_file_new_format('%s/%s' % (bibdoc.basedir, filename), version)
res.append(new_bibdoc)
try:
os.remove('%s/%s' % (bibdoc.basedir, filename))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in removing '%s': '%s'" % ('%s/%s' % (bibdoc.basedir, filename), e)
Md5Folder(bibdoc.basedir).update(only_new=False)
bibdoc._build_file_list()
for (bibdoc, dummyatttype) in self.bibdocs.values():
if not run_sql('SELECT data_value FROM bibdocmoreinfo WHERE id_bibdoc=%s', (bibdoc.id,)):
## Import from MARC only if the bibdoc has never had
## its more_info initialized.
try:
bibdoc.import_descriptions_and_comments_from_marc()
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Error in importing description and comment from %s for record %s: %s" % (repr(bibdoc), self.id, e)
return res
def check_format(self, docname):
"""
Check for any format related issue.
In case L{CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS} is
altered or the Python version changes, it might happen that a docname
contains files which are no longer docname + .format ; version, simply
because the .format is now recognized (and it was not before, so it
was considered part of the docname).
This algorithm verifies whether a fix is necessary (see L{fix_format}).
@param docname: the document name whose formats should be verified.
@type docname: string
@return: True if format is correct. False if a fix is needed.
@rtype: bool
@raise InvenioBibDocFileError: in case of any error.
"""
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
if docname != correct_docname:
return False
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, docformat, version = decompose_file_with_version(filename)
except Exception:
raise InvenioBibDocFileError('Incorrect filename "%s" for docname %s for recid %i' % (filename, docname, self.id))
if '%s%s;%i' % (correct_docname, docformat, version) != filename:
return False
return True
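# A minimal usage sketch of the check/fix pair above (the record id 123 and
# the docname 'thesis' are hypothetical; it assumes these maintenance
# methods live on the BibRecDocs class defined earlier in this module):
#   brd = BibRecDocs(123)
#   if not brd.check_format('thesis'):
#       brd.fix_format('thesis', skip_check=True)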
def check_duplicate_docnames(self):
"""
Check whether the record is connected with at least two documents
with the same name.
@return: True if everything is fine.
@rtype: bool
"""
docnames = set()
for docname in self.get_bibdoc_names():
if docname in docnames:
return False
else:
docnames.add(docname)
return True
def uniformize_bibdoc(self, docname):
"""
This algorithm corrects wrong file names belonging to a bibdoc.
@param docname: the document name whose formats should be verified.
@type docname: string
"""
bibdoc = self.get_bibdoc(docname)
for filename in os.listdir(bibdoc.basedir):
if not filename.startswith('.'):
try:
dummy, dummy, docformat, version = decompose_file_with_version(filename)
except ValueError:
register_exception(alert_admin=True, prefix= "Strange file '%s' is stored in %s" % (filename, bibdoc.basedir))
else:
os.rename(os.path.join(bibdoc.basedir, filename), os.path.join(bibdoc.basedir, '%s%s;%i' % (docname, docformat, version)))
Md5Folder(bibdoc.basedir).update()
bibdoc.touch('rename')
def fix_format(self, docname, skip_check=False):
"""
Fixes format related inconsistencies.
@param docname: the document name whose formats should be verified.
@type docname: string
@param skip_check: if True assume L{check_format} has already been
called and the need for fix has already been found.
If False, will implicitly call L{check_format} and skip fixing
if no error is found.
@type skip_check: bool
@return: False in case merging two bibdocs is needed but it's not possible, True otherwise.
@rtype: bool
"""
if not skip_check:
if self.check_format(docname):
return True
bibdoc = self.get_bibdoc(docname)
correct_docname = decompose_file(docname + '.pdf')[1]
need_merge = False
if correct_docname != docname:
need_merge = self.has_docname_p(correct_docname)
if need_merge:
proposed_docname = self.propose_unique_docname(correct_docname)
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (proposed_docname, bibdoc.id))
self.dirty = True
self.uniformize_bibdoc(proposed_docname)
try:
self.merge_bibdocs(docname, proposed_docname)
except InvenioBibDocFileError:
return False
else:
run_sql('UPDATE bibdoc SET docname=%s WHERE id=%s', (correct_docname, bibdoc.id))
self.dirty = True
self.uniformize_bibdoc(correct_docname)
else:
self.uniformize_bibdoc(docname)
return True
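# A hypothetical caller would look at the return value of fix_format():
# False means the corrected docname clashed with an existing bibdoc and the
# automatic merge failed, so manual intervention is needed. Sketch:
#   if not brd.fix_format('thesis'):
#       # leave the record as-is and report it for manual clean-up
#       pass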
def fix_duplicate_docnames(self, skip_check=False):
"""
Algorithm to fix duplicate docnames.
If a record is connected with at least two bibdoc having the same
docname, the algorithm will try to merge them.
@param skip_check: if True assume L{check_duplicate_docnames} has
already been called and the need for fix has already been found.
If False, will implicitly call L{check_duplicate_docnames} and skip
fixing if no error is found.
@type skip_check: bool
"""
if not skip_check:
if self.check_duplicate_docnames():
return
docnames = set()
for bibdoc in self.list_bibdocs():
docname = self.get_docname(bibdoc.id)
if docname in docnames:
new_docname = self.propose_unique_docname(self.get_docname(bibdoc.id))
self.change_name(docid=bibdoc.id, newname=new_docname)
self.merge_bibdocs(docname, new_docname)
docnames.add(docname)
def get_text(self, extract_text_if_necessary=True):
"""
@return: concatenated texts of all bibdocs separated by " ": string
"""
texts = []
for bibdoc in self.list_bibdocs():
if hasattr(bibdoc, 'has_text'):
if extract_text_if_necessary and not bibdoc.has_text(require_up_to_date=True):
perform_ocr = hasattr(bibdoc, 'is_ocr_required') and bibdoc.is_ocr_required()
from invenio.legacy.bibsched.bibtask import write_message
write_message("... will extract words from %s %s" % (bibdoc, perform_ocr and 'with OCR' or ''), verbose=2)
bibdoc.extract_text(perform_ocr=perform_ocr)
texts.append(bibdoc.get_text())
return " ".join(texts)
class BibDoc(object):
"""
This class represents one document (i.e. a set of files with different
formats and with versioning information) that constitutes a piece of
information.
To instantiate a new document, the recid and the docname are mandatory.
To instantiate an already existing document, either the recid and docname
or the docid alone are sufficient to retrieve it.
@param docid: the document identifier.
@type docid: integer
@param recid: the record identifier of the record to which this document
belongs to. If the C{docid} is specified the C{recid} is automatically
retrieved from the database.
@type recid: integer
@param docname: the document name.
@type docname: string
@param doctype: the document type (used when instantiating a new document).
@type doctype: string
@param human_readable: whether sizes should be represented in a human
readable format.
@type human_readable: bool
@raise InvenioBibDocFileError: in case of error.
"""
@staticmethod
def create_new_document(doc_type="Main", rec_links=None):
if rec_links is None:
rec_links = []
status = ''
doc_id = run_sql("INSERT INTO bibdoc (status, creation_date, modification_date, doctype) "
"values(%s,NOW(),NOW(), %s)", (status, doc_type))
if not doc_id:
raise InvenioBibDocFileError, "New docid cannot be created"
# creating the representation on disk ... preparing the directory
try:
BibDoc.prepare_basedir(doc_id)
except Exception as e:
run_sql('DELETE FROM bibdoc WHERE id=%s', (doc_id, ))
register_exception(alert_admin=True)
raise InvenioBibDocFileError, e
# the object has been created: linking to bibliographical records
doc = BibDoc(doc_id)
for link in rec_links:
if "rec_id" in link and link["rec_id"]:
rec_id = link["rec_id"]
doc_name = normalize_docname(link["doc_name"])
a_type = link["a_type"]
doc.attach_to_record(rec_id, str(a_type), str(doc_name))
return doc_id
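# Illustrative sketch (hypothetical values): creating a bare document row
# and linking it to record 123 under the docname 'thesis':
#   docid = BibDoc.create_new_document(
#       doc_type="Main",
#       rec_links=[{"rec_id": 123, "doc_name": "thesis", "a_type": "Main"}])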
def __init__(self, docid, human_readable=False, initial_data=None):
"""Constructor of a bibdoc. At least the docid or the recid/docname
pair is needed.
Specifying recid, docname and doctype without specifying docid results in
attaching a newly created document to a record.
"""
# docid is known, the document already exists
res2 = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (docid,))
self.bibrec_types = [(r[0], r[1], r[2]) for r in res2 ] # just in case the result was behaving like tuples but was something else
if not res2:
# fake attachment
self.bibrec_types = [(0, None, "fake_name_for_unattached_document")]
if initial_data is None:
initial_data = BibDoc._retrieve_data(docid)
self._docfiles = []
self.__md5s = None
self._related_files = {}
self.human_readable = human_readable
self.cd = initial_data["cd"] # creation date
self.md = initial_data["md"] # modification date
self.td = initial_data["td"] # text extraction date # should be moved from here !!!!
self.bibrec_links = initial_data["bibrec_links"]
self.id = initial_data["id"]
self.status = initial_data["status"]
self.basedir = initial_data["basedir"]
self.doctype = initial_data["doctype"]
self.storagename = initial_data["storagename"] # the old docname -> now used as a storage name for old records
self.more_info = BibDocMoreInfo(self.id)
self.dirty = True
self.dirty_related_files = True
self.last_action = 'init'
def __del__(self):
if self.dirty and self.last_action != 'init':
## The object is dirty and we did something more than initializing it
self._build_file_list()
@property
def docfiles(self):
if self.dirty:
self._build_file_list(self.last_action)
self.dirty = False
return self._docfiles
@property
def related_files(self):
if self.dirty_related_files:
self._build_related_file_list()
self.dirty_related_files = False
return self._related_files
@staticmethod
def prepare_basedir(doc_id):
"""Prepares the directory serving as root of a BibDoc"""
basedir = _make_base_dir(doc_id)
# we create the corresponding storage directory
if not os.path.exists(basedir):
from invenio.config import CFG_CERN_SITE, CFG_INSPIRE_SITE, CFG_BIBDOCFILE_AFS_VOLUME_PATTERN, CFG_BIBDOCFILE_AFS_VOLUME_QUOTA
if os.path.realpath(basedir).startswith('/afs') and (CFG_CERN_SITE or CFG_INSPIRE_SITE):
## We are on AFS at CERN! Let's allocate directories the CERN/AFS way. E.g.
## $ afs_admin create -q 1000000 /afs/cern.ch/project/cds/files/g40 p.cds.g40
## NOTE: This might be extended to use low-level OpenAFS CLI tools
## so that this technique could be extended to other AFS users outside CERN.
mount_point = os.path.dirname(os.path.realpath(basedir))
if not os.path.exists(mount_point):
volume = CFG_BIBDOCFILE_AFS_VOLUME_PATTERN % os.path.basename(mount_point)
quota = str(CFG_BIBDOCFILE_AFS_VOLUME_QUOTA)
exit_code, stdout, stderr = run_shell_command("afs_admin create -q %s %s %s", (quota, mount_point, volume))
if exit_code or stderr:
raise IOError("Error in creating AFS mount point %s with quota %s and volume %s: exit_code=%s. Captured stdout:\n: %s\nCaptured stderr:\n: %s" % (mount_point, quota, volume, exit_code, stdout, stderr))
old_umask = os.umask(0o022)
os.makedirs(basedir)
os.umask(old_umask)
def _update_additional_info_files(self):
"""Update the hidden file in the document directory ... the file contains all links to records"""
try:
reclinks_fd = open("%s/.reclinks" % (self.basedir, ), "w")
reclinks_fd.write("RECID DOCNAME TYPE\n")
for link in self.bibrec_links:
reclinks_fd.write("%(recid)s %(docname)s %(doctype)s\n" % link)
reclinks_fd.close()
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError, e
@staticmethod
def _retrieve_data(docid = None):
"""
Filling information about a document from the database entry
"""
container = {}
container["bibrec_links"] = []
container["id"] = docid
container["basedir"] = _make_base_dir(container["id"])
# retrieving links between records and documents
res = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (str(docid),), 1)
if res:
for r in res:
container["bibrec_links"].append({"recid": r[0], "doctype": r[1], "docname": r[2]})
# gather the other information
res = run_sql("SELECT status, creation_date, modification_date, text_extraction_date, doctype, docname FROM bibdoc WHERE id=%s LIMIT 1", (docid,), 1)
if res:
container["status"] = res[0][0]
container["cd"] = res[0][1]
container["md"] = res[0][2]
container["td"] = res[0][3]
container["doctype"] = res[0][4]
container["storagename"] = res[0][5]
else:
# this bibdoc doesn't exist
raise InvenioBibDocFileError, "The docid %s does not exist." % docid
# retrieving all available formats
fprefix = container["storagename"] or "content"
try:
if CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE:
## We take all extensions from the existing formats in the DB.
container["extensions"] = set([ext[0] for ext in run_sql("SELECT format FROM bibdocfsinfo WHERE id_bibdoc=%s", (docid, ))])
else:
## We take all the extensions by listing the directory content, stripping name
## and version.
container["extensions"] = set([fname[len(fprefix):].rsplit(";", 1)[0] for fname in filter(lambda x: x.startswith(fprefix), os.listdir(container["basedir"]))])
except OSError:
container["extensions"] = []
current_app.logger.warning("Could not retrieve available formats",
exc_info=True)
return container
@staticmethod
def create_instance(docid=None, recid=None, docname=None,
doctype='Fulltext', a_type = '', human_readable=False):
"""
Parameters of an attachment to the record:
a_type, recid, docname
@param a_type Type of the attachment to the record (by default Main)
@type a_type String
@param doctype Type of the document itself (by default Fulltext)
@type doctype String
"""
# first try to retrieve existing record based on obtained data
data = None
extensions = []
if docid is not None:
data = BibDoc._retrieve_data(docid)
doctype = data["doctype"]
extensions = data["extensions"]
# Loading an appropriate plugin (by default a generic BibDoc)
used_plugin = None
for plugin in get_plugins():
if plugin['supports'](doctype, extensions):
used_plugin = plugin
if not a_type:
a_type = doctype or 'Main'
if not docid:
rec_links = []
if recid:
rec_links.append({"rec_id": recid, "doc_name" : docname, "a_type": a_type})
if used_plugin and 'create_new' in used_plugin:
docid = used_plugin['create_new'](doctype, rec_links)
else:
docid = BibDoc.create_new_document(doctype, rec_links)
if used_plugin:
return used_plugin['create_instance'](docid=docid,
human_readable=human_readable,
initial_data=data)
return BibDoc(docid=docid,
human_readable=human_readable,
initial_data=data)
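# Illustrative sketch (hypothetical identifiers): create_instance() acts as
# a factory that also resolves the appropriate plugin, both for loading an
# existing document and for creating a new one attached to a record:
#   existing = BibDoc.create_instance(docid=42)
#   fresh = BibDoc.create_instance(recid=123, docname='thesis',
#                                  doctype='Fulltext', a_type='Main')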
def attach_to_record(self, recid, a_type, docname):
""" Attaches given document to a record given by its identifier.
@param recid The identifier of the record
@type recid Integer
@param a_type Function of a document in the record
@type a_type String
@param docname Name of a document inside of a record
@type docname String
"""
run_sql("INSERT INTO bibrec_bibdoc (id_bibrec, id_bibdoc, type, docname) VALUES (%s,%s,%s,%s)",
(str(recid), str(self.id), a_type, docname))
self._update_additional_info_files()
def __repr__(self):
"""
@return: the canonical string representation of the C{BibDoc}.
@rtype: string
"""
return 'BibDoc(%s, %s, %s)' % (repr(self.id), repr(self.doctype), repr(self.human_readable))
def format_recids(self):
"""Returns a string representation of related record ids"""
if len(self.bibrec_links) == 1:
return self.bibrec_links[0]["recid"]
return "[" + ",".join([str(el["recid"]) for el in self.bibrec_links]) + "]"
def __str__(self):
"""
@return: an easy to be I{grepped} string representation of the
whole C{BibDoc} content.
@rtype: string
"""
recids = self.format_recids()
out = '%s:%i:::doctype=%s\n' % (recids, self.id, self.doctype)
out += '%s:%i:::status=%s\n' % (recids, self.id, self.status)
out += '%s:%i:::basedir=%s\n' % (recids, self.id, self.basedir)
out += '%s:%i:::creation date=%s\n' % (recids, self.id, self.cd)
out += '%s:%i:::modification date=%s\n' % (recids, self.id, self.md)
out += '%s:%i:::text extraction date=%s\n' % (recids, self.id, self.td)
out += '%s:%i:::total file attached=%s\n' % (recids, self.id, len(self.docfiles))
if self.human_readable:
out += '%s:%i:::total size latest version=%s\n' % (recids, self.id, nice_size(self.get_total_size_latest_version()))
out += '%s:%i:::total size all files=%s\n' % (recids, self.id, nice_size(self.get_total_size()))
else:
out += '%s:%i:::total size latest version=%s\n' % (recids, self.id, self.get_total_size_latest_version())
out += '%s:%i:::total size all files=%s\n' % (recids, self.id, self.get_total_size())
for docfile in self.docfiles:
out += str(docfile)
return out
def get_md5s(self):
"""
@return: an instance of the Md5Folder class to access MD5 information
of the current BibDoc
@rtype: Md5Folder
"""
if self.__md5s is None:
self.__md5s = Md5Folder(self.basedir)
return self.__md5s
md5s = property(get_md5s)
def format_already_exists_p(self, docformat):
"""
@param format: a format to be checked.
@type format: string
@return: True if a file of the given format already exists among the
latest files.
@rtype: bool
"""
docformat = normalize_format(docformat)
for afile in self.list_latest_files():
if docformat == afile.get_format():
return True
return False
def get_status(self):
"""
@return: the status information.
@rtype: string
"""
return self.status
@staticmethod
def get_fileprefix(basedir, storagename=None):
fname = "%s" % (storagename or "content", )
return os.path.join(basedir, fname )
def get_filepath(self, docformat, version):
""" Generaters the path inside of the filesystem where the document should be stored.
@param format The format of the document
@type format string
@param version version to be stored in the file
@type version string
TODO: this should be completely replaced. File storage (and so, also path building)
should be abstracted from BibDoc and be using loadable extensions
@param format Format of the document to be stored
@type format string
@param version Version of the document to be stored
@type version String
@return Full path to the file encoding a particular version and format of the document
@trype string
"""
return "%s%s;%i" % (BibDoc.get_fileprefix(self.basedir, self.storagename), docformat, version)
def get_docname(self):
"""Obsolete !! (will return empty String for new format documents"""
return self.storagename
def get_doctype(self, recid):
"""Retrieves the type of this document in the scope of a given recid"""
link_types = [attachement["doctype"] for attachement in
self.bibrec_links
if str(attachement["recid"]) == str(recid)]
if link_types:
return link_types[0]
return ""
def touch(self, action=''):
"""
Update the modification time of the bibdoc (as in the UNIX command
C{touch}).
"""
run_sql('UPDATE bibdoc SET modification_date=NOW() WHERE id=%s', (self.id, ))
self.dirty = True
self.last_action = action
def change_doctype(self, new_doctype):
"""
Modify the doctype of a BibDoc
"""
run_sql('UPDATE bibdoc SET doctype=%s WHERE id=%s', (new_doctype, self.id))
run_sql('UPDATE bibrec_bibdoc SET type=%s WHERE id_bibdoc=%s', (new_doctype, self.id))
self.dirty = True
def set_status(self, new_status):
"""
Set a new status. A document with status information is a restricted
document that can be accessed only by users who have an authorization
for the I{viewrestrdoc} WebAccess action with keyword status and value
C{new_status}.
@param new_status: the new status. If empty the document will be
unrestricted.
@type new_status: string
@raise InvenioBibDocFileError: in case the reserved word
'DELETED' is used.
"""
if new_status != KEEP_OLD_VALUE:
if new_status == 'DELETED':
raise InvenioBibDocFileError('DELETED is a reserved word and can not be used for setting the status')
run_sql('UPDATE bibdoc SET status=%s WHERE id=%s', (new_status, self.id))
self.status = new_status
self.touch('status')
def add_file_new_version(self, filename, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Add a new version of a file. If no physical file is already attached
to the document, the given file will have version 1. Otherwise the
new file will have the current version number plus one.
@param filename: the local path of the file.
@type filename: string
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioBibDocFileError: in case of error.
"""
latestVersion = self.get_latest_version()
if latestVersion == 0:
myversion = 1
else:
myversion = latestVersion + 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioBibDocFileError, "%s seems to be empty" % filename
if docformat is None:
docformat = decompose_file(filename)[2]
else:
docformat = normalize_format(docformat)
destination = self.get_filepath(docformat, myversion)
if run_sql("SELECT id_bibdoc FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, myversion, docformat)):
raise InvenioBibDocFileError("According to the database a file of format %s is already attached to the docid %s" % (docformat, self.id))
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0644)
if modification_date: # if the modification time of the file needs to be changed
update_modification_date_of_file(destination, modification_date)
except Exception as e:
register_exception()
raise InvenioBibDocFileError("Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e))
self.more_info.set_description(description, docformat, myversion)
self.more_info.set_comment(comment, docformat, myversion)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag == 'PERFORM_HIDE_PREVIOUS':
for afile in self.list_all_files():
docformat = afile.get_format()
version = afile.get_version()
if version < myversion:
self.more_info.set_flag('HIDDEN', docformat, version)
else:
self.more_info.set_flag(flag, docformat, myversion)
else:
raise InvenioBibDocFileError("'%s' does not exists!" % filename)
self.touch('newversion')
Md5Folder(self.basedir).update()
just_added_file = self.get_file(docformat, myversion)
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, true, %s, %s, %s, %s, %s)", (self.id, myversion, docformat, just_added_file.cd, just_added_file.md, just_added_file.get_checksum(), just_added_file.get_size(), just_added_file.mime))
run_sql("UPDATE bibdocfsinfo SET last_version=false WHERE id_bibdoc=%s AND version<%s", (self.id, myversion))
def add_file_new_format(self, filename, version=None, description=None, comment=None, docformat=None, flags=None, modification_date=None):
"""
Add a file as a new format.
@param filename: the local path of the file.
@type filename: string
@param version: an optional specific version to which the new format
should be added. If None, the last version will be used.
@type version: integer
@param description: an optional description for the file.
@type description: string
@param comment: an optional comment to the file.
@type comment: string
@param format: the extension of the file. If not specified it will
be retrieved from the filename (see L{decompose_file}).
@type format: string
@param flags: a set of flags to be associated with the file (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS})
@type flags: list of string
@raise InvenioBibDocFileError: if the given format already exists.
"""
if version is None:
version = self.get_latest_version()
if version == 0:
version = 1
if os.path.exists(filename):
if not os.path.getsize(filename) > 0:
raise InvenioBibDocFileError, "%s seems to be empty" % filename
if docformat is None:
docformat = decompose_file(filename)[2]
else:
docformat = normalize_format(docformat)
if run_sql("SELECT id_bibdoc FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, version, docformat)):
raise InvenioBibDocFileError("According to the database a file of format %s is already attached to the docid %s" % (docformat, self.id))
destination = self.get_filepath(docformat, version)
if os.path.exists(destination):
raise InvenioBibDocFileError, "A file for docid '%s' already exists for the format '%s'" % (str(self.id), docformat)
try:
shutil.copyfile(filename, destination)
os.chmod(destination, 0644)
if modification_date: # if the modification time of the file needs to be changed
update_modification_date_of_file(destination, modification_date)
except Exception as e:
register_exception()
raise InvenioBibDocFileError, "Encountered an exception while copying '%s' to '%s': '%s'" % (filename, destination, e)
self.more_info.set_comment(comment, docformat, version)
self.more_info.set_description(description, docformat, version)
if flags is None:
flags = []
if 'pdfa' in get_subformat_from_format(docformat).split(';') and not 'PDF/A' in flags:
flags.append('PDF/A')
for flag in flags:
if flag != 'PERFORM_HIDE_PREVIOUS':
self.more_info.set_flag(flag, docformat, version)
else:
raise InvenioBibDocFileError, "'%s' does not exists!" % filename
Md5Folder(self.basedir).update()
self.touch('newformat')
just_added_file = self.get_file(docformat, version)
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, true, %s, %s, %s, %s, %s)", (self.id, version, docformat, just_added_file.cd, just_added_file.md, just_added_file.get_checksum(), just_added_file.get_size(), just_added_file.mime))
def change_docformat(self, oldformat, newformat):
"""
Renames a format name on disk and in all BibDoc structures.
The change will touch only the last version files.
The change will take place only if the newformat doesn't already exist.
@param oldformat: the format that needs to be renamed
@type oldformat: string
@param newformat: the format new name
@type newformat: string
"""
oldformat = normalize_format(oldformat)
newformat = normalize_format(newformat)
if self.format_already_exists_p(newformat):
# same format already exists in the latest files, abort
return
for bibdocfile in self.list_latest_files():
if bibdocfile.get_format() == oldformat:
# change format -> rename x.oldformat -> x.newformat
dirname, base, docformat, version = decompose_file_with_version(bibdocfile.get_full_path())
os.rename(bibdocfile.get_full_path(), os.path.join(dirname, '%s%s;%i' %(base, newformat, version)))
Md5Folder(self.basedir).update()
self.touch('rename')
self._sync_to_db()
return
def purge(self):
"""
Physically removes all the previous versions of the given bibdoc.
Everything but the latest version's formats will be erased.
"""
version = self.get_latest_version()
if version > 1:
for afile in self.docfiles:
if afile.get_version() < version:
self.more_info.unset_comment(afile.get_format(), afile.get_version())
self.more_info.unset_description(afile.get_format(), afile.get_version())
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
self.more_info.unset_flag(flag, afile.get_format(), afile.get_version())
try:
os.remove(afile.get_full_path())
except Exception as dummy:
register_exception()
Md5Folder(self.basedir).update()
self.touch('purge')
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s AND version<%s", (self.id, version))
def expunge(self):
"""
Physically remove all the traces of a given document.
@note: an expunged BibDoc object shouldn't be used anymore or the
result might be unpredictable.
"""
self.more_info.delete()
del self.more_info
os.system('rm -rf %s' % escape_shell_arg(self.basedir))
run_sql('DELETE FROM bibrec_bibdoc WHERE id_bibdoc=%s', (self.id, ))
run_sql('DELETE FROM bibdoc_bibdoc WHERE id_bibdoc1=%s OR id_bibdoc2=%s', (self.id, self.id))
run_sql('DELETE FROM bibdoc WHERE id=%s', (self.id, ))
run_sql('INSERT INTO hstDOCUMENT(action, docname, docformat, docversion, docsize, docchecksum, id_bibdoc, doctimestamp) VALUES("EXPUNGE", %s, %s, %s, %s, %s, %s, NOW())',
('', self.doctype, self.get_latest_version(), self.get_total_size_latest_version(), '', self.id, ))
run_sql('DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s', (self.id, ))
del self._docfiles
del self.id
del self.cd
del self.md
del self.td
del self.basedir
del self.doctype
del self.bibrec_links
def revert(self, version):
"""
Revert the document to a given version. All the formats corresponding
to that version are copied forward to a new version.
@param version: the version to revert to.
@type version: integer
@raise InvenioBibDocFileError: in case of errors
"""
version = int(version)
docfiles = self.list_version_files(version)
if docfiles:
self.add_file_new_version(docfiles[0].get_full_path(), description=docfiles[0].get_description(), comment=docfiles[0].get_comment(), docformat=docfiles[0].get_format(), flags=docfiles[0].flags)
for docfile in docfiles[1:]:
self.add_file_new_format(docfile.filename, description=docfile.get_description(), comment=docfile.get_comment(), docformat=docfile.get_format(), flags=docfile.flags)
def import_descriptions_and_comments_from_marc(self, record=None):
"""
Import descriptions and comments from the corresponding MARC metadata.
@param record: the record (if None it will be calculated).
@type record: bibrecord recstruct
@note: If record is passed it is directly used, otherwise it is retrieved
from the MARCXML stored in the database.
"""
## Let's get the record
from invenio.legacy.search_engine import get_record
if record is None:
record = get_record(self.id)
fields = record_get_field_instances(record, '856', '4', ' ')
global_comment = None
global_description = None
local_comment = {}
local_description = {}
for field in fields:
url = field_get_subfield_values(field, 'u')
if url:
## Given a url
url = url[0]
if re.match('%s/%s/[0-9]+/files/' % (CFG_SITE_URL, CFG_SITE_RECORD), url):
## If it is a traditional /CFG_SITE_RECORD/1/files/ one
## We have global description/comment for all the formats
description = field_get_subfield_values(field, 'y')
if description:
global_description = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
global_comment = comment[0]
elif bibdocfile_url_p(url):
## Otherwise we have description/comment per format
dummy, docname, docformat = decompose_bibdocfile_url(url)
brd = BibRecDocs(self.id)
if docname == brd.get_docname(self.id):
description = field_get_subfield_values(field, 'y')
if description:
local_description[docformat] = description[0]
comment = field_get_subfield_values(field, 'z')
if comment:
local_comment[docformat] = comment[0]
## Let's update the tables
version = self.get_latest_version()
for docfile in self.list_latest_files():
docformat = docfile.get_format()
if docformat in local_comment:
self.set_comment(local_comment[docformat], docformat, version)
else:
self.set_comment(global_comment, docformat, version)
if docformat in local_description:
self.set_description(local_description[docformat], docformat, version)
else:
self.set_description(global_description, docformat, version)
self.dirty = True
def get_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE, display_hidden=True):
"""
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat_re: compiled regular expression
@return: the bibdocfile corresponding to CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT
or, if this does not exist, the smallest size icon of this
document, or None if no icon exists for this document.
@rtype: BibDocFile
@warning: before I{subformats} were introduced this method used to
return a BibDoc, while it now returns a BibDocFile. Check
that your client code is compatible with this.
"""
icons = []
for docfile in self.list_latest_files(list_hidden=display_hidden):
subformat = docfile.get_subformat()
if subformat.lower() == CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT.lower():
# If it's the default icon subformat, return it
return docfile
if subformat_re.match(subformat):
icons.append((docfile.get_size(), docfile))
if icons:
# Sort by size, retrieve the smallest one
icons.sort()
return icons[0][1]
return None
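# Illustrative sketch: fetching the icon attached to the latest version of
# this document, if any, and using its public URL (get_url() is provided by
# BibDocFile further below):
#   icon = bibdoc.get_icon()
#   if icon is not None:
#       icon_url = icon.get_url()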
def add_icon(self, filename, docformat=None, subformat=CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT, modification_date=None):
"""
Attaches icon to this document.
@param filename: the local filesystem path to the icon.
@type filename: string
@param format: an optional format for the icon. If not specified it
will be calculated after the filesystem path.
@type format: string
@param subformat: by default the convention is that
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat: string
@raise InvenioBibDocFileError: in case of errors.
"""
#first check if an icon already exists
if not docformat:
docformat = decompose_file(filename)[2]
if subformat:
docformat += ";%s" % subformat
self.add_file_new_format(filename, docformat=docformat, modification_date=modification_date)
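# Illustrative usage sketch (hypothetical local file): attaching a small GIF
# as the default icon subformat of this document:
#   bibdoc.add_icon('/tmp/thesis_small.gif')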
def delete_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE):
"""
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
Specify a different subformat if you need to use a different
convention.
@type subformat: compiled regular expression
Removes the icon attached to the document if it exists.
"""
for docfile in self.list_latest_files():
if subformat_re.match(docfile.get_subformat()):
self.delete_file(docfile.get_format(), docfile.get_version())
def change_name(self, recid, newname):
"""
Renames this document in connection with a given record.
@param newname: the new name.
@type newname: string
@raise InvenioBibDocFileError: if the new name corresponds to
a document already attached to the record owning this document or
if the name was not changed.
"""
newname = normalize_docname(newname)
res = run_sql("SELECT id_bibdoc FROM bibrec_bibdoc WHERE id_bibrec=%s AND docname=%s", (recid, newname))
if res:
raise InvenioBibDocFileError("A bibdoc called %s already exists for recid %s" % (newname, recid))
updated = run_sql("update bibrec_bibdoc set docname=%s where id_bibdoc=%s and id_bibrec=%s", (newname, self.id, recid))
if not updated:
raise InvenioBibDocFileError("Docname for bibdoc %s in record %s was not changed" % (self.id, recid))
# docid is known, the document already exists
res2 = run_sql("SELECT id_bibrec, type, docname FROM bibrec_bibdoc WHERE id_bibdoc=%s", (self.id,))
## Refreshing names and types.
self.bibrec_types = [(r[0], r[1], r[2]) for r in res2 ] # just in case the result was behaving like tuples but was something else
if not res2:
# fake attachment
self.bibrec_types = [(0, None, "fake_name_for_unattached_document")]
self.touch('rename')
def set_comment(self, comment, docformat, version=None):
"""
Updates the comment of a specific format/version of the document.
@param comment: the new comment.
@type comment: string
@param format: the specific format for which the comment should be
updated.
@type format: string
@param version: the specific version for which the comment should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_comment(comment, docformat, version)
self.dirty = True
def set_description(self, description, docformat, version=None):
"""
Updates the description of a specific format/version of the document.
@param description: the new description.
@type description: string
@param format: the specific format for which the description should be
updated.
@type format: string
@param version: the specific version for which the description should be
updated. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_description(description, docformat, version)
self.dirty = True
def set_flag(self, flagname, docformat, version=None):
"""
Sets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.set_flag(flagname, docformat, version)
self.dirty = True
def has_flag(self, flagname, docformat, version=None):
"""
Checks if a particular flag for a format/version is set.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
set.
@type format: string
@param version: the specific version for which the flag should be
set. If not specified the last version will be used.
@type version: integer
@return: True if the flag is set.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.has_flag(flagname, docformat, version)
def unset_flag(self, flagname, docformat, version=None):
"""
Unsets a flag for a specific format/version of the document.
@param flagname: a flag from L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}.
@type flagname: string
@param format: the specific format for which the flag should be
unset.
@type format: string
@param version: the specific version for which the flag should be
unset. If not specified the last version will be used.
@type version: integer
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
self.more_info.unset_flag(flagname, docformat, version)
self.dirty = True
def get_comment(self, docformat, version=None):
"""
Retrieve the comment of a specific format/version of the document.
@param format: the specific format for which the comment should be
retrieved.
@type format: string
@param version: the specific version for which the comment should be
retrieved. If not specified the last version will be used.
@type version: integer
@return: the comment.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.get_comment(docformat, version)
def get_description(self, docformat, version=None):
"""
Retrieve the description of a specific format/version of the document.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: the description.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
docformat = normalize_format(docformat)
return self.more_info.get_description(docformat, version)
def hidden_p(self, docformat, version=None):
"""
Returns True if the file specified by the given format/version is
hidden.
@param format: the specific format for which the description should be
retrieved.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@return: True if hidden.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
return self.more_info.has_flag('HIDDEN', docformat, version)
def get_base_dir(self):
"""
@return: the base directory on the local filesystem for this document
(e.g. C{/soft/cdsweb/var/data/files/g0/123})
@rtype: string
"""
return self.basedir
def get_type(self):
"""
@return: the type of this document.
@rtype: string"""
return self.doctype
def get_id(self):
"""
@return: the id of this document.
@rtype: integer
"""
return self.id
def get_file(self, docformat, version="", exact_docformat=False):
"""
Returns a L{BibDocFile} instance of this document corresponding to the
specific format and version.
@param format: the specific format.
@type format: string
@param version: the specific version for which the description should
be retrieved. If not specified the last version will be used.
@type version: integer
@param exact_docformat: if True, consider always the
complete docformat (including subformat if any)
@type exact_docformat: bool
@return: the L{BibDocFile} instance.
@rtype: BibDocFile
"""
if version == "":
docfiles = self.list_latest_files()
else:
version = int(version)
docfiles = self.list_version_files(version)
docformat = normalize_format(docformat)
for docfile in docfiles:
if (docfile.get_format() == docformat or not docformat):
return docfile
## Let's skip the subformat specification and consider just the
## superformat
if not exact_docformat:
superformat = get_superformat_from_format(docformat)
for docfile in docfiles:
if get_superformat_from_format(docfile.get_format()) == superformat:
return docfile
raise InvenioBibDocFileError("No file for doc %i of format '%s', version '%s'" % (self.id, docformat, version))
def list_versions(self):
"""
@return: the list of existing version numbers for this document.
@rtype: list of integer
"""
versions = []
for docfile in self.docfiles:
if not docfile.get_version() in versions:
versions.append(docfile.get_version())
versions.sort()
return versions
def delete(self, recid=None):
"""
Delete this document.
@see: L{undelete} for how to undelete the document.
@raise InvenioBibDocFileError: in case of errors.
"""
try:
today = datetime.today()
recids = []
if recid:
recids = [recid]
else:
recids = [link["recid"] for link in self.bibrec_links]
for rid in recids:
brd = BibRecDocs(rid)
docname = brd.get_docname(self.id)
# if the document is attached to some records
brd.change_name(docid=self.id, newname = 'DELETED-%s%s-%s' % (today.strftime('%Y%m%d%H%M%S'), today.microsecond, docname))
run_sql("UPDATE bibdoc SET status='DELETED' WHERE id=%s", (self.id,))
self.status = 'DELETED'
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError, "It's impossible to delete bibdoc %s: %s" % (self.id, e)
def deleted_p(self):
"""
@return: True if this document has been deleted.
@rtype: bool
"""
return self.status == 'DELETED'
def empty_p(self):
"""
@return: True if this document is empty, i.e. it has no bibdocfile
connected.
@rtype: bool
"""
return len(self.docfiles) == 0
def undelete(self, previous_status='', recid=None):
"""
Undelete a deleted document (only if it was actually deleted via L{delete}).
The previous C{status}, i.e. the restriction key can be provided.
Otherwise the undeleted document will be public.
@param previous_status: the previous status that should be restored.
@type previous_status: string
@raise InvenioBibDocFileError: in case of any error.
"""
try:
run_sql("UPDATE bibdoc SET status=%s WHERE id=%s AND status='DELETED'", (previous_status, self.id))
except Exception as e:
raise InvenioBibDocFileError, "It's impossible to undelete bibdoc %s: %s" % (self.id, e)
if recid:
bibrecdocs = BibRecDocs(recid)
docname = bibrecdocs.get_docname(self.id)
if docname.startswith('DELETED-'):
try:
# Let's remove DELETED-20080214144322- in front of the docname
original_name = '-'.join(docname.split('-')[2:])
original_name = bibrecdocs.propose_unique_docname(original_name)
bibrecdocs.change_name(docid=self.id, newname=original_name)
except Exception as e:
raise InvenioBibDocFileError, "It's impossible to restore the previous docname %s. %s kept as docname because: %s" % (original_name, docname, e)
else:
raise InvenioBibDocFileError, "Strange just undeleted docname isn't called DELETED-somedate-docname but %s" % docname
def delete_file(self, docformat, version):
"""
Delete a specific format/version of this document on the filesystem.
@param format: the particular format to be deleted.
@type format: string
@param version: the particular version to be deleted.
@type version: integer
@note: this operation is not reversible!"""
try:
afile = self.get_file(docformat, version)
except InvenioBibDocFileError:
return
try:
os.remove(afile.get_full_path())
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s AND version=%s AND format=%s", (self.id, afile.get_version(), afile.get_format()))
last_version = run_sql("SELECT max(version) FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id, ))[0][0]
if last_version:
## Updating information about last version
run_sql("UPDATE bibdocfsinfo SET last_version=true WHERE id_bibdoc=%s AND version=%s", (self.id, last_version))
run_sql("UPDATE bibdocfsinfo SET last_version=false WHERE id_bibdoc=%s AND version<>%s", (self.id, last_version))
except OSError:
pass
self.touch('delete')
def get_history(self):
"""
@return: a list of human readable and parsable strings representing
the history of this document.
@rtype: list of string
"""
ret = []
hst = run_sql("""SELECT action, docname, docformat, docversion,
docsize, docchecksum, doctimestamp
FROM hstDOCUMENT
WHERE id_bibdoc=%s ORDER BY doctimestamp ASC""", (self.id, ))
for row in hst:
ret.append("%s %s '%s', format: '%s', version: %i, size: %s, checksum: '%s'" % (row[6].strftime('%Y-%m-%d %H:%M:%S'), row[0], row[1], row[2], row[3], nice_size(row[4]), row[5]))
return ret
def _build_file_list(self, context=''):
"""
Lists all files attached to the bibdoc. This function should be
called every time the bibdoc is modified.
As a side effect it logs everything that has happened to the bibdocfiles
in the log facility, according to the context:
"init": means that the function has been called for the first time
by a constructor, hence no logging is performed;
"": the default, means to log every deleted file as DELETED and every
added file as ADDED;
"rename": means that every apparently deleted file is logged as
RENAMEDFROM and every new file as RENAMEDTO.
"""
def log_action(action, docid, docname, docformat, version, size, checksum, timestamp=''):
"""Log an action into the bibdoclog table."""
try:
if timestamp:
run_sql('INSERT INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)', (action, docid, docname, docformat, version, size, checksum, timestamp))
else:
run_sql('INSERT INTO hstDOCUMENT(action, id_bibdoc, docname, docformat, docversion, docsize, docchecksum, doctimestamp) VALUES(%s, %s, %s, %s, %s, %s, %s, NOW())', (action, docid, docname, docformat, version, size, checksum))
except DatabaseError:
register_exception()
def make_removed_added_bibdocfiles(previous_file_list):
"""Internal function for build the log of changed files."""
# Let's rebuild the previous situation
old_files = {}
for bibdocfile in previous_file_list:
old_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
# Let's rebuild the new situation
new_files = {}
for bibdocfile in self._docfiles:
new_files[(bibdocfile.name, bibdocfile.format, bibdocfile.version)] = (bibdocfile.size, bibdocfile.checksum, bibdocfile.md)
# Let's subtract from the added files all the files that are present in
# the old list, and let's add to the deleted files those that are not
# present among the added files.
added_files = dict(new_files)
deleted_files = {}
for key, value in iteritems(old_files):
if key in added_files:
del added_files[key]
else:
deleted_files[key] = value
return (added_files, deleted_files)
if context not in ('init', 'init_from_disk'):
previous_file_list = list(self._docfiles)
res = run_sql("SELECT status, creation_date,"
"modification_date FROM bibdoc WHERE id=%s", (self.id,))
self.cd = res[0][1]
self.md = res[0][2]
self.status = res[0][0]
self.more_info = BibDocMoreInfo(self.id)
self._docfiles = []
if CFG_BIBDOCFILE_ENABLE_BIBDOCFSINFO_CACHE and context == 'init':
## In normal init context we read from DB
res = run_sql("SELECT version, format, cd, md, checksum, filesize FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id, ))
for version, docformat, cd, md, checksum, size in res:
filepath = self.get_filepath(docformat, version)
self._docfiles.append(BibDocFile(
filepath, self.bibrec_types,
version, docformat, self.id, self.status, checksum,
self.more_info, human_readable=self.human_readable, cd=cd, md=md, size=size, bibdoc=self))
else:
if os.path.exists(self.basedir):
files = os.listdir(self.basedir)
files.sort()
for afile in files:
if not afile.startswith('.'):
try:
filepath = os.path.join(self.basedir, afile)
dummy, dummy, docformat, fileversion = decompose_file_with_version(filepath)
checksum = self.md5s.get_checksum(afile)
self._docfiles.append(BibDocFile(filepath, self.bibrec_types,
fileversion, docformat,
self.id, self.status, checksum,
self.more_info, human_readable=self.human_readable, bibdoc=self))
except Exception as e:
register_exception()
raise InvenioBibDocFileError, e
if context in ('init', 'init_from_disk'):
return
else:
added_files, deleted_files = make_removed_added_bibdocfiles(previous_file_list)
deletedstr = "DELETED"
addedstr = "ADDED"
if context == 'rename':
deletedstr = "RENAMEDFROM"
addedstr = "RENAMEDTO"
for (docname, docformat, version), (size, checksum, md) in iteritems(added_files):
if context == 'rename':
md = '' # No modification time
log_action(addedstr, self.id, docname, docformat, version, size, checksum, md)
for (docname, docformat, version), (size, checksum, md) in iteritems(deleted_files):
if context == 'rename':
md = '' # No modification time
log_action(deletedstr, self.id, docname, docformat, version, size, checksum, md)
def _sync_to_db(self):
"""
Update the content of the bibdocfile table by taking what is available on the filesystem.
"""
self._build_file_list('init_from_disk')
run_sql("DELETE FROM bibdocfsinfo WHERE id_bibdoc=%s", (self.id,))
for afile in self.docfiles:
run_sql("INSERT INTO bibdocfsinfo(id_bibdoc, version, format, last_version, cd, md, checksum, filesize, mime) VALUES(%s, %s, %s, false, %s, %s, %s, %s, %s)", (self.id, afile.get_version(), afile.get_format(), afile.cd, afile.md, afile.get_checksum(), afile.get_size(), afile.mime))
run_sql("UPDATE bibdocfsinfo SET last_version=true WHERE id_bibdoc=%s AND version=%s", (self.id, self.get_latest_version()))
def _build_related_file_list(self):
"""Lists all files attached to the bibdoc. This function should be
called every time the bibdoc is modified, e.g. when its icon changes.
@deprecated: use subformats instead.
"""
self._related_files = {}
res = run_sql("SELECT ln.id_bibdoc2,ln.rel_type,bibdoc.status FROM "
"bibdoc_bibdoc AS ln,bibdoc WHERE bibdoc.id=ln.id_bibdoc2 AND "
"ln.id_bibdoc1=%s", (str(self.id),))
for row in res:
docid = row[0]
doctype = row[1]
if row[2] != 'DELETED':
if doctype not in self._related_files:
self._related_files[doctype] = []
cur_doc = BibDoc.create_instance(docid=docid, human_readable=self.human_readable)
self._related_files[doctype].append(cur_doc)
def get_total_size_latest_version(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc and corresponding to the latest version."""
ret = 0
for bibdocfile in self.list_latest_files():
ret += bibdocfile.get_size()
return ret
def get_total_size(self):
"""Return the total size used on disk of all the files belonging
to this bibdoc."""
ret = 0
for bibdocfile in self.list_all_files():
ret += bibdocfile.get_size()
return ret
def list_all_files(self, list_hidden=True):
"""Returns all the docfiles linked with the given bibdoc."""
if list_hidden:
return self.docfiles
else:
return [afile for afile in self.docfiles if not afile.hidden_p()]
def list_latest_files(self, list_hidden=True):
"""Returns all the docfiles within the last version."""
return self.list_version_files(self.get_latest_version(), list_hidden=list_hidden)
def list_version_files(self, version, list_hidden=True):
"""Return all the docfiles of a particular version."""
version = int(version)
return [docfile for docfile in self.docfiles if docfile.get_version() == version and (list_hidden or not docfile.hidden_p())]
def get_latest_version(self):
""" Returns the latest existing version number for the given bibdoc.
If no file is associated with this bibdoc, returns 0.
"""
version = 0
for bibdocfile in self.docfiles:
if bibdocfile.get_version() > version:
version = bibdocfile.get_version()
return version
def get_file_number(self):
"""Return the total number of files."""
return len(self.docfiles)
def register_download(self, ip_address, version, docformat, userid=0, recid=0):
"""Register the information about a download of a particular file."""
docformat = normalize_format(docformat)
if docformat[:1] == '.':
docformat = docformat[1:]
docformat = docformat.upper()
if not version:
version = self.get_latest_version()
return run_sql("INSERT INTO rnkDOWNLOADS "
"(id_bibrec,id_bibdoc,file_version,file_format,"
"id_user,client_host,download_time) VALUES "
"(%s,%s,%s,%s,%s,INET_ATON(%s),NOW())",
(recid, self.id, version, docformat,
userid, ip_address,))
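# Illustrative usage sketch (hypothetical values), as it would typically be
# invoked by the web layer when a file is served:
#   bibdoc.register_download('192.168.0.1', version=1, docformat='.pdf',
#                            userid=1, recid=123)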
def get_incoming_relations(self, rel_type=None):
"""Return all relations in which this BibDoc appears on target position
@param rel_type: Type of the relation, to which we want to limit our search. None = any type
@type rel_type: string
@return: List of BibRelation instances
@rtype: list
"""
return BibRelation.get_relations(rel_type = rel_type,
bibdoc2_id = self.id)
def get_outgoing_relations(self, rel_type=None):
"""Return all relations in which this BibDoc appears on target position
@param rel_type: Type of the relation, to which we want to limit our search. None = any type
@type rel_type: string
@return: List of BibRelation instances
@rtype: list
"""
return BibRelation.get_relations(rel_type = rel_type,
bibdoc1_id = self.id)
def create_outgoing_relation(self, bibdoc2, rel_type):
"""
Create an outgoing relation between current BibDoc and a different one
"""
return BibRelation.create(bibdoc1_id = self.id, bibdoc2_id = bibdoc2.id, rel_type = rel_type)
def create_incoming_relation(self, bibdoc1, rel_type):
"""
Create an incoming relation between a different BibDoc and the
current BibDoc
"""
return BibRelation.create(bibdoc1_id = bibdoc1.id, bibdoc2_id = self.id, rel_type = rel_type)
def generic_path2bidocfile(fullpath):
"""
Returns a BibDocFile object that wraps the given fullpath.
@note: the object will contain the minimum information that can be
guessed from the fullpath (e.g. docname, format, subformat, version,
md5, creation_date, modification_date). It won't contain for example
a comment, a description, a doctype, a restriction.
"""
fullpath = os.path.abspath(fullpath)
try:
path, name, docformat, version = decompose_file_with_version(fullpath)
except ValueError:
## There is no version
version = 0
path, name, docformat = decompose_file(fullpath)
md5folder = Md5Folder(path)
checksum = md5folder.get_checksum(os.path.basename(fullpath))
return BibDocFile(fullpath=fullpath,
recid_doctypes=[(0, None, name)],
version=version,
docformat=docformat,
docid=0,
status=None,
checksum=checksum,
more_info=None)
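# Illustrative usage sketch (hypothetical path): wrapping an arbitrary file
# already lying on disk into a minimal BibDocFile, e.g. to reuse its
# checksum and version/format decomposition:
#   docfile = generic_path2bidocfile('/tmp/report.pdf;1')
#   version, docformat = docfile.version, docfile.format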
class BibDocFile(object):
"""This class represents a physical file in the Invenio filesystem.
It should never be instantiated directly"""
def __init__(self, fullpath, recid_doctypes, version, docformat, docid, status, checksum, more_info=None, human_readable=False, cd=None, md=None, size=None, bibdoc = None):
self.fullpath = os.path.abspath(fullpath)
self.docid = docid
self.recids_doctypes = recid_doctypes
self.version = version
self.status = status
self.checksum = checksum
self.human_readable = human_readable
self.name = recid_doctypes[0][2]
self.bibdoc = bibdoc
if more_info:
self.description = more_info.get_description(docformat, version)
self.comment = more_info.get_comment(docformat, version)
self.flags = more_info.get_flags(docformat, version)
else:
self.description = None
self.comment = None
self.flags = []
self.format = normalize_format(docformat)
self.superformat = get_superformat_from_format(self.format)
self.subformat = get_subformat_from_format(self.format)
if docformat:
self.recids_doctypes = [(a,b,c+self.superformat) for (a,b,c) in self.recids_doctypes]
self.mime, self.encoding = _mimes.guess_type(self.recids_doctypes[0][2])
if self.mime is None:
self.mime = "application/octet-stream"
self.more_info = more_info
self.hidden = 'HIDDEN' in self.flags
self.size = size or os.path.getsize(fullpath)
self.md = md or datetime.fromtimestamp(os.path.getmtime(fullpath))
try:
self.cd = cd or datetime.fromtimestamp(os.path.getctime(fullpath))
except OSError:
self.cd = self.md
self.dir = os.path.dirname(fullpath)
if self.subformat:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'subformat' : self.subformat})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'subformat' : self.subformat, 'version' : self.version})
else:
self.url = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {})
self.fullurl = create_url('%s/%s/%s/files/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, self.recids_doctypes[0][0], self.name, self.superformat), {'version' : self.version})
self.etag = '"%i%s%i"' % (self.docid, self.format, self.version)
self.magic = None
def __repr__(self):
return ('BibDocFile(%s, %i, %s, %s, %i, %i, %s, %s, %s, %s)' % (repr(self.fullpath), self.version, repr(self.name), repr(self.format), self.recids_doctypes[0][0], self.docid, repr(self.status), repr(self.checksum), repr(self.more_info), repr(self.human_readable)))
def format_recids(self):
if self.bibdoc:
return self.bibdoc.format_recids()
return "0"
def __str__(self):
recids = self.format_recids()
out = '%s:%s:%s:%s:fullpath=%s\n' % (recids, self.docid, self.version, self.format, self.fullpath)
out += '%s:%s:%s:%s:name=%s\n' % (recids, self.docid, self.version, self.format, self.name)
out += '%s:%s:%s:%s:subformat=%s\n' % (recids, self.docid, self.version, self.format, get_subformat_from_format(self.format))
out += '%s:%s:%s:%s:status=%s\n' % (recids, self.docid, self.version, self.format, self.status)
out += '%s:%s:%s:%s:checksum=%s\n' % (recids, self.docid, self.version, self.format, self.checksum)
if self.human_readable:
out += '%s:%s:%s:%s:size=%s\n' % (recids, self.docid, self.version, self.format, nice_size(self.size))
else:
out += '%s:%s:%s:%s:size=%s\n' % (recids, self.docid, self.version, self.format, self.size)
out += '%s:%s:%s:%s:creation time=%s\n' % (recids, self.docid, self.version, self.format, self.cd)
out += '%s:%s:%s:%s:modification time=%s\n' % (recids, self.docid, self.version, self.format, self.md)
out += '%s:%s:%s:%s:magic=%s\n' % (recids, self.docid, self.version, self.format, self.get_magic())
out += '%s:%s:%s:%s:mime=%s\n' % (recids, self.docid, self.version, self.format, self.mime)
out += '%s:%s:%s:%s:encoding=%s\n' % (recids, self.docid, self.version, self.format, self.encoding)
out += '%s:%s:%s:%s:url=%s\n' % (recids, self.docid, self.version, self.format, self.url)
out += '%s:%s:%s:%s:fullurl=%s\n' % (recids, self.docid, self.version, self.format, self.fullurl)
out += '%s:%s:%s:%s:description=%s\n' % (recids, self.docid, self.version, self.format, self.description)
out += '%s:%s:%s:%s:comment=%s\n' % (recids, self.docid, self.version, self.format, self.comment)
out += '%s:%s:%s:%s:hidden=%s\n' % (recids, self.docid, self.version, self.format, self.hidden)
out += '%s:%s:%s:%s:flags=%s\n' % (recids, self.docid, self.version, self.format, self.flags)
out += '%s:%s:%s:%s:etag=%s\n' % (recids, self.docid, self.version, self.format, self.etag)
return out
def is_restricted(self, user_info):
"""Returns restriction state. (see acc_authorize_action return values)"""
if self.status not in ('', 'DELETED'):
return check_bibdoc_authorization(user_info, status=self.status)
elif self.status == 'DELETED':
            return (1, 'File has been deleted')
else:
return (0, '')
def is_icon(self, subformat_re=CFG_BIBDOCFILE_ICON_SUBFORMAT_RE):
"""
@param subformat_re: by default the convention is that
L{CFG_BIBDOCFILE_ICON_SUBFORMAT_RE} is used as a subformat indicator to
mean that a particular format is to be used as an icon.
        Specify a different subformat if you need to use a different
        convention.
        @type subformat_re: compiled regular expression
@return: True if this file is an icon.
@rtype: bool
"""
return bool(subformat_re.match(self.subformat))
def hidden_p(self):
return self.hidden
def get_url(self):
return self.url
def get_type(self):
"""Returns the first type connected with the bibdoc of this file."""
return self.recids_doctypes[0][1]
def get_path(self):
return self.fullpath
def get_bibdocid(self):
return self.docid
def get_name(self):
return self.name
def get_full_name(self):
"""Returns the first name connected with the bibdoc of this file."""
return self.recids_doctypes[0][2]
def get_full_path(self):
return self.fullpath
def get_format(self):
return self.format
def get_subformat(self):
return self.subformat
def get_superformat(self):
return self.superformat
def get_size(self):
return self.size
def get_version(self):
return self.version
def get_checksum(self):
return self.checksum
def get_description(self):
return self.description
def get_comment(self):
return self.comment
def get_content(self):
"""Returns the binary content of the file."""
content_fd = open(self.fullpath, 'rb')
content = content_fd.read()
content_fd.close()
return content
def get_recid(self):
"""Returns the first recid connected with the bibdoc of this file."""
return self.recids_doctypes[0][0]
def get_status(self):
"""Returns the status of the file, i.e. either '', 'DELETED' or a
restriction keyword."""
return self.status
def get_magic(self):
"""Return all the possible guesses from the magic library about
the content of the file."""
if self.magic is None:
if CFG_HAS_MAGIC == 1:
magic_cookies = _get_magic_cookies()
magic_result = []
for key in magic_cookies.keys():
magic_result.append(magic_cookies[key].file(self.fullpath))
self.magic = tuple(magic_result)
elif CFG_HAS_MAGIC == 2:
magic_result = []
for key in ({'mime': False, 'mime_encoding': False},
{'mime': True, 'mime_encoding': False},
{'mime': False, 'mime_encoding': True}):
magic_result.append(_magic_wrapper(self.fullpath, **key))
self.magic = tuple(magic_result)
return self.magic
def check(self):
"""Return True if the checksum corresponds to the file."""
return calculate_md5(self.fullpath) == self.checksum
def stream(self, req, download=False):
"""Stream the file. Note that no restriction check is being
done here, since restrictions have been checked previously
inside websubmit_webinterface.py."""
if os.path.exists(self.fullpath):
if random.random() < CFG_BIBDOCFILE_MD5_CHECK_PROBABILITY and calculate_md5(self.fullpath) != self.checksum:
raise InvenioBibDocFileError, "File %s, version %i, is corrupted!" % (self.recids_doctypes[0][2], self.version)
stream_file(req, self.fullpath, "%s%s" % (self.name, self.superformat), self.mime, self.encoding, self.etag, self.checksum, self.fullurl, download=download)
raise apache.SERVER_RETURN, apache.DONE
else:
req.status = apache.HTTP_NOT_FOUND
            raise InvenioBibDocFileError, "%s does not exist!" % self.fullpath
_RE_STATUS_PARSER = re.compile(r'^(?P<type>email|group|egroup|role|firerole|status):\s*(?P<value>.*)$', re.S + re.I)
def check_bibdoc_authorization(user_info, status):
"""
Check if the user is authorized to access a document protected with the given status.
L{status} is a string of the form::
auth_type: auth_value
where C{auth_type} can have values in::
email, group, role, firerole, status
    and C{auth_value} has a value interpreted against C{auth_type}:
- C{email}: the user can access the document if his/her email matches C{auth_value}
- C{group}: the user can access the document if one of the groups (local or
external) of which he/she is member matches C{auth_value}
- C{role}: the user can access the document if he/she belongs to the WebAccess
role specified in C{auth_value}
- C{firerole}: the user can access the document if he/she is implicitly matched
by the role described by the firewall like role definition in C{auth_value}
- C{status}: the user can access the document if he/she is authorized to
        for the action C{viewrestrdoc} with C{status} parameter having value
C{auth_value}
@note: If no C{auth_type} is specified or if C{auth_type} is not one of the
above, C{auth_value} will be set to the value contained in the
parameter C{status}, and C{auth_type} will be considered to be C{status}.
@param user_info: the user_info dictionary
@type: dict
@param status: the status of the document.
@type status: string
@return: a tuple, of the form C{(auth_code, auth_message)} where auth_code is 0
if the authorization is granted and greater than 0 otherwise.
@rtype: (int, string)
@raise ValueError: in case of unexpected parsing error.
"""
if not status:
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
def parse_status(status):
g = _RE_STATUS_PARSER.match(status)
if g:
return (g.group('type').lower(), g.group('value'))
else:
return ('status', status)
if acc_is_user_in_role(user_info, acc_get_role_id(SUPERADMINROLE)):
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
auth_type, auth_value = parse_status(status)
if auth_type == 'status':
return acc_authorize_action(user_info, 'viewrestrdoc', status=auth_value)
elif auth_type == 'email':
if not auth_value.lower().strip() == user_info['email'].lower().strip():
            return (1, 'You must be the user with email %s in order to access this document' % repr(auth_value))
elif auth_type == 'group':
if not auth_value in user_info['group']:
return (1, 'You must be member of the group %s in order to access this document' % repr(auth_value))
elif auth_type == 'role':
if not acc_is_user_in_role(user_info, acc_get_role_id(auth_value)):
            return (1, 'You must be member of the role %s in order to access this document' % repr(auth_value))
elif auth_type == 'firerole':
if not acc_firerole_check_user(user_info, compile_role_definition(auth_value)):
return (1, 'You must be authorized in order to access this document')
else:
raise ValueError, 'Unexpected authorization type %s for %s' % (repr(auth_type), repr(auth_value))
return (0, CFG_WEBACCESS_WARNING_MSGS[0])
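# Illustrative examples of status strings handled by check_bibdoc_authorization()
# above (all values are hypothetical):
#   'email: jane.doe@example.org'   -> only that email address may access the file
#   'group: thesis-committee'       -> members of that (local or external) group
#   'role: curator'                 -> members of the WebAccess role 'curator'
#   'firerole: allow group "staff"' -> users matched by the firewall-like role definition
#   'confidential'                  -> no auth_type given: treated as status 'confidential'
#                                      and checked via the 'viewrestrdoc' action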
## TODO for future reimplementation of stream_file
#class StreamFileException(Exception):
# def __init__(self, value):
# self.value = value
_RE_BAD_MSIE = re.compile("MSIE\s+(\d+\.\d+)")
def stream_file(req, fullpath, fullname=None, mime=None, encoding=None, etag=None, md5str=None, location=None, download=False):
"""This is a generic function to stream a file to the user.
If fullname, mime, encoding, and location are not provided they will be
guessed based on req and fullpath.
    md5str should be passed as a hexadecimal string.
"""
## TODO for future reimplementation of stream_file
# from flask import send_file
# if fullname is None:
# fullname = fullpath.split('/')[-1]
# response = send_file(fullpath,
# attachment_filename=fullname.replace('"', '\\"'),
# as_attachment=False)
# if not download:
# response.headers['Content-Disposition'] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
#
# raise StreamFileException(response)
def normal_streaming(size):
req.set_content_length(size)
req.send_http_header()
if req.method != 'HEAD':
req.sendfile(fullpath)
return ""
def single_range(size, the_range):
req.set_content_length(the_range[1])
req.headers_out['Content-Range'] = 'bytes %d-%d/%d' % (the_range[0], the_range[0] + the_range[1] - 1, size)
req.status = apache.HTTP_PARTIAL_CONTENT
req.send_http_header()
if req.method != 'HEAD':
req.sendfile(fullpath, the_range[0], the_range[1])
return ""
def multiple_ranges(size, ranges, mime):
req.status = apache.HTTP_PARTIAL_CONTENT
boundary = '%s%04d' % (time.strftime('THIS_STRING_SEPARATES_%Y%m%d%H%M%S'), random.randint(0, 9999))
req.content_type = 'multipart/byteranges; boundary=%s' % boundary
content_length = 0
for arange in ranges:
content_length += len('--%s\r\n' % boundary)
content_length += len('Content-Type: %s\r\n' % mime)
content_length += len('Content-Range: bytes %d-%d/%d\r\n' % (arange[0], arange[0] + arange[1] - 1, size))
content_length += len('\r\n')
content_length += arange[1]
content_length += len('\r\n')
content_length += len('--%s--\r\n' % boundary)
req.set_content_length(content_length)
req.send_http_header()
if req.method != 'HEAD':
for arange in ranges:
req.write('--%s\r\n' % boundary, 0)
req.write('Content-Type: %s\r\n' % mime, 0)
req.write('Content-Range: bytes %d-%d/%d\r\n' % (arange[0], arange[0] + arange[1] - 1, size), 0)
req.write('\r\n', 0)
req.sendfile(fullpath, arange[0], arange[1])
req.write('\r\n', 0)
req.write('--%s--\r\n' % boundary)
req.flush()
return ""
def parse_date(date):
"""According to <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3>
a date can come in three formats (in order of preference):
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
Moreover IE is adding some trailing information after a ';'.
        Wrong dates are simply ignored.
        This function returns the time in seconds since the epoch (GMT) or None
        in case of errors."""
if not date:
return None
try:
date = date.split(';')[0].strip() # Because of IE
## Sun, 06 Nov 1994 08:49:37 GMT
return time.mktime(time.strptime(date, '%a, %d %b %Y %X %Z'))
except:
try:
                ## Sunday, 06-Nov-94 08:49:37 GMT
return time.mktime(time.strptime(date, '%A, %d-%b-%y %H:%M:%S %Z'))
except:
try:
                    ## Sun Nov  6 08:49:37 1994 (ANSI C asctime format)
                    return time.mktime(time.strptime(date, '%a %b %d %H:%M:%S %Y'))
except:
return None
def parse_ranges(ranges):
"""According to <http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35>
a (multiple) range request comes in the form:
bytes=20-30,40-60,70-,-80
with the meaning:
            from byte 20 to 30 inclusive (11 bytes)
            from byte 40 to 60 inclusive (21 bytes)
from byte 70 to (size - 1) inclusive (size - 70 bytes)
from byte size - 80 to (size - 1) inclusive (80 bytes)
This function will return the list of ranges in the form:
[[first_byte, last_byte], ...]
If first_byte or last_byte aren't specified they'll be set to None
If the list is not well formatted it will return None
"""
try:
if ranges.startswith('bytes') and '=' in ranges:
ranges = ranges.split('=')[1].strip()
else:
return None
ret = []
for arange in ranges.split(','):
arange = arange.strip()
if arange.startswith('-'):
ret.append([None, int(arange[1:])])
elif arange.endswith('-'):
ret.append([int(arange[:-1]), None])
else:
ret.append(map(int, arange.split('-')))
return ret
except:
return None
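    # For instance (illustrative), a Range header of 'bytes=20-30,70-,-80' is
    # parsed by parse_ranges() into [[20, 30], [70, None], [None, 80]], i.e. open
    # ends are represented by None and any malformed header yields None instead.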
def parse_tags(tags):
"""Return a list of tags starting from a comma separated list."""
return [tag.strip() for tag in tags.split(',')]
    def fix_ranges(ranges, size):
        """Complementary to parse_ranges: it will transform all the ranges
        into (first_byte, length), adjusting all the values based on the
        actual size provided.
"""
ret = []
for arange in ranges:
if (arange[0] is None and arange[1] > 0) or arange[0] < size:
if arange[0] is None:
arange[0] = size - arange[1]
elif arange[1] is None:
arange[1] = size - arange[0]
else:
arange[1] = arange[1] - arange[0] + 1
arange[0] = max(0, arange[0])
arange[1] = min(size - arange[0], arange[1])
if arange[1] > 0:
ret.append(arange)
return ret
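    # For instance (illustrative), with a file of size 100 bytes fix_ranges() turns
    #   [[20, 30]]   into [[20, 11]]   (bytes 20..30 -> start 20, length 11)
    #   [[70, None]] into [[70, 30]]   (from byte 70 to the end of the file)
    #   [[None, 80]] into [[20, 80]]   (the last 80 bytes)
    # Ranges falling completely outside the file are dropped.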
    def get_normalized_headers():
        """Strip and lowercase all the keys of the headers dictionary, and
        strip and parse the values of known headers into their typed values."""
ret = {
'if-match' : None,
'unless-modified-since' : None,
'if-modified-since' : None,
'range' : None,
'if-range' : None,
'if-none-match' : None,
}
for key, value in iteritems(req.headers_in):
key = key.strip().lower()
value = value.strip()
if key in ('unless-modified-since', 'if-modified-since'):
value = parse_date(value)
elif key == 'range':
value = parse_ranges(value)
elif key == 'if-range':
value = parse_date(value) or parse_tags(value)
elif key in ('if-match', 'if-none-match'):
value = parse_tags(value)
if value:
ret[key] = value
return ret
headers = get_normalized_headers()
g = _RE_BAD_MSIE.search(headers.get('user-agent', "MSIE 6.0"))
bad_msie = g and float(g.group(1)) < 9.0
if CFG_BIBDOCFILE_USE_XSENDFILE:
## If XSendFile is supported by the server, let's use it.
if os.path.exists(fullpath):
if fullname is None:
fullname = os.path.basename(fullpath)
if bad_msie:
## IE is confused by quotes
req.headers_out["Content-Disposition"] = 'attachment; filename=%s' % fullname.replace('"', '\\"')
elif download:
req.headers_out["Content-Disposition"] = 'attachment; filename="%s"' % fullname.replace('"', '\\"')
else:
## IE is confused by inline
req.headers_out["Content-Disposition"] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
req.headers_out["X-Sendfile"] = fullpath
if mime is None:
(mime, encoding) = _mimes.guess_type(fullpath)
if mime is None:
mime = "application/octet-stream"
if not bad_msie:
## IE is confused by not supported mimetypes
req.content_type = mime
return ""
else:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
if headers['if-match']:
if etag is not None and etag not in headers['if-match']:
raise apache.SERVER_RETURN, apache.HTTP_PRECONDITION_FAILED
if os.path.exists(fullpath):
mtime = os.path.getmtime(fullpath)
if fullname is None:
fullname = os.path.basename(fullpath)
if mime is None:
(mime, encoding) = _mimes.guess_type(fullpath)
if mime is None:
mime = "application/octet-stream"
if location is None:
location = req.uri
if not bad_msie:
## IE is confused by not supported mimetypes
req.content_type = mime
req.encoding = encoding
req.filename = fullname
req.headers_out["Last-Modified"] = time.strftime('%a, %d %b %Y %X GMT', time.gmtime(mtime))
if CFG_ENABLE_HTTP_RANGE_REQUESTS:
req.headers_out["Accept-Ranges"] = "bytes"
else:
req.headers_out["Accept-Ranges"] = "none"
req.headers_out["Content-Location"] = location
if etag is not None:
req.headers_out["ETag"] = etag
if md5str is not None:
req.headers_out["Content-MD5"] = base64.encodestring(binascii.unhexlify(md5str.upper()))[:-1]
if bad_msie:
## IE is confused by quotes
req.headers_out["Content-Disposition"] = 'attachment; filename=%s' % fullname.replace('"', '\\"')
elif download:
req.headers_out["Content-Disposition"] = 'attachment; filename="%s"' % fullname.replace('"', '\\"')
else:
## IE is confused by inline
req.headers_out["Content-Disposition"] = 'inline; filename="%s"' % fullname.replace('"', '\\"')
size = os.path.getsize(fullpath)
if not size:
try:
raise Exception, '%s exists but is empty' % fullpath
except Exception:
register_exception(req=req, alert_admin=True)
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
if headers['if-modified-since'] and headers['if-modified-since'] >= mtime:
raise apache.SERVER_RETURN, apache.HTTP_NOT_MODIFIED
if headers['if-none-match']:
if etag is not None and etag in headers['if-none-match']:
raise apache.SERVER_RETURN, apache.HTTP_NOT_MODIFIED
if headers['unless-modified-since'] and headers['unless-modified-since'] < mtime:
return normal_streaming(size)
if CFG_ENABLE_HTTP_RANGE_REQUESTS and headers['range']:
try:
if headers['if-range']:
if etag is None or etag not in headers['if-range']:
return normal_streaming(size)
ranges = fix_ranges(headers['range'], size)
except:
return normal_streaming(size)
if len(ranges) > 1:
return multiple_ranges(size, ranges, mime)
elif ranges:
return single_range(size, ranges[0])
else:
raise apache.SERVER_RETURN, apache.HTTP_RANGE_NOT_SATISFIABLE
else:
return normal_streaming(size)
else:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
def stream_restricted_icon(req):
"""Return the content of the "Restricted Icon" file."""
stream_file(req, '%s/img/restricted.gif' % CFG_WEBDIR)
raise apache.SERVER_RETURN, apache.DONE
#def list_versions_from_array(docfiles):
# """Retrieve the list of existing versions from the given docfiles list."""
# versions = []
# for docfile in docfiles:
# if not docfile.get_version() in versions:
# versions.append(docfile.get_version())
# versions.sort()
# versions.reverse()
# return versions
def _make_base_dir(docid):
"""Given a docid it returns the complete path that should host its files."""
group = "g" + str(int(int(docid) / CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT))
return os.path.join(CFG_BIBDOCFILE_FILEDIR, group, str(docid))
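# For instance (illustrative), with CFG_BIBDOCFILE_FILESYSTEM_BIBDOC_GROUP_LIMIT = 5000
# a document with docid 123 would live under CFG_BIBDOCFILE_FILEDIR/g0/123 and a
# document with docid 12345 under CFG_BIBDOCFILE_FILEDIR/g2/12345.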
class Md5Folder(object):
"""Manage all the Md5 checksum about a folder"""
def __init__(self, folder):
"""Initialize the class from the md5 checksum of a given path"""
self.folder = folder
self.load()
    def update(self, only_new=True):
        """Update the .md5 file with the current files. If only_new
        is True, only files without an already calculated checksum are hashed."""
if not only_new:
self.md5s = {}
if os.path.exists(self.folder):
for filename in os.listdir(self.folder):
if filename not in self.md5s and not filename.startswith('.'):
self.md5s[filename] = calculate_md5(os.path.join(self.folder, filename))
self.store()
def store(self):
"""Store the current md5 dictionary into .md5"""
try:
old_umask = os.umask(0o022)
md5file = open(os.path.join(self.folder, ".md5"), "w")
for key, value in self.md5s.items():
md5file.write('%s *%s\n' % (value, key))
md5file.close()
os.umask(old_umask)
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while storing .md5 for folder '%s': '%s'" % (self.folder, e))
def load(self):
"""Load .md5 into the md5 dictionary"""
self.md5s = {}
md5_path = os.path.join(self.folder, ".md5")
if os.path.exists(md5_path):
for row in open(md5_path, "r"):
md5hash = row[:32]
filename = row[34:].strip()
self.md5s[filename] = md5hash
else:
self.update()
    def check(self, filename=''):
        """Check that the specified file, or every file for which a hash is
        stored, is still coherent with the stored hash."""
if filename and filename in self.md5s.keys():
try:
return self.md5s[filename] == calculate_md5(os.path.join(self.folder, filename))
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while loading '%s': '%s'" % (os.path.join(self.folder, filename), e))
else:
for filename, md5hash in self.md5s.items():
try:
if calculate_md5(os.path.join(self.folder, filename)) != md5hash:
return False
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while loading '%s': '%s'" % (os.path.join(self.folder, filename), e))
return True
def get_checksum(self, filename):
"""Return the checksum of a physical file."""
md5hash = self.md5s.get(filename, None)
if md5hash is None:
self.update()
# Now it should not fail!
md5hash = self.md5s[filename]
return md5hash
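# Minimal usage sketch for Md5Folder (the path is hypothetical): the object keeps
# the checksums of every non-hidden file of a bibdoc directory inside a '.md5' file.
#
#     md5s = Md5Folder('/opt/invenio/var/data/files/g0/123')
#     md5s.get_checksum('report.pdf;1')   # checksum recorded for that file
#     md5s.check('report.pdf;1')          # True if the file still matches it
#     md5s.update()                       # hash any file added since the last run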
def calculate_md5_external(filename):
"""Calculate the md5 of a physical file through md5sum Command Line Tool.
    This is suitable for files larger than 256Kb."""
try:
md5_result = os.popen(CFG_PATH_MD5SUM + ' -b %s' % escape_shell_arg(filename))
ret = md5_result.read()[:32]
md5_result.close()
if len(ret) != 32:
# Error in running md5sum. Let's fallback to internal
# algorithm.
return calculate_md5(filename, force_internal=True)
else:
return ret
except Exception as e:
raise InvenioBibDocFileError("Encountered an exception while calculating md5 for file '%s': '%s'" % (filename, e))
def calculate_md5(filename, force_internal=False):
"""Calculate the md5 of a physical file. This is suitable for files smaller
than 256Kb."""
if not CFG_PATH_MD5SUM or force_internal or os.path.getsize(filename) < CFG_BIBDOCFILE_MD5_THRESHOLD:
try:
to_be_read = open(filename, "rb")
computed_md5 = md5()
while True:
buf = to_be_read.read(CFG_BIBDOCFILE_MD5_BUFFER)
if buf:
computed_md5.update(buf)
else:
break
to_be_read.close()
return computed_md5.hexdigest()
except Exception as e:
register_exception(alert_admin=True)
raise InvenioBibDocFileError("Encountered an exception while calculating md5 for file '%s': '%s'" % (filename, e))
else:
return calculate_md5_external(filename)
def bibdocfile_url_to_bibrecdocs(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibRecDocs object for the corresponding recid."""
recid = decompose_bibdocfile_url(url)[0]
return BibRecDocs(recid)
def bibdocfile_url_to_bibdoc(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibDoc object for the corresponding recid/docname."""
docname = decompose_bibdocfile_url(url)[1]
return bibdocfile_url_to_bibrecdocs(url).get_bibdoc(docname)
def bibdocfile_url_to_bibdocfile(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
a BibDocFile object for the corresponding recid/docname/format."""
docformat = decompose_bibdocfile_url(url)[2]
return bibdocfile_url_to_bibdoc(url).get_file(docformat)
def bibdocfile_url_to_fullpath(url):
"""Given an URL in the form CFG_SITE_[SECURE_]URL/CFG_SITE_RECORD/xxx/files/... it returns
the fullpath for the corresponding recid/docname/format."""
return bibdocfile_url_to_bibdocfile(url).get_full_path()
def bibdocfile_url_p(url):
    """Return True when the url is a potentially valid URL pointing to a
    fulltext owned by the system."""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
return True
if not (url.startswith('%s/%s/' % (CFG_SITE_URL, CFG_SITE_RECORD)) or url.startswith('%s/%s/' % (CFG_SITE_SECURE_URL, CFG_SITE_RECORD))):
return False
splitted_url = url.split('/files/')
return len(splitted_url) == 2 and splitted_url[0] != '' and splitted_url[1] != ''
def get_docid_from_bibdocfile_fullpath(fullpath):
"""Given a bibdocfile fullpath (e.g. "CFG_BIBDOCFILE_FILEDIR/g0/123/bar.pdf;1")
returns the docid (e.g. 123)."""
if not fullpath.startswith(os.path.join(CFG_BIBDOCFILE_FILEDIR, 'g')):
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
dirname = decompose_file_with_version(fullpath)[0]
try:
return int(dirname.split('/')[-1])
except:
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
def decompose_bibdocfile_fullpath(fullpath):
    """Given a bibdocfile fullpath (e.g. "CFG_BIBDOCFILE_FILEDIR/g0/123/bar.pdf;1")
    returns a dictionary with the corresponding doc_id, extension and version."""
if not fullpath.startswith(os.path.join(CFG_BIBDOCFILE_FILEDIR, 'g')):
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
dirname, dummy, extension, version = decompose_file_with_version(fullpath)
try:
docid = int(dirname.split('/')[-1])
return {"doc_id" : docid, "extension": extension, "version": version}
except:
raise InvenioBibDocFileError, "Fullpath %s doesn't correspond to a valid bibdocfile fullpath" % fullpath
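# For instance (illustrative), a fullpath such as
# CFG_BIBDOCFILE_FILEDIR + '/g0/123/bar.pdf;1' is decomposed into a dictionary
# of the form {"doc_id": 123, "extension": ".pdf", "version": 1}.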
_RE_BIBDOCFILE_URL = re.compile("(%s|%s)/%s/(?P<recid>\d+)(?P<rest>.*)" % (re.escape(CFG_SITE_URL), re.escape(CFG_SITE_SECURE_URL), re.escape(CFG_SITE_RECORD)))
def decompose_bibdocfile_url(url):
"""Given a bibdocfile_url return a triple (recid, docname, format)."""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
return decompose_bibdocfile_very_old_url(url)
g = _RE_BIBDOCFILE_URL.match(urllib.unquote(url))
if g:
recid = int(g.group('recid'))
rest = g.group('rest')
dummy, docname, docformat = decompose_file(rest)
return recid, docname, docformat
else:
raise InvenioBibDocFileError, "Url %s doesn't correspond to a valid record inside the system." % url
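# For instance (illustrative, hypothetical record), a URL such as
# CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/123/files/report.pdf'
# is decomposed into the triple (123, 'report', '.pdf').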
re_bibdocfile_old_url = re.compile(r'/%s/(\d*)/files/' % CFG_SITE_RECORD)
def decompose_bibdocfile_old_url(url):
"""Given a bibdocfile old url (e.g. CFG_SITE_URL/CFG_SITE_RECORD/123/files)
it returns the recid."""
g = re_bibdocfile_old_url.search(url)
if g:
return int(g.group(1))
raise InvenioBibDocFileError('%s is not a valid old bibdocfile url' % url)
def decompose_bibdocfile_very_old_url(url):
"""Decompose an old /getfile.py? URL"""
if url.startswith('%s/getfile.py' % CFG_SITE_URL) or url.startswith('%s/getfile.py' % CFG_SITE_SECURE_URL):
params = urllib.splitquery(url)[1]
if params:
try:
params = cgi.parse_qs(params)
if 'docid' in params:
docid = int(params['docid'][0])
bibdoc = BibDoc.create_instance(docid)
if bibdoc.bibrec_links:
recid = bibdoc.bibrec_links[0]["rec_id"]
docname = bibdoc.bibrec_links[0]["doc_name"]
else:
raise InvenioBibDocFileError("Old style URL pointing to an unattached document")
elif 'recid' in params:
recid = int(params['recid'][0])
if 'name' in params:
docname = params['name'][0]
else:
docname = ''
else:
                    raise InvenioBibDocFileError('%s does not have enough params to correspond to a bibdocfile.' % url)
docformat = normalize_format(params.get('format', [''])[0])
return (recid, docname, docformat)
except Exception as e:
raise InvenioBibDocFileError('Problem with %s: %s' % (url, e))
else:
raise InvenioBibDocFileError('%s has no params to correspond to a bibdocfile.' % url)
else:
raise InvenioBibDocFileError('%s is not a valid very old bibdocfile url' % url)
def get_docname_from_url(url):
"""Return a potential docname given a url"""
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
filename = os.path.split(path)[-1]
return file_strip_ext(filename)
def get_format_from_url(url):
"""Return a potential format given a url"""
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
filename = os.path.split(path)[-1]
return filename[len(file_strip_ext(filename)):]
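# For instance (illustrative), for the URL 'http://example.org/some/path/report.tar.gz'
# get_docname_from_url() would return 'report' and get_format_from_url() '.tar.gz',
# provided '.tar.gz' is one of the known extensions stripped by file_strip_ext().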
def clean_url(url):
    """Given a local URL (e.g. a local path), return it as an absolute path;
    otherwise return the URL unchanged."""
if is_url_a_local_file(url):
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
return os.path.abspath(path)
else:
return url
def is_url_a_local_file(url):
"""Return True if the given URL is pointing to a local file."""
protocol = urllib2.urlparse.urlsplit(url)[0]
return protocol in ('', 'file')
def check_valid_url(url):
"""
Check for validity of a url or a file.
@param url: the URL to check
@type url: string
@raise StandardError: if the URL is not a valid URL.
"""
try:
if is_url_a_local_file(url):
path = urllib2.urlparse.urlsplit(urllib.unquote(url))[2]
if os.path.abspath(path) != path:
raise StandardError, "%s is not a normalized path (would be %s)." % (path, os.path.normpath(path))
for allowed_path in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS + [CFG_TMPDIR, CFG_TMPSHAREDDIR, CFG_WEBSUBMIT_STORAGEDIR]:
if path.startswith(allowed_path):
dummy_fd = open(path)
dummy_fd.close()
return
raise StandardError, "%s is not in one of the allowed paths." % path
else:
try:
open_url(url)
except InvenioBibdocfileUnauthorizedURL as e:
raise StandardError, str(e)
except Exception as e:
raise StandardError, "%s is not a correct url: %s" % (url, e)
def safe_mkstemp(suffix, prefix='bibdocfile_'):
    """Create a temporary filename that doesn't have any '.' in it, apart
    from the suffix."""
tmpfd, tmppath = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR)
    # Close the file and leave the responsibility to the client code to
    # correctly open/close it.
os.close(tmpfd)
if '.' not in suffix:
# Just in case format is empty
return tmppath
while '.' in os.path.basename(tmppath)[:-len(suffix)]:
os.remove(tmppath)
tmpfd, tmppath = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=CFG_TMPDIR)
os.close(tmpfd)
return tmppath
def download_local_file(filename, docformat=None):
"""
Copies a local file to Invenio's temporary directory.
@param filename: the name of the file to copy
@type filename: string
@param format: the format of the file to copy (will be found if not
specified)
@type format: string
@return: the path of the temporary file created
@rtype: string
@raise StandardError: if something went wrong
"""
# Make sure the format is OK.
if docformat is None:
docformat = guess_format_from_url(filename)
else:
docformat = normalize_format(docformat)
tmppath = ''
# Now try to copy.
try:
path = urllib2.urlparse.urlsplit(urllib.unquote(filename))[2]
if os.path.abspath(path) != path:
raise StandardError, "%s is not a normalized path (would be %s)." \
% (path, os.path.normpath(path))
for allowed_path in CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS + [CFG_TMPDIR,
CFG_WEBSUBMIT_STORAGEDIR]:
if path.startswith(allowed_path):
tmppath = safe_mkstemp(docformat)
shutil.copy(path, tmppath)
if os.path.getsize(tmppath) == 0:
os.remove(tmppath)
raise StandardError, "%s seems to be empty" % filename
break
else:
raise StandardError, "%s is not in one of the allowed paths." % path
except Exception as e:
raise StandardError, "Impossible to copy the local file '%s': %s" % \
(filename, str(e))
return tmppath
def download_external_url(url, docformat=None, progress_callback=None):
"""
Download a url (if it corresponds to a remote file) and return a
local url to it.
@param url: the URL to download
@type url: string
@param format: the format of the file (will be found if not specified)
@type format: string
    @return: the path to the downloaded local file
@rtype: string
@raise StandardError: if the download failed
"""
tmppath = None
# Make sure the format is OK.
if docformat is None:
# First try to find a known extension to the URL
docformat = decompose_file(url, skip_version=True,
only_known_extensions=True)[2]
if not docformat:
# No correct format could be found. Will try to get it from the
# HTTP message headers.
docformat = ''
else:
docformat = normalize_format(docformat)
from_file, to_file, tmppath = None, None, ''
try:
from_file = open_url(url)
except InvenioBibdocfileUnauthorizedURL as e:
raise StandardError, str(e)
except urllib2.URLError as e:
raise StandardError, 'URL could not be opened: %s' % str(e)
if not docformat:
# We could not determine the format from the URL, so let's try
# to read it from the HTTP headers.
docformat = get_format_from_http_response(from_file)
try:
tmppath = safe_mkstemp(docformat)
if progress_callback:
total_size = int(from_file.info().getheader('Content-Length').strip())
progress_size = 0
to_file = open(tmppath, 'w')
while True:
block = from_file.read(CFG_BIBDOCFILE_BLOCK_SIZE)
if not block:
break
to_file.write(block)
if progress_callback:
progress_size += CFG_BIBDOCFILE_BLOCK_SIZE
progress_callback(progress_size, CFG_BIBDOCFILE_BLOCK_SIZE,
total_size)
to_file.close()
from_file.close()
if os.path.getsize(tmppath) == 0:
raise StandardError, "%s seems to be empty" % url
except Exception as e:
# Try to close and remove the temporary file.
try:
to_file.close()
except Exception:
pass
try:
os.remove(tmppath)
except Exception:
pass
raise StandardError, "Error when downloading %s into %s: %s" % \
(url, tmppath, e)
return tmppath
def get_format_from_http_response(response):
"""
Tries to retrieve the format of the file from the message headers of the
HTTP response.
@param response: the HTTP response
@type response: file-like object (as returned by urllib.urlopen)
@return: the format of the remote resource
@rtype: string
"""
def parse_content_type(text):
return text.split(';')[0].strip()
def parse_content_disposition(text):
for item in text.split(';'):
item = item.strip()
if item.strip().startswith('filename='):
return item[len('filename="'):-len('"')]
info = response.info()
docformat = ''
content_disposition = info.getheader('Content-Disposition')
if content_disposition:
filename = parse_content_disposition(content_disposition)
if filename:
docformat = decompose_file(filename, only_known_extensions=False)[2]
if docformat:
return docformat
content_type = info.getheader('Content-Type')
if content_type:
content_type = parse_content_type(content_type)
if content_type not in ('text/plain', 'application/octet-stream'):
## We actually ignore these mimetypes since they are the
## defaults often returned by Apache in case the mimetype
## was not known
if content_type in CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING:
docformat = normalize_format(CFG_BIBDOCFILE_PREFERRED_MIMETYPES_MAPPING[content_type])
else:
ext = _mimes.guess_extension(content_type)
if ext:
docformat = normalize_format(ext)
return docformat
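# For instance (illustrative), a response carrying
#   Content-Disposition: inline; filename="report.pdf"
# yields '.pdf' from the filename, while a response with only
#   Content-Type: image/png
# falls back to the preferred mimetype mapping (or mimetypes.guess_extension)
# and would typically yield '.png'.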
def download_url(url, docformat=None):
"""
Download a url (if it corresponds to a remote file) and return a
local url to it.
"""
tmppath = None
try:
if is_url_a_local_file(url):
tmppath = download_local_file(url, docformat = docformat)
else:
tmppath = download_external_url(url, docformat = docformat)
except StandardError:
raise
return tmppath
class MoreInfo(object):
    """This class represents a generic MoreInfo dictionary.
    MoreInfo objects can be attached to a bibdoc, a bibdoc version, a format
    or a BibRelation. The entity to which a particular MoreInfo object is
    attached has to be specified using the constructor parameters.
    This class is a thin wrapper around the database table.
    """
def __init__(self, docid = None, version = None, docformat = None,
relation = None, cache_only = False, cache_reads = True, initial_data = None):
"""
@param cache_only Determines if MoreInfo object should be created in
memory only or reflected in the database
@type cache_only boolean
@param cache_reads Determines if reads should be executed on the
in-memory cache or should be redirected to the
database. If this is true, cache can be entirely
regenerated from the database only upon an explicit
request. If the value is not present in the cache,
the database is queried
@type cache_reads boolean
        @param initial_data Allows one to specify the initial content of the cache.
                            This parameter is useful when we create an in-memory
                            instance from a serialised value
@type initial_data string
"""
self.docid = docid
self.version = version
self.format = docformat
self.relation = relation
self.cache_only = cache_only
if initial_data != None:
self.cache = initial_data
self.dirty = initial_data
if not self.cache_only:
self._flush_cache() #inserts new entries
else:
self.cache = {}
self.dirty = {}
self.cache_reads = cache_reads
if not self.cache_only:
self.populate_from_database()
@staticmethod
def create_from_serialised(ser_str, docid = None, version = None, docformat = None,
relation = None, cache_only = False, cache_reads = True):
"""Creates an instance of MoreInfo
using serialised data as the cache content"""
data = cPickle.loads(base64.b64decode(ser_str))
return MoreInfo(docid = docid, version = version, docformat = docformat,
relation = relation, cache_only = cache_only,
                        cache_reads = cache_reads, initial_data = data)
def serialise_cache(self):
"""Returns a serialised representation of the cache"""
return base64.b64encode(cPickle.dumps(self.get_cache()))
def populate_from_database(self):
"""Retrieves all values of MoreInfo and places them in the cache"""
where_str, where_args = self._generate_where_query_args()
query_str = "SELECT namespace, data_key, data_value FROM bibdocmoreinfo WHERE %s" % (where_str, )
res = run_sql(query_str, where_args)
if res:
for row in res:
namespace, data_key, data_value_ser = row
data_value = cPickle.loads(data_value_ser)
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][data_key] = data_value
def _mark_dirty(self, namespace, data_key):
"""Marks a data key dirty - that should be saved into the database"""
if not namespace in self.dirty:
self.dirty[namespace] = {}
self.dirty[namespace][data_key] = True
    def _database_get_distinct_string_list(self, column, namespace = None):
        """A private method reading a list of distinct strings from the
        moreinfo database table"""
where_str, where_args = self._generate_where_query_args(
namespace = namespace)
query_str = "SELECT DISTINCT %s FROM bibdocmoreinfo WHERE %s" % \
( column, where_str, )
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args))
print("Executing query: " + query_str + " ARGS: " + repr(where_args))
res = run_sql(query_str, where_args)
return (res and [x[0] for x in res]) or [] # after migrating to python 2.6, can be rewritten using x if y else z syntax: return [x[0] for x in res] if res else []
def _database_get_namespaces(self):
"""Read the database to discover namespaces declared in a given MoreInfo"""
return self._database_get_distinct_string_list("namespace")
def _database_get_keys(self, namespace):
"""Returns all keys assigned in a given namespace of a MoreInfo instance"""
return self._database_get_distinct_string_list("data_key", namespace=namespace)
def _database_contains_key(self, namespace, key):
return self._database_read_value(namespace, key) != None
def _database_save_value(self, namespace, key, value):
"""Write changes into the database"""
#TODO: this should happen within one transaction
serialised_val = cPickle.dumps(value)
        # ON DUPLICATE KEY will not work here as multiple NULL values are permitted by the index
if not self._database_contains_key(namespace, key):
#insert new value
query_parts = []
query_args = []
to_process = [(self.docid, "id_bibdoc"), (self.version, "version"),
(self.format, "format"), (self.relation, "id_rel"),
(str(namespace), "namespace"), (str(key), "data_key"),
(str(serialised_val), "data_value")]
for entry in to_process:
_val_or_null(entry[0], q_str = query_parts, q_args = query_args)
columns_str = ", ".join(map(lambda x: x[1], to_process))
values_str = ", ".join(query_parts)
query_str = "INSERT INTO bibdocmoreinfo (%s) VALUES(%s)" % \
(columns_str, values_str)
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args)
else:
#Update existing value
where_str, where_args = self._generate_where_query_args(namespace, key)
query_str = "UPDATE bibdocmoreinfo SET data_value=%s WHERE " + where_str
query_args = [str(serialised_val)] + where_args
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args )
def _database_read_value(self, namespace, key):
"""Reads a value directly from the database
@param namespace - namespace of the data to be read
@param key - key of the data to be read
"""
where_str, where_args = self._generate_where_query_args(namespace = namespace, data_key = key)
query_str = "SELECT data_value FROM bibdocmoreinfo WHERE " + where_str
res = run_sql(query_str, where_args)
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args) + "WITH THE RESULT: " + str(res))
s_ = ""
if res:
s_ = cPickle.loads(res[0][0])
print("Executing query: " + query_str + " ARGS: " + repr(where_args) + " WITH THE RESULT: " + str(s_))
if res and res[0][0]:
try:
return cPickle.loads(res[0][0])
except:
raise Exception("Error when deserialising value for %s key=%s retrieved value=%s" % (repr(self), str(key), str(res[0][0])))
return None
def _database_remove_value(self, namespace, key):
"""Removes an entry directly in the database"""
where_str, where_args = self._generate_where_query_args(namespace = namespace, data_key = key)
query_str = "DELETE FROM bibdocmoreinfo WHERE " + where_str
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(where_args))
print("Executing query: " + query_str + " ARGS: " + repr(where_args))
run_sql(query_str, where_args)
return None
def _flush_cache(self):
"""Writes all the dirty cache entries into the database"""
for namespace in self.dirty:
for data_key in self.dirty[namespace]:
if namespace in self.cache and data_key in self.cache[namespace]\
and not self.cache[namespace][data_key] is None:
self._database_save_value(namespace, data_key, self.cache[namespace][data_key])
else:
# This might happen if a value has been removed from the cache
self._database_remove_value(namespace, data_key)
self.dirty = {}
def _generate_where_query_args(self, namespace = None, data_key = None):
"""Private method generating WHERE clause of SQL statements"""
ns = []
if namespace != None:
ns = [(namespace, "namespace")]
dk = []
if data_key != None:
dk = [(data_key, "data_key")]
to_process = [(self.docid, "id_bibdoc"), (self.version, "version"),
(self.format, "format"), (self.relation, "id_rel")] + \
ns + dk
return _sql_generate_conjunctive_where(to_process)
    def set_data(self, namespace, key, value):
        """Set a value under the given namespace and key (and write it to the
        database unless the object is cache-only)"""
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][key] = value
self._mark_dirty(namespace, key)
if not self.cache_only:
self._flush_cache()
    def get_data(self, namespace, key):
        """Retrieve the value stored under the given namespace and key"""
if self.cache_reads or self.cache_only:
if namespace in self.cache and key in self.cache[namespace]:
return self.cache[namespace][key]
if not self.cache_only:
# we have a permission to read from the database
value = self._database_read_value(namespace, key)
if value:
if not namespace in self.cache:
self.cache[namespace] = {}
self.cache[namespace][key] = value
return value
return None
    def del_key(self, namespace, key):
        """Remove the value stored under the given namespace and key"""
if not namespace in self.cache:
return None
del self.cache[namespace][key]
self._mark_dirty(namespace, key)
if not self.cache_only:
self._flush_cache()
def contains_key(self, namespace, key):
return self.get_data(namespace, key) != None
# the dictionary interface -> updating the default namespace
def __setitem__(self, key, value):
self.set_data("", key, value) #the default value
def __getitem__(self, key):
return self.get_data("", key)
def __delitem__(self, key):
self.del_key("", key)
def __contains__(self, key):
return self.contains_key("", key)
def __repr__(self):
return "MoreInfo(docid=%s, version=%s, docformat=%s, relation=%s)" % \
(self.docid, self.version, self.format, self.relation)
def delete(self):
"""Remove all entries associated with this MoreInfo"""
self.cache = {}
if not self.cache_only:
where_str, query_args = self._generate_where_query_args()
query_str = "DELETE FROM bibdocmoreinfo WHERE %s" % (where_str, )
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Executing query: " + query_str + " ARGS: " + repr(query_args))
print("Executing query: " + query_str + " ARGS: " + repr(query_args))
run_sql(query_str, query_args)
def get_cache(self):
"""Returns the content of the cache
@return The content of the MoreInfo cache
@rtype dictionary {namespace: {key1: value1, ... }, namespace2: {}}
"""
return self.cache
    def get_namespaces(self):
        """Returns a list of namespaces present in the MoreInfo structure.
        If the object is permitted access to the database, the data is
        always read from there: unlike when reading a particular value,
        we cannot tell from the cache alone whether a namespace is missing.
        """
if self.cache_only and self.cache_reads:
return self.cache.keys()
return self._database_get_namespaces()
def get_keys(self, namespace):
"""Returns a list of keys present in a given namespace"""
if self.cache_only and self.cache_reads:
res = []
if namespace in self.cache:
res = self.cache[namespace].keys()
return res
else:
return self._database_get_keys(namespace)
def flush(self):
"""Flush the content into the database"""
self._flush_cache()
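# Minimal usage sketch for the MoreInfo class above (hypothetical values): a
# MoreInfo attached to a bibdoc behaves like a namespaced dictionary persisted
# in the bibdocmoreinfo table.
#
#     info = MoreInfo(docid=123)
#     info.set_data("statistics", "downloads", 42)    # explicit namespace
#     info.get_data("statistics", "downloads")        # -> 42
#     info["note"] = "reviewed"                       # default ("") namespace
#     info.del_key("statistics", "downloads")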
class BibDocMoreInfo(MoreInfo):
"""
This class wraps contextual information of the documents, such as the
- comments
- descriptions
- flags.
Such information is kept separately per every format/version instance of
    the corresponding document and is serialized in the database, ready
to be retrieved (but not searched).
@param docid: the document identifier.
@type docid: integer
@param more_info: a serialized version of an already existing more_info
        object. If not specified this information will be read from the
        database, and otherwise an empty dictionary will be allocated.
@raise ValueError: if docid is not a positive integer.
@ivar docid: the document identifier as passed to the constructor.
@type docid: integer
@ivar more_info: the more_info dictionary that will hold all the
additional document information.
@type more_info: dict of dict of dict
    @note: in general this class is never instantiated in client code and
        never used outside the bibdocfile module.
@note: this class will be extended in the future to hold all the new auxiliary
information about a document.
"""
def __init__(self, docid, cache_only = False, initial_data = None):
if not (type(docid) in (long, int) and docid > 0):
raise ValueError("docid is not a positive integer, but %s." % docid)
MoreInfo.__init__(self, docid, cache_only = cache_only, initial_data = initial_data)
if 'descriptions' not in self:
self['descriptions'] = {}
if 'comments' not in self:
self['comments'] = {}
if 'flags' not in self:
self['flags'] = {}
if DBG_LOG_QUERIES:
from invenio.legacy.bibsched.bibtask import write_message
write_message("Creating BibDocMoreInfo :" + repr(self["comments"]))
print("Creating BibdocMoreInfo :" + repr(self["comments"]))
def __repr__(self):
"""
@return: the canonical string representation of the C{BibDocMoreInfo}.
@rtype: string
"""
return 'BibDocMoreInfo(%i, %s)' % (self.docid, repr(cPickle.dumps(self)))
def set_flag(self, flagname, docformat, version):
"""
Sets a flag.
@param flagname: the flag to set (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
        @param format: the format for which the flag should be set.
        @type format: string
        @param version: the version for which the flag should be set.
@type version: integer
@raise ValueError: if the flag is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
flags = self['flags']
if not flagname in flags:
flags[flagname] = {}
if not version in flags[flagname]:
flags[flagname][version] = {}
if not docformat in flags[flagname][version]:
flags[flagname][version][docformat] = {}
flags[flagname][version][docformat] = True
self['flags'] = flags
else:
raise ValueError, "%s is not in %s" % \
(flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
def get_comment(self, docformat, version):
"""
Returns the specified comment.
@param format: the format for which the comment should be
retrieved.
@type format: string
@param version: the version for which the comment should be
retrieved.
@type version: integer
@return: the specified comment.
@rtype: string
"""
try:
assert(type(version) is int)
docformat = normalize_format(docformat)
return self['comments'].get(version, {}).get(docformat)
except:
register_exception()
raise
def get_description(self, docformat, version):
"""
Returns the specified description.
@param format: the format for which the description should be
retrieved.
@type format: string
@param version: the version for which the description should be
retrieved.
@type version: integer
@return: the specified description.
@rtype: string
"""
try:
assert(type(version) is int)
docformat = normalize_format(docformat)
return self['descriptions'].get(version, {}).get(docformat)
except:
register_exception()
raise
def has_flag(self, flagname, docformat, version):
"""
        Return True if the corresponding flag has been set.
@param flagname: the name of the flag (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
@param format: the format for which the flag should be checked.
@type format: string
@param version: the version for which the flag should be checked.
@type version: integer
@return: True if the flag is set for the given format/version.
@rtype: bool
@raise ValueError: if the flagname is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
return self['flags'].get(flagname, {}).get(version, {}).get(docformat, False)
else:
raise ValueError, "%s is not in %s" % (flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
def get_flags(self, docformat, version):
"""
Return the list of all the enabled flags.
@param format: the format for which the list should be returned.
@type format: string
@param version: the version for which the list should be returned.
@type version: integer
@return: the list of enabled flags (from
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@rtype: list of string
"""
return [flag for flag in self['flags'] if docformat in self['flags'][flag].get(version, {})]
def set_comment(self, comment, docformat, version):
"""
Set a comment.
@param comment: the comment to be set.
@type comment: string
@param format: the format for which the comment should be set.
@type format: string
@param version: the version for which the comment should be set:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
docformat = normalize_format(docformat)
if comment == KEEP_OLD_VALUE:
comment = self.get_comment(docformat, version) or self.get_comment(docformat, version - 1)
if not comment:
self.unset_comment(docformat, version)
return
if not version in self['comments']:
comments = self['comments']
comments[version] = {}
self['comments'] = comments
comments = self['comments']
comments[version][docformat] = comment
self['comments'] = comments
except:
register_exception()
raise
def set_description(self, description, docformat, version):
"""
Set a description.
@param description: the description to be set.
@type description: string
@param format: the format for which the description should be set.
@type format: string
@param version: the version for which the description should be set:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
docformat = normalize_format(docformat)
if description == KEEP_OLD_VALUE:
description = self.get_description(docformat, version) or self.get_description(docformat, version - 1)
if not description:
self.unset_description(docformat, version)
return
descriptions = self['descriptions']
if not version in descriptions:
descriptions[version] = {}
descriptions[version][docformat] = description
self.set_data("", 'descriptions', descriptions)
except:
register_exception()
raise
def unset_comment(self, docformat, version):
"""
Unset a comment.
@param format: the format for which the comment should be unset.
@type format: string
@param version: the version for which the comment should be unset:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
comments = self['comments']
del comments[version][docformat]
self['comments'] = comments
except KeyError:
pass
except:
register_exception()
raise
def unset_description(self, docformat, version):
"""
Unset a description.
@param format: the format for which the description should be unset.
@type format: string
@param version: the version for which the description should be unset:
@type version: integer
"""
try:
assert(type(version) is int and version > 0)
descriptions = self['descriptions']
del descriptions[version][docformat]
self['descriptions'] = descriptions
except KeyError:
pass
except:
register_exception()
raise
def unset_flag(self, flagname, docformat, version):
"""
Unset a flag.
@param flagname: the flag to be unset (see
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}).
@type flagname: string
@param format: the format for which the flag should be unset.
@type format: string
@param version: the version for which the flag should be unset:
@type version: integer
@raise ValueError: if the flag is not in
L{CFG_BIBDOCFILE_AVAILABLE_FLAGS}
"""
if flagname in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
try:
flags = self['flags']
del flags[flagname][version][docformat]
self['flags'] = flags
except KeyError:
pass
else:
raise ValueError, "%s is not in %s" % (flagname, CFG_BIBDOCFILE_AVAILABLE_FLAGS)
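# Minimal usage sketch for the BibDocMoreInfo class above (hypothetical values):
# comments, descriptions and flags are always addressed by (format, version)
# of the corresponding file.
#
#     info = BibDocMoreInfo(123)
#     info.set_comment('main fulltext', '.pdf', 1)
#     info.get_comment('.pdf', 1)          # -> 'main fulltext'
#     info.set_flag('STAMPED', '.pdf', 1)  # must be in CFG_BIBDOCFILE_AVAILABLE_FLAGS
#     info.get_flags('.pdf', 1)            # -> ['STAMPED']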
_bib_relation__any_value = -1
class BibRelation(object):
"""
A representation of a relation between documents or their particular versions
"""
def __init__(self, rel_type = None,
bibdoc1_id = None, bibdoc2_id = None,
bibdoc1_ver = None, bibdoc2_ver = None,
bibdoc1_fmt = None, bibdoc2_fmt = None,
rel_id = None):
"""
The constructor of the class representing a relation between two
documents.
        If the rel_id parameter is specified, the remaining fields are
        read from the database; otherwise the relation identifier is
        looked up in the database based on the data provided (and is left
        unset if no such relation exists).
        If a version of either document is not specified, the resulting
        object describes a relation of all versions of the given BibDoc.
@param bibdoc1
@type bibdoc1 BibDoc
@param bibdoc1_ver
        @type bibdoc1_ver int
        @param bibdoc2
        @type bibdoc2 BibDoc
@param bibdoc2_ver
@type bibdoc2_ver int
@param bibdoc1_fmt format of the first document
@type bibdoc1_fmt string
@param bibdoc2_fmt format of the second document
@type bibdoc2_fmt string
@param rel_type
@type rel_type string
@param more_info The serialised representation of the more_info
@type more_info string
@param rel_id allows to specify the identifier of the newly created relation
        @type rel_id unsigned int
"""
self.id = rel_id
self.bibdoc1_id = bibdoc1_id
self.bibdoc2_id = bibdoc2_id
self.bibdoc1_ver = bibdoc1_ver
self.bibdoc2_ver = bibdoc2_ver
self.bibdoc1_fmt = bibdoc1_fmt
self.bibdoc2_fmt = bibdoc2_fmt
self.rel_type = rel_type
if rel_id == None:
self._fill_id_from_data()
else:
self._fill_data_from_id()
self.more_info = MoreInfo(relation = self.id)
def _fill_data_from_id(self):
"""Fill all the relation data from the relation identifier
"""
query = "SELECT id_bibdoc1, version1, format1, id_bibdoc2, version2, format2, rel_type FROM bibdoc_bibdoc WHERE id=%s"
res = run_sql(query, (str(self.id), ))
if res != None and res[0] != None:
self.bibdoc1_id = res[0][0]
self.bibdoc1_ver = res[0][1]
self.bibdoc1_fmt = res[0][2]
self.bibdoc2_id = res[0][3]
self.bibdoc2_ver = res[0][4]
self.bibdoc2_fmt = res[0][5]
self.rel_type = res[0][6]
def _fill_id_from_data(self):
"""Fill the relation identifier based on the data provided"""
where_str, where_args = self._get_where_clauses()
query = "SELECT id FROM bibdoc_bibdoc WHERE %s" % (where_str, )
res = run_sql(query, where_args)
if res and res[0][0]:
self.id = int(res[0][0])
def _get_value_column_mapping(self):
"""
        Returns a list of tuples; each tuple consists of a value and the name
        of the database column where this value belongs
"""
return [(self.rel_type, "rel_type"), (self.bibdoc1_id, "id_bibdoc1"),
(self.bibdoc1_ver, "version1"),
(self.bibdoc1_fmt, "format1"),
(self.bibdoc2_id, "id_bibdoc2"),
(self.bibdoc2_ver, "version2"),
(self.bibdoc2_fmt, "format2")]
def _get_where_clauses(self):
"""Private function returning part of the SQL statement identifying
current relation
@return
@rtype tuple
"""
return _sql_generate_conjunctive_where(self._get_value_column_mapping())
@staticmethod
def create(bibdoc1_id = None, bibdoc1_ver = None,
bibdoc1_fmt = None, bibdoc2_id = None,
bibdoc2_ver = None, bibdoc2_fmt = None,
rel_type = ""):
"""
Create a relation and return instance.
Ommiting an argument means that a particular relation concerns any value of the parameter
"""
# check if there is already entry corresponding to parameters
existing = BibRelation.get_relations(rel_type = rel_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(existing) > 0:
return existing[0]
# build the insert query and execute it
to_process = [(rel_type, "rel_type"), (bibdoc1_id, "id_bibdoc1"),
(bibdoc1_ver, "version1"), (bibdoc1_fmt, "format1"),
(bibdoc2_id, "id_bibdoc2"), (bibdoc2_ver, "version2"),
(bibdoc2_fmt, "format2")]
values_list = []
args_list = []
columns_list = []
for entry in to_process:
columns_list.append(entry[1])
            if entry[0] is None:
values_list.append("NULL")
else:
values_list.append("%s")
args_list.append(entry[0])
query = "INSERT INTO bibdoc_bibdoc (%s) VALUES (%s)" % (", ".join(columns_list), ", ".join(values_list))
# print "Query: %s Args: %s" % (query, str(args_list))
rel_id = run_sql(query, args_list)
return BibRelation(rel_id = rel_id)
def delete(self):
""" Removes a relation between objects from the database.
executing the flush function on the same object will restore
the relation
"""
where_str, where_args = self._get_where_clauses()
run_sql("DELETE FROM bibdoc_bibdoc WHERE %s" % (where_str,), where_args) # kwalitee: disable=sql
# removing associated MoreInfo
self.more_info.delete()
def get_more_info(self):
return self.more_info
@staticmethod
def get_relations(rel_type = _bib_relation__any_value,
bibdoc1_id = _bib_relation__any_value,
bibdoc2_id = _bib_relation__any_value,
bibdoc1_ver = _bib_relation__any_value,
bibdoc2_ver = _bib_relation__any_value,
bibdoc1_fmt = _bib_relation__any_value,
bibdoc2_fmt = _bib_relation__any_value):
"""Retrieves list of relations satisfying condtions.
If a parameter is specified, its value has to match exactly.
If a parameter is ommited, any of its values will be accepted"""
to_process = [(rel_type, "rel_type"), (bibdoc1_id, "id_bibdoc1"),
(bibdoc1_ver, "version1"), (bibdoc1_fmt, "format1"),
(bibdoc2_id, "id_bibdoc2"), (bibdoc2_ver, "version2"),
(bibdoc2_fmt, "format2")]
where_str, where_args = _sql_generate_conjunctive_where(
filter(lambda x: x[0] != _bib_relation__any_value, to_process))
if where_str:
where_str = "WHERE " + where_str # in case of nonempty where, we need a where clause
query_str = "SELECT id FROM bibdoc_bibdoc %s" % (where_str, )
# print "running query : %s with arguments %s on the object %s" % (query_str, str(where_args), repr(self))
try:
res = run_sql(query_str, where_args)
        except Exception:
raise Exception(query_str + " " + str(where_args))
results = []
        if res is not None:
for res_row in res:
results.append(BibRelation(rel_id=res_row[0]))
return results
# Access to MoreInfo
def set_data(self, category, key, value):
"""assign additional information to this relation"""
self.more_info.set_data(category, key, value)
def get_data(self, category, key):
"""read additional information assigned to this relation"""
return self.more_info.get_data(category, key)
#the dictionary interface allowing to set data bypassing the namespaces
def __setitem__(self, key, value):
self.more_info[key] = value
def __getitem__(self, key):
return self.more_info[key]
def __contains__(self, key):
return self.more_info.__contains__(key)
def __repr__(self):
return "BibRelation(id_bibdoc1 = %s, version1 = %s, format1 = %s, id_bibdoc2 = %s, version2 = %s, format2 = %s, rel_type = %s)" % \
(self.bibdoc1_id, self.bibdoc1_ver, self.bibdoc1_fmt,
self.bibdoc2_id, self.bibdoc2_ver, self.bibdoc2_fmt,
self.rel_type)
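    # Illustrative usage sketch (kept as a comment so importing this module has
    # no side effects); the identifiers, formats and relation type below are
    # hypothetical and assume a populated bibdoc_bibdoc table:
    #   rel = BibRelation.create(bibdoc1_id=10, bibdoc2_id=12,
    #                            bibdoc1_fmt='.pdf', bibdoc2_fmt='.txt',
    #                            rel_type='is_extracted_from')
    #   rel.set_data('namespace', 'key', 'value') # stored in the associated MoreInfo
    #   for rel in BibRelation.get_relations(rel_type='is_extracted_from'):
    #       print rel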
def readfile(filename):
"""
Read a file.
@param filename: the name of the file to be read.
@type filename: string
@return: the text contained in the file.
@rtype: string
@note: Returns empty string in case of any error.
@note: this function is useful for quick implementation of websubmit
functions.
"""
try:
return open(filename).read()
except Exception:
return ''
class HeadRequest(urllib2.Request):
"""
A request object to perform a HEAD request.
"""
def get_method(self):
return 'HEAD'
def read_cookie(cookiefile):
"""
Parses a cookie file and returns a string as needed for the urllib2 headers
The file should respect the Netscape cookie specifications
"""
cookie_data = ''
cfile = open(cookiefile, 'r')
for line in cfile.readlines():
tokens = line.split('\t')
if len(tokens) == 7: # we are on a cookie line
cookie_data += '%s=%s; ' % (tokens[5], tokens[6].replace('\n', ''))
cfile.close()
return cookie_data
def open_url(url, headers=None, head_request=False):
"""
Opens a URL. If headers are passed as argument, no check is performed and
the URL will be opened. Otherwise checks if the URL is present in
CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS and uses the headers specified in
the config variable.
@param url: the URL to open
@type url: string
@param headers: the headers to use
@type headers: dictionary
    @param head_request: if True, perform a HEAD request, otherwise a GET
        request
@type head_request: boolean
@return: a file-like object as returned by urllib2.urlopen.
"""
headers_to_use = None
if headers is None:
        for regex, allowed_headers in _CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS:
            if regex.match(url) is not None:
                headers_to_use = allowed_headers
break
if headers_to_use is None:
# URL is not allowed.
            raise InvenioBibdocfileUnauthorizedURL("%s is not an authorized "
                                                   "external URL." % url)
else:
headers_to_use = headers
    request_obj = HeadRequest if head_request else urllib2.Request
request = request_obj(url)
request.add_header('User-Agent', make_user_agent_string('bibdocfile'))
for key, value in headers_to_use.items():
try:
value = globals()[value['fnc']](**value['args'])
except (KeyError, TypeError):
pass
request.add_header(key, value)
return urllib2.urlopen(request)
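# Illustrative usage sketch (comment only); the URL below is hypothetical and
# would have to match one of the regular expressions configured in
# CFG_BIBUPLOAD_FFT_ALLOWED_EXTERNAL_URLS:
#   response = open_url('http://example.org/fulltext.pdf', head_request=True)
#   print response.info()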
def update_modification_date_of_file(filepath, modification_date):
"""Update the modification time and date of the file with the modification_date
@param filepath: the full path of the file that needs to be updated
@type filepath: string
@param modification_date: the new modification date and time
@type modification_date: datetime.datetime object
"""
try:
modif_date_in_seconds = time.mktime(modification_date.timetuple()) # try to get the time in seconds
except (AttributeError, TypeError):
modif_date_in_seconds = 0
if modif_date_in_seconds:
statinfo = os.stat(filepath) # we need to keep the same access time
os.utime(filepath, (statinfo.st_atime, modif_date_in_seconds)) #update the modification time
| lnielsen/invenio | invenio/legacy/bibdocfile/api.py | Python | gpl-2.0 | 205,836 |
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2011-2013, Regents of the University of California
# Alexander Afanasyev
#
# GNU 3.0 license, See the LICENSE file for more information
#
# Author: Alexander Afanasyev <alexander.afanasyev@ucla.edu>
#
#
# Based on PyCCN code, copyrighted and licensed as follows
#
# Copyright (c) 2011-2013, Regents of the University of California
# BSD license, See the COPYING file for more information
# Written by: Derek Kulinski <takeda@takeda.tk>
# Jeff Burke <jburke@ucla.edu>
#
import ns.ndnSIM
import ns.core
from Name import Name
class Interest (object):
_interest = None
def __init__(self,
name = None, scope = None, interestLifetime = None,
interest = None):
if interest:
if isinstance (interest, Interest):
self._interest = interest._interest
elif isinstance (interest, ns.ndnSIM.ndn.Interest):
self._interest = interest
else:
raise TypeError ("Invalid type supplied for 'interest' parameter [%s]" % type (interest))
else:
self._interest = ns.ndnSIM.ndn.Interest ()
self.name = name
self.scope = scope
self.interestLifetime = interestLifetime
@staticmethod
def fromWire (wire):
return Interest (interest = ns.ndnSIM.ndn.Wire.ToInterestStr (wire))
def toWire (self):
return ns.ndnSIM.ndn.Wire.FromInterestStr (self._interest)
def __getattr__ (self, name):
if name == "_interest":
return object.__getattr__ (self, name)
elif name == "name":
return Name (self._interest.GetName ())
elif name == "scope":
return self._interest.GetScope ()
elif name == "interestLifetime":
return self._interest.GetInterestLifetime ().ToDouble (ns.core.Time.S)
else:
return self._interest.__getattribute__ (name)
def __setattr__(self, name, value):
if name == "_interest":
return object.__setattr__ (self, name, value)
elif name == "name":
if value is None:
return self._interest.SetName (ns.ndnSIM.ndn.Name ())
elif isinstance (value, Name):
return self._interest.SetName (value._name)
elif isinstance (value, ns.ndnSIM.ndn.Name):
return self._interest.SetName (value)
elif isinstance (value, str):
return self._interest.SetName (ns.ndnSIM.ndn.Name (value))
else:
raise ValueError ("Invalid name parameter")
elif name == "scope":
if value is None:
return self._interest.SetScope (-1)
elif isinstance (value, int):
return self._interest.SetScope (value)
else:
raise ValueError ("Scope parameter should be int, [%s] supplied" % type (value))
elif name == "interestLifetime":
if value is None:
return self._interest.SetInterestLifetime (ns.core.Time ())
elif isinstance (value, float) or isinstance (value, int):
return self._interest.SetInterestLifetime (ns.core.Seconds (value))
else:
raise ValueError ("interestLifetime parameter should be fload or int, [%s] supplied" % type (value))
else:
raise ValueError ("Unknown or unsupported attribute [%s]" % name)
def __repr__(self):
return "ndnSIM.Interest(%s)" % str (self._interest)
| chrismbarnes/ndnSIM | src/ndnSIM/PyNDN/Interest.py | Python | gpl-2.0 | 3,677 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Windows API functions."""
import ctypes
import os
import sys
from ctypes import WinError, wintypes
from colorise.win.winhandle import WinHandle
# Create a separate WinDLL instance since the one from ctypes.windll.kernel32
# can be manipulated by other code that also imports it
#
# See
# https://stackoverflow.com/questions/34040123/ctypes-cannot-import-windll#comment55835311_34040124
kernel32 = ctypes.WinDLL('kernel32', use_errno=True, use_last_error=True)
# Handle IDs for stdout and stderr
_STDOUT_HANDLE_ID = -11
_STDERR_HANDLE_ID = -12
# Console modes for console virtual terminal sequences
DISABLE_NEWLINE_AUTO_RETURN = 0x0008
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
ERROR_INVALID_HANDLE = 6
# Struct defined in wincon.h
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): # noqa: D101
_fields_ = [
('dwSize', wintypes._COORD),
('dwCursorPosition', wintypes._COORD),
('wAttributes', ctypes.c_ushort),
('srWindow', wintypes._SMALL_RECT),
('dwMaximumWindowSize', wintypes._COORD),
]
# Struct defined in wincon.h
class CONSOLE_SCREEN_BUFFER_INFOEX(ctypes.Structure): # noqa: D101
_fields_ = [
('cbSize', wintypes.ULONG),
('dwSize', wintypes._COORD),
('dwCursorPosition', wintypes._COORD),
('wAttributes', ctypes.c_ushort),
('srWindow', wintypes._SMALL_RECT),
('dwMaximumWindowSize', wintypes._COORD),
('wPopupAttributes', wintypes.WORD),
('bFullscreenSupported', wintypes.BOOL),
('ColorTable', wintypes.COLORREF * 16),
]
if not hasattr(wintypes, 'LPDWORD'):
LPDWORD = ctypes.POINTER(wintypes.DWORD)
else:
LPDWORD = wintypes.LPDWORD
# Set argument and return types for Windows API calls
kernel32.GetConsoleScreenBufferInfo.argtypes =\
[wintypes.HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
kernel32.GetConsoleScreenBufferInfo.restype = wintypes.BOOL
kernel32.GetStdHandle.argtypes = [wintypes.DWORD]
kernel32.GetStdHandle.restype = wintypes.HANDLE
kernel32.GetConsoleMode.argtypes = [wintypes.HANDLE, LPDWORD]
kernel32.GetConsoleMode.restype = wintypes.BOOL
kernel32.SetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.DWORD]
kernel32.SetConsoleMode.restype = wintypes.BOOL
kernel32.SetLastError.argtypes = [wintypes.DWORD]
kernel32.SetLastError.restype = None # void
kernel32.FormatMessageW.argtypes = [
wintypes.DWORD,
wintypes.LPCVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPWSTR,
wintypes.DWORD,
wintypes.LPVOID
]
kernel32.FormatMessageW.restype = wintypes.DWORD
kernel32.LocalFree.argtypes = [wintypes.HLOCAL]
kernel32.LocalFree.restype = wintypes.HLOCAL
kernel32.SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
kernel32.SetConsoleTextAttribute.restype = wintypes.BOOL
if kernel32.SetConsoleScreenBufferInfoEx is not None:
# We can query RGB values of console colors on Windows
kernel32.GetConsoleScreenBufferInfoEx.argtypes =\
[wintypes.HANDLE, ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFOEX)]
kernel32.GetConsoleScreenBufferInfoEx.restype = wintypes.BOOL
def isatty(handle):
"""Check if a handle is a valid console handle.
For example, if a handle is redirected to a file, it is not a valid console
handle and all win32 console API calls will fail.
"""
if not handle or not handle.valid:
return False
console_mode = wintypes.DWORD(0)
# We use GetConsoleMode here but it could be any function that expects a
# valid console handle
retval = kernel32.GetConsoleMode(handle.value, ctypes.byref(console_mode))
if retval == 0:
errno = ctypes.get_last_error()
if errno == ERROR_INVALID_HANDLE:
return False
else:
# Another error happened
raise WinError()
else:
return True
def can_redefine_colors(file):
"""Return whether the terminal allows redefinition of colors."""
handle = get_win_handle(WinHandle.from_sys_handle(file))
return kernel32.SetConsoleScreenBufferInfoEx is not None and isatty(handle)
def create_std_handle(handle_id):
"""Create a Windows standard handle from an identifier."""
handle = kernel32.GetStdHandle(handle_id)
if handle == WinHandle.INVALID:
raise WinError()
csbi = CONSOLE_SCREEN_BUFFER_INFO()
retval = kernel32.GetConsoleScreenBufferInfo(
handle,
ctypes.byref(csbi),
)
win_handle = None
if retval == 0:
errno = ctypes.get_last_error()
if errno == ERROR_INVALID_HANDLE:
# Return a special non-console handle
win_handle = WinHandle.get_nonconsole_handle(handle_id)
else:
raise WinError()
else:
win_handle = WinHandle(handle)
# Set defaults color values
# TODO: Do these need to be reread when colors are redefined?
win_handle.default_fg = csbi.wAttributes & 0xf
win_handle.default_bg = (csbi.wAttributes >> 4) & 0xf
# Set the color for the handle
win_handle.fg = win_handle.default_fg
win_handle.bg = win_handle.default_bg
return win_handle
def get_win_handle(target):
"""Return the Windows handle corresponding to a Python handle."""
if WinHandle.validate(target):
# We create a new handle each time since the old handle may have been
# invalidated by a redirection
return create_std_handle(target)
raise ValueError("Invalid handle identifier '{0}'".format(target))
def get_windows_clut():
"""Query and return the internal Windows color look-up table."""
# On Windows Vista and beyond you can query the current colors in the
# color table. On older platforms, use the default color table
csbiex = CONSOLE_SCREEN_BUFFER_INFOEX()
csbiex.cbSize = ctypes.sizeof(CONSOLE_SCREEN_BUFFER_INFOEX)
retval = kernel32.GetConsoleScreenBufferInfoEx(
get_win_handle(WinHandle.STDOUT).value,
ctypes.byref(csbiex),
)
if retval == 0:
raise WinError()
clut = {}
# Update according to the currently set colors
for i in range(16):
clut[i] = (
csbiex.ColorTable[i] & 0xff,
(csbiex.ColorTable[i] >> 8) & 0xff,
(csbiex.ColorTable[i] >> 16) & 0xff,
)
return clut
def enable_virtual_terminal_processing(handle):
"""Enable Windows processing of ANSI escape sequences."""
if not handle or not handle.valid:
raise ValueError('Invalid handle')
if not isatty(handle):
return False
console_mode = wintypes.DWORD(0)
if kernel32.GetConsoleMode(handle.value, ctypes.byref(console_mode)) == 0:
raise WinError()
handle.console_mode = console_mode
target_mode = wintypes.DWORD(
console_mode.value
| ENABLE_VIRTUAL_TERMINAL_PROCESSING
| DISABLE_NEWLINE_AUTO_RETURN
)
# First attempt to set console mode to interpret ANSI escape codes and
# disable immediately jumping to the next console line
if kernel32.SetConsoleMode(handle.value, target_mode) == 0:
# If that fails, try just setting the mode for ANSI escape codes
target_mode = wintypes.DWORD(
console_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING
)
if kernel32.SetConsoleMode(handle.value, target_mode) == 0:
return None
# Return the original console mode so we can restore it later
return console_mode
def restore_console_mode(handle, restore_mode):
"""Restore the console mode for a handle to its original mode."""
if not handle or handle == WinHandle.INVALID:
raise ValueError('Invalid handle')
if not kernel32.SetConsoleMode(handle.value, restore_mode):
raise WinError()
def restore_console_modes():
"""Restore console modes for stdout and stderr to their original mode."""
if can_interpret_ansi(sys.stdout):
stdout = get_win_handle(WinHandle.STDOUT)
restore_console_mode(stdout, stdout.console_mode)
if can_interpret_ansi(sys.stderr):
stderr = get_win_handle(WinHandle.STDERR)
restore_console_mode(stderr, stderr.console_mode)
def can_interpret_ansi(file):
"""Return True if the Windows console can interpret ANSI escape codes."""
# NOTE: Not sure if sys.stdout and sys.stderr are synced with the handles
# returned by GetStdHandle so we use existing windows functions to tell if
# the handles are valid console handles
handle = get_win_handle(WinHandle.from_sys_handle(file))
handle_isatty = isatty(handle)
if not handle_isatty:
return False
if os.environ.get('ConEmuANSI', '') == 'ON':
return True
return enable_virtual_terminal_processing(handle)
def set_console_text_attribute(handle, flags):
"""Set the console's text attributes."""
if not handle or handle == WinHandle.INVALID:
raise ValueError('Invalid handle')
if kernel32.SetConsoleTextAttribute(
handle.value,
wintypes.WORD(flags)
) == 0:
raise WinError()
def encode_rgb_tuple(rgb):
"""Hexadecimally encode an rgb tuple as 0xbbggrr."""
r, g, b = rgb
return (b << 16) | (g << 8) | r
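# For example, encode_rgb_tuple((255, 0, 0)) returns 0x0000ff: the value is
# laid out as 0x00bbggrr, with blue in the high byte and red in the low byte.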
def redefine_colors(color_map, file=sys.stdout):
"""Redefine the base console colors with a new mapping.
    This only redefines the 16 colors in the console and changes all text in the
console that already uses the logical names. E.g. if 'red' is mapped to the
color red and this function changes it to another color, all text in 'red'
will be rendered with this new color, even though it may already have been
written to the console.
"""
if not can_redefine_colors(file):
raise RuntimeError('Cannot redefine colors on this system')
if not all(0 <= c < 16 for c in color_map):
raise RuntimeError('New color map must contain indices in range 0-15')
# Create a new CONSOLE_SCREEN_BUFFER_INFOEX structure based on the given
# color map
csbiex = CONSOLE_SCREEN_BUFFER_INFOEX()
# We must set the size of the structure before using it
csbiex.cbSize = ctypes.sizeof(CONSOLE_SCREEN_BUFFER_INFOEX)
win_handle = get_win_handle(WinHandle.from_sys_handle(file))
retval = kernel32.GetConsoleScreenBufferInfoEx(
win_handle.value,
ctypes.byref(csbiex)
)
# Get console color info
if retval == 0:
raise WinError()
# Redefine colortable
for idx in color_map:
csbiex.ColorTable[idx] = encode_rgb_tuple(color_map[idx])
# Set the new colors
if kernel32.SetConsoleScreenBufferInfoEx(win_handle.value, csbiex) == 0:
raise WinError()
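# Illustrative usage sketch (comment only; the index/color pair is
# hypothetical): remap console color slot 4 to orange for stdout.
#   redefine_colors({4: (255, 128, 0)})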
| MisanthropicBit/colorise | src/colorise/win/win32_functions.py | Python | bsd-3-clause | 10,715 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
class ImageRecognizer:
def __init__(self, path, graph, labels, input_layer_name, output_layer_name, num_top_predictions):
self.path = path
self.graph = graph
self.labels = labels
self.input_layer_name = input_layer_name
self.output_layer_name = output_layer_name
self.num_top_predictions = num_top_predictions
def load_image(self, filename):
"""Read in the image_data to be classified."""
return tf.gfile.FastGFile(filename, 'rb').read()
def load_labels(self, filename):
"""Read in labels, one label per line."""
return [line.rstrip() for line in tf.gfile.GFile(filename)]
def load_graph(self, filename):
"""Unpersists graph from file as default graph."""
with tf.gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
def run_graph(self, image_data, labels, input_layer_name, output_layer_name,
num_top_predictions):
with tf.Session() as sess:
# Feed the image_data as input to the graph.
# predictions will contain a two-dimensional array, where one
# dimension represents the input image count, and the other has
# predictions per class
softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
predictions, = sess.run(softmax_tensor, {input_layer_name: image_data})
# Sort to show labels in order of confidence
top_k = predictions.argsort()[-self.num_top_predictions:][::-1]
values = {}
for node_id in top_k:
human_string = labels[node_id]
score = predictions[node_id].item()
print('%s (score = %.5f)' % (human_string, score))
values[human_string] = score
return values
def recognize(self, image):
image_data = self.load_image(image)
labels = self.load_labels(self.path + self.labels)
self.load_graph(self.path+ self.graph)
return self.run_graph(image_data, labels, self.input_layer_name, self.output_layer_name,
self.num_top_predictions)
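# Illustrative usage sketch (comment only); every path, file name and tensor
# name below is hypothetical and depends on the exported graph:
#   recognizer = ImageRecognizer('/models/', 'graph.pb', 'labels.txt',
#                                'DecodeJpeg/contents:0', 'final_result:0', 5)
#   scores = recognizer.recognize('/images/cat.jpg')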
| mmiranda96/chilaiquil-api | image_recognizer.py | Python | mit | 2,347 |
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
from asposecellscloud.models import PasswordRequest
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
body = PasswordRequest.PasswordRequest()
body.Password = "aspose"
#upload file to aspose cloud storage
storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
    #invoke Aspose.Cells Cloud SDK API to set the modification password of a workbook
response = cellsApi.PutDocumentProtectFromChanges(name=filename, body=body)
if response.Status == "OK":
#download protected document from cloud storage
response = storageApi.GetDownload(Path=filename)
outfilename = "c:/temp/" + "password_protected_" + filename
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
| asposecells/Aspose_Cells_Cloud | Examples/Python/Examples/SetModifyPassword.py | Python | mit | 1,577 |
# excelExpr.py
#
# Copyright 2010, Paul McGuire
#
# A partial implementation of a parser of Excel formula expressions.
#
from pyparsingOD import (CaselessKeyword, Suppress, Word, alphas,
alphanums, nums, Optional, Group, oneOf, Forward, Regex,
operatorPrecedence, opAssoc, dblQuotedString, delimitedList,
Combine, Literal, QuotedString)
EQ,EXCL,LPAR,RPAR,COLON,COMMA = map(Suppress, '=!():,')
EXCL, DOLLAR = map(Literal,"!$")
sheetRef = Word(alphas, alphanums) | QuotedString("'",escQuote="''")
colRef = Optional(DOLLAR) + Word(alphas,max=2)
rowRef = Optional(DOLLAR) + Word(nums)
cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
rowRef("row")))
cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
| cellRef | Word(alphas,alphanums))
expr = Forward()
COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr
ifFunc = (CaselessKeyword("if") +
LPAR +
Group(condExpr)("condition") +
COMMA + expr("if_true") +
COMMA + expr("if_false") + RPAR)
statFunc = lambda name : CaselessKeyword(name) + LPAR + delimitedList(expr) + RPAR
sumFunc = statFunc("sum")
minFunc = statFunc("min")
maxFunc = statFunc("max")
aveFunc = statFunc("ave")
funcCall = ifFunc | sumFunc | minFunc | maxFunc | aveFunc
multOp = oneOf("* /")
addOp = oneOf("+ -")
numericLiteral = Regex(r"\-?\d+(\.\d+)?")
operand = numericLiteral | funcCall | cellRange | cellRef
arithExpr = operatorPrecedence(operand,
[
(multOp, 2, opAssoc.LEFT),
(addOp, 2, opAssoc.LEFT),
])
textOperand = dblQuotedString | cellRef
textExpr = operatorPrecedence(textOperand,
[
('&', 2, opAssoc.LEFT),
])
expr << (arithExpr | textExpr)
test1 = "=3*A7+5"
test2 = "=3*Sheet1!$A$7+5"
test2a ="=3*'Sheet 1'!$A$7+5"
test2b ="=3*'O''Reilly''s sheet'!$A$7+5"
test3 = "=if(Sum(A1:A25)>42,Min(B1:B25), " \
"if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))"
test3a = "=sum(a1:a25,10,min(b1,c2,d3))"
import pprint
tests = [locals()[t] for t in list(locals().keys()) if t.startswith("test")]
for test in tests:
print(test)
pprint.pprint( (EQ + expr).parseString(test,parseAll=True).asList() )
print()
| schlichtanders/pyparsing-2.0.3-OrderedDict | examples/excelExpr.py | Python | mit | 2,327 |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.vyos import vyos_static_route
from .vyos_module import TestVyosModule, load_fixture, set_module_args
class TestVyosStaticRouteModule(TestVyosModule):
module = vyos_static_route
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.vyos.vyos_static_route.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.vyos.vyos_static_route.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.load_config.return_value = dict(diff=None, session='session')
def test_vyos_static_route_present(self):
set_module_args(dict(prefix='172.26.0.0/16', next_hop='172.26.4.1', admin_distance='1'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'],
['set protocols static route 172.26.0.0/16 next-hop 172.26.4.1 distance 1'])
| erjohnso/ansible | test/units/modules/network/vyos/test_vyos_static_route.py | Python | gpl-3.0 | 1,966 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from iptest.assert_util import *
add_clr_assemblies("loadorder_2")
# namespace First {
# public class Nongeneric1 {
# public static string Flag = typeof(Nongeneric1).FullName;
# }
# }
import First
from First import *
AreEqual(First.Nongeneric1.Flag, "First.Nongeneric1")
AreEqual(Nongeneric1.Flag, "First.Nongeneric1")
add_clr_assemblies("loadorder_2c")
# // non-generic type, which has same namespace, same name from First.Nongeneric1
# namespace First {
# public class Nongeneric1 {
# public static string Flag = typeof(Nongeneric1).FullName + "_Same";
# }
# }
AreEqual(First.Nongeneric1.Flag, "First.Nongeneric1_Same") # !!!
AreEqual(Nongeneric1.Flag, "First.Nongeneric1") # !!!
from First import *
AreEqual(First.Nongeneric1.Flag, "First.Nongeneric1_Same")
AreEqual(Nongeneric1.Flag, "First.Nongeneric1_Same") # !!!
| IronLanguages/ironpython3 | Tests/interop/net/loadorder/t2c.py | Python | apache-2.0 | 1,097 |
# This challenge could, in theory, be solved in multiple ways. However, for the
# sake of learning how to simulate an alternate filesystem, please solve this
# challenge according to structure provided below. As a challenge, once you have
# an initial solution, try solving this in an alternate way.
#
# Problem description and general solution strategy:
# The binary loads the password from a file using the fread function. If the
# password is correct, it prints "Good Job." In order to keep consistency with
# the other challenges, the input from the console is written to a file in the
# ignore_me function. As the name suggests, ignore it, as it only exists to
# maintain consistency with other challenges.
# We want to:
# 1. Determine the file from which fread reads.
# 2. Use Angr to simulate a filesystem where that file is replaced with our own
# simulated file.
# 3. Initialize the file with a symbolic value, which will be read with fread
#    and propagated through the program.
# 4. Solve for the symbolic input to determine the password.
import angr
import claripy
import sys
def main(argv):
path_to_binary = argv[1]
project = angr.Project(path_to_binary)
start_address = ???
initial_state = project.factory.blank_state(addr=start_address)
# Specify some information needed to construct a simulated file. For this
# challenge, the filename is hardcoded, but in theory, it could be symbolic.
# Note: to read from the file, the binary calls
# 'fread(buffer, sizeof(char), 64, file)'.
# (!)
filename = ??? # :string
symbolic_file_size_bytes = ???
# A file, in Linux, represents a stream of sequential data. This stream may
# come from a physical file on your hard drive, the network, the output of
# another program (ex: /dev/urandom), or anything else. In our case, we want
# to construct a block of memory where we store our symbolic variables for the
# program to read. The following constructs the symbolic memory that will
# supply the stream of data to the Linux file. Also, to communicate with
# Angr's constraint solving system, we need to associate the memory with the
# initial_state.
symbolic_file_backing_memory = angr.state_plugins.SimSymbolicMemory()
symbolic_file_backing_memory.set_state(initial_state)
# Construct a bitvector for the password and then store it in the file's
# backing memory. The store method works exactly the same as the store method
# you have already used. In fact, it's the exact same method! That means that
# memory.store(address, bitvector) will write bitvector to the address we
# specify. In this memory, unlike our program's memory, we want to write to
# the beginning, as the Linux file will stream data from the beginning of the
# file. For example, imagine a simple file, 'hello.txt':
#
# Hello world, my name is John.
# ^ ^
# ^ address 0 ^ address 24 (count the number of characters)
# In order to represent this in memory, we would want to write the string to
# the beginning of the file:
#
# hello_txt_contents = claripy.BVV('Hello world, my name is John.', 30*8)
# hello_txt_backing_memory.store(0, hello_txt_contents)
#
# Perhaps, then, we would want to replace John with a
# symbolic variable. We would call:
#
# name_bitvector = claripy.BVS('symbolic_name', 4*8)
# hello_txt_backing_memory.store(24, name_bitvector)
#
# Then, after the program calls fopen('hello.txt', 'r') and then
# fread(buffer, sizeof(char), 30, hello_txt_file), the buffer would contain
# the string from the file, except four symbolic bytes where the name would be
# stored.
# (!)
password = claripy.BVS('password', symbolic_file_size_bytes * 8)
symbolic_file_backing_memory.store(???, password)
# Construct the symbolic file. The file_options parameter specifies the Linux
# file permissions (read, read/write, execute etc.) The content parameter
# specifies from where the stream of data should be supplied. If content is
# an instance of SimSymbolicMemory (we constructed one above), the stream will
# contain the contents (including any symbolic contents) of the memory,
# beginning from address zero.
# Set the content parameter to our SimSymbolicMemory instance that holds the
# symbolic data.
# (!)
file_options = 'r'
password_file = angr.storage.SimFile(filename, file_options, content=???, size=symbolic_file_size_bytes)
# We have already created the file and the memory that stores the data that
# the file will stream to the program, but we now need to tell Angr where the
# file should appear to exist on the filesystem. This is a mapping between
# strings representing the filenames and the angr.storage.SimFiles themselves. For
# example, if hello_txt_file was a SimFile,
# symbolic_filesystem = {
# 'hello.txt' : hello_txt_file
# }
# would specify that any fopen('hello.txt', 'r') calls should stream data from
# hello_txt_file.
symbolic_filesystem = {
filename : password_file
}
initial_state.posix.fs = symbolic_filesystem
simulation = project.factory.simgr(initial_state)
def is_successful(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return ???
def should_abort(state):
stdout_output = state.posix.dumps(sys.stdout.fileno())
return ???
simulation.explore(find=is_successful, avoid=should_abort)
if simulation.found:
solution_state = simulation.found[0]
solution = solution_state.se.eval(password,cast_to=str)
print solution
else:
raise Exception('Could not find the solution')
if __name__ == '__main__':
main(sys.argv)
| thomashaw/SecGen | modules/utilities/unix/ctf/metactf/files/repository/src_angr/dist/scaffold07.py | Python | gpl-3.0 | 5,656 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'DEFAULT_API_VERSION'
]
DEFAULT_API_VERSION = 'v1'
REQUEST_ID_HEADER = 'X-Request-ID'
| alfasin/st2 | st2common/st2common/constants/api.py | Python | apache-2.0 | 885 |
# Copyright (C) 2017 Xavier Lucas
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import getpass
import keyring
def build_option_parser(parser):
parser.add_argument(
"--keyring-service",
metavar="<service>",
default="confluence-cli",
help="Service entry",
)
parser.add_argument(
"--keyring-username",
metavar="<username>",
help="User name",
)
def after_command(app, cmd, result, error):
pass
def before_command(app, cmd):
pass
def initialize(app):
_load_credentials(app, app.options)
def _load_credentials(app, options):
app.username = options.keyring_username
app.password = _get_or_save(options.keyring_service, app.username)
def _get_or_save(service, entry):
value = keyring.get_password(service, entry)
if value is None:
value = getpass.getpass("Password to store: ")
keyring.set_password(service, entry, value)
return value
| xlucas/confluence-python-cli | confluenceclient/plugins/credentials/keyring/plugin.py | Python | gpl-3.0 | 1,553 |
import os
import time
import sys
import random
import multiprocessing
import argparse
from itertools import islice
from pysmt.shortcuts import reset_env, read_smtlib
def get_all_smt_files(target_dir=None):
if target_dir == None:
target_dir = "./"
assert os.path.exists(target_dir)
for root, _, files in os.walk(target_dir):
for f in files:
if f.endswith(".smt2"):
yield os.path.join(root, f)
def execute_script_fname(smtfile):
"""Read and call a Solver to solve the instance"""
print(smtfile)
reset_env()
assert os.path.exists(smtfile)
start = time.clock()
read_smtlib(smtfile)
end = time.clock()
return ( (end - start), smtfile)
def dump_stats(timings, fname):
if fname is None:
fname = "stats.out"
with open(fname, "w") as f:
        f.write('time, filename\n')
for k in timings:
f.write('%f, "%s"\n' % k)
def main():
parser = argparse.ArgumentParser(description='SMT-LIB Parser Benchmarking')
parser.add_argument('--base', type=str, nargs='?',
help='top-directory of the benchmarks')
parser.add_argument('--count', type=int, nargs='?',
default=-1,
help='number of files to benchmark')
parser.add_argument('--out', type=str, default="stats.out", nargs='?',
help='Where to save the statistics')
args = parser.parse_args()
random.seed(42)
p = multiprocessing.Pool()
chunks = multiprocessing.cpu_count()
file_list = list(get_all_smt_files(args.base))
random.shuffle(file_list)
if args.count == -1:
files_cnt = len(file_list)
else:
files_cnt = args.count
print("Submitting %d jobs, %d at the time" % (files_cnt, chunks))
timings = p.map(execute_script_fname, islice(file_list, files_cnt), chunks)
mean = sum(x[0] for x in timings) / len(timings)
print("The mean execution time was %0.2f seconds" % mean)
print("The max execution time was %0.2f seconds" % max(x[0] for x in timings))
outfile = args.out
dump_stats(timings, outfile)
print("The statistics file has been generated in '%s'" % outfile)
if __name__ == '__main__':
main()
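# Illustrative invocation (the benchmark directory is hypothetical):
#   python parse_all.py --base ./smtlib-benchmarks --count 200 --out stats.out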
| pysmt/pysmt | SMT-LIB/parse_all.py | Python | apache-2.0 | 2,268 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, print_function)
import sys
import os
from ansible import constants as C
from ansible.constants import mk_boolean
try:
from ansible.plugins.callback import CallbackBase
parent = CallbackBase
except ImportError:
parent = object
VAR_IDEMPOTENCE = u'IDEMPOTENCE'
class CallbackModule(parent):
"""
This callback module performs the idempotency test whenever the 'idempotency' variable is set to True.
"""
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'idempotency'
def __init__(self):
self.playbook = None
self.enabled = mk_boolean(os.getenv(VAR_IDEMPOTENCE, 'no'))
super(CallbackModule, self).__init__()
def playbook_on_stats(self, stats):
if self.enabled:
if len(stats.dark) > 0:
self._display.warning('idempotency test failed: unreachable=%s > 0' % stats.dark)
sys.exit(os.EX_SOFTWARE)
if len(stats.changed) > 0:
self._display.warning('idempotency test failed: changed=%s > 0' % stats.changed)
sys.exit(os.EX_SOFTWARE)
if len(stats.failures) > 0:
self._display.warning('idempotency test failed: failures=%s > 0' % stats.failures)
sys.exit(os.EX_SOFTWARE)
def v2_playbook_on_stats(self, stats):
"""Verify that playbook ran without any changes or failures."""
self.playbook_on_stats(stats)
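# Illustrative activation sketch (comment only; the playbook name is
# hypothetical, and this file must be on Ansible's callback plugin path, e.g.
# via ANSIBLE_CALLBACK_PLUGINS or the callback_plugins setting in ansible.cfg):
#   IDEMPOTENCE=yes ansible-playbook site.yml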
| ansiblebit/pip | tests/plugins/callback/idempotence.py | Python | bsd-3-clause | 1,482 |
from datetime import datetime
from listenbrainz.model import db
from listenbrainz.webserver.admin import AdminModelView
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime(timezone=True), default=datetime.utcnow)
musicbrainz_id = db.Column(db.String)
auth_token = db.Column(db.String)
last_login = db.Column(db.DateTime(timezone=True), default=datetime.utcnow, nullable=False)
    latest_import = db.Column(db.DateTime(timezone=True), default=lambda: datetime.utcfromtimestamp(0))
gdpr_agreed = db.Column(db.DateTime(timezone=True))
musicbrainz_row_id = db.Column(db.Integer, nullable=False)
login_id = db.Column(db.String)
class UserAdminView(AdminModelView):
form_columns = [
'musicbrainz_id',
'musicbrainz_row_id',
]
column_list = [
'id',
'musicbrainz_id',
'musicbrainz_row_id',
'created',
'auth_token',
'gdpr_agreed',
'last_login',
'latest_import',
'login_id',
]
column_searchable_list = [
'id',
'musicbrainz_row_id',
'musicbrainz_id'
]
column_filters = [
'created',
'gdpr_agreed',
'last_login',
'latest_import',
'id',
'musicbrainz_id',
'musicbrainz_row_id',
]
| Freso/listenbrainz-server | listenbrainz/model/user.py | Python | gpl-2.0 | 1,376 |
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Glue"
prefix = "glue"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
BatchCreatePartition = Action("BatchCreatePartition")
BatchDeleteConnection = Action("BatchDeleteConnection")
BatchDeletePartition = Action("BatchDeletePartition")
BatchDeleteTable = Action("BatchDeleteTable")
BatchDeleteTableVersion = Action("BatchDeleteTableVersion")
BatchGetBlueprints = Action("BatchGetBlueprints")
BatchGetCrawlers = Action("BatchGetCrawlers")
BatchGetDevEndpoints = Action("BatchGetDevEndpoints")
BatchGetJobs = Action("BatchGetJobs")
BatchGetPartition = Action("BatchGetPartition")
BatchGetTriggers = Action("BatchGetTriggers")
BatchGetWorkflows = Action("BatchGetWorkflows")
BatchStopJobRun = Action("BatchStopJobRun")
BatchUpdatePartition = Action("BatchUpdatePartition")
CancelMLTaskRun = Action("CancelMLTaskRun")
CancelStatement = Action("CancelStatement")
CheckSchemaVersionValidity = Action("CheckSchemaVersionValidity")
CreateBlueprint = Action("CreateBlueprint")
CreateClassifier = Action("CreateClassifier")
CreateConnection = Action("CreateConnection")
CreateCrawler = Action("CreateCrawler")
CreateDatabase = Action("CreateDatabase")
CreateDevEndpoint = Action("CreateDevEndpoint")
CreateJob = Action("CreateJob")
CreateMLTransform = Action("CreateMLTransform")
CreatePartition = Action("CreatePartition")
CreatePartitionIndex = Action("CreatePartitionIndex")
CreateRegistry = Action("CreateRegistry")
CreateSchema = Action("CreateSchema")
CreateScript = Action("CreateScript")
CreateSecurityConfiguration = Action("CreateSecurityConfiguration")
CreateSession = Action("CreateSession")
CreateTable = Action("CreateTable")
CreateTrigger = Action("CreateTrigger")
CreateUserDefinedFunction = Action("CreateUserDefinedFunction")
CreateWorkflow = Action("CreateWorkflow")
DeleteBlueprint = Action("DeleteBlueprint")
DeleteClassifier = Action("DeleteClassifier")
DeleteColumnStatisticsForPartition = Action("DeleteColumnStatisticsForPartition")
DeleteColumnStatisticsForTable = Action("DeleteColumnStatisticsForTable")
DeleteConnection = Action("DeleteConnection")
DeleteCrawler = Action("DeleteCrawler")
DeleteDatabase = Action("DeleteDatabase")
DeleteDevEndpoint = Action("DeleteDevEndpoint")
DeleteJob = Action("DeleteJob")
DeleteMLTransform = Action("DeleteMLTransform")
DeletePartition = Action("DeletePartition")
DeletePartitionIndex = Action("DeletePartitionIndex")
DeleteRegistry = Action("DeleteRegistry")
DeleteResourcePolicy = Action("DeleteResourcePolicy")
DeleteSchema = Action("DeleteSchema")
DeleteSchemaVersions = Action("DeleteSchemaVersions")
DeleteSecurityConfiguration = Action("DeleteSecurityConfiguration")
DeleteSession = Action("DeleteSession")
DeleteTable = Action("DeleteTable")
DeleteTableVersion = Action("DeleteTableVersion")
DeleteTrigger = Action("DeleteTrigger")
DeleteUserDefinedFunction = Action("DeleteUserDefinedFunction")
DeleteWorkflow = Action("DeleteWorkflow")
GetBlueprint = Action("GetBlueprint")
GetBlueprintRun = Action("GetBlueprintRun")
GetBlueprintRuns = Action("GetBlueprintRuns")
GetCatalogImportStatus = Action("GetCatalogImportStatus")
GetClassifier = Action("GetClassifier")
GetClassifiers = Action("GetClassifiers")
GetColumnStatisticsForPartition = Action("GetColumnStatisticsForPartition")
GetColumnStatisticsForTable = Action("GetColumnStatisticsForTable")
GetConnection = Action("GetConnection")
GetConnections = Action("GetConnections")
GetCrawler = Action("GetCrawler")
GetCrawlerMetrics = Action("GetCrawlerMetrics")
GetCrawlers = Action("GetCrawlers")
GetDataCatalogEncryptionSettings = Action("GetDataCatalogEncryptionSettings")
GetDatabase = Action("GetDatabase")
GetDatabases = Action("GetDatabases")
GetDataflowGraph = Action("GetDataflowGraph")
GetDevEndpoint = Action("GetDevEndpoint")
GetDevEndpoints = Action("GetDevEndpoints")
GetJob = Action("GetJob")
GetJobBookmark = Action("GetJobBookmark")
GetJobRun = Action("GetJobRun")
GetJobRuns = Action("GetJobRuns")
GetJobs = Action("GetJobs")
GetMLTaskRun = Action("GetMLTaskRun")
GetMLTaskRuns = Action("GetMLTaskRuns")
GetMLTransform = Action("GetMLTransform")
GetMLTransforms = Action("GetMLTransforms")
GetMapping = Action("GetMapping")
GetPartition = Action("GetPartition")
GetPartitionIndexes = Action("GetPartitionIndexes")
GetPartitions = Action("GetPartitions")
GetPlan = Action("GetPlan")
GetRegistry = Action("GetRegistry")
GetResourcePolicies = Action("GetResourcePolicies")
GetResourcePolicy = Action("GetResourcePolicy")
GetSchema = Action("GetSchema")
GetSchemaByDefinition = Action("GetSchemaByDefinition")
GetSchemaVersion = Action("GetSchemaVersion")
GetSchemaVersionsDiff = Action("GetSchemaVersionsDiff")
GetSecurityConfiguration = Action("GetSecurityConfiguration")
GetSecurityConfigurations = Action("GetSecurityConfigurations")
GetSession = Action("GetSession")
GetStatement = Action("GetStatement")
GetTable = Action("GetTable")
GetTableVersion = Action("GetTableVersion")
GetTableVersions = Action("GetTableVersions")
GetTables = Action("GetTables")
GetTags = Action("GetTags")
GetTrigger = Action("GetTrigger")
GetTriggers = Action("GetTriggers")
GetUserDefinedFunction = Action("GetUserDefinedFunction")
GetUserDefinedFunctions = Action("GetUserDefinedFunctions")
GetWorkflow = Action("GetWorkflow")
GetWorkflowRun = Action("GetWorkflowRun")
GetWorkflowRunProperties = Action("GetWorkflowRunProperties")
GetWorkflowRuns = Action("GetWorkflowRuns")
ImportCatalogToGlue = Action("ImportCatalogToGlue")
ListBlueprints = Action("ListBlueprints")
ListCrawlers = Action("ListCrawlers")
ListDevEndpoints = Action("ListDevEndpoints")
ListJobs = Action("ListJobs")
ListMLTransforms = Action("ListMLTransforms")
ListRegistries = Action("ListRegistries")
ListSchemaVersions = Action("ListSchemaVersions")
ListSchemas = Action("ListSchemas")
ListSessions = Action("ListSessions")
ListStatements = Action("ListStatements")
ListTriggers = Action("ListTriggers")
ListWorkflows = Action("ListWorkflows")
NotifyEvent = Action("NotifyEvent")
PutDataCatalogEncryptionSettings = Action("PutDataCatalogEncryptionSettings")
PutResourcePolicy = Action("PutResourcePolicy")
PutSchemaVersionMetadata = Action("PutSchemaVersionMetadata")
PutWorkflowRunProperties = Action("PutWorkflowRunProperties")
QuerySchemaVersionMetadata = Action("QuerySchemaVersionMetadata")
RegisterSchemaVersion = Action("RegisterSchemaVersion")
RemoveSchemaVersionMetadata = Action("RemoveSchemaVersionMetadata")
ResetJobBookmark = Action("ResetJobBookmark")
ResumeWorkflowRun = Action("ResumeWorkflowRun")
RunStatement = Action("RunStatement")
SearchTables = Action("SearchTables")
StartBlueprintRun = Action("StartBlueprintRun")
StartCrawler = Action("StartCrawler")
StartCrawlerSchedule = Action("StartCrawlerSchedule")
StartExportLabelsTaskRun = Action("StartExportLabelsTaskRun")
StartImportLabelsTaskRun = Action("StartImportLabelsTaskRun")
StartJobRun = Action("StartJobRun")
StartMLEvaluationTaskRun = Action("StartMLEvaluationTaskRun")
StartMLLabelingSetGenerationTaskRun = Action("StartMLLabelingSetGenerationTaskRun")
StartTrigger = Action("StartTrigger")
StartWorkflowRun = Action("StartWorkflowRun")
StopCrawler = Action("StopCrawler")
StopCrawlerSchedule = Action("StopCrawlerSchedule")
StopSession = Action("StopSession")
StopTrigger = Action("StopTrigger")
StopWorkflowRun = Action("StopWorkflowRun")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateBlueprint = Action("UpdateBlueprint")
UpdateClassifier = Action("UpdateClassifier")
UpdateColumnStatisticsForPartition = Action("UpdateColumnStatisticsForPartition")
UpdateColumnStatisticsForTable = Action("UpdateColumnStatisticsForTable")
UpdateConnection = Action("UpdateConnection")
UpdateCrawler = Action("UpdateCrawler")
UpdateCrawlerSchedule = Action("UpdateCrawlerSchedule")
UpdateDatabase = Action("UpdateDatabase")
UpdateDevEndpoint = Action("UpdateDevEndpoint")
UpdateJob = Action("UpdateJob")
UpdateMLTransform = Action("UpdateMLTransform")
UpdatePartition = Action("UpdatePartition")
UpdateRegistry = Action("UpdateRegistry")
UpdateSchema = Action("UpdateSchema")
UpdateTable = Action("UpdateTable")
UpdateTrigger = Action("UpdateTrigger")
UpdateUserDefinedFunction = Action("UpdateUserDefinedFunction")
UpdateWorkflow = Action("UpdateWorkflow")
UseMLTransforms = Action("UseMLTransforms")
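# Illustrative usage sketch (comment only; the account id, region and resource
# below are hypothetical). These Action/ARN objects are typically embedded in
# an IAM policy statement, e.g. together with the Statement/Allow helpers that
# awacs.aws provides:
#   actions = [GetTable, GetPartitions, BatchGetPartition]
#   table_arn = ARN(resource="table/mydb/mytable",
#                   region="eu-west-1", account="123456789012")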
| cloudtools/awacs | awacs/glue.py | Python | bsd-2-clause | 8,796 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Database import
"""
| andrebellafronte/stoq | stoqlib/importers/__init__.py | Python | gpl-2.0 | 892 |
# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from gettext import gettext as _
import gconf
_COLORS = {
'red': {'dark': '#b20008', 'medium': '#e6000a', 'light': '#ffadce'},
'orange': {'dark': '#9a5200', 'medium': '#c97e00', 'light': '#ffc169'},
'yellow': {'dark': '#807500', 'medium': '#be9e00', 'light': '#fffa00'},
'green': {'dark': '#008009', 'medium': '#00b20d', 'light': '#8bff7a'},
'blue': {'dark': '#00588c', 'medium': '#005fe4', 'light': '#bccdff'},
'purple': {'dark': '#5e008c', 'medium': '#7f00bf', 'light': '#d1a3ff'},
}
_MODIFIERS = ('dark', 'medium', 'light')
def get_nick():
client = gconf.client_get_default()
return client.get_string('/desktop/sugar/user/nick')
def print_nick():
print get_nick()
def set_nick(nick):
"""Set the nickname.
nick : e.g. 'walter'
"""
if not nick:
raise ValueError(_('You must enter a name.'))
if not isinstance(nick, unicode):
nick = unicode(nick, 'utf-8')
client = gconf.client_get_default()
client.set_string('/desktop/sugar/user/nick', nick)
return 1
def get_color():
client = gconf.client_get_default()
return client.get_string('/desktop/sugar/user/color')
def print_color():
color_string = get_color()
tmp = color_string.split(',')
stroke_tuple = None
fill_tuple = None
for color in _COLORS:
for hue in _COLORS[color]:
if _COLORS[color][hue] == tmp[0]:
stroke_tuple = (color, hue)
if _COLORS[color][hue] == tmp[1]:
fill_tuple = (color, hue)
if stroke_tuple is not None:
print _('stroke: color=%s hue=%s') % (stroke_tuple[0],
stroke_tuple[1])
else:
print _('stroke: %s') % (tmp[0])
if fill_tuple is not None:
print _('fill: color=%s hue=%s') % (fill_tuple[0], fill_tuple[1])
else:
print _('fill: %s') % (tmp[1])
def set_color(stroke, fill, stroke_modifier='medium', fill_modifier='medium'):
"""Set the system color by setting a fill and stroke color.
fill : [red, orange, yellow, blue, green, purple]
stroke : [red, orange, yellow, blue, green, purple]
hue stroke : [dark, medium, light] (optional)
hue fill : [dark, medium, light] (optional)
"""
if stroke_modifier not in _MODIFIERS or fill_modifier not in _MODIFIERS:
print (_('Error in specified color modifiers.'))
return
if stroke not in _COLORS or fill not in _COLORS:
print (_('Error in specified colors.'))
return
if stroke_modifier == fill_modifier:
if fill_modifier == 'medium':
fill_modifier = 'light'
else:
fill_modifier = 'medium'
color = _COLORS[stroke][stroke_modifier] + ',' \
+ _COLORS[fill][fill_modifier]
client = gconf.client_get_default()
client.set_string('/desktop/sugar/user/color', color)
return 1
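# For example, set_color('purple', 'green', 'dark', 'light') stores the color
# pair '#5e008c,#8bff7a' (values taken from _COLORS above) in GConf.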
def get_color_xo():
client = gconf.client_get_default()
return client.get_string('/desktop/sugar/user/color')
def set_color_xo(color):
"""Set a color with an XoColor
This method is used by the graphical user interface
"""
client = gconf.client_get_default()
client.set_string('/desktop/sugar/user/color', color)
return 1
| nemesiscodex/JukyOS-sugar | extensions/cpsection/aboutme/model.py | Python | gpl-2.0 | 4,022 |
from pycp2k.inputsection import InputSection
class _each143(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
| SINGROUP/pycp2k | pycp2k/classes/_each143.py | Python | lgpl-3.0 | 1,114 |
# -*- coding: utf-8 -*-
from message import Message, except_f1
import C1, C2
class A3(Message):
"""Classe que implementa A3."""
@property
def sollicitud(self):
"""Retorna l'objecte Sollicitud"""
return C1.Sollicitud(self.obj.PasoMRAMLConCambiosRestoTarifa.\
DatosSolicitud)
@property
def contracte(self):
"""Retorna l'objecte Contracte"""
obj = getattr(self.obj, self._header)
return C1.Contracte(obj.Contrato)
@property
def client(self):
"""Retorna l'objecte Client"""
return C1.Client(self.obj.PasoMRAMLConCambiosRestoTarifa.\
Cliente)
@property
def acceptacio(self):
"""Retorna l'objecte Acceptacio"""
obj = getattr(self.obj, self._header, False)
if obj and hasattr(obj, 'DatosAceptacion'):
return C1.Acceptacio(obj.DatosAceptacion)
return False
@property
def rebuig(self):
"""Retorna una llista de Rebuig"""
data = []
for i in self.obj.RechazoATRDistribuidoras.Rechazo:
data.append(C1.Rebuig(i))
return data
@property
def rebuig_anullacio(self):
"""Retorna l'objecte Rebuig"""
data = []
for i in self.obj.RechazoDeAnulacion.RechazoAnulacion:
data.append(C1.Rebuig(i))
return data
@property
def header(self):
return self._header
@property
def activacio(self):
"""Retorna l'objecte Activacio"""
return C1.Activacio(self.obj.\
ActivacionPasoMRAMLConCambiosRestoTarifas)
@property
def anullacio(self):
"""Retorna l'object Anullacio"""
return C1.Anullacio(self.obj.AnulacionSolicitud)
@property
def punts_mesura(self):
"""Retorna una llista de punts de mesura"""
data = []
obj = getattr(self.obj, self._header)
for i in obj.PuntosDeMedida.PuntoDeMedida:
data.append(C1.PuntMesura(i))
return data
@property
def mesura(self):
"""Retorna l'objecte mesura"""
obj = getattr(self.obj, self._header)
return C2.Mesura(obj.Medida)
@property
def comentaris(self):
"""Retorna una llista de comentaris"""
data = []
obj = getattr(self.obj, self._header)
if (hasattr(obj, 'Comentarios') and
hasattr(obj.Comentarios, 'Comentario')):
for i in obj.Comentarios.Comentario:
data.append(C2.Comentari(i))
return data
@property
def incidencies(self):
"""Retorna una llista de incidencies"""
data = []
for i in self.obj.IncidenciasATRDistribuidoras.Incidencia:
data.append(C1.Rebuig(i))
return data
| Som-Energia/switching | switching/input/messages/A3.py | Python | gpl-3.0 | 2,810 |
"""Lowest Common Ancestor of a Binary Search Tree
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the BST.
According to the [definition of LCA on Wikipedia](https://en.wikipedia.org/wiki/Lowest_common_ancestor):
"The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both
p and q as descendants (where we allow a node to be a descendants of itself)".
Example 1:
3
/ \
/ \
5 1
/ \ / \
6 2 0 8
/ \
7 4
Input: root = [3, 5, 1, 6, 2, 0, 8, null, null, 7, 4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
Example 2:
3
/ \
/ \
5 1
/ \ / \
6 2 0 8
/ \
7 4
Input: root = [3, 5, 1, 6, 2, 0, 8, null, null, 7, 4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to the
LCA definition.
Example 3:
Input: root = [1, 2], p = 1, q = 2
Output: 1
Constraints:
    * The number of nodes in the tree is in the range [2, 10^5].
    * -10^9 <= Node.val <= 10^9
* All Node.val are unique.
* p != q
* p and q will exist in the tree.
"""
from typing import Optional, List
from utils.binary_tree import TreeNode, BinaryTreeBuilder
class Solution:
"""
    Based on this definition, consider three cases:
    Case 1: if both p and q are in the tree rooted at root, then left and right must be exactly p and q (this follows from the base case).
    Case 2: if neither p nor q is in the tree rooted at root, return None directly.
    Case 3: if exactly one of p and q is in the tree rooted at root, the function returns that node.
    Because this is a post-order traversal we work bottom-up, as if walking upward from p and q; the first node at which the two paths meet is the answer, root.
"""
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if root is None:
return None
if root.val == p.val or root.val == q.val:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left is not None and right is not None:
return root
if left is None and right is None:
return None
return left if left is not None else right
class SolutionB:
"""该解法将出现超时错误
"""
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
self.p = p
self.q = q
self.p_in_left = False
self.p_in_right = False
self.q_in_left = False
self.q_in_right = False
self.ancestor = None
self.traverse(root)
return self.ancestor
def traverse(self, root):
if root.val == self.p.val or root.val == self.q.val:
self.ancestor = root
return
self.found(root.left, True)
if self.p_in_left and self.q_in_left:
self.p_in_left = False
self.q_in_left = False
self.traverse(root.left)
elif self.p_in_left or self.q_in_left:
self.ancestor = root
return
self.found(root.right, False)
if self.p_in_right and self.q_in_right:
self.p_in_right = False
self.q_in_right = False
self.traverse(root.right)
if (self.p_in_left and self.q_in_right) or (self.p_in_right and self.q_in_left):
self.ancestor = root
return
def found(self, root, left):
if root is None:
return
if root.val == self.p.val:
self.p_in_left = left
self.p_in_right = not left
elif root.val == self.q.val:
self.q_in_left = left
self.q_in_right = not left
self.found(root.left, left)
self.found(root.right, left)
if __name__ == '__main__':
testcases = [
([3, 5, 1, 6, 2, 0, 8, None, None, 7, 4], 5, 1, 3),
([3, 5, 1, 6, 2, 0, 8, None, None, 7, 4], 5, 4, 5),
([1, 2], 1, 2, 1),
([-1, 0, 3, -2, 4, None, None, 8], 8, 4, 0)
]
ss = (Solution(), SolutionB())
for case in testcases:
root = BinaryTreeBuilder.build_from_level_ordered(case[0])
for s in ss:
result = s.lowestCommonAncestor(root, TreeNode(case[1]), TreeNode(case[2]))
assert result.val == case[3]
| aiden0z/snippets | leetcode/236_lowest_common_ancestor_of_binary_tree.py | Python | mit | 4,596 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# source: https://gist.github.com/1709069
# author/owner: nyergler github gist
"""
Frédéric Grosshans, 19 January 2012
Nathan R. Yergler, 6 June 2010
This file does not contain sufficient creative expression to invoke
assertion of copyright. No warranty is expressed or implied; use at
your own risk.
---
Uses Python's included mailbox library to convert mail archives from
maildir [http://en.wikipedia.org/wiki/Maildir] to
mbox [http://en.wikipedia.org/wiki/Mbox] format, including subfolders.
See http://docs.python.org/library/mailbox.html#mailbox.Mailbox for
full documentation on this library.
---
To run, save as md2mb.py and run:
$ python md2mb.py [maildir_path] [mbox_filename]
[maildir_path] should be the path to the actual maildir (containing new,
cur, tmp, and the subfolders, which are hidden directories with names like
.subfolder.subsubfolder.subsubsubfolder);
[mbox_filename] will be newly created, as well as a [mbox_filename].sbd
directory.
"""
import mailbox
import sys
import email
import os
def maildir2mailbox(maildirname, mboxfilename):
"""
slightly adapted from maildir2mbox.py,
Nathan R. Yergler, 6 June 2010
http://yergler.net/blog/2010/06/06/batteries-included-or-maildir-to-mbox-again/
"""
# open the existing maildir and the target mbox file
maildir = mailbox.Maildir(maildirname, email.message_from_file)
mbox = mailbox.mbox(mboxfilename)
# lock the mbox
mbox.lock()
# iterate over messages in the maildir and add to the mbox
for msg in maildir:
mbox.add(msg)
# close and unlock
mbox.close()
maildir.close()
def maildir2mailbox2(dirname, mboxname):
mboxdirname=mboxname+'.sbd'
maildir2mailbox(dirname,mboxname)
#if not os.path.exists(mboxdirname): os.makedirs(mboxdirname)
listofdirs=[dn for dn in os.walk(dirname).next()[1] if dn not in ['new', 'cur', 'tmp']]
for curfold in listofdirs:
curlist=[mboxname]+curfold.split('.')
curpath=os.path.join(*[dn+'.sbd' for dn in curlist if dn])
if not os.path.exists(curpath): os.makedirs(curpath)
print '| ' +curfold +' -> '+curpath[:-4]
maildir2mailbox(os.path.join(dirname,curfold),curpath[:-4])
if __name__ == "__main__":
dirname=sys.argv[-2]
mboxname=sys.argv[-1]
print(dirname + ' -> ' +mboxname)
maildir2mailbox2(dirname, mboxname)
print('Done')
| michal-ruzicka/archivematica | src/archivematicaCommon/lib/externals/maildirToMbox.py | Python | agpl-3.0 | 2,430 |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferImage
class OpenColorIOTest( unittest.TestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checker.exr" )
def test( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
o = GafferImage.OpenColorIO()
o["in"].setInput( n["out"] )
self.assertEqual( n["out"].image(), o["out"].image() )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
self.assertNotEqual( n["out"].image(), o["out"].image() )
def testHashPassThrough( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
o = GafferImage.OpenColorIO()
o["in"].setInput( n["out"] )
self.assertEqual( n["out"].image(), o["out"].image() )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
self.assertNotEqual( n["out"].image(), o["out"].image() )
o["enabled"].setValue( False )
self.assertEqual( n["out"].image(), o["out"].image() )
self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
o["enabled"].setValue( True )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "linear" )
self.assertEqual( n["out"].image(), o["out"].image() )
self.assertEqual( n["out"]['format'].hash(), o["out"]['format'].hash() )
self.assertEqual( n["out"]['dataWindow'].hash(), o["out"]['dataWindow'].hash() )
self.assertEqual( n["out"]["metadata"].getValue(), o["out"]["metadata"].getValue() )
self.assertEqual( n["out"]['channelNames'].hash(), o["out"]['channelNames'].hash() )
def testImageHashPassThrough( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.fileName )
o = GafferImage.OpenColorIO()
o["in"].setInput( i["out"] )
self.assertEqual( i["out"].imageHash(), o["out"].imageHash() )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
self.assertNotEqual( i["out"].imageHash(), o["out"].imageHash() )
def testChannelsAreSeparate( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/circles.exr" ) )
o = GafferImage.OpenColorIO()
o["in"].setInput( i["out"] )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
self.assertNotEqual(
o["out"].channelDataHash( "R", IECore.V2i( 0 ) ),
o["out"].channelDataHash( "G", IECore.V2i( 0 ) )
)
self.assertNotEqual(
o["out"].channelData( "R", IECore.V2i( 0 ) ),
o["out"].channelData( "G", IECore.V2i( 0 ) )
)
def testPassThrough( self ) :
i = GafferImage.ImageReader()
i["fileName"].setValue( self.fileName )
o = GafferImage.OpenColorIO()
o["in"].setInput( i["out"] )
o["inputSpace"].setValue( "linear" )
o["outputSpace"].setValue( "sRGB" )
self.assertEqual( i["out"]["format"].hash(), o["out"]["format"].hash() )
self.assertEqual( i["out"]["dataWindow"].hash(), o["out"]["dataWindow"].hash() )
self.assertEqual( i["out"]["channelNames"].hash(), o["out"]["channelNames"].hash() )
self.assertEqual( i["out"]["format"].getValue(), o["out"]["format"].getValue() )
self.assertEqual( i["out"]["dataWindow"].getValue(), o["out"]["dataWindow"].getValue() )
self.assertEqual( i["out"]["channelNames"].getValue(), o["out"]["channelNames"].getValue() )
if __name__ == "__main__":
unittest.main()
| goddardl/gaffer | python/GafferImageTest/OpenColorIOTest.py | Python | bsd-3-clause | 5,537 |
#!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read(filename):
"""return a dict of tables from rfc3454"""
f = open(filename, 'r')
inTable = False
ret = {}
while True:
l = f.readline()
if not l:
break
if inTable:
m = re.search('^ *----- End Table ([A-Z0-9\.]+) ----- *$', l)
if m:
ret[m.group(1)] = t
inTable = False
else:
t.append(l)
if re.search('^ *----- Start Table ([A-Z0-9\.]+) ----- *$', l):
inTable = True
t = []
f.close()
return ret
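# Usage sketch (the filename is illustrative; it should point at a local copy
# of the RFC 3454 text):
#
#   tables = read('rfc3454.txt')
#   print tables.keys()   # e.g. ['A.1', 'B.1', 'C.1.1', ...]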
| zarboz/XBMC-PVR-mac | tools/darwin/depends/samba/samba-3.6.6/source4/heimdal/lib/wind/rfc3454.py | Python | gpl-2.0 | 2,296 |
import sys, csv, xlwt, os
cmdargs = str(sys.argv)
def write_row(ws,rowx,row_array):
"""
Writes the values as either a float or a string, instead of just assuming everything
is a string.
"""
for colx, value in enumerate(row_array):
try:
float(value)
type_to_write="Float"
except ValueError:
type_to_write="String"
if type_to_write=="Float":
ws.write(rowx, colx, float(value))
elif type_to_write=="String":
ws.write(rowx, colx, value)
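# For example, write_row(ws, 0, ["Potential/V", "0.25"]) would store the
# first cell as text and the second as the float 0.25 (labels are
# illustrative).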
def csvs2xls(directory):
wb = xlwt.Workbook()
# Add 'instructions' tab
ws = wb.add_sheet('instructions')
write_row(ws,0,["#title","'xlim':(0.2,1.4), 'ylim':(-0.00000000001,0.00000000025)"])
write_row(ws,1,["file0","background","'linestyle':':', 'linewidth':3.0"])
write_row(ws,2,["file1","1mM analyte"])
write_row(ws,3,["file2","2mM analyte"])
# Write all the csv files in the directory to this workbook.
for filename in os.listdir(directory):
if filename.endswith(".csv") or filename.endswith(".txt"):
ws = wb.add_sheet(os.path.splitext(filename)[0])
with open('{}\\{}'.format(directory,filename),'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
skipped_rows=0
for rowx, row in enumerate(reader):
# Skip blank rows
if len(row)<1:
skipped_rows+=1
continue
# Skip text rows we don't want
if ("Segment" in row[0]) or \
("Header" in row[0]) or \
("Note" in row[0]) or \
("Ep" in row[0]) or \
("ip" in row[0]) or \
("Ah" in row[0]):
skipped_rows+=1
continue
# Add a blank row before the data
if ("Potential" in row[0]):
skipped_rows-=1
rowx = rowx - skipped_rows
write_row(ws,rowx,row)
return wb
if len(sys.argv)==3:
directory = sys.argv[1]
xls = csvs2xls(directory)
xls.save('{}\\{}{}'.format(directory,sys.argv[2],'.xls'))
print "Your file has been saved in the data folder."
elif len(sys.argv)==2:
# for when the script is in the same directory as all the data files.
directory = os.getcwd()
xls = csvs2xls(directory)
xls.save('{}\\{}{}'.format(directory,sys.argv[1],'.xls'))
print "Your file has been saved in the data folder."
else:
print "Please use this script with the following arguments: > python csvs2xls.py C:\data\directory outputfilename."
| tamarisk51/kampfmitexcel | chi_csvs2xls.py | Python | mit | 2,797 |
#----------------------------------------------------------------------
# Name: wx.lib.stattext
# Purpose: A generic wxGenStaticText class. Using this should
# eliminate some of the platform differences in wxStaticText,
# such as background colours and mouse sensitivity.
#
# Author: Robin Dunn
#
# Created: 8-July-2002
# RCS-ID: $Id: stattext.py 49762 2007-11-09 17:50:59Z AG $
# Copyright: (c) 2002 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/12/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
# o Untested.
#
import wx
BUFFERED = 0 # In unbuffered mode we can let the theme shine through,
# is there a way to do this when buffering?
#----------------------------------------------------------------------
class GenStaticText(wx.PyControl):
labelDelta = 1
def __init__(self, parent, ID, label,
pos = wx.DefaultPosition, size = wx.DefaultSize,
style = 0,
name = "genstattext"):
wx.PyControl.__init__(self, parent, ID, pos, size, style|wx.NO_BORDER,
wx.DefaultValidator, name)
wx.PyControl.SetLabel(self, label) # don't check wx.ST_NO_AUTORESIZE yet
self.InheritAttributes()
self.SetInitialSize(size)
self.Bind(wx.EVT_PAINT, self.OnPaint)
if BUFFERED:
self.defBackClr = self.GetBackgroundColour()
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
else:
self.SetBackgroundStyle(wx.BG_STYLE_SYSTEM)
def SetLabel(self, label):
"""
Sets the static text label and updates the control's size to exactly
fit the label unless the control has wx.ST_NO_AUTORESIZE flag.
"""
wx.PyControl.SetLabel(self, label)
style = self.GetWindowStyleFlag()
self.InvalidateBestSize()
if not style & wx.ST_NO_AUTORESIZE:
self.SetSize(self.GetBestSize())
self.Refresh()
def SetFont(self, font):
"""
Sets the static text font and updates the control's size to exactly
fit the label unless the control has wx.ST_NO_AUTORESIZE flag.
"""
wx.PyControl.SetFont(self, font)
style = self.GetWindowStyleFlag()
self.InvalidateBestSize()
if not style & wx.ST_NO_AUTORESIZE:
self.SetSize(self.GetBestSize())
self.Refresh()
def DoGetBestSize(self):
"""
Overridden base class virtual. Determines the best size of
the control based on the label size and the current font.
"""
label = self.GetLabel()
font = self.GetFont()
if not font:
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc = wx.ClientDC(self)
dc.SetFont(font)
maxWidth = totalHeight = 0
for line in label.split('\n'):
if line == '':
w, h = dc.GetTextExtent('W') # empty lines have height too
else:
w, h = dc.GetTextExtent(line)
totalHeight += h
maxWidth = max(maxWidth, w)
best = wx.Size(maxWidth, totalHeight)
self.CacheBestSize(best)
return best
def Enable(self, enable=True):
"""Overridden Enable() method to properly refresh the widget. """
wx.PyControl.Enable(self, enable)
self.Refresh()
def Disable(self):
"""Overridden Disable() method to properly refresh the widget. """
wx.PyControl.Disable(self)
self.Refresh()
def AcceptsFocus(self):
"""Overridden base class virtual."""
return False
def GetDefaultAttributes(self):
"""
Overridden base class virtual. By default we should use
the same font/colour attributes as the native StaticText.
"""
return wx.StaticText.GetClassDefaultAttributes()
def ShouldInheritColours(self):
"""
Overridden base class virtual. If the parent has non-default
colours then we want this control to inherit them.
"""
return True
def OnPaint(self, event):
if BUFFERED:
dc = wx.BufferedPaintDC(self)
else:
dc = wx.PaintDC(self)
width, height = self.GetClientSize()
if not width or not height:
return
if BUFFERED:
clr = self.GetBackgroundColour()
backBrush = wx.Brush(clr, wx.SOLID)
if wx.Platform == "__WXMAC__" and clr == self.defBackClr:
# if colour is still the default then use the striped background on Mac
backBrush.MacSetTheme(1) # 1 == kThemeBrushDialogBackgroundActive
dc.SetBackground(backBrush)
dc.Clear()
if self.IsEnabled():
dc.SetTextForeground(self.GetForegroundColour())
else:
dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
dc.SetFont(self.GetFont())
label = self.GetLabel()
style = self.GetWindowStyleFlag()
x = y = 0
for line in label.split('\n'):
if line == '':
w, h = self.GetTextExtent('W') # empty lines have height too
else:
w, h = self.GetTextExtent(line)
if style & wx.ALIGN_RIGHT:
x = width - w
if style & wx.ALIGN_CENTER:
x = (width - w)/2
dc.DrawText(line, x, y)
y += h
def OnEraseBackground(self, event):
pass
#----------------------------------------------------------------------
| ezequielpereira/Time-Line | libs64/wx/lib/stattext.py | Python | gpl-3.0 | 5,824 |
# -*- coding: utf-8 -*-
from django.template import Library
from tmitter.mvc.models import *
from tmitter.settings import *
register = Library()
def in_list(val,lst):
"""
summary:
        Check whether the value is in the list
author:
Jason Lee
"""
return val in lst
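# Template usage sketch (after loading this tag library; variable names are
# illustrative):
#
#   {% if item.id|in_list:selected_ids %}selected{% endif %}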
register.filter("in_list", in_list) | rsj217/dmblog | dmblog/mblog/templatetags/common_tags.py | Python | mit | 322 |
########################################################################
#
# University of Southampton IT Innovation Centre, 2011
#
# Copyright in this library belongs to the University of Southampton
# University Road, Highfield, Southampton, UK, SO17 1BJ
#
# This software may not be used, sold, licensed, transferred, copied
# or reproduced in whole or in part in any manner or form or in or
# on any media by any person other than in accordance with the terms
# of the Licence Agreement supplied with the software, or otherwise
# without the prior written consent of the copyright owners.
#
# This software is distributed WITHOUT ANY WARRANTY, without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE, except where stated in the Licence Agreement supplied with
# the software.
#
# Created By : Mark McArdle
# Created Date : 2011-03-25
# Created for Project : PrestoPrime
#
########################################################################
import re
import usage_store as usage_store
import settings as settings
import datetime
import logging
from models import MFile
from models import DataService
metric_responsetime = "http://mserve/responsetime"
metric_delivery_success = settings.DELIVERY_SUCCESS_METRIC
class ResponseMiddleware(object):
def process_response(self, request, response):
match = re.search("\/mfiles\/(?P<id>.*)\/file\/", request.path)
if match is not None:
mfileid = match.group("id")
starttime = request.META["starttime"]
endtime = datetime.datetime.now()
timetaken = endtime - starttime
            # Express the elapsed time in seconds as a float; the previous
            # "%s.%s" string join mangled the fractional part because the
            # microseconds were not zero-padded.
            time_taken = timetaken.seconds + timetaken.microseconds / 1e6
usage_store.record(mfileid,metric_responsetime,time_taken)
try:
mfile = MFile.objects.get(id=mfileid)
ds = DataService.objects.get(mfile__id=mfileid)
multiplier = ds.managementproperty_set.get(property="deliverySuccessMultiplier_GB").value
constant = ds.managementproperty_set.get(property="deliverySuccessConstant_Minutes").value
target_delivery_time = mfile.size/(1024.0*1024.0*1024.0) * float(multiplier) + float(constant)
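                # Worked example (numbers are illustrative, not defaults): a
                # 2 GB file with multiplier 3 min/GB and constant 5 min gets
                # a target of 2*3 + 5 = 11 minutes.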
time_taken_minutes = time_taken/60.0
if target_delivery_time < time_taken_minutes:
usage_store.record(mfile.id, metric_delivery_success, 0)
else:
usage_store.record(mfile.id, metric_delivery_success, 1)
except Exception as e:
logging.error("Request for mfile %s throws error - %s ", mfileid, e )
return response
def process_request(self, request):
match = re.search("\/mfiles\/(?P<id>.*)\/file\/", request.path)
if match is not None:
request.META["starttime"] = datetime.datetime.now()
return
| mmcardle/MServe | django-mserve/dataservice/middleware.py | Python | lgpl-2.1 | 2,906 |
from setuptools import setup, find_packages
setup(name='userprofile',
version='0.6',
description='Django pluggable user profile zone',
author='David Rubert',
packages=find_packages(),
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
include_package_data=True,
install_requires=['setuptools'],
)
| tualatrix/django-profile | setup.py | Python | bsd-2-clause | 620 |
'''
Plan
-----
for each 2013 bill, if id starts with "H " or "S ",
'''
import pymongo
from billy.core import db
def action2tuple(action):
ac = map(action.get, ['action', 'actor', 'date'])
ac.append('-'.join(action['type']))
return tuple(ac)
def main():
spec = dict(state='fl', session='2014')
print('fixing bills')
for dupe in db.bills.find(spec):
dupe_bill_id = dupe['bill_id']
letter, number = dupe_bill_id.split(' ', 1)
        if len(letter) == 1:
regex = ur'%s[A-Z]* %s$' % (letter, number)
spec = {
'state': 'fl',
'session': '2014',
'bill_id': {'$regex': regex},
'title': dupe['title']}
bills_2014 = list(db.bills.find(spec))
same_actions = []
dupe_actionset = set(map(action2tuple, dupe['actions']))
for mergebill in bills_2014:
if mergebill == dupe:
continue
mergebill_actions = map(action2tuple, mergebill['actions'])
if dupe_actionset.issubset(mergebill_actions):
same_actions.append(mergebill)
if not same_actions:
print 'no dupes for', dupe['bill_id']
continue
if not len(same_actions) == 1:
print "CRAAAAAP"
import pdb; pdb.set_trace()
else:
mergebill = same_actions.pop()
print 'merging %s into %s' % (dupe['bill_id'], mergebill['bill_id'])
mergebill['_all_ids'].append(dupe['_id'])
db.bills.save(mergebill, w=1)
db.bills.remove(dupe['_id'])
else:
print("Not merging %s" % dupe['bill_id'])
if __name__ == "__main__":
main()
| showerst/openstates | scripts/fl/2014_dupes.py | Python | gpl-3.0 | 1,816 |
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from djangobb_forum.models import Post
from djangobb_forum.templatetags.forum_extras import profile_link, link, mobile_link
class TestLinkTags(TestCase):
fixtures = ['test_forum.json']
def setUp(self):
self.user = User.objects.get(pk=1)
self.post = Post.objects.get(pk=1)
def test_profile_link(self):
plink = profile_link(self.user)
self.assertEqual(plink, u"<a href=\"/users/djangobb/\">djangobb</a>")
def test_link(self):
l = link(self.post)
self.assertEqual(l, "<a href=\"/discuss/post/1/\">Test Body</a>")
def test_mobile_link(self):
l = mobile_link(self.post)
self.assertEqual(l, "<a href=\"/discuss/m/post/1/\">Test Body</a>")
| tjvr/s2forums | djangobb_forum/tests/test_templatetags.py | Python | bsd-3-clause | 829 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.db import models
from django.db.transaction import atomic
from django.template.defaultfilters import slugify
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from dynamic_forms.actions import action_registry
from dynamic_forms.conf import settings
from dynamic_forms.fields import TextMultiSelectField
from dynamic_forms.formfields import formfield_registry
@python_2_unicode_compatible
class FormModel(models.Model):
name = models.CharField(_('Name'), max_length=50, unique=True)
submit_url = models.CharField(_('Submit URL'), max_length=100, unique=True,
help_text=_('The full URL path to the form. It should start '
'and end with a forward slash (<code>/</code>).'))
success_url = models.CharField(_('Success URL'), max_length=100,
help_text=_('The full URL path where the user will be '
'redirected after successfully sending the form. It should start '
'and end with a forward slash (<code>/</code>). If empty, the '
'success URL is generated by appending <code>done/</code> to the '
'“Submit URL”.'), blank=True, default='')
actions = TextMultiSelectField(_('Actions'), default='',
choices=action_registry.get_as_choices())
form_template = models.CharField(_('Form template path'), max_length=100,
default='dynamic_forms/form.html',
choices=settings.DYNAMIC_FORMS_FORM_TEMPLATES)
success_template = models.CharField(_('Success template path'),
max_length=100, default='dynamic_forms/form_success.html',
choices=settings.DYNAMIC_FORMS_SUCCESS_TEMPLATES)
allow_display = models.BooleanField(_('Allow display'), default=False,
help_text=_('Allow a user to view the input at a later time. This '
'requires the “Store in database” action to be active. The sender '
'will be given a unique URL to recall the data.'))
recipient_email = models.EmailField(_('Recipient email'), blank=True,
null=True, help_text=_('Email address to send form data.'))
class Meta:
ordering = ['name']
verbose_name = _('Dynamic form')
verbose_name_plural = _('Dynamic forms')
def __str__(self):
return self.name
def get_fields_as_dict(self):
"""
Returns an ``OrderedDict`` (``SortedDict`` when ``OrderedDict is not
available) with all fields associated with this form where their name
is the key and their label is the value.
"""
return OrderedDict(self.fields.values_list('name', 'label').all())
def save(self, *args, **kwargs):
"""
Makes sure that the ``submit_url`` and -- if defined the
``success_url`` -- end with a forward slash (``'/'``).
"""
if not self.submit_url.endswith('/'):
self.submit_url = self.submit_url + '/'
if self.success_url:
if not self.success_url.endswith('/'):
self.success_url = self.success_url + '/'
else:
self.success_url = self.submit_url + 'done/'
super(FormModel, self).save(*args, **kwargs)
@python_2_unicode_compatible
class FormFieldModel(models.Model):
parent_form = models.ForeignKey(FormModel, on_delete=models.CASCADE,
related_name='fields')
field_type = models.CharField(_('Type'), max_length=255,
choices=formfield_registry.get_as_choices())
label = models.CharField(_('Label'), max_length=255)
name = models.SlugField(_('Name'), max_length=50, blank=True)
_options = models.TextField(_('Options'), blank=True, null=True)
position = models.SmallIntegerField(_('Position'), blank=True, default=0)
class Meta:
ordering = ['parent_form', 'position']
unique_together = ("parent_form", "name",)
verbose_name = _('Form field')
verbose_name_plural = _('Form fields')
def __str__(self):
return _('Field “%(field_name)s” in form “%(form_name)s”') % {
'field_name': self.label,
'form_name': self.parent_form.name,
}
def generate_form_field(self, form):
field_type_cls = formfield_registry.get(self.field_type)
field = field_type_cls(**self.get_form_field_kwargs())
field.contribute_to_form(form)
return field
def get_form_field_kwargs(self):
kwargs = self.options
kwargs.update({
'name': self.name,
'label': self.label,
})
return kwargs
@property
def options(self):
"""Options passed to the form field during construction."""
if not hasattr(self, '_options_cached'):
self._options_cached = {}
if self._options:
try:
self._options_cached = json.loads(self._options)
except ValueError:
pass
return self._options_cached
@options.setter
def options(self, opts):
if hasattr(self, '_options_cached'):
del self._options_cached
self._options = json.dumps(opts)
def save(self, *args, **kwargs):
if not self.name:
self.name = slugify(self.label)
given_options = self.options
field_type_cls = formfield_registry.get(self.field_type)
invalid = set(self.options.keys()) - set(field_type_cls._meta.keys())
if invalid:
for key in invalid:
del given_options[key]
self.options = given_options
super(FormFieldModel, self).save(*args, **kwargs)
@python_2_unicode_compatible
class FormModelData(models.Model):
form = models.ForeignKey(FormModel, on_delete=models.SET_NULL,
related_name='data', null=True)
value = models.TextField(_('Form data'), blank=True, default='')
submitted = models.DateTimeField(_('Submitted on'), auto_now_add=True)
display_key = models.CharField(_('Display key'), max_length=24, null=True,
blank=True, db_index=True, default=None, unique=True,
help_text=_('A unique identifier that is used to allow users to view '
'their sent data. Unique over all stored data sets.'))
class Meta:
verbose_name = _('Form data')
verbose_name_plural = _('Form data')
def __str__(self):
return _('Form: “%(form)s” on %(date)s') % {
'form': self.form,
'date': self.submitted,
}
def save(self, *args, **kwargs):
with atomic():
if self.form.allow_display and not self.display_key:
dk = get_random_string(24)
while FormModelData.objects.filter(display_key=dk).exists():
dk = get_random_string(24)
self.display_key = dk
super(FormModelData, self).save(*args, **kwargs)
@property
def json_value(self):
return OrderedDict(sorted(json.loads(self.value).items()))
def pretty_value(self):
try:
value = format_html_join('',
'<dt>{0}</dt><dd>{1}</dd>',
(
(force_text(k), force_text(v))
for k, v in self.json_value.items()
)
)
return format_html('<dl>{0}</dl>', value)
except ValueError:
return self.value
pretty_value.allow_tags = True
@property
def show_url(self):
"""
If the form this data set belongs to has
:attr:`~FormModel.allow_display` ``== True``, return the permanent URL.
If displaying is not allowed, return an empty string.
"""
if self.form.allow_display:
return reverse('dynamic_forms:data-set-detail',
kwargs={'display_key': self.display_key})
return ''
@property
def show_url_link(self):
"""
Similar to :attr:`show_url` but wraps the display key in an `<a>`-tag
linking to the permanent URL.
"""
if self.form.allow_display:
return format_html('<a href="{0}">{1}</a>', self.show_url, self.display_key)
return ''
| wangjiaxi/django-dynamic-forms | dynamic_forms/models.py | Python | bsd-3-clause | 8,474 |
# encoding: utf-8
import re
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
determine_ext,
)
class DaumIE(InfoExtractor):
_VALID_URL = r'https?://tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
IE_NAME = u'daum.net'
_TEST = {
u'url': u'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
u'file': u'52554690.mp4',
u'info_dict': {
u'title': u'DOTA 2GETHER 시즌2 6회 - 2부',
u'description': u'DOTA 2GETHER 시즌2 6회 - 2부',
u'upload_date': u'20130831',
u'duration': 3868,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(r'<link rel="video_src" href=".+?vid=(.+?)"',
webpage, u'full id')
query = compat_urllib_parse.urlencode({'vid': full_id})
info = self._download_xml(
'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
u'Downloading video info')
urls = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
video_id, u'Downloading video formats info')
self.to_screen(u'%s: Getting video urls' % video_id)
formats = []
for format_el in urls.findall('result/output_list/output_list'):
profile = format_el.attrib['profile']
format_query = compat_urllib_parse.urlencode({
'vid': full_id,
'profile': profile,
})
url_doc = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
video_id, note=False)
format_url = url_doc.find('result/url').text
formats.append({
'url': format_url,
'ext': determine_ext(format_url),
'format_id': profile,
})
info = {
'id': video_id,
'title': info.find('TITLE').text,
'formats': formats,
'thumbnail': self._og_search_thumbnail(webpage),
'description': info.find('CONTENTS').text,
'duration': int(info.find('DURATION').text),
'upload_date': info.find('REGDTTM').text[:8],
}
# TODO: Remove when #980 has been merged
info.update(formats[-1])
return info
| nilsonmorales/Puppyes-nightrc | usr/local/lib/python2.7/dist-packages/youtube_dl/extractor/daum.py | Python | gpl-3.0 | 2,601 |
import os
import sys
import logging
import xml.etree.ElementTree as ET
from urlparse import urlparse
from namenode import Namenode
log = logging.getLogger(__name__)
class HDFSConfig(object):
use_trash = False
@classmethod
def get_config_from_env(cls):
'''Gets configuration out of environment.
Returns list of dicts - list of namenode representations
'''
core_path = os.path.join(os.environ['HADOOP_HOME'], 'conf', 'core-site.xml')
configs = cls.read_core_config(core_path)
hdfs_path = os.path.join(os.environ['HADOOP_HOME'], 'conf', 'hdfs-site.xml')
tmp_config = cls.read_hdfs_config(hdfs_path)
if tmp_config:
# if config exists in hdfs - it's HA config, update configs
configs = tmp_config
if not configs:
raise Exception("No config found in %s nor in %s" % (core_path, hdfs_path))
return configs
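    # Each reader below returns a list of dicts, one per namenode, e.g.
    # (hostname is illustrative):
    #   [{'namenode': 'nn1.example.com', 'port': 8020}]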
@staticmethod
def read_hadoop_config(hdfs_conf_path):
if os.path.exists(hdfs_conf_path):
try:
tree = ET.parse(hdfs_conf_path)
except:
log.error("Unable to parse %s" % hdfs_conf_path)
return
root = tree.getroot()
for p in root.findall("./property"):
yield p
@classmethod
def read_core_config(cls, core_site_path):
config = []
for property in cls.read_hadoop_config(core_site_path):
# fs.default.name is the key name for the file system on EMR clusters
if property.findall('name')[0].text in ('fs.defaultFS', 'fs.default.name'):
parse_result = urlparse(property.findall('value')[0].text)
log.debug("Got namenode '%s' from %s" % (parse_result.geturl(), core_site_path))
config.append({"namenode": parse_result.hostname,
"port": parse_result.port if parse_result.port
else Namenode.DEFAULT_PORT})
if property.findall('name')[0].text == 'fs.trash.interval':
cls.use_trash = True
return config
@classmethod
def read_hdfs_config(cls, hdfs_site_path):
configs = []
for property in cls.read_hadoop_config(hdfs_site_path):
if property.findall('name')[0].text.startswith("dfs.namenode.rpc-address"):
parse_result = urlparse("//" + property.findall('value')[0].text)
log.debug("Got namenode '%s' from %s" % (parse_result.geturl(), hdfs_site_path))
configs.append({"namenode": parse_result.hostname,
"port": parse_result.port if parse_result.port
else Namenode.DEFAULT_PORT})
if property.findall('name')[0].text == 'fs.trash.interval':
cls.use_trash = True
return configs
core_try_paths = ('/etc/hadoop/conf/core-site.xml',
'/usr/local/etc/hadoop/conf/core-site.xml',
'/usr/local/hadoop/conf/core-site.xml')
hdfs_try_paths = ('/etc/hadoop/conf/hdfs-site.xml',
'/usr/local/etc/hadoop/conf/hdfs-site.xml',
'/usr/local/hadoop/conf/hdfs-site.xml')
@classmethod
def get_external_config(cls):
if os.environ.get('HADOOP_HOME'):
configs = cls.get_config_from_env()
return configs
else:
# Try to find other paths
configs = []
for core_conf_path in cls.core_try_paths:
configs = cls.read_core_config(core_conf_path)
if configs:
break
for hdfs_conf_path in cls.hdfs_try_paths:
tmp_config = cls.read_hdfs_config(hdfs_conf_path)
if tmp_config:
# if there is hdfs-site data available return it
return tmp_config
return configs
| dgoldin/snakebite | snakebite/config.py | Python | apache-2.0 | 4,037 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0008_project_created_by'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'permissions': (('view_comment', 'View comment'),)},
),
migrations.AlterModelOptions(
name='productstatus',
options={'permissions': (('view_productstatus', 'View product status'),)},
),
]
| GETLIMS/LIMS-Backend | lims/projects/migrations/0009_auto_20160722_0918.py | Python | mit | 571 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_recipiemeicineDialog.ui'
#
# Created: Sun Apr 21 16:21:54 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_RecipieMedicineDialog(object):
def setupUi(self, RecipieMedicineDialog):
RecipieMedicineDialog.setObjectName(_fromUtf8("RecipieMedicineDialog"))
RecipieMedicineDialog.resize(243, 117)
self.verticalLayout = QtGui.QVBoxLayout(RecipieMedicineDialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.spinBox = QtGui.QSpinBox(RecipieMedicineDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBox.sizePolicy().hasHeightForWidth())
self.spinBox.setSizePolicy(sizePolicy)
self.spinBox.setMinimum(1)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.verticalLayout.addWidget(self.spinBox)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.closeButton = QtGui.QPushButton(RecipieMedicineDialog)
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.horizontalLayout.addWidget(self.closeButton)
self.saveButton = QtGui.QPushButton(RecipieMedicineDialog)
self.saveButton.setObjectName(_fromUtf8("saveButton"))
self.horizontalLayout.addWidget(self.saveButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(RecipieMedicineDialog)
QtCore.QMetaObject.connectSlotsByName(RecipieMedicineDialog)
def retranslateUi(self, RecipieMedicineDialog):
RecipieMedicineDialog.setWindowTitle(QtGui.QApplication.translate("RecipieMedicineDialog", "Reseptilääke", None, QtGui.QApplication.UnicodeUTF8))
self.closeButton.setText(QtGui.QApplication.translate("RecipieMedicineDialog", "Sulje", None, QtGui.QApplication.UnicodeUTF8))
self.saveButton.setText(QtGui.QApplication.translate("RecipieMedicineDialog", "Tallenna", None, QtGui.QApplication.UnicodeUTF8))
| mape90/VetApp | uipy/ui_recipiemeicineDialog.py | Python | gpl-3.0 | 2,723 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import numpy as np
from ctypes import POINTER, c_double, c_int64
from pyscf.nao.m_libnao import libnao
libnao.rsphar.argtypes = (POINTER(c_double), POINTER(c_int64), POINTER(c_double))
libnao.rsphar_vec.argtypes = (POINTER(c_double), POINTER(c_int64), POINTER(c_int64), POINTER(c_double))
libnao.rsphar_exp_vec.argtypes = (POINTER(c_double), POINTER(c_int64), POINTER(c_int64), POINTER(c_double))
#
#
#
def rsphar(r,lmax,res):
"""
Computes (all) real spherical harmonics up to the angular momentum lmax
Args:
r : Cartesian coordinates defining correct theta and phi angles for spherical harmonic
lmax : Integer, maximal angular momentum
Result:
    The output array res is filled in place with all spherical harmonics stored in order 0,0; 1,-1; 1,0; 1,+1 ... lmax,lmax, altogether (lmax+1)**2 elements.
"""
assert r.shape[-1]==3
r_cp = np.require(r, dtype=float, requirements='C')
res = np.require(res, dtype=float, requirements='CW')
libnao.rsphar(r_cp.ctypes.data_as(POINTER(c_double)), c_int64(lmax), res.ctypes.data_as(POINTER(c_double)))
return 0
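#
# Usage sketch (lmax and the coordinate are illustrative):
#
#   lmax = 2
#   res = np.zeros((lmax + 1)**2)
#   rsphar(np.array([0.0, 0.0, 1.0]), lmax, res)   # res is filled in place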
#
#
#
def rsphar_vec(rvs,lmax):
"""
Computes (all) real spherical harmonics up to the angular momentum lmax
Args:
rvs : Cartesian coordinates defining correct theta and phi angles for spherical harmonic
lmax : Integer, maximal angular momentum
Result:
    2-d numpy array of float64 elements of shape (len(rvs), (lmax+1)**2), with all spherical harmonics for each point stored in order 0,0; 1,-1; 1,0; 1,+1 ... lmax,lmax, altogether (lmax+1)**2 elements per point.
"""
assert rvs.shape[-1]==3
r_cp = np.require(rvs, dtype=float, requirements='C')
nc = len(rvs)
res = np.require( np.zeros((nc, (lmax+1)**2)), dtype=float, requirements='CW')
libnao.rsphar_vec(r_cp.ctypes.data_as(POINTER(c_double)), c_int64(nc), c_int64(lmax), res.ctypes.data_as(POINTER(c_double)))
#for irv,rvec in enumerate(rvs): rsphar(rvec,lmax,res[irv,:])
return res
#
#
#
def rsphar_exp_vec(rvs,lmax):
"""
Computes (all) real spherical harmonics up to the angular momentum lmax
Args:
rvs : Cartesian coordinates defining correct theta and phi angles for spherical harmonic
lmax : Integer, maximal angular momentum
Result:
    2-d numpy array of float64 elements of shape ((lmax+1)**2, nc), where nc is the number of points, with all spherical harmonics stored in order 0,0; 1,-1; 1,0; 1,+1 ... lmax,lmax, altogether (lmax+1)**2 elements per point.
"""
assert rvs.shape[0]==3
r_cp = np.require(rvs, dtype=np.float64, requirements='C')
nc = rvs[0,...].size
res = np.require( np.zeros(((lmax+1)**2,nc)), dtype=np.float64, requirements='CW')
libnao.rsphar_exp_vec(r_cp.ctypes.data_as(POINTER(c_double)), c_int64(nc), c_int64(lmax), res.ctypes.data_as(POINTER(c_double)))
#for irv,rvec in enumerate(rvs): rsphar(rvec,lmax,res[irv,:])
return res
| gkc1000/pyscf | pyscf/nao/m_rsphar_libnao.py | Python | apache-2.0 | 3,454 |
# -*- coding: utf-8 -*-
from django.db import models
from projetos.models import ProjetoDeGraduacao
SEMESTRE = (
('1','1o Semestre'),
('2','2o Semestre'),
)
# class DatasMonografia(models.Model):
# grupo_de_disciplinas = models.ForeignKey(GrupoDisciplina, unique= True, verbose_name='Grupo de Dsiciplinas')
# ano = models.IntegerField()
# semestre = models.CharField(max_length = 1, default= 1, choices = SEMESTRE)
# max_monografia = models.DateField(verbose_name='Original', null=True, blank=True)
# max_monografia_emprestada = models.DateField(verbose_name='Emprestada', null=True, blank=True)
# max_monografia_revisada = models.DateField(verbose_name='Revisada', null=True, blank=True)
# class ReceberMonografia(models.Model):
# disciplina = models.ForeignKey(Disciplina, null = True, blank = True)
# data_recebimento = models.DateField(verbose_name = "Data de Recebimento")
# alunos = models.ForeignKey(User, null = True, limit_choices_to={'groups': 4},blank = True,related_name = 'receber_monografia_alunos')
# class CobrarMonografiaimpressa(models.Model):
# disciplina = models.ForeignKey(Disciplina, null = True, blank = True)
# data = models.DateTimeField()
# alunos = models.ManyToManyField(User, null = True, limit_choices_to={'groups': 4},blank = True,related_name = 'cobrar_monografia_impressa_alunos')
class EntregaMonografiaRevisada(models.Model):
class Meta:
verbose_name = "Monografia Revisada"
verbose_name_plural = "Monografias Revisadas"
projeto = models.OneToOneField(ProjetoDeGraduacao, null = True, blank = True,related_name = "monografia_revisada_projeto")
data = models.DateTimeField(auto_now=True,verbose_name="Data da Entraga")
monografia = models.FileField(upload_to = "monografias_revisadas/%Y/%M")
def __unicode__(self):
return "Monografia Revisada de "+self.projeto.aluno.nome_completo
def get_aluno_display(self):
return self.projeto.aluno.nome_completo
get_aluno_display.short_description = 'Aluno'
class EntregaMonografiaOriginal(models.Model):
class Meta:
verbose_name = "Monografia Original"
verbose_name_plural = "Monografias Originais"
projeto = models.OneToOneField(ProjetoDeGraduacao, null = True, blank = True, related_name = "monografia_original_projeto")
data = models.DateTimeField(auto_now=True, verbose_name="Data da Entraga")
monografia = models.FileField(upload_to = "monografias_original/%Y/%M")
def __unicode__(self):
return "Monografia Revisada de "+self.projeto.aluno.nome_completo
def get_aluno_display(self):
return self.projeto.aluno.nome_completo
get_aluno_display.short_description = 'Aluno'
# class CobrarMonografiaatrasada(models.Model):
# disciplina = models.ForeignKey(Disciplina, null = True, blank = True)
# data = models.DateTimeField()
# alunos = models.ManyToManyField(User, null = True, limit_choices_to={'groups': 4},blank = True,related_name = 'cobrar_monografia_atrasada_alunos')
# class DevolucaoMonografiaIMpressa(models.Model):
# disciplina = models.ForeignKey(Disciplina, null = True, blank = True)
# data = models.DateTimeField(verbose_name = "Data de Devolução")
# alunos = models.ForeignKey(User, null = True, limit_choices_to={'groups': 4},blank = True,related_name = 'devolucao_monografia_alunos')
| agendaTCC/AgendaTCC | tccweb/apps/monografias/models.py | Python | gpl-2.0 | 3,477 |
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .sparse_gp import SparseGP
from numpy.linalg.linalg import LinAlgError
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
import logging
logger = logging.getLogger("sparse gp mpi")
class SparseGP_MPI(SparseGP):
"""
A general purpose Sparse GP model with MPI parallelization support
This model allows (approximate) inference using variational DTC or FITC
(Gaussian likelihoods) as well as non-conjugate sparse methods based on
these.
:param X: inputs
:type X: np.ndarray (num_data x input_dim)
:param likelihood: a likelihood instance, containing the observed data
:type likelihood: GPy.likelihood.(Gaussian | EP | Laplace)
:param kernel: the kernel (covariance function). See link kernels
:type kernel: a GPy.kern.kern instance
:param X_variance: The uncertainty in the measurements of X (Gaussian variance)
:type X_variance: np.ndarray (num_data x input_dim) | None
:param Z: inducing inputs
:type Z: np.ndarray (num_inducing x input_dim)
:param num_inducing: Number of inducing points (optional, default 10. Ignored if Z is not None)
:type num_inducing: int
:param mpi_comm: The communication group of MPI, e.g. mpi4py.MPI.COMM_WORLD
:type mpi_comm: mpi4py.MPI.Intracomm
"""
def __init__(self, X, Y, Z, kernel, likelihood, variational_prior=None, inference_method=None, name='sparse gp', Y_metadata=None, mpi_comm=None, normalizer=False):
self._IN_OPTIMIZATION_ = False
if mpi_comm != None:
if inference_method is None:
inference_method = VarDTC_minibatch(mpi_comm=mpi_comm)
else:
assert isinstance(inference_method, VarDTC_minibatch), 'inference_method has to support MPI!'
super(SparseGP_MPI, self).__init__(X, Y, Z, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
self.update_model(False)
if variational_prior is not None:
self.link_parameter(variational_prior)
self.mpi_comm = mpi_comm
# Manage the data (Y) division
if mpi_comm != None:
from ..util.parallel import divide_data
N_start, N_end, N_list = divide_data(Y.shape[0], mpi_comm.rank, mpi_comm.size)
self.N_range = (N_start, N_end)
self.N_list = np.array(N_list)
self.Y_local = self.Y[N_start:N_end]
print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range))
mpi_comm.Bcast(self.param_array, root=0)
self.update_model(True)
def __getstate__(self):
dc = super(SparseGP_MPI, self).__getstate__()
dc['mpi_comm'] = None
if self.mpi_comm != None:
del dc['N_range']
del dc['N_list']
del dc['Y_local']
if 'normalizer' not in dc:
dc['normalizer'] = None
dc['Y_normalized'] = dc['Y']
return dc
#=====================================================
# The MPI parallelization
# - can move to model at some point
#=====================================================
@SparseGP.optimizer_array.setter
def optimizer_array(self, p):
if self.mpi_comm != None:
if self._IN_OPTIMIZATION_ and self.mpi_comm.rank==0:
self.mpi_comm.Bcast(np.int32(1),root=0)
self.mpi_comm.Bcast(p, root=0)
SparseGP.optimizer_array.fset(self,p)
def optimize(self, optimizer=None, start=None, **kwargs):
        self._IN_OPTIMIZATION_ = True
        ret = None  # worker ranks (rank > 0) never run the optimizer, so give ret a default
if self.mpi_comm==None:
ret = super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
elif self.mpi_comm.rank==0:
ret = super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
self.mpi_comm.Bcast(np.int32(-1),root=0)
elif self.mpi_comm.rank>0:
x = self.optimizer_array.copy()
flag = np.empty(1,dtype=np.int32)
while True:
self.mpi_comm.Bcast(flag,root=0)
if flag==1:
try:
self.optimizer_array = x
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
elif flag==-1:
break
else:
self._IN_OPTIMIZATION_ = False
raise Exception("Unrecognizable flag for synchronization!")
self._IN_OPTIMIZATION_ = False
return ret
def parameters_changed(self):
if isinstance(self.inference_method,VarDTC_minibatch):
update_gradients(self, mpi_comm=self.mpi_comm)
else:
super(SparseGP_MPI,self).parameters_changed()
| dhhjx880713/GPy | GPy/core/sparse_gp_mpi.py | Python | bsd-3-clause | 5,130 |
#!/usr/bin/env python
# coding: utf-8
"""
This script supports publishing Pystache to PyPI.
This docstring contains instructions to Pystache maintainers on how
to release a new version of Pystache.
(1) Prepare the release.
Make sure the code is finalized and merged to master. Bump the version
number in setup.py, update the release date in the HISTORY file, etc.
Generate the reStructuredText long_description using--
$ python setup.py prep
and be sure this new version is checked in. You must have pandoc installed
to do this step:
http://johnmacfarlane.net/pandoc/
It helps to review this auto-generated file on GitHub prior to uploading
because the long description will be sent to PyPI and appear there after
publishing. PyPI attempts to convert this string to HTML before displaying
it on the PyPI project page. If PyPI finds any issues, it will render it
instead as plain-text, which we do not want.
To check in advance that PyPI will accept and parse the reST file as HTML,
you can use the rst2html program installed by the docutils package
(http://docutils.sourceforge.net/). To install docutils:
$ pip install docutils
To check the file, run the following command and confirm that it reports
no warnings:
$ python setup.py --long-description | rst2html.py -v --no-raw > out.html
See here for more information:
http://docs.python.org/distutils/uploading.html#pypi-package-display
(2) Push to PyPI. To release a new version of Pystache to PyPI--
http://pypi.python.org/pypi/pystache
create a PyPI user account if you do not already have one. The user account
will need permissions to push to PyPI. A current "Package Index Owner" of
Pystache can grant you those permissions.
When you have permissions, run the following:
python setup.py publish
If you get an error like the following--
Upload failed (401): You must be identified to edit package information
    then add a file called .pypirc to your home directory with the following
contents:
[server-login]
username: <PyPI username>
password: <PyPI password>
as described here, for example:
http://docs.python.org/release/2.5.2/dist/pypirc.html
(3) Tag the release on GitHub. Here are some commands for tagging.
List current tags:
git tag -l -n3
Create an annotated tag:
git tag -a -m "Version 0.5.1" "v0.5.1"
Push a tag to GitHub:
git push --tags defunkt v0.5.1
"""
import os
import shutil
import sys
py_version = sys.version_info
# distutils does not seem to support the following setup() arguments.
# It displays a UserWarning when setup() is passed those options:
#
# * entry_points
# * install_requires
#
# distribute works with Python 2.3.5 and above:
#
# http://packages.python.org/distribute/setuptools.html#building-and-distributing-packages-with-distribute
#
if py_version < (2, 3, 5):
# TODO: this might not work yet.
import distutils as dist
from distutils import core
setup = core.setup
else:
import setuptools as dist
setup = dist.setup
VERSION = '0.0.1' # Also change in pystache/__init__.py.
FILE_ENCODING = 'utf-8'
README_PATH = 'README.md'
HISTORY_PATH = 'HISTORY.md'
LICENSE_PATH = 'LICENSE'
RST_DESCRIPTION_PATH = 'setup_description.rst'
TEMP_EXTENSION = '.temp'
PREP_COMMAND = 'prep'
CLASSIFIERS = (
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
)
# Comments in reST begin with two dots.
RST_LONG_DESCRIPTION_INTRO = """\
.. Do not edit this file. This file is auto-generated for PyPI by setup.py
.. using pandoc, so edits should go in the source files rather than here.
"""
def read(path):
"""
Read and return the contents of a text file as a unicode string.
"""
# This function implementation was chosen to be compatible across Python 2/3.
f = open(path, 'rb')
# We avoid use of the with keyword for Python 2.4 support.
try:
b = f.read()
finally:
f.close()
return b.decode(FILE_ENCODING)
def write(u, path):
"""
Write a unicode string to a file (as utf-8).
"""
print("writing to: %s" % path)
# This function implementation was chosen to be compatible across Python 2/3.
f = open(path, "wb")
try:
b = u.encode(FILE_ENCODING)
f.write(b)
finally:
f.close()
def make_temp_path(path, new_ext=None):
"""
Arguments:
new_ext: the new file extension, including the leading dot.
Defaults to preserving the existing file extension.
"""
root, ext = os.path.splitext(path)
if new_ext is None:
new_ext = ext
temp_path = root + TEMP_EXTENSION + new_ext
return temp_path
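# For instance (matching how this helper is used further down, with the
# constants defined above):
#   make_temp_path('setup_description.rst', new_ext='.md')
# returns 'setup_description.temp.md', while omitting new_ext keeps '.rst'.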
def strip_html_comments(text):
"""Strip HTML comments from a unicode string."""
lines = text.splitlines(True) # preserve line endings.
# Remove HTML comments (which we only allow to take a special form).
new_lines = filter(lambda line: not line.startswith("<!--"), lines)
return "".join(new_lines)
# We write the converted file to a temp file to simplify debugging and
# to avoid removing a valid pre-existing file on failure.
def convert_md_to_rst(md_path, rst_temp_path):
"""
Convert the contents of a file from Markdown to reStructuredText.
Returns the converted text as a Unicode string.
Arguments:
md_path: a path to a UTF-8 encoded Markdown file to convert.
rst_temp_path: a temporary path to which to write the converted contents.
"""
# Pandoc uses the UTF-8 character encoding for both input and output.
command = "pandoc --write=rst --output=%s %s" % (rst_temp_path, md_path)
print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path,
command))
if os.path.exists(rst_temp_path):
os.remove(rst_temp_path)
os.system(command)
if not os.path.exists(rst_temp_path):
s = ("Error running: %s\n"
" Did you install pandoc per the %s docstring?" % (command,
__file__))
sys.exit(s)
return read(rst_temp_path)
# The long_description needs to be formatted as reStructuredText.
# See the following for more information:
#
# http://docs.python.org/distutils/setupscript.html#additional-meta-data
# http://docs.python.org/distutils/uploading.html#pypi-package-display
#
def make_long_description():
"""
Generate the reST long_description for setup() from source files.
Returns the generated long_description as a unicode string.
"""
readme_path = README_PATH
# Remove our HTML comments because PyPI does not allow it.
# See the setup.py docstring for more info on this.
readme_md = strip_html_comments(read(readme_path))
history_md = strip_html_comments(read(HISTORY_PATH))
license_md = """\
License
=======
""" + read(LICENSE_PATH)
sections = [readme_md, history_md, license_md]
md_description = '\n\n'.join(sections)
# Write the combined Markdown file to a temp path.
md_ext = os.path.splitext(readme_path)[1]
md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext)
write(md_description, md_description_path)
rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH)
long_description = convert_md_to_rst(md_path=md_description_path,
rst_temp_path=rst_temp_path)
return "\n".join([RST_LONG_DESCRIPTION_INTRO, long_description])
def prep():
"""Update the reST long_description file."""
long_description = make_long_description()
write(long_description, RST_DESCRIPTION_PATH)
def publish():
"""Publish this package to PyPI (aka "the Cheeseshop")."""
long_description = make_long_description()
if long_description != read(RST_DESCRIPTION_PATH):
print("""\
Description file not up-to-date: %s
Run the following command and commit the changes--
python setup.py %s
""" % (RST_DESCRIPTION_PATH, PREP_COMMAND))
sys.exit()
print("Description up-to-date: %s" % RST_DESCRIPTION_PATH)
answer = raw_input("Are you sure you want to publish to PyPI (yes/no)?")
if answer != "yes":
exit("Aborted: nothing published")
os.system('python setup.py sdist upload')
# We use the package simplejson for older Python versions since Python
# does not contain the module json before 2.6:
#
# http://docs.python.org/library/json.html
#
# Moreover, simplejson officially stopped supporting Python 2.4 in version 2.1.0:
#
# https://github.com/simplejson/simplejson/blob/master/CHANGES.txt
#
requires = []
if py_version < (2, 5):
requires.append('simplejson<2.1')
elif py_version < (2, 6):
requires.append('simplejson')
INSTALL_REQUIRES = requires
# TODO: decide whether to use find_packages() instead. I'm not sure that
# find_packages() is available with distutils, for example.
PACKAGES = [
'pystache',
'pystache.commands',
# The following packages are only for testing.
'pystache.tests',
'pystache.tests.data',
'pystache.tests.data.locator',
'pystache.tests.examples',
]
# The purpose of this function is to follow the guidance suggested here:
#
# http://packages.python.org/distribute/python3.html#note-on-compatibility-with-setuptools
#
# The guidance is for better compatibility when using setuptools (e.g. with
# earlier versions of Python 2) instead of Distribute, because of new
# keyword arguments to setup() that setuptools may not recognize.
def get_extra_args():
"""
Return a dictionary of extra args to pass to setup().
"""
extra = {}
# TODO: it might be more correct to check whether we are using
# Distribute instead of setuptools, since use_2to3 doesn't take
# effect when using Python 2, even when using Distribute.
if py_version >= (3, ):
# Causes 2to3 to be run during the build step.
extra['use_2to3'] = True
return extra
def main(sys_argv):
# TODO: use the logging module instead of printing.
# TODO: include the following in a verbose mode.
sys.stderr.write("pystache: using: version %s of %s\n" % (repr(dist.__version__), repr(dist)))
command = sys_argv[-1]
if command == 'publish':
publish()
sys.exit()
elif command == PREP_COMMAND:
prep()
sys.exit()
long_description = read(RST_DESCRIPTION_PATH)
template_files = ['*.mustache', '*.txt']
extra_args = get_extra_args()
setup(name='pystache',
version=VERSION,
license='MIT',
description='Python Sample',
long_description=long_description,
author='Thiyagu Loganathan',
author_email='thiyaguelmails@gmail.com',
maintainer='Thiyagu Loganathan',
maintainer_email='thiyaguelmails@gmail.com',
url='http://github.com/tloganathan/pystache',
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
package_data = {
# Include template files so tests can be run.
'pystache.tests.data': template_files,
'pystache.tests.data.locator': template_files,
'pystache.tests.examples': template_files,
},
entry_points = {
'console_scripts': [
'pystache=pystache.commands.render:main',
'pystache-test=pystache.commands.test:main',
],
},
classifiers = CLASSIFIERS,
**extra_args
)
if __name__=='__main__':
main(sys.argv)
| thiyaguelmails/pystache | setup.py | Python | mit | 12,096 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0029_auto_20151017_1546'),
]
operations = [
migrations.AlterField(
model_name='mission',
name='finalize_date',
field=models.DateTimeField(null=True, blank=True),
),
]
| XcomConvent/xcom40k-shades | xcom40k/app/migrations/0030_auto_20151017_1603.py | Python | apache-2.0 | 418 |
from setuptools import setup, find_packages
setup(name='MODEL1110130001',
version=20140916,
description='MODEL1110130001 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1110130001',
maintainer='Stanley Gu',
      maintainer_email='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
) | biomodels/MODEL1110130001 | setup.py | Python | cc0-1.0 | 377 |
from collections import OrderedDict
from django.contrib.postgres.search import SearchRank, SearchVector
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections, transaction
from django.db.models import Count, F, Manager, Q, TextField, Value
from django.db.models.constants import LOOKUP_SEP
from django.db.models.functions import Cast
from django.utils.encoding import force_str
from wagtail.search.backends.base import (
BaseSearchBackend, BaseSearchQueryCompiler, BaseSearchResults, FilterFieldError)
from wagtail.search.index import RelatedFields, SearchField, get_indexed_models
from wagtail.search.query import And, Boost, MatchAll, Not, Or, PlainText
from wagtail.search.utils import ADD, MUL, OR
from .models import RawSearchQuery as PostgresRawSearchQuery
from .models import IndexEntry
from .utils import (
get_content_type_pk, get_descendants_content_types_pks, get_postgresql_connections,
get_sql_weights, get_weight)
EMPTY_VECTOR = SearchVector(Value('', output_field=TextField()))
class Index:
def __init__(self, backend, db_alias=None):
self.backend = backend
self.name = self.backend.index_name
self.db_alias = DEFAULT_DB_ALIAS if db_alias is None else db_alias
self.connection = connections[self.db_alias]
if self.connection.vendor != 'postgresql':
raise NotSupportedError(
'You must select a PostgreSQL database '
'to use PostgreSQL search.')
# Whether to allow adding items via the faster upsert method available in Postgres >=9.5
self._enable_upsert = (self.connection.pg_version >= 90500)
self.entries = IndexEntry._default_manager.using(self.db_alias)
def add_model(self, model):
pass
def refresh(self):
pass
def delete_stale_model_entries(self, model):
existing_pks = (model._default_manager.using(self.db_alias)
.annotate(object_id=Cast('pk', TextField()))
.values('object_id'))
content_types_pks = get_descendants_content_types_pks(model)
stale_entries = (
self.entries.filter(content_type_id__in=content_types_pks)
.exclude(object_id__in=existing_pks))
stale_entries.delete()
def delete_stale_entries(self):
for model in get_indexed_models():
# We don’t need to delete stale entries for non-root models,
# since we already delete them by deleting roots.
if not model._meta.parents:
self.delete_stale_model_entries(model)
def prepare_value(self, value):
if isinstance(value, str):
return value
if isinstance(value, list):
return ', '.join(self.prepare_value(item) for item in value)
if isinstance(value, dict):
return ', '.join(self.prepare_value(item)
for item in value.values())
return force_str(value)
def prepare_field(self, obj, field):
if isinstance(field, SearchField):
yield (field, get_weight(field.boost),
self.prepare_value(field.get_value(obj)))
elif isinstance(field, RelatedFields):
sub_obj = field.get_value(obj)
if sub_obj is None:
return
if isinstance(sub_obj, Manager):
sub_objs = sub_obj.all()
else:
if callable(sub_obj):
sub_obj = sub_obj()
sub_objs = [sub_obj]
for sub_obj in sub_objs:
for sub_field in field.fields:
yield from self.prepare_field(sub_obj, sub_field)
def prepare_obj(self, obj, search_fields):
obj._object_id_ = force_str(obj.pk)
obj._autocomplete_ = []
obj._body_ = []
for field in search_fields:
for current_field, boost, value in self.prepare_field(obj, field):
if isinstance(current_field, SearchField) and \
current_field.partial_match:
obj._autocomplete_.append((value, boost))
else:
obj._body_.append((value, boost))
def add_item(self, obj):
self.add_items(obj._meta.model, [obj])
def add_items_upsert(self, content_type_pk, objs):
config = self.backend.config
autocomplete_sql = []
body_sql = []
data_params = []
sql_template = ('to_tsvector(%s)' if config is None
else "to_tsvector('%s', %%s)" % config)
sql_template = 'setweight(%s, %%s)' % sql_template
for obj in objs:
data_params.extend((content_type_pk, obj._object_id_))
if obj._autocomplete_:
autocomplete_sql.append('||'.join(sql_template
for _ in obj._autocomplete_))
data_params.extend([v for t in obj._autocomplete_ for v in t])
else:
autocomplete_sql.append("''::tsvector")
if obj._body_:
body_sql.append('||'.join(sql_template for _ in obj._body_))
data_params.extend([v for t in obj._body_ for v in t])
else:
body_sql.append("''::tsvector")
data_sql = ', '.join(['(%%s, %%s, %s, %s)' % (a, b)
for a, b in zip(autocomplete_sql, body_sql)])
with self.connection.cursor() as cursor:
cursor.execute("""
INSERT INTO %s (content_type_id, object_id, autocomplete, body)
(VALUES %s)
ON CONFLICT (content_type_id, object_id)
DO UPDATE SET autocomplete = EXCLUDED.autocomplete,
body = EXCLUDED.body
""" % (IndexEntry._meta.db_table, data_sql), data_params)
def add_items_update_then_create(self, content_type_pk, objs):
config = self.backend.config
ids_and_objs = {}
for obj in objs:
obj._autocomplete_ = (
ADD([SearchVector(Value(text, output_field=TextField()), weight=weight, config=config)
for text, weight in obj._autocomplete_])
if obj._autocomplete_ else EMPTY_VECTOR)
obj._body_ = (
ADD([SearchVector(Value(text, output_field=TextField()), weight=weight, config=config)
for text, weight in obj._body_])
if obj._body_ else EMPTY_VECTOR)
ids_and_objs[obj._object_id_] = obj
index_entries_for_ct = self.entries.filter(
content_type_id=content_type_pk)
indexed_ids = frozenset(
index_entries_for_ct.filter(object_id__in=ids_and_objs)
.values_list('object_id', flat=True))
for indexed_id in indexed_ids:
obj = ids_and_objs[indexed_id]
index_entries_for_ct.filter(object_id=obj._object_id_) \
.update(autocomplete=obj._autocomplete_, body=obj._body_)
to_be_created = []
for object_id in ids_and_objs:
if object_id not in indexed_ids:
obj = ids_and_objs[object_id]
to_be_created.append(IndexEntry(
content_type_id=content_type_pk, object_id=object_id,
autocomplete=obj._autocomplete_, body=obj._body_))
self.entries.bulk_create(to_be_created)
def add_items(self, model, objs):
search_fields = model.get_search_fields()
if not search_fields:
return
for obj in objs:
self.prepare_obj(obj, search_fields)
# TODO: Delete unindexed objects while dealing with proxy models.
if objs:
content_type_pk = get_content_type_pk(model)
update_method = (
self.add_items_upsert if self._enable_upsert
else self.add_items_update_then_create)
update_method(content_type_pk, objs)
def delete_item(self, item):
item.index_entries.using(self.db_alias).delete()
def __str__(self):
return self.name
class PostgresSearchQueryCompiler(BaseSearchQueryCompiler):
DEFAULT_OPERATOR = 'and'
TSQUERY_AND = ' & '
TSQUERY_OR = ' | '
TSQUERY_OPERATORS = {
'and': TSQUERY_AND,
'or': TSQUERY_OR,
}
TSQUERY_WORD_FORMAT = "'%s'"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.search_fields = self.queryset.model.get_searchable_search_fields()
# Due to a Django bug, arrays are not automatically converted
# when we use WEIGHTS_VALUES.
self.sql_weights = get_sql_weights()
if self.fields is not None:
search_fields = self.queryset.model.get_searchable_search_fields()
self.search_fields = {
field_lookup: self.get_search_field(field_lookup,
fields=search_fields)
for field_lookup in self.fields}
def get_search_field(self, field_lookup, fields=None):
if fields is None:
fields = self.search_fields
if LOOKUP_SEP in field_lookup:
field_lookup, sub_field_name = field_lookup.split(LOOKUP_SEP, 1)
else:
sub_field_name = None
for field in fields:
if isinstance(field, SearchField) \
and field.field_name == field_lookup:
return field
# Note: Searching on a specific related field using
# `.search(fields=…)` is not yet supported by Wagtail.
            # This method anticipates that support by already implementing it.
if isinstance(field, RelatedFields) \
and field.field_name == field_lookup:
return self.get_search_field(sub_field_name, field.fields)
def build_tsquery_content(self, query, group=False):
if isinstance(query, PlainText):
query_formats = []
query_params = []
for word in query.query_string.split():
query_formats.append(self.TSQUERY_WORD_FORMAT)
query_params.append(word)
operator = self.TSQUERY_OPERATORS[query.operator]
query_format = operator.join(query_formats)
if group and len(query_formats) > 1:
query_format = '(%s)' % query_format
return query_format, query_params
if isinstance(query, Boost):
return self.build_tsquery_content(query.subquery)
if isinstance(query, Not):
query_format, query_params = \
self.build_tsquery_content(query.subquery, group=True)
return '!' + query_format, query_params
if isinstance(query, (And, Or)):
query_formats = []
query_params = []
for subquery in query.subqueries:
subquery_format, subquery_params = \
self.build_tsquery_content(subquery, group=True)
query_formats.append(subquery_format)
query_params.extend(subquery_params)
operator = (self.TSQUERY_AND if isinstance(query, And)
else self.TSQUERY_OR)
return operator.join(query_formats), query_params
raise NotImplementedError(
'`%s` is not supported by the PostgreSQL search backend.'
% query.__class__.__name__)
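    # Illustrative output of build_tsquery_content() (example values only):
    # PlainText("hello world") with the default 'and' operator yields
    #   ("'%s' & '%s'", ['hello', 'world'])
    # and Not(PlainText("hello")) yields ("!'%s'", ['hello']).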
def build_tsquery(self, query, config=None):
query_format, query_params = self.build_tsquery_content(query)
return PostgresRawSearchQuery(query_format, query_params,
config=config)
def build_tsrank(self, vector, query, config=None, boost=1.0):
if isinstance(query, (PlainText, Not)):
rank_expression = SearchRank(
vector,
self.build_tsquery(query, config=config),
weights=self.sql_weights)
if boost != 1.0:
rank_expression *= boost
return rank_expression
if isinstance(query, Boost):
boost *= query.boost
return self.build_tsrank(vector, query.subquery,
config=config, boost=boost)
if isinstance(query, And):
return MUL(
1 + self.build_tsrank(vector, subquery,
config=config, boost=boost)
for subquery in query.subqueries) - 1
if isinstance(query, Or):
return ADD(
self.build_tsrank(vector, subquery,
config=config, boost=boost)
for subquery in query.subqueries) / (len(query.subqueries) or 1)
raise NotImplementedError(
'`%s` is not supported by the PostgreSQL search backend.'
% query.__class__.__name__)
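    # Worked example of the rank combination above (rank values are made up):
    # for And(a, b) with sub-ranks 0.5 and 0.2 the result is
    # (1 + 0.5) * (1 + 0.2) - 1 = 0.8, while Or(a, b) averages them to
    # (0.5 + 0.2) / 2 = 0.35.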
def get_index_vector(self, search_query):
return F('index_entries__autocomplete')._combine(
F('index_entries__body'), '||', False)
def get_fields_vector(self, search_query):
return ADD(
SearchVector(field_lookup, config=search_query.config,
weight=get_weight(search_field.boost))
for field_lookup, search_field in self.search_fields.items())
def get_search_vector(self, search_query):
return (self.get_index_vector(search_query) if self.fields is None
else self.get_fields_vector(search_query))
def search(self, config, start, stop, score_field=None):
# TODO: Handle MatchAll nested inside other search query classes.
if isinstance(self.query, MatchAll):
return self.queryset[start:stop]
search_query = self.build_tsquery(self.query, config=config)
vector = self.get_search_vector(search_query)
rank_expression = self.build_tsrank(vector, self.query, config=config)
queryset = self.queryset.annotate(
_vector_=vector).filter(_vector_=search_query)
if self.order_by_relevance:
queryset = queryset.order_by(rank_expression.desc(), '-pk')
elif not queryset.query.order_by:
# Adds a default ordering to avoid issue #3729.
queryset = queryset.order_by('-pk')
rank_expression = F('pk')
if score_field is not None:
queryset = queryset.annotate(**{score_field: rank_expression})
return queryset[start:stop]
def _process_lookup(self, field, lookup, value):
return Q(**{field.get_attname(self.queryset.model)
+ '__' + lookup: value})
def _connect_filters(self, filters, connector, negated):
if connector == 'AND':
q = Q(*filters)
elif connector == 'OR':
q = OR([Q(fil) for fil in filters])
else:
return
if negated:
q = ~q
return q
class PostgresAutocompleteQueryCompiler(PostgresSearchQueryCompiler):
TSQUERY_WORD_FORMAT = "'%s':*"
def get_index_vector(self, search_query):
return F('index_entries__autocomplete')
def get_fields_vector(self, search_query):
return ADD(
SearchVector(field_lookup, config=search_query.config,
weight=get_weight(search_field.boost))
for field_lookup, search_field in self.search_fields.items()
if search_field.partial_match)
class PostgresSearchResults(BaseSearchResults):
def _do_search(self):
return list(self.query_compiler.search(self.backend.config,
self.start, self.stop,
score_field=self._score_field))
def _do_count(self):
return self.query_compiler.search(
self.backend.config, None, None,
score_field=self._score_field).count()
supports_facet = True
def facet(self, field_name):
# Get field
field = self.query_compiler._get_filterable_field(field_name)
if field is None:
raise FilterFieldError(
'Cannot facet search results with field "' + field_name + '". Please add index.FilterField(\''
+ field_name + '\') to ' + self.query_compiler.queryset.model.__name__ + '.search_fields.',
field_name=field_name
)
query = self.query_compiler.search(self.backend.config, None, None)
results = query.values(field_name).annotate(count=Count('pk')).order_by('-count')
return OrderedDict([
(result[field_name], result['count'])
for result in results
])
class PostgresSearchRebuilder:
def __init__(self, index):
self.index = index
def start(self):
self.index.delete_stale_entries()
return self.index
def finish(self):
pass
class PostgresSearchAtomicRebuilder(PostgresSearchRebuilder):
def __init__(self, index):
super().__init__(index)
self.transaction = transaction.atomic(using=index.db_alias)
self.transaction_opened = False
def start(self):
self.transaction.__enter__()
self.transaction_opened = True
return super().start()
def finish(self):
self.transaction.__exit__(None, None, None)
self.transaction_opened = False
def __del__(self):
# TODO: Implement a cleaner way to close the connection on failure.
if self.transaction_opened:
self.transaction.needs_rollback = True
self.finish()
class PostgresSearchBackend(BaseSearchBackend):
query_compiler_class = PostgresSearchQueryCompiler
autocomplete_query_compiler_class = PostgresAutocompleteQueryCompiler
results_class = PostgresSearchResults
rebuilder_class = PostgresSearchRebuilder
atomic_rebuilder_class = PostgresSearchAtomicRebuilder
def __init__(self, params):
super().__init__(params)
self.index_name = params.get('INDEX', 'default')
self.config = params.get('SEARCH_CONFIG')
if params.get('ATOMIC_REBUILD', False):
self.rebuilder_class = self.atomic_rebuilder_class
def get_index_for_model(self, model, db_alias=None):
return Index(self, db_alias)
def get_index_for_object(self, obj):
return self.get_index_for_model(obj._meta.model, obj._state.db)
def reset_index(self):
for connection in get_postgresql_connections():
IndexEntry._default_manager.using(connection.alias).delete()
def add_type(self, model):
pass # Not needed.
def refresh_index(self):
pass # Not needed.
def add(self, obj):
self.get_index_for_object(obj).add_item(obj)
def add_bulk(self, model, obj_list):
if obj_list:
self.get_index_for_object(obj_list[0]).add_items(model, obj_list)
def delete(self, obj):
self.get_index_for_object(obj).delete_item(obj)
SearchBackend = PostgresSearchBackend
| timorieber/wagtail | wagtail/contrib/postgres_search/backend.py | Python | bsd-3-clause | 19,063 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Remove prohibitive foreign key constraints
Revision ID: 32221e9f330c
Revises: 235b7b9989be
Create Date: 2014-08-04 20:34:36.697866
"""
# revision identifiers, used by Alembic.
revision = '32221e9f330c'
down_revision = '235b7b9989be'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_constraint(
'uq_t_workflows', table_name='workflows', type_='unique')
op.drop_constraint(
'uq_t_task_groups', table_name='task_groups', type_='unique')
op.drop_constraint(
'cycle_task_group_object_tasks_ibfk_4',
table_name='cycle_task_group_object_tasks',
type_='foreignkey'
)
op.drop_constraint(
'cycle_task_group_objects_ibfk_4',
table_name='cycle_task_group_objects',
type_='foreignkey'
)
def downgrade():
pass
| VinnieJohns/ggrc-core | src/ggrc_workflows/migrations/versions/20140804203436_32221e9f330c_remove_prohibitive_foreign_key_.py | Python | apache-2.0 | 942 |
#!/usr/bin/env python
import sys
from setuptools import setup
from duvet import VERSION
try:
readme = open('README.rst')
long_description = str(readme.read())
finally:
readme.close()
required_pkgs = [
'coverage',
'tkreadonly',
]
if sys.version_info < (2, 7):
required_pkgs.append('argparse')
setup(
name='duvet',
version=VERSION,
description='A GUI tool for visualizing code coverage results.',
long_description=long_description,
author='Russell Keith-Magee',
author_email='russell@keith-magee.com',
url='http://pybee.org/duvet',
packages=[
'duvet',
],
install_requires=required_pkgs,
scripts=[],
entry_points={
'console_scripts': [
'duvet = duvet.__main__:main',
]
},
license='New BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development',
'Topic :: Utilities',
],
test_suite='tests'
) | pybee/duvet | setup.py | Python | bsd-3-clause | 1,155 |
# -*- coding: utf-8 -*-
__messages = {
'abstract_method': '{method} is abstract and must be overridden.',
'incompatible_type_argument': 'Incompatible type of the argument "{name}". Expected type "{type}".',
'invalid_argument_value': 'Invalid argument value "{name}".',
'invalid_units': 'Invalid units - "{unit}". Layout uses "{layout_unit}".',
'property_calculated_automatically': 'The property will be calculated automatically.',
'started_process_without_gui': 'You have started the process without a GUI.',
'window_not_set': 'The window is not set.',
}
def t(msg, *args, **kwargs):
msg = __messages.get(msg)
return msg.format(*args, **kwargs) if msg else ''
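# Minimal usage sketch (keys and arguments come from the table above; the
# results are what str.format() produces):
#   t('invalid_units', unit='px', layout_unit='pt')
#     -> 'Invalid units - "px". Layout uses "pt".'
#   t('no_such_key') -> ''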
| kyzima-spb/screen-god | screen_god/messages.py | Python | apache-2.0 | 699 |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
import os.path
from packages_test_utils import PackageTester
from packagedcode import phpcomposer
from packagedcode.utils import parse_repo_url
class TestPHPcomposer(PackageTester):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_parse_person(self):
test = [
{
"name": "Nils Adermann",
"email": "naderman@naderman.de",
"homepage": "http://www.naderman.de",
"role": "Developer"
},
{
"name": "Jordi Boggiano",
"email": "j.boggiano@seld.be",
"homepage": "http://seld.be",
"role": "Developer"
}
]
expected = [('Nils Adermann', 'naderman@naderman.de', 'http://www.naderman.de'),
('Jordi Boggiano', 'j.boggiano@seld.be', 'http://seld.be')
]
assert expected == list(phpcomposer.parse_person(test))
def test_parse_repo_url_basic(self):
url = 'https://pear2.php.net'
result = parse_repo_url(url)
expected = 'https://pear2.php.net'
assert expected == result
def test_parse_repo_url_svn(self):
url = 'http://svn.example.org/projectA/'
result = parse_repo_url(url)
expected = 'http://svn.example.org/projectA/'
assert expected == result
def test_parse_repo_url_github(self):
url = 'https://github.com/igorw/monolog'
result = parse_repo_url(url)
expected = 'https://github.com/igorw/monolog'
assert expected == result
def test_parse_repo_url_bitbucket(self):
url = 'git@bitbucket.org:vendor/my-private-repo.git'
result = parse_repo_url(url)
expected = 'https://bitbucket.org/vendor/my-private-repo.git'
assert expected == result
def test_parse_atimer(self):
test_file = self.get_test_loc('phpcomposer/a-timer/composer.json')
expected_loc = self.get_test_loc('phpcomposer/a-timer/composer.json.expected')
package = phpcomposer.parse(test_file)
self.check_package(package, expected_loc)
package.validate()
def test_parse_framework(self):
test_file = self.get_test_loc('phpcomposer/framework/composer.json')
expected_loc = self.get_test_loc('phpcomposer/framework/composer.json.expected')
package = phpcomposer.parse(test_file)
self.check_package(package, expected_loc)
package.validate()
def test_parse_slim(self):
test_file = self.get_test_loc('phpcomposer/slim/composer.json')
expected_loc = self.get_test_loc('phpcomposer/slim/composer.json.expected')
package = phpcomposer.parse(test_file)
self.check_package(package, expected_loc)
package.validate() | yasharmaster/scancode-toolkit | tests/packagedcode/test_phpcomposer.py | Python | apache-2.0 | 4,232 |
"""
Clio
Mnemosyne Client Library
ThreatStream 2014
"""
import pymongo
from dateutil.parser import parse as parse_date
from collections import Counter
from bson import ObjectId, son
import json
import datetime
class Clio():
"""
Main interface for Clio - Mnemosyne Client Library -
Usage:
clio = Clio()
sessions = clio.session.get(source_ip='5.15.15.85')
"""
def __init__(self):
self.client = pymongo.MongoClient()
@property
def session(self):
return Session(self.client)
@property
def counts(self):
return Counts(self.client)
@property
def session_protocol(self):
return SessionProtocol(self.client)
@property
def hpfeed(self):
return HpFeed(self.client)
@property
def authkey(self):
return AuthKey(self.client)
@property
def url(self):
return Url(self.client)
@property
def file(self):
return File(self.client)
@property
def dork(self):
return Dork(self.client)
@property
def metadata(self):
return Metadata(self.client)
class ResourceMixin(object):
db_name = 'mnemosyne'
expected_filters = ('_id',)
def __init__(self, client=None, **kwargs):
self.client = client
for attr in self.__class__.expected_filters:
setattr(self, attr, kwargs.get(attr))
def __call__(self, *args, **kwargs):
return self.get(*args, **kwargs)
@classmethod
def _clean_query(cls, dirty):
clean = dict()
for arg in cls.expected_filters:
# Creating a query dictionary
# with values passed in kwargs.
if dirty.get(arg):
clean[arg] = dirty.get(arg)
if 'hours_ago' in dirty:
clean['timestamp'] = {
'$gte': datetime.datetime.utcnow() - datetime.timedelta(hours=int(dirty['hours_ago']))
}
return clean
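    # Sketch of the cleaning above (values are hypothetical): on a subclass
    # whose expected_filters include 'source_ip', a call like
    #   cls._clean_query({'source_ip': '1.2.3.4', 'bogus': 'x', 'hours_ago': 2})
    # keeps only the known filter and adds a timestamp lower bound:
    #   {'source_ip': '1.2.3.4', 'timestamp': {'$gte': <utcnow - 2 hours>}}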
@classmethod
def _clean_options(cls, opts):
try:
skip = int(opts.get('skip', 0))
except (ValueError, TypeError):
skip = 0
limit = opts.get('limit', None)
# If limit was not indicated, we'll leave it as 'None'.
if limit:
try:
limit = int(limit)
except (ValueError, TypeError):
# Limit provided but wrong value,
# give a default value.
limit = 20
order_by = opts.get('order_by', None)
# If order_by wasn't passed, we'll return an empty dict.
if order_by:
# Figure out desired direction from order_by value.
if order_by.startswith('-'):
direction = pymongo.DESCENDING
else:
direction = pymongo.ASCENDING
# Clean up direction from field name.
order_by = order_by.replace('-', '')
if order_by not in cls.expected_filters:
# Clean up field is not valid.
order_by = None
else:
# Returns the argumens needed by sort() call.
order_by = (order_by, direction,)
return skip, limit, order_by
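    # For example (hypothetical request options, on a subclass whose
    # expected_filters include 'timestamp'):
    #   cls._clean_options({'skip': '5', 'limit': '10', 'order_by': '-timestamp'})
    # returns (5, 10, ('timestamp', pymongo.DESCENDING)), ready to be passed
    # to skip()/limit()/sort() in get() below.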
def new(self, **kwargs):
return self.__class__.from_dict(kwargs, self.client)
def to_dict(self):
todict = {}
for attr in self.__class__.expected_filters:
todict[attr] = getattr(self, attr)
if isinstance(todict[attr], datetime.datetime):
todict[attr] = todict[attr].isoformat()
# Making sure dict is json serializable.
todict['_id'] = str(todict['_id'])
return todict
def get(self, options={}, **kwargs):
if self.client is None:
raise ValueError
else:
if '_id' in kwargs:
kwargs['_id'] = ObjectId(kwargs['_id'])
return self.__class__.from_dict(
self.collection.find_one(kwargs), self.client)
query = self.__class__._clean_query(kwargs)
queryset = self.collection.find(query)
if options:
skip, limit, order_by = self.__class__._clean_options(options)
if skip:
queryset = queryset.skip(skip)
if limit:
queryset = queryset.limit(limit)
if order_by:
queryset = queryset.sort(*order_by)
return (self.__class__.from_dict(f, self.client) for f in queryset)
def delete(self, **kwargs):
query = dict()
if kwargs:
query = self.__class__._clean_query(kwargs)
elif self._id:
query = {'_id': self._id}
else:
# Need to be at least a valid resource or
# pass keyword arguments.
return None
return self.collection.remove(query)
def count(self, **kwargs):
query = self.__class__._clean_query(kwargs)
# Just counting the results.
return self.collection.find(query).count()
@property
def collection(self):
"""Shortcut for getting the appropriate collection object"""
cls = self.__class__
return self.client[cls.db_name][cls.collection_name]
@classmethod
def from_dict(cls, dict_, client=None):
"""
Returns an object from a dictionary, most likely
to come from pymongo results.
"""
if dict_ is None:
# Invalid dict incoming.
return None
doc = cls(client)
attrs = dict_.keys()
for at in attrs:
# Set every key in dict_ as attribute in the object.
setattr(doc, at, dict_.get(at))
return doc
class Counts(ResourceMixin):
collection_name = 'counts'
expected_filters = ('identifier', 'date', 'event_count',)
def get_count(self, identifier, date=None):
query = {'identifier': identifier}
if date:
query['date'] = date
return int(sum([rec['event_count'] for rec in self.collection.find(query)]))
class Session(ResourceMixin):
collection_name = 'session'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', 'timestamp', '_id', 'identifier',)
@classmethod
def _clean_query(cls, dirty):
clean = super(Session, cls)._clean_query(dirty)
def date_to_datetime(d):
return datetime.datetime.combine(d, datetime.datetime.min.time())
def clean_integer(field_name, query):
# Integer fields in mongo need to be int type, GET queries
# are passed as str so this method converts the str to
# integer so the find() call matches properly.
# If it's not a proper integer it will be remove
# from the query.
try:
integer = int(query[field_name])
except (ValueError, TypeError):
query.pop(field_name)
else:
query[field_name] = integer
finally:
return query
intfields = ('destination_port', 'source_port',)
for field in intfields:
if field in clean.copy():
clean = clean_integer(field, clean)
if 'timestamp' in clean and isinstance(clean['timestamp'], basestring):
# Transforms timestamp queries into
# timestamp_lte queries.
try:
timestamp = parse_date(clean.pop('timestamp'))
except (ValueError, TypeError):
pass
else:
clean['timestamp'] = {
'$gte': date_to_datetime(timestamp.date()),
'$lt': date_to_datetime(timestamp.date() + datetime.timedelta(days=1))
}
return clean
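    # Example of the timestamp handling above (the date is illustrative):
    # timestamp='2015-01-02' is expanded to the whole day,
    #   {'$gte': datetime(2015, 1, 2, 0, 0), '$lt': datetime(2015, 1, 3, 0, 0)}
    # so find() matches every session recorded on that date.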
def _tops(self, fields, top=5, hours_ago=None, **kwargs):
if isinstance(fields, basestring):
fields = [fields,]
match_query = dict([ (field, {'$ne': None}) for field in fields ])
for name, value in kwargs.items():
if name.startswith('ne__'):
match_query[name[4:]] = {'$ne': value}
elif name.startswith('gt__'):
match_query[name[4:]] = {'$gt': value}
elif name.startswith('lt__'):
match_query[name[4:]] = {'$lt': value}
elif name.startswith('gte__'):
match_query[name[5:]] = {'$gte': value}
elif name.startswith('lte__'):
match_query[name[5:]] = {'$lte': value}
else:
match_query[name] = value
if hours_ago:
match_query['timestamp'] = {
'$gte': datetime.datetime.now() - datetime.timedelta(hours=hours_ago)
}
query = [
{
'$match': match_query
},
{
'$group': {
'_id': dict( [(field, '${}'.format(field)) for field in fields] ),
'count': {'$sum': 1}
}
},
{
'$sort': son.SON([('count', -1)])
}
]
res = self.collection.aggregate(query)
def format_result(r):
result = dict(r['_id'])
result['count'] = r['count']
return result
if 'ok' in res:
return [
format_result(r) for r in res.get('result', [])[:top]
]
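    # The helpers below all delegate to _tops(); e.g. top_attackers(top=2)
    # returns something shaped like (values are made up):
    #   [{'source_ip': '203.0.113.9', 'count': 42},
    #    {'source_ip': '198.51.100.7', 'count': 17}]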
def top_attackers(self, top=5, hours_ago=None):
return self._tops('source_ip', top, hours_ago)
def top_targeted_ports(self, top=5, hours_ago=None):
return self._tops('destination_port', top, hours_ago)
def top_hp(self, top=5, hours_ago=None):
return self._tops('honeypot', top, hours_ago)
def top_sensor(self, top=5, hours_ago=None):
return self._tops('identifier', top, hours_ago)
def attacker_stats(self, ip, hours_ago=None):
match_query = { 'source_ip': ip }
if hours_ago:
match_query['timestamp'] = {
'$gte': datetime.datetime.now() - datetime.timedelta(hours=hours_ago)
}
query = [
{
'$match': match_query
},
{
'$group': {
'_id': "source_ip",
'count': {'$sum' : 1},
'ports': { '$addToSet': "$destination_port"},
'honeypots': {'$addToSet': "$honeypot"},
'sensor_ids': {'$addToSet': "$identifier"},
'first_seen': {'$min': '$timestamp'},
'last_seen': {'$max': '$timestamp'},
}
},
{
'$project': {
"count":1,
'ports': 1,
'honeypots':1,
'first_seen':1,
'last_seen':1,
'num_sensors': {'$size': "$sensor_ids"}
}
}
]
res = self.collection.aggregate(query)
if 'ok' in res and len(res['result']) > 0:
r = res['result'][0]
del r['_id']
r['first_seen'] = r['first_seen'].isoformat()
r['last_seen'] = r['last_seen'].isoformat()
return r
return {
'ip': ip,
'count': 0,
'ports': [],
'honeypots': [],
'num_sensors': 0,
'first_seen': None,
'last_seen': None,
}
class SessionProtocol(ResourceMixin):
collection_name = 'session_protocol'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', '_id')
class HpFeed(ResourceMixin):
collection_name = 'hpfeed'
expected_filters = ('ident', 'channel', 'payload', '_id', 'timestamp', )
channel_map = {
'snort.alerts':['date', 'sensor', 'source_ip', 'destination_port', 'priority', 'classification', 'signature'],
'dionaea.capture':['url', 'daddr', 'saddr', 'dport', 'sport', 'sha512', 'md5'],
'glastopf.events':['time', 'pattern', 'filename', 'source', 'request_url'],
'suricata.events':['timestamp', 'sensor', 'source_ip', 'destination_port', 'proto', 'signature'],
}
def json_payload(self, data):
if type(data) is dict:
o_data = data
else:
o_data = json.loads(data)
return o_data
def get_payloads(self, options, req_args):
payloads = []
columns = []
if len(req_args.get('payload','')) > 1:
req_args['payload'] = {'$regex':req_args['payload']}
cnt_query = super(HpFeed, self)._clean_query(req_args)
count = self.collection.find(cnt_query).count()
columns = self.channel_map.get(req_args['channel'])
return count,columns,(self.json_payload(fr.payload) for fr in self.get(options=options, **req_args))
    def count_passwords(self, payloads):
        passwords = []
        for creds in payloads:
            if creds['credentials'] is not None:
                for cred in creds['credentials']:
                    passwords.append(cred[1])
        return Counter(passwords).most_common(10)
    def count_users(self, payloads):
        users = []
        for creds in payloads:
            if creds['credentials'] is not None:
                for cred in creds['credentials']:
                    users.append(cred[0])
        return Counter(users).most_common(10)
    def count_combos(self, payloads):
        combos_count = []
        for combos in payloads:
            if combos['credentials'] is not None:
                for combo in combos['credentials']:
                    combos_count.append(combo[0] + ": " + combo[1])
        return Counter(combos_count).most_common(10)
def _tops(self, field, chan, top=5, hours_ago=None):
query = {'channel': chan}
if hours_ago:
query['hours_ago'] = hours_ago
res = self.get(options={}, **query)
val_list = [rec.get(field) for rec in [self.json_payload(r.payload) for r in res] if field in rec]
cnt = Counter()
for val in val_list:
cnt[val] += 1
results = [dict({field:val, 'count':num}) for val,num in cnt.most_common(top)]
return results
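    # e.g. top_sigs(top=1) below might return (illustrative values only):
    #   [{'signature': 'ET SCAN Potential SSH Scan', 'count': 12}]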
def top_sigs(self, top=5, hours_ago=24):
return self._tops('signature', 'snort.alerts', top, hours_ago)
    def top_files(self, top=5, hours_ago=24):
        # Assumed mapping: file events are 'md5' values on the 'dionaea.capture'
        # channel, passed in the _tops(field, chan, top, hours_ago) order.
        return self._tops('md5', 'dionaea.capture', top, hours_ago)
class Url(ResourceMixin):
collection_name = 'url'
expected_filters = ('protocol', 'source_ip', 'source_port',
'destination_ip', 'destination_port',
'honeypot', '_id')
class File(ResourceMixin):
collection_name = 'file'
expected_filters = ('_id', 'content_guess', 'encoding', 'hashes',)
class Dork(ResourceMixin):
collection_name = 'dork'
expected_filters = ('_id', 'content', 'inurl', 'lasttime', 'count',)
class Metadata(ResourceMixin):
collection_name = 'metadata'
expected_filters = ('ip', 'date', 'os', 'link', 'app', 'uptime', '_id', 'honeypot', 'timestamp',)
class AuthKey(ResourceMixin):
db_name = 'hpfeeds'
collection_name = 'auth_key'
expected_filters = ('identifier', 'secret', 'publish', 'subscribe', '_id')
def get(self, options={}, **kwargs):
if 'identifier' in kwargs:
return AuthKey.from_dict(
self.collection.find_one(kwargs), self.client)
else:
return super(AuthKey, self).get(options, **kwargs)
def post(self):
objectid = self.collection.insert(dict(
identifier=self.identifier, secret=self.secret,
publish=self.publish, subscribe=self.subscribe))
self.client.fsync()
return objectid
def put(self, **kwargs):
updated = self.collection.update({"identifier": self.identifier},
{'$set': kwargs}, upsert=False)
return updated
| Antelox/mhn | server/mhn/common/clio.py | Python | lgpl-2.1 | 16,259 |
import json
import os
import re
import threading
import tornado.websocket
import json_api
from article_helpers import *
from base_handler import *
from user_account_helpers import *
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
# Seconds between heartbeat messages for long running web-socket based
# computation
HEARTBEAT_INTERVAL = 15
def convert(name):
""" Convert from camelCase to hyphenated-names. """
s1 = first_cap_re.sub(r'\1-\2', name)
return all_cap_re.sub(r'\1-\2', s1).lower()
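# For example (the class name is the one referenced in api_call's docstring
# below): "RandomQueryEndpointHandler" has "EndpointHandler" stripped first,
# and convert("RandomQuery") then returns "random-query".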
# map the hyphenated names to the corresponding classes (endpoints)
endpoints = {}
for endpoint in [f for f in dir(json_api) if "EndpointHandler" in f]:
func = eval("json_api." + endpoint)
name = convert(endpoint.replace("EndpointHandler", ""))
if func.api_version != 1:
name = "v" + str(func.api_version) + "/" + name
endpoints[name] = func
class EndpointWebSocket(tornado.websocket.WebSocketHandler):
""" Allow developers to access the JSON API via WebSockets.
Only works for synchronous handlers. """
def open(self):
# setup
pass
def check_origin(self, origin):
if "PRODUCTION_FLAG" not in os.environ:
return True
allowed_origins = {
"https://brainspell.herokuapp.com",
"https://metacurious.org"}
return any([origin.startswith(org) for org in allowed_origins])
def issue_periodic_write(self, f_stop):
if not f_stop.is_set():
self.write_message(
json.dumps({"loading": 1})
)
threading.Timer(
HEARTBEAT_INTERVAL,
self.issue_periodic_write,
[f_stop]).start()
async def on_message(self, message):
"""
Receive a JSON formatted message, parse the arguments,
and pass the resulting arguments dictionary to the processing
function of the corresponding JSON API class. Return the response.
"""
messageDict = json.loads(message)
if messageDict["type"] not in endpoints:
self.write_message({
"success": 0,
"description": "Endpoint undefined."
})
else:
func = endpoints[messageDict["type"]]
payload = {}
if "payload" in messageDict:
payload = messageDict["payload"]
# Initialize long running compute messages
f_stop = threading.Event()
self.issue_periodic_write(f_stop)
res = await api_call(func, payload)
f_stop.set()
self.write_message(json.dumps(res))
def on_close(self):
# cleanup
pass
# set_default_headers = BaseHandler.set_default_headers
def api_call(func, args={}):
""" Return the output of a call to an endpoint, given an arguments dict.
Take the name of an Endpoint class and an arguments dict, where the keys
of the arguments dict are those specified in the Endpoint.parameters dict,
plus the "key" parameter, if the endpoint is a PUSH_API endpoint.
(For a complete list of arguments for an endpoint, go to
http://localhost:5000/json/{ENDPOINT_NAME}/help)
Do not modify the args dict passed in.
Ex:
>>> api_call(RandomQueryEndpointHandler)
{
'success': 1,
'articles': [
{
'id': '22357844',
'title': 'Linking pain and the body: neural correlates of visually induced analgesia.',
'authors': 'Longo MR,Iannetti GD,Mancini F,Driver J,Haggard P'
},
...
]
}
>>> api_call(QueryEndpointHandler, {
"q": "brain"
})
{
'success': 1,
'articles': [
{
'id': '15858160',
'title': 'Dissociable roles of prefrontal and anterior cingulate cortices in deception.',
'authors': 'Abe N,Suzuki M,Tsukiura T,Mori E,Yamaguchi K,Itoh M,Fujii T'
},
...
],
'start_index': 0
}
"""
argsDict = BaseHandler.get_safe_arguments(
func, args, lambda k: args[k])
if argsDict["success"] == 1:
# validate API key if push endpoint
if func.endpoint_type == Endpoint.PULL_API or (
func.endpoint_type == Endpoint.PUSH_API and valid_api_key(
argsDict["args"]["key"])):
response = {
"success": 1
}
response = func.process(func, response, argsDict["args"])
return response
else:
return {"success": 0, "description": "Invalid API key."}
# print the error message from argument parsing, if get_safe_arguments
# failed
return argsDict
| neelsomani/brainspell-neo | brainspell/websockets.py | Python | mit | 4,768 |
# -*- test-case-name: twext.enterprise.dal.test.test_record -*-
##
# Copyright (c) 2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.syntax import Max, Select, Parameter, Delete, Insert, \
Update, ColumnSyntax, TableSyntax, Upper, utcNowSQL
from twext.python.clsprop import classproperty
from twext.python.log import Logger
from twisted.internet.defer import succeed, inlineCallbacks, returnValue
from txdav.base.datastore.util import normalizeUUIDOrNot
from txdav.common.datastore.sql_tables import schema
from txdav.common.icommondatastore import SyncTokenValidException, \
ENOTIFICATIONTYPE, ECALENDARTYPE, EADDRESSBOOKTYPE
import time
from uuid import UUID
log = Logger()
"""
Classes and methods for the SQL store.
"""
class _EmptyCacher(object):
def set(self, key, value):
return succeed(True)
def get(self, key, withIdentifier=False):
return succeed(None)
def delete(self, key):
return succeed(True)
class _SharedSyncLogic(object):
"""
Logic for maintaining sync-token shared between notification collections and
shared collections.
"""
@classproperty
def _childSyncTokenQuery(cls):
"""
DAL query for retrieving the sync token of a L{CommonHomeChild} based on
its resource ID.
"""
rev = cls._revisionsSchema
return Select([Max(rev.REVISION)], From=rev,
Where=rev.RESOURCE_ID == Parameter("resourceID"))
@classmethod
def _revisionsForResourceIDs(cls, resourceIDs):
rev = cls._revisionsSchema
return Select(
[rev.RESOURCE_ID, Max(rev.REVISION)],
From=rev,
Where=rev.RESOURCE_ID.In(Parameter("resourceIDs", len(resourceIDs))).And(
(rev.RESOURCE_NAME != None).Or(rev.DELETED == False)),
GroupBy=rev.RESOURCE_ID
)
def revisionFromToken(self, token):
if token is None:
return 0
elif isinstance(token, str) or isinstance(token, unicode):
_ignore_uuid, revision = token.split("_", 1)
return int(revision)
else:
return token
@inlineCallbacks
def syncToken(self):
if self._syncTokenRevision is None:
self._syncTokenRevision = yield self.syncTokenRevision()
returnValue(("%s_%s" % (self._resourceID, self._syncTokenRevision,)))
@inlineCallbacks
def syncTokenRevision(self):
revision = (yield self._childSyncTokenQuery.on(self._txn, resourceID=self._resourceID))[0][0]
if revision is None:
revision = int((yield self._txn.calendarserverValue("MIN-VALID-REVISION")))
returnValue(revision)
@classmethod
@inlineCallbacks
def childSyncTokenRevisions(cls, home, childResourceIDs):
rows = (yield cls._revisionsForResourceIDs(childResourceIDs).on(home._txn, resourceIDs=childResourceIDs))
revisions = dict(rows)
# Add in any that were missing - this assumes that childResourceIDs were all valid to begin with
missingIDs = set(childResourceIDs) - set(revisions.keys())
if missingIDs:
min_revision = int((yield home._txn.calendarserverValue("MIN-VALID-REVISION")))
for resourceID in missingIDs:
revisions[resourceID] = min_revision
returnValue(revisions)
def objectResourcesSinceToken(self, token):
raise NotImplementedError()
@classmethod
def _objectNamesSinceRevisionQuery(cls, deleted=True):
"""
DAL query for (resource, deleted-flag)
"""
rev = cls._revisionsSchema
where = (rev.REVISION > Parameter("revision")).And(rev.RESOURCE_ID == Parameter("resourceID"))
if not deleted:
where = where.And(rev.DELETED == False)
return Select(
[rev.RESOURCE_NAME, rev.DELETED],
From=rev,
Where=where,
)
def resourceNamesSinceToken(self, token):
"""
        Return the changed and deleted resources since a particular sync-token. This simply extracts
        the revision from the token and then calls L{resourceNamesSinceRevision}.
        @param token: the sync-token to determine changes since
        @type token: C{str}
"""
return self.resourceNamesSinceRevision(self.revisionFromToken(token))
@inlineCallbacks
def resourceNamesSinceRevision(self, revision):
"""
Return the changed and deleted resources since a particular revision.
@param revision: the revision to determine changes since
@type revision: C{int}
"""
changed = []
deleted = []
invalid = []
if revision:
minValidRevision = yield self._txn.calendarserverValue("MIN-VALID-REVISION")
if revision < int(minValidRevision):
raise SyncTokenValidException
results = [
(name if name else "", removed) for name, removed in (
yield self._objectNamesSinceRevisionQuery().on(
self._txn, revision=revision, resourceID=self._resourceID)
)
]
results.sort(key=lambda x: x[1])
for name, wasdeleted in results:
if name:
if wasdeleted:
deleted.append(name)
else:
changed.append(name)
else:
changed = yield self.listObjectResources()
returnValue((changed, deleted, invalid))
@classproperty
def _removeDeletedRevision(cls):
rev = cls._revisionsSchema
return Delete(From=rev,
Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
rev.COLLECTION_NAME == Parameter("collectionName")))
@classproperty
def _addNewRevision(cls):
rev = cls._revisionsSchema
return Insert(
{
rev.HOME_RESOURCE_ID: Parameter("homeID"),
rev.RESOURCE_ID: Parameter("resourceID"),
rev.COLLECTION_NAME: Parameter("collectionName"),
rev.RESOURCE_NAME: None,
# Always starts false; may be updated to be a tombstone
# later.
rev.DELETED: False
},
Return=[rev.REVISION]
)
@inlineCallbacks
def _initSyncToken(self):
yield self._removeDeletedRevision.on(
self._txn, homeID=self._home._resourceID, collectionName=self._name
)
self._syncTokenRevision = (yield (
self._addNewRevision.on(self._txn, homeID=self._home._resourceID,
resourceID=self._resourceID,
collectionName=self._name)))[0][0]
self._txn.bumpRevisionForObject(self)
@classproperty
def _renameSyncTokenQuery(cls):
"""
DAL query to change sync token for a rename (increment and adjust
resource name).
"""
rev = cls._revisionsSchema
return Update(
{
rev.REVISION: schema.REVISION_SEQ,
rev.COLLECTION_NAME: Parameter("name"),
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
(rev.RESOURCE_NAME == None),
Return=rev.REVISION
)
@inlineCallbacks
def _renameSyncToken(self):
rows = yield self._renameSyncTokenQuery.on(
self._txn, name=self._name, resourceID=self._resourceID)
if rows:
self._syncTokenRevision = rows[0][0]
self._txn.bumpRevisionForObject(self)
else:
yield self._initSyncToken()
@classproperty
def _bumpSyncTokenQuery(cls):
"""
DAL query to change collection sync token. Note this can impact multiple rows if the
collection is shared.
"""
rev = cls._revisionsSchema
return Update(
{
rev.REVISION: schema.REVISION_SEQ,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And
(rev.RESOURCE_NAME == None)
)
@inlineCallbacks
def _bumpSyncToken(self):
if not self._txn.isRevisionBumpedAlready(self):
self._txn.bumpRevisionForObject(self)
yield self._bumpSyncTokenQuery.on(
self._txn,
resourceID=self._resourceID,
)
self._syncTokenRevision = None
@classproperty
def _deleteSyncTokenQuery(cls):
"""
DAL query to remove all child revision information. The revision for the collection
itself is not touched.
"""
rev = cls._revisionsSchema
return Delete(
From=rev,
Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And
(rev.RESOURCE_ID == Parameter("resourceID")).And
(rev.COLLECTION_NAME == None)
)
@classproperty
def _sharedRemovalQuery(cls):
"""
DAL query to indicate a shared collection has been deleted.
"""
rev = cls._revisionsSchema
return Update(
{
rev.RESOURCE_ID: None,
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: True,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.HOME_RESOURCE_ID == Parameter("homeID")).And(
rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == None)
)
@classproperty
def _unsharedRemovalQuery(cls):
"""
DAL query to indicate an owned collection has been deleted.
"""
rev = cls._revisionsSchema
return Update(
{
rev.RESOURCE_ID: None,
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: True,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == None),
)
@inlineCallbacks
def _deletedSyncToken(self, sharedRemoval=False):
"""
When a collection is deleted we remove all the revision information for its child resources.
We update the collection's sync token to indicate it has been deleted - that way a sync on
the home collection can report the deletion of the collection.
@param sharedRemoval: indicates whether the collection being removed is shared
@type sharedRemoval: L{bool}
"""
# Remove all child entries
yield self._deleteSyncTokenQuery.on(self._txn,
homeID=self._home._resourceID,
resourceID=self._resourceID)
# If this is a share being removed then we only mark this one specific
# home/resource-id as being deleted. On the other hand, if it is a
# non-shared collection, then we need to mark all collections
# with the resource-id as being deleted to account for direct shares.
if sharedRemoval:
yield self._sharedRemovalQuery.on(self._txn,
homeID=self._home._resourceID,
resourceID=self._resourceID)
else:
yield self._unsharedRemovalQuery.on(self._txn,
resourceID=self._resourceID)
self._syncTokenRevision = None
def _insertRevision(self, name):
return self._changeRevision("insert", name)
def _updateRevision(self, name):
return self._changeRevision("update", name)
def _deleteRevision(self, name):
return self._changeRevision("delete", name)
@classproperty
def _deleteBumpTokenQuery(cls):
rev = cls._revisionsSchema
return Update(
{
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: True,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == Parameter("name")),
Return=rev.REVISION
)
@classproperty
def _updateBumpTokenQuery(cls):
rev = cls._revisionsSchema
return Update(
{
rev.REVISION: schema.REVISION_SEQ,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == Parameter("name")),
Return=rev.REVISION
)
@classproperty
def _insertFindPreviouslyNamedQuery(cls):
rev = cls._revisionsSchema
return Select(
[rev.RESOURCE_ID],
From=rev,
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == Parameter("name"))
)
@classproperty
def _updatePreviouslyNamedQuery(cls):
rev = cls._revisionsSchema
return Update(
{
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: False,
rev.MODIFIED: utcNowSQL,
},
Where=(rev.RESOURCE_ID == Parameter("resourceID")).And(
rev.RESOURCE_NAME == Parameter("name")),
Return=rev.REVISION
)
@classproperty
def _completelyNewRevisionQuery(cls):
rev = cls._revisionsSchema
return Insert(
{
rev.HOME_RESOURCE_ID: Parameter("homeID"),
rev.RESOURCE_ID: Parameter("resourceID"),
rev.RESOURCE_NAME: Parameter("name"),
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: False
},
Return=rev.REVISION
)
@classproperty
def _completelyNewDeletedRevisionQuery(cls):
rev = cls._revisionsSchema
return Insert(
{
rev.HOME_RESOURCE_ID: Parameter("homeID"),
rev.RESOURCE_ID: Parameter("resourceID"),
rev.RESOURCE_NAME: Parameter("name"),
rev.REVISION: schema.REVISION_SEQ,
rev.DELETED: True
},
Return=rev.REVISION
)
@inlineCallbacks
def _changeRevision(self, action, name):
# Need to handle the case where for some reason the revision entry is
# actually missing. For a "delete" we don't care, for an "update" we
# will turn it into an "insert".
if action == "delete":
rows = (
yield self._deleteBumpTokenQuery.on(
self._txn, resourceID=self._resourceID, name=name))
if rows:
self._syncTokenRevision = rows[0][0]
else:
self._syncTokenRevision = (
yield self._completelyNewDeletedRevisionQuery.on(
self._txn, homeID=self.ownerHome()._resourceID,
resourceID=self._resourceID, name=name)
)[0][0]
elif action == "update":
rows = (
yield self._updateBumpTokenQuery.on(
self._txn, resourceID=self._resourceID, name=name))
if rows:
self._syncTokenRevision = rows[0][0]
else:
self._syncTokenRevision = (
yield self._completelyNewRevisionQuery.on(
self._txn, homeID=self.ownerHome()._resourceID,
resourceID=self._resourceID, name=name)
)[0][0]
elif action == "insert":
# Note that an "insert" may happen for a resource that previously
# existed and then was deleted. In that case an entry in the
# REVISIONS table still exists so we have to detect that and do db
# INSERT or UPDATE as appropriate
found = bool((
yield self._insertFindPreviouslyNamedQuery.on(
self._txn, resourceID=self._resourceID, name=name)))
if found:
self._syncTokenRevision = (
yield self._updatePreviouslyNamedQuery.on(
self._txn, resourceID=self._resourceID, name=name)
)[0][0]
else:
self._syncTokenRevision = (
yield self._completelyNewRevisionQuery.on(
self._txn, homeID=self.ownerHome()._resourceID,
resourceID=self._resourceID, name=name)
)[0][0]
yield self._maybeNotify()
returnValue(self._syncTokenRevision)
def _maybeNotify(self):
"""
Maybe notify changed. (Overridden in NotificationCollection.)
"""
return succeed(None)
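# Example (sketch): a child resource class would typically record changes by
# calling the revision helpers defined above from inside a transaction; the
# resource name below is illustrative only.
#
#   yield collection._insertRevision("event-1.ics")   # child created
#   yield collection._updateRevision("event-1.ics")   # child modified
#   yield collection._deleteRevision("event-1.ics")   # child removed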
def determineNewest(uid, homeType):
"""
Construct a query to determine the modification time of the newest object
in a given home.
@param uid: the UID of the home to scan.
@type uid: C{str}
@param homeType: The type of home to scan; C{ECALENDARTYPE},
C{ENOTIFICATIONTYPE}, or C{EADDRESSBOOKTYPE}.
@type homeType: C{int}
@return: A select query that will return a single row containing a single
column which is the maximum value.
@rtype: L{Select}
"""
if homeType == ENOTIFICATIONTYPE:
return Select(
[Max(schema.NOTIFICATION.MODIFIED)],
From=schema.NOTIFICATION_HOME.join(
schema.NOTIFICATION,
on=schema.NOTIFICATION_HOME.RESOURCE_ID ==
schema.NOTIFICATION.NOTIFICATION_HOME_RESOURCE_ID),
Where=schema.NOTIFICATION_HOME.OWNER_UID == uid
)
homeTypeName = {ECALENDARTYPE: "CALENDAR",
EADDRESSBOOKTYPE: "ADDRESSBOOK"}[homeType]
home = getattr(schema, homeTypeName + "_HOME")
bind = getattr(schema, homeTypeName + "_BIND")
child = getattr(schema, homeTypeName)
obj = getattr(schema, homeTypeName + "_OBJECT")
return Select(
[Max(obj.MODIFIED)],
From=home.join(bind, on=bind.HOME_RESOURCE_ID == home.RESOURCE_ID).join(
child, on=child.RESOURCE_ID == bind.RESOURCE_ID).join(
obj, on=obj.PARENT_RESOURCE_ID == child.RESOURCE_ID),
Where=(bind.BIND_MODE == 0).And(home.OWNER_UID == uid)
)
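# Example (sketch): the query built by determineNewest() is executed against a
# transaction elsewhere in this module; 'txn' and 'uid' are assumed names here.
#
#   rows = yield determineNewest(uid, ECALENDARTYPE).on(txn)
#   newestModified = rows[0][0]   # None when the home contains no objects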
@inlineCallbacks
def mergeHomes(sqlTxn, one, other, homeType):
"""
    Merge two homes together. This determines which of C{one} or C{other} is
    newer - that is, has been modified more recently - and pulls all the data
    from the older into the newer home. Then it changes the UID of the older
    home to its normalized UID prefixed with "old.", and renames the newer
    home to its normalized UID.
    Because the UIDs of both homes have changed, B{both one and other will be
    invalid to all other callers from the start of the invocation of this
    function}.
@param sqlTxn: the transaction to use
@type sqlTxn: A L{CommonTransaction}
@param one: A calendar home.
@type one: L{ICalendarHome}
    @param other: Another, different calendar home.
    @type other: L{ICalendarHome}
@param homeType: The type of home to scan; L{ECALENDARTYPE} or
L{EADDRESSBOOKTYPE}.
@type homeType: C{int}
    @return: a L{Deferred} which fires with the newer of C{one} or C{other},
into which the data from the other home has been merged, when the merge
is complete.
"""
from txdav.caldav.datastore.util import migrateHome as migrateCalendarHome
from txdav.carddav.datastore.util import migrateHome as migrateABHome
migrateHome = {EADDRESSBOOKTYPE: migrateABHome,
ECALENDARTYPE: migrateCalendarHome,
ENOTIFICATIONTYPE: _dontBotherWithNotifications}[homeType]
homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
ECALENDARTYPE: schema.CALENDAR_HOME,
ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
both = []
both.append([one,
(yield determineNewest(one.uid(), homeType).on(sqlTxn))])
both.append([other,
(yield determineNewest(other.uid(), homeType).on(sqlTxn))])
both.sort(key=lambda x: x[1])
older = both[0][0]
newer = both[1][0]
yield migrateHome(older, newer, merge=True)
# Rename the old one to 'old.<correct-guid>'
newNormalized = normalizeUUIDOrNot(newer.uid())
oldNormalized = normalizeUUIDOrNot(older.uid())
yield _renameHome(sqlTxn, homeTable, older.uid(), "old." + oldNormalized)
# Rename the new one to '<correct-guid>'
if newer.uid() != newNormalized:
yield _renameHome(sqlTxn, homeTable, newer.uid(), newNormalized)
yield returnValue(newer)
def _renameHome(txn, table, oldUID, newUID):
"""
Rename a calendar, addressbook, or notification home. Note that this
function is only safe in transactions that have had caching disabled, and
more specifically should only ever be used during upgrades. Running this
in a normal transaction will have unpredictable consequences, especially
with respect to memcache.
@param txn: an SQL transaction to use for this update
@type txn: L{twext.enterprise.ienterprise.IAsyncTransaction}
@param table: the storage table of the desired home type
@type table: L{TableSyntax}
@param oldUID: the old UID, the existing home's UID
@type oldUID: L{str}
@param newUID: the new UID, to change the UID to
@type newUID: L{str}
@return: a L{Deferred} which fires when the home is renamed.
"""
return Update({table.OWNER_UID: newUID},
Where=table.OWNER_UID == oldUID).on(txn)
def _dontBotherWithNotifications(older, newer, merge):
"""
Notifications are more transient and can be easily worked around; don't
bother to migrate all of them when there is a UUID case mismatch.
"""
pass
@inlineCallbacks
def _normalizeHomeUUIDsIn(t, homeType):
"""
Normalize the UUIDs in the given L{txdav.common.datastore.CommonStore}.
This changes the case of the UUIDs in the calendar home.
@param t: the transaction to normalize all the UUIDs in.
@type t: L{CommonStoreTransaction}
@param homeType: The type of home to scan, L{ECALENDARTYPE},
L{EADDRESSBOOKTYPE}, or L{ENOTIFICATIONTYPE}.
@type homeType: C{int}
@return: a L{Deferred} which fires with C{None} when the UUID normalization
is complete.
"""
from txdav.caldav.datastore.util import fixOneCalendarHome
homeTable = {EADDRESSBOOKTYPE: schema.ADDRESSBOOK_HOME,
ECALENDARTYPE: schema.CALENDAR_HOME,
ENOTIFICATIONTYPE: schema.NOTIFICATION_HOME}[homeType]
homeTypeName = homeTable.model.name.split("_")[0]
allUIDs = yield Select([homeTable.OWNER_UID],
From=homeTable,
OrderBy=homeTable.OWNER_UID).on(t)
total = len(allUIDs)
allElapsed = []
for n, [UID] in enumerate(allUIDs):
start = time.time()
if allElapsed:
estimate = "%0.3d" % ((sum(allElapsed) / len(allElapsed)) *
total - n)
else:
estimate = "unknown"
log.info(
"Scanning UID {uid} [{homeType}] "
"({pct:0.2d}%, {estimate} seconds remaining)...",
uid=UID, pct=(n / float(total)) * 100, estimate=estimate,
homeType=homeTypeName
)
other = None
this = yield _getHome(t, homeType, UID)
if homeType == ECALENDARTYPE:
fixedThisHome = yield fixOneCalendarHome(this)
else:
fixedThisHome = 0
fixedOtherHome = 0
if this is None:
log.info(
"{uid!r} appears to be missing, already processed", uid=UID
)
try:
uuidobj = UUID(UID)
except ValueError:
pass
else:
newname = str(uuidobj).upper()
if UID != newname:
log.info(
"Detected case variance: {uid} {newuid}[{homeType}]",
uid=UID, newuid=newname, homeType=homeTypeName
)
other = yield _getHome(t, homeType, newname)
if other is None:
# No duplicate: just fix the name.
yield _renameHome(t, homeTable, UID, newname)
else:
if homeType == ECALENDARTYPE:
fixedOtherHome = yield fixOneCalendarHome(other)
this = yield mergeHomes(t, this, other, homeType)
# NOTE: WE MUST NOT TOUCH EITHER HOME OBJECT AFTER THIS POINT.
# THE UIDS HAVE CHANGED AND ALL OPERATIONS WILL FAIL.
end = time.time()
elapsed = end - start
allElapsed.append(elapsed)
log.info(
"Scanned UID {uid}; {elapsed} seconds elapsed,"
" {fixes} properties fixed ({duplicate} fixes in duplicate).",
uid=UID, elapsed=elapsed, fixes=fixedThisHome,
duplicate=fixedOtherHome
)
returnValue(None)
def _getHome(txn, homeType, uid):
"""
Like L{CommonHome.homeWithUID} but also honoring ENOTIFICATIONTYPE which
isn't I{really} a type of home.
@param txn: the transaction to retrieve the home from
@type txn: L{CommonStoreTransaction}
@param homeType: L{ENOTIFICATIONTYPE}, L{ECALENDARTYPE}, or
L{EADDRESSBOOKTYPE}.
@param uid: the UID of the home to retrieve.
@type uid: L{str}
@return: a L{Deferred} that fires with the L{CommonHome} or
L{NotificationHome} when it has been retrieved.
"""
if homeType == ENOTIFICATIONTYPE:
return txn.notificationsWithUID(uid)
else:
return txn.homeWithUID(homeType, uid)
@inlineCallbacks
def _normalizeColumnUUIDs(txn, column):
"""
Upper-case the UUIDs in the given SQL DAL column.
@param txn: The transaction.
@type txn: L{CommonStoreTransaction}
@param column: the column, which may contain UIDs, to normalize.
@type column: L{ColumnSyntax}
@return: A L{Deferred} that will fire when the UUID normalization of the
given column has completed.
"""
tableModel = column.model.table
# Get a primary key made of column syntax objects for querying and
# comparison later.
pkey = [ColumnSyntax(columnModel)
for columnModel in tableModel.primaryKey]
for row in (yield Select([column] + pkey,
From=TableSyntax(tableModel)).on(txn)):
before = row[0]
pkeyparts = row[1:]
after = normalizeUUIDOrNot(before)
if after != before:
where = _AndNothing
# Build a where clause out of the primary key and the parts of the
# primary key that were found.
for pkeycol, pkeypart in zip(pkeyparts, pkey):
where = where.And(pkeycol == pkeypart)
yield Update({column: after}, Where=where).on(txn)
class _AndNothing(object):
"""
Simple placeholder for iteratively generating a 'Where' clause; the 'And'
just returns its argument, so it can be used at the start of the loop.
"""
@staticmethod
def And(self):
"""
Return the argument.
"""
return self
@inlineCallbacks
def _needsNormalizationUpgrade(txn):
"""
Determine whether a given store requires a UUID normalization data upgrade.
@param txn: the transaction to use
@type txn: L{CommonStoreTransaction}
@return: a L{Deferred} that fires with C{True} or C{False} depending on
whether we need the normalization upgrade or not.
"""
for x in [schema.CALENDAR_HOME, schema.ADDRESSBOOK_HOME,
schema.NOTIFICATION_HOME]:
slct = Select([x.OWNER_UID], From=x,
Where=x.OWNER_UID != Upper(x.OWNER_UID))
rows = yield slct.on(txn)
if rows:
for [uid] in rows:
if normalizeUUIDOrNot(uid) != uid:
returnValue(True)
returnValue(False)
@inlineCallbacks
def fixUUIDNormalization(store):
"""
Fix all UUIDs in the given SQL store to be in a canonical form;
00000000-0000-0000-0000-000000000000 format and upper-case.
"""
t = store.newTransaction(disableCache=True)
# First, let's see if there are any calendar, addressbook, or notification
# homes that have a de-normalized OWNER_UID. If there are none, then we can
# early-out and avoid the tedious and potentially expensive inspection of
# oodles of calendar data.
if not (yield _needsNormalizationUpgrade(t)):
log.info("No potentially denormalized UUIDs detected, "
"skipping normalization upgrade.")
yield t.abort()
returnValue(None)
try:
yield _normalizeHomeUUIDsIn(t, ECALENDARTYPE)
yield _normalizeHomeUUIDsIn(t, EADDRESSBOOKTYPE)
yield _normalizeHomeUUIDsIn(t, ENOTIFICATIONTYPE)
yield _normalizeColumnUUIDs(t, schema.RESOURCE_PROPERTY.VIEWER_UID)
yield _normalizeColumnUUIDs(t, schema.APN_SUBSCRIPTIONS.SUBSCRIBER_GUID)
except:
log.failure("Unable to normalize UUIDs")
yield t.abort()
        # There are many possible problems here which are very hard to test
        # for individually; unexpected data might cause constraint
        # violations under one of the manipulations done by
        # normalizeHomeUUIDsIn. Since this upgrade does not come along with a
        # schema version bump and may be re-attempted at any time, just raise
        # the exception and log it so that we can try again later, and the
        # service will survive for everyone _not_ affected by this somewhat
        # obscure bug.
else:
yield t.commit()
| red-hood/calendarserver | txdav/common/datastore/sql_util.py | Python | apache-2.0 | 30,699 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import logging
import os
from pylib import android_commands
from pylib import constants
from pylib import perf_tests_helper
from pylib.android_commands import errors
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.utils import run_tests_helper
import test_package_apk
import test_package_executable
def _GetDataFilesForTestSuite(test_suite_basename):
"""Returns a list of data files/dirs needed by the test suite.
Args:
test_suite_basename: The test suite basename for which to return file paths.
Returns:
A list of test file and directory paths.
"""
  # Ideally, we'd just push all test data. However, it totals more than 100MB,
  # and a lot of the files are not relevant (some are used for browser_tests,
  # others for features not supported, etc.).
if test_suite_basename == 'base_unittests':
return [
'base/test/data/',
]
elif test_suite_basename == 'unit_tests':
test_files = [
'base/test/data/',
'chrome/test/data/download-test1.lib',
'chrome/test/data/extensions/bad_magic.crx',
'chrome/test/data/extensions/good.crx',
'chrome/test/data/extensions/icon1.png',
'chrome/test/data/extensions/icon2.png',
'chrome/test/data/extensions/icon3.png',
'chrome/test/data/extensions/allow_silent_upgrade/',
'chrome/test/data/extensions/app/',
'chrome/test/data/extensions/bad/',
'chrome/test/data/extensions/effective_host_permissions/',
'chrome/test/data/extensions/empty_manifest/',
'chrome/test/data/extensions/good/Extensions/',
'chrome/test/data/extensions/manifest_tests/',
'chrome/test/data/extensions/page_action/',
'chrome/test/data/extensions/permissions/',
'chrome/test/data/extensions/script_and_capture/',
'chrome/test/data/extensions/unpacker/',
'chrome/test/data/bookmarks/',
'chrome/test/data/components/',
'chrome/test/data/extensions/json_schema_test.js',
'chrome/test/data/History/',
'chrome/test/data/json_schema_validator/',
'chrome/test/data/pref_service/',
'chrome/test/data/simple_open_search.xml',
'chrome/test/data/top_sites/',
'chrome/test/data/web_app_info/',
'chrome/test/data/web_database',
'chrome/test/data/webui/',
'chrome/third_party/mock4js/',
'net/data/ssl/certificates',
'third_party/accessibility-developer-tools/gen/axs_testing.js',
'third_party/zlib/google/test/data',
]
# The following are spell check data. Now only list the data under
# third_party/hunspell_dictionaries which are used by unit tests.
old_cwd = os.getcwd()
os.chdir(constants.CHROME_DIR)
test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic')
os.chdir(old_cwd)
return test_files
elif test_suite_basename == 'media_unittests':
return [
'media/test/data',
]
elif test_suite_basename == 'net_unittests':
return [
'chrome/test/data/animate1.gif',
'chrome/test/data/simple.html',
'net/data/cache_tests',
'net/data/filter_unittests',
'net/data/ftp',
'net/data/proxy_resolver_v8_tracing_unittest',
'net/data/proxy_resolver_v8_unittest',
'net/data/proxy_script_fetcher_unittest',
'net/data/ssl/certificates',
'net/data/test.html',
'net/data/url_request_unittest/',
]
elif test_suite_basename == 'ui_tests':
return [
'chrome/test/data/dromaeo',
'chrome/test/data/json2.js',
'chrome/test/data/sunspider',
]
elif test_suite_basename == 'ui_unittests':
return [
'ui/base/test/data/data_pack_unittest/truncated-header.pak',
]
elif test_suite_basename == 'content_unittests':
return [
'content/test/data/gpu/webgl_conformance_test_expectations.txt',
'net/data/ssl/certificates/',
'third_party/hyphen/hyph_en_US.dic',
'webkit/data/dom_storage/webcore_test_database.localstorage',
]
elif test_suite_basename == 'cc_perftests':
return [
'cc/test/data',
]
elif test_suite_basename == 'perf_tests':
return [
'base/test/data',
]
elif test_suite_basename == 'content_browsertests':
return [
'content/test/data/content-disposition-inline.html',
'content/test/data/title1.html',
'content/test/data/post_message2.html',
'content/test/data/content-sniffer-test0.html.mock-http-headers',
'content/test/data/content-sniffer-test1.html.mock-http-headers',
'content/test/data/speech',
'content/test/data/page404.html.mock-http-headers',
'content/test/data/content-sniffer-test3.html',
'content/test/data/post_message.html',
'content/test/data/remove_frame_on_unload.html',
'content/test/data/cross-origin-redirect-blocked.html',
'content/test/data/prerender',
'content/test/data/device_orientation',
'content/test/data/content-disposition-empty.html',
'content/test/data/workers',
'content/test/data/content-sniffer-test3.html.mock-http-headers',
'content/test/data/content-sniffer-test0.html',
'content/test/data/browser_plugin_title_change.html',
'content/test/data/android',
'content/test/data/page404.html',
'content/test/data/dynamic2.html',
'content/test/data/browser_plugin_embedder.html',
'content/test/data/indexeddb',
'content/test/data/content-disposition-inline.html.mock-http-headers',
'content/test/data/nosniff-test.html',
'content/test/data/title3.html',
'content/test/data/browser_plugin_post_message_guest.html',
'content/test/data/content-disposition-empty.html.mock-http-headers',
'content/test/data/session_history',
'content/test/data/browser_plugin_naming_guest.html',
'content/test/data/overscroll_navigation.html',
'content/test/data/simple_database.html',
'content/test/data/gtk_key_bindings_test_gtkrc',
'content/test/data/browser_plugin_embedder_guest_unresponsive.html',
'content/test/data/sync_xmlhttprequest.html',
'content/test/data/content-sniffer-test3-frame.txt.mock-http-headers',
'content/test/data/frame_tree',
'content/test/data/browser_plugin_naming_embedder.html',
'content/test/data/content-sniffer-test2.html.mock-http-headers',
'content/test/data/sync_xmlhttprequest_disallowed.html',
'content/test/data/rwh_simple.html',
'content/test/data/title2.html',
'content/test/data/webkit',
'content/test/data/content-sniffer-test1.html',
'content/test/data/download',
'content/test/data/content-sniffer-test2.html',
'content/test/data/simple_page.html',
'content/test/data/google.mht',
'content/test/data/site_per_process_main.html',
'content/test/data/gpu',
'content/test/data/onunload_cookie.html',
'content/test/data/textinput',
'content/test/data/navigate_opener.html',
'content/test/data/dom_storage',
'content/test/data/sync_xmlhttprequest_during_unload.html',
'content/test/data/browser_plugin_dragging.html',
'content/test/data/fileapi',
'content/test/data/npapi',
'content/test/data/nosniff-test.html.mock-http-headers',
'content/test/data/accessibility',
'content/test/data/dynamic1.html',
'content/test/data/browser_plugin_focus_child.html',
'content/test/data/rwhv_compositing_animation.html',
'content/test/data/click-noreferrer-links.html',
'content/test/data/browser_plugin_focus.html',
'content/test/data/media',
'third_party/webgl_conformance',
]
return []
def _GetOptionalDataFilesForTestSuite(test_suite_basename):
"""Returns a list of data files/dirs that are pushed if present.
Args:
test_suite_basename: The test suite basename for which to return file paths.
Returns:
A list of test file and directory paths.
"""
if test_suite_basename == 'content_browsertests':
# See http://crbug.com/105104 for why these are needed.
return [
'third_party/WebKit/LayoutTests/fast/events',
'third_party/WebKit/LayoutTests/fast/files',
'third_party/WebKit/LayoutTests/fast/filesystem',
'third_party/WebKit/LayoutTests/fast/js/resources',
'third_party/WebKit/LayoutTests/fast/workers',
'third_party/WebKit/LayoutTests/http/tests',
'third_party/WebKit/LayoutTests/storage/indexeddb',
'third_party/WebKit/LayoutTests/media',
'content/test/data/layout_tests/LayoutTests/fast/events',
'content/test/data/layout_tests/LayoutTests/fast/files',
'content/test/data/layout_tests/LayoutTests/fast/filesystem',
'content/test/data/layout_tests/LayoutTests/fast/js/resources',
'content/test/data/layout_tests/LayoutTests/fast/workers',
'content/test/data/layout_tests/LayoutTests/http/tests',
'content/test/data/layout_tests/LayoutTests/storage/indexeddb',
'content/test/data/layout_tests/LayoutTests/media',
]
return []
def _TestSuiteRequiresMockTestServer(test_suite_basename):
"""Returns True if the test suite requires mock test server."""
tests_require_net_test_server = ['unit_tests', 'net_unittests',
'content_unittests',
'content_browsertests']
return (test_suite_basename in
tests_require_net_test_server)
class TestRunner(base_test_runner.BaseTestRunner):
"""Single test suite attached to a single device.
Args:
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
test_arguments: Additional arguments to pass to the test binary.
timeout: Timeout for each test.
cleanup_test_files: Whether or not to cleanup test files on device.
tool_name: Name of the Valgrind tool.
build_type: 'Release' or 'Debug'.
in_webkit_checkout: Whether the suite is being run from a WebKit checkout.
test_apk_package_name: Apk package name for tests running in APKs.
test_activity_name: Test activity to invoke for APK tests.
command_line_file: Filename to use to pass arguments to tests.
"""
def __init__(self, device, test_suite, test_arguments, timeout,
cleanup_test_files, tool_name, build_type,
in_webkit_checkout, test_apk_package_name=None,
test_activity_name=None, command_line_file=None):
super(TestRunner, self).__init__(device, tool_name, build_type)
self._running_on_emulator = self.device.startswith('emulator')
self._test_arguments = test_arguments
self.in_webkit_checkout = in_webkit_checkout
self._cleanup_test_files = cleanup_test_files
logging.warning('Test suite: ' + test_suite)
if os.path.splitext(test_suite)[1] == '.apk':
self.test_package = test_package_apk.TestPackageApk(
self.adb,
device,
test_suite,
timeout,
self._cleanup_test_files,
self.tool,
test_apk_package_name,
test_activity_name,
command_line_file)
else:
# Put a copy into the android out/target directory, to allow stack trace
# generation.
symbols_dir = os.path.join(constants.CHROME_DIR, 'out', build_type,
'lib.target')
self.test_package = test_package_executable.TestPackageExecutable(
self.adb,
device,
test_suite,
timeout,
self._cleanup_test_files,
self.tool,
symbols_dir)
#override
def PushDependencies(self):
self.test_package.StripAndCopyExecutable()
self.test_package.PushDataAndPakFiles()
self.tool.CopyFiles()
test_data = _GetDataFilesForTestSuite(self.test_package.test_suite_basename)
if test_data:
# Make sure SD card is ready.
self.adb.WaitForSdCardReady(20)
for data in test_data:
self.CopyTestData([data], self.adb.GetExternalStorage())
optional_test_data = _GetOptionalDataFilesForTestSuite(
self.test_package.test_suite_basename)
if optional_test_data:
self.adb.WaitForSdCardReady(20)
for data in optional_test_data:
if os.path.exists(data):
self.CopyTestData([data], self.adb.GetExternalStorage())
if self.test_package.test_suite_basename == 'webkit_unit_tests':
self.PushWebKitUnitTestsData()
def PushWebKitUnitTestsData(self):
"""Pushes the webkit_unit_tests data files to the device.
The path of this directory is different when the suite is being run as
part of a WebKit check-out.
"""
webkit_src = os.path.join(constants.CHROME_DIR, 'third_party', 'WebKit')
if self.in_webkit_checkout:
webkit_src = os.path.join(constants.CHROME_DIR, '..', '..', '..')
self.adb.PushIfNeeded(
os.path.join(webkit_src, 'Source/WebKit/chromium/tests/data'),
os.path.join(
self.adb.GetExternalStorage(),
'third_party/WebKit/Source/WebKit/chromium/tests/data'))
# TODO(craigdh): There is no reason for this to be part of TestRunner.
def GetDisabledTests(self):
"""Returns a list of disabled tests.
Returns:
A list of disabled tests obtained from 'filter' subdirectory.
"""
gtest_filter_base_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'filter',
self.test_package.test_suite_basename)
disabled_tests = run_tests_helper.GetExpectations(
gtest_filter_base_path + '_disabled')
if self._running_on_emulator:
# Append emulator's filter file.
disabled_tests.extend(run_tests_helper.GetExpectations(
gtest_filter_base_path + '_emulator_additional_disabled'))
return disabled_tests
#override
def RunTest(self, test):
test_results = base_test_result.TestRunResults()
if not test:
return test_results, None
try:
self.test_package.ClearApplicationState()
self.test_package.CreateTestRunnerScript(test, self._test_arguments)
test_results = self.test_package.RunTestsAndListResults()
except errors.DeviceUnresponsiveError as e:
# Make sure this device is not attached
logging.warning(e)
if android_commands.IsDeviceAttached(self.device):
raise
finally:
self.CleanupSpawningServerState()
# Calculate unknown test results.
all_tests = set(test.split(':'))
all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
unknown_tests = all_tests - all_tests_ran
test_results.AddResults(
[base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
for t in unknown_tests])
retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
return test_results, retry
#override
def SetUp(self):
"""Sets up necessary test enviroment for the test suite."""
super(TestRunner, self).SetUp()
if _TestSuiteRequiresMockTestServer(self.test_package.test_suite_basename):
self.LaunchChromeTestServerSpawner()
self.tool.SetupEnvironment()
#override
def TearDown(self):
"""Cleans up the test enviroment for the test suite."""
self.tool.CleanUpEnvironment()
if self._cleanup_test_files:
self.adb.RemovePushedFiles()
super(TestRunner, self).TearDown()
| loopCM/chromium | build/android/pylib/gtest/test_runner.py | Python | bsd-3-clause | 15,719 |
# turn-based rpg
import random
import time
class role:
name=""
lv=1
exp=0
nextLv=1000
hp=100
mp=30
stra=5
inte=5
spd=5
defe=5
rest=5
void=5
dropItems=[None]
dropPrecent=[100]
command=['attack','void','def','fireball']
def __init__(self,name,lv):
self.name=name
self.lv=lv
self.initRoleByLv(lv)
def initRoleByLv(self,lv):
self.exp=lv*(1000+lv*200)
self.nextLv=(lv+1)*(1000+(lv+1)*200)
self.hp=int(self.hp+lv*30*random.random())
self.mp=int(self.mp+lv*10*random.random())
self.stra=int(self.stra+lv*2*random.random())
self.inte=int(self.inte+lv*2*random.random())
self.spd=int(self.spd+lv*2*random.random())
self.defe=int(self.defe+lv*2*random.random())
self.rest=int(self.rest+lv*2*random.random())
self.void=int(self.void+lv*2*random.random())
def getInfo(self):
return self.name+"[lv:"+str(self.lv)+",exp:"+str(self.exp)+\
",nextLv:"+str(self.nextLv)+\
",hp:"+str(self.hp)+",mp:"+str(self.mp)+\
",stra:"+str(self.stra)+",inte:"+str(self.inte)+\
",spd:"+str(self.spd)+",defe:"+str(self.defe)+\
",rest:"+str(self.rest)+\
",void:"+str(self.void)+",command:["+",".join(self.command)+"]]"
def addExp(self,exp):
self.exp+=exp
if self.exp>=self.nextLv:
self.lvUp();
        print self.name+' gets '+str(exp)+' exp!'
def lvUp(self):
self.lv+=1
self.nextLv=(self.lv+1)*(1000+(self.lv+1)*200)
self.hp=int(self.hp+30*random.random())
self.mp=int(self.mp+10*random.random())
self.stra=int(self.stra+2*random.random())
self.inte=int(self.inte+2*random.random())
self.spd=int(self.spd+2*random.random())
self.defe=int(self.defe+2*random.random())
self.rest=int(self.rest+2*random.random())
self.void=int(self.void+2*random.random())
if self.exp>=self.nextLv:
self.lvUp();
print self.name+' LEVELUP!'+self.getInfo()
class stage:
stagename="stage"
stageLv=1
compelete=False
startPos=0
endPos=100
emenyLIst=[role("man",1),role("slime",3),role("swordman",4),\
role("dragon baby",5),role("dragon",7),role("vampire",8)]
emenyPrecent=[30,30,20,10,5,5]
boss=role("boss",10)
def __init__(self,stagename,stagelv):
self.stagename=stagename
        self.stageLv=stagelv
self.startPos=0
def getInfo(self):
s=''
for num in self.emenyPrecent :s+=str(num)+','
s2=''
for num2 in self.emenyLIst :s2+=num2.name+','
return self.stagename+"[stageLv:"+str(self.stageLv)+",compelete:"+str(self.compelete)+\
",startPos:"+str(self.startPos)+\
",endPos:"+str(self.endPos)+\
",emenyLIst:["+s2+\
"],emenyPrecent:["+s+"]]"
#my=role('my',7)
#print my.getInfo()
#my.addExp(18000)
#print my.getInfo()
#stage=stage("forest",1)
#print stage.getInfo()
# commands:
def attack(roleself,roleattacked):
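    # Damage is a random value scaled by how much the attacker's strength
    # exceeds the defender's defense; when it does not exceed it, fall back
    # to a small random hit in the 0-19 range.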
damage=0
if roleself.stra-roleattacked.defe>0:
damage=int((roleself.stra-roleattacked.defe)*random.random()*20)
else:
damage=int(random.random()*20)
roleattacked.hp-=damage
print roleself.name+'\'s attack:deal '+str(damage)+' damage to '+roleattacked.name
#methods:
def expolore(stage):
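    # Walk the stage a few steps at a time. Each pass rolls 0-99 against the
    # stage's cumulative encounter percentages; on a hit, enter a simple
    # turn-based battle loop that runs until the enemy or the player falls,
    # or the player types "exit".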
while True:
r=int(random.random()*100);
precentnew=0;
for (precent,emeny) in zip(stage.emenyPrecent,stage.emenyLIst):
stage.startPos+=int(4*random.random())+1;
if(stage.startPos>=stage.endPos):
print "stage clear!"
return "stage clear!"
precentold=precentnew
precentnew+=precent
if r>=precentold and r<precentnew :
while True:
print time.strftime("%Y-%m-%d-%H-%M-%S",\
time.localtime(time.time())),\
precentold,\
precentnew,emeny.name,emeny.hp,emeny.mp,player.name,player.hp,player.mp
#print emeny.getInfo()
#print player.getInfo()
cmd=raw_input()
if cmd=="exit" :
break
if cmd=="show":
print stage.startPos,stage.endPos,player.getInfo(),emeny.getInfo()
break
if emeny.spd>player.spd:
attack(emeny,player)
if cmd=="a" or cmd=="attack":
attack(player,emeny)
if emeny.spd<=player.spd:
attack(emeny,player)
if emeny.hp<=0:
player.addExp(int((emeny.lv+emeny.inte+emeny.stra)*500*random.random()))
break
elif player.hp<=0:
print "game over"
return 'game over'
#main methods:
global player
player=role("player",8)
while True:
print 'Please type enter to start,type"exit" to exit'
cmd=raw_input()
if cmd=="exit" :
break
else:
expolore(stage("forest",1))
| alucardlockon/LearnCode | 01PythonTest/05_trunBasedRpg.py | Python | gpl-3.0 | 5,415 |
import apt
import datetime
import os
import subprocess
import time
import unittest
from gi.repository import GLib
from tests.utils import (
DATA_DIR,
do_events,
setup_test_env,
)
setup_test_env()
from softwarecenter.db.history_impl.apthistory import AptHistory
from softwarecenter.utils import ExecutionTime
class TestAptHistory(unittest.TestCase):
def setUp(self):
self.basedir = os.path.join(DATA_DIR, "apt-history")
apt.apt_pkg.config.set("Dir::Log", self.basedir)
#apt_pkg.config.set("Dir::Log::History", "./")
def _get_apt_history(self):
history = AptHistory(use_cache=False)
do_events()
return history
def test_history(self):
history = self._get_apt_history()
self.assertEqual(history.transactions[0].start_date,
datetime.datetime.strptime("2010-06-09 14:50:00",
"%Y-%m-%d %H:%M:%S"))
# 186 is from "zgrep Start data/apt-history/history.log*|wc -l"
#print "\n".join([str(x) for x in history.transactions])
self.assertEqual(len(history.transactions), 186)
def test_apthistory_upgrade(self):
history = self._get_apt_history()
self.assertEqual(history.transactions[1].upgrade,
['acl (2.2.49-2, 2.2.49-3)'])
def _glib_timeout(self):
self._timeouts.append(time.time())
return True
def _generate_big_history_file(self, new_history):
# needs to ensure the date is decreasing, otherwise the rescan
# code is too clever and skips it
f = open(new_history,"w")
date=datetime.date(2009, 8, 2)
for i in range(1000):
date -= datetime.timedelta(days=i)
s="Start-Date: %s 14:00:00\nInstall: 2vcard\nEnd-Date: %s 14:01:00\n\n" % (date, date)
f.write(s)
f.close()
subprocess.call(["gzip", new_history])
self.addCleanup(os.remove, new_history + ".gz")
def test_apthistory_rescan_big(self):
""" create big history file and ensure that on rescan the
events are still processed
"""
self._timeouts = []
new_history = os.path.join(self.basedir, "history.log.2")
history = self._get_apt_history()
self.assertEqual(len(history.transactions), 186)
self._generate_big_history_file(new_history)
timer_id = GLib.timeout_add(100, self._glib_timeout)
with ExecutionTime("rescan %s byte file" % os.path.getsize(new_history+".gz")):
history._rescan(use_cache=False)
GLib.source_remove(timer_id)
# verify rescan
self.assertTrue(len(history.transactions) > 186)
# check the timeouts
self.assertTrue(len(self._timeouts) > 0)
for i in range(len(self._timeouts)-1):
            # check that consecutive timeout callbacks fired no more than 0.2s apart
            if abs(self._timeouts[i] - self._timeouts[i+1]) > 0.2:
                self.fail("gap between timeout callbacks exceeded 0.2s")
def test_no_history_log(self):
# set to dir with no existing history.log
apt.apt_pkg.config.set("Dir::Log", "/")
# this should not raise
history = self._get_apt_history()
self.assertEqual(history.transactions, [])
apt.apt_pkg.config.set("Dir::Log", self.basedir)
if __name__ == "__main__":
unittest.main()
| ceibal-tatu/software-center | tests/test_apthistory.py | Python | lgpl-3.0 | 3,358 |
"""
<Program Name>
testportfiller.py
<Started>
November 13, 2008
<Author>
Brent Couvrette
<Purpose>
This module is used to fill in the port numbers in the repy unit tests.
Because the unit tests can be run on any random node you have access to,
hardcoding in a port or even a small set of ports is asking for failure
when the tests are run on a node that does not have those ports open.
Therefore it is best to dynamically determine what ports are available on
the node that is being used, then make all the tests use those ports.
However, we also want to be able to still run the unit tests locally, which
requires that this functionallity be in two places, hence the existence of
this module.
If run on its own, this module will find and replace all of the uses of port
numbers in the repy tests with some default port.
If included, the replacePorts function should be called to replace all the
ports with the given port numbers (more details in the replacePorts doc).
"""
import glob
# Goes through all of the test files and replaces the <messport> and <connport>
# tags with the ports that were found on the actual vessel
def replace_ports(foundMessports, foundConnports):
"""
<Purpose>
Replaces all mess and conn port tags in the repy test files with the given
lists of mess and conn ports. Currently, to completely replace every port,
foundMessports and foundConnports must be of length at least 3. However,
if they are shorter, It will still replace as many as it can, though this
will leave some tests with invalid syntax as they still have some
unreplaced tags.
<Arguments>
foundMessports:
The list of port numbers that should be used to replace the <messport>
tags as shown:
<messport> => foundMessports[0]
<messport1> => foundMessports[1]
<messport2> => foundMessports[2]
If a foundMessports index as given above does not exist, then that tag
will just not get replaced.
foundConnports:
The list of port numbers that should be used to replace the <connport>
tags as shown:
<connport> => foundConnports[0]
<connport1> => foundConnports[1]
<connport2> => foundConnports[2]
If a foundConnports index as given above does not exist, then that tag
will just not get replaced.
<Side Effects>
Changes all of the repy unit tests to include actual port numbers as
possible.
<Returns>
None.
"""
for testfile in glob.glob("rs_*.py") + glob.glob("ut_*.py") + glob.glob("rn_*.py") + \
glob.glob("rz_*.py") + glob.glob("rb_*.py") + glob.glob("ru_*.py") + \
glob.glob("re_*.py") + glob.glob("rl_*.py") +glob.glob("s_*.py") + \
glob.glob("n_*.py") + glob.glob("z_*.py") + glob.glob("b_*.py") + \
glob.glob("u_*.py") + glob.glob("e_*.py") + glob.glob("l_*.py") + \
glob.glob('restrictions.*') + glob.glob("ut_*.mix"):
# read in the initial file
inFile = file(testfile, 'r')
filestring = inFile.read()
inFile.close()
# Replace the instances of messport that we can replace
if len(foundMessports) >= 1:
filestring = filestring.replace('<messport>', foundMessports[0])
if len(foundMessports) >= 2:
filestring = filestring.replace('<messport1>', foundMessports[1])
if len(foundMessports) >= 3:
filestring = filestring.replace('<messport2>', foundMessports[2])
# Replace the instances of connport that we can replace
if len(foundConnports) >= 1:
filestring = filestring.replace('<connport>', foundConnports[0])
if len(foundConnports) >= 2:
filestring = filestring.replace('<connport1>', foundConnports[1])
if len(foundConnports) >= 3:
filestring = filestring.replace('<connport2>', foundConnports[2])
# write out the file with our changes
outFile = file(testfile, 'w')
outFile.write(filestring)
outFile.close()
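# Example (sketch): a harness that has discovered open ports on a vessel could
# rewrite the tests like this; the port values below are illustrative only.
#
#   import testportfiller
#   testportfiller.replace_ports(['63100', '63101', '63102'],
#                                ['63110', '63111', '63112'])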
def main():
# If running separately, just put back in the values that were previously
# hardcoded.
replace_ports(['12345', '12346', '12347'], ['12345', '12346', '12347'])
if __name__ == '__main__':
main()
| SeattleTestbed/dist | testportfiller.py | Python | mit | 4,233 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-12-12 17:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("studies", "0040_add_scheduled_jobs")]
operations = [
migrations.AddField(
model_name="study", name="built", field=models.BooleanField(default=False)
)
]
| CenterForOpenScience/lookit-api | studies/migrations/0041_add_built_field.py | Python | apache-2.0 | 409 |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat API Server.
An OpenStack ReST API to Heat.
"""
import eventlet
eventlet.monkey_patch(os=False)
import sys
from oslo_config import cfg
import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
import six
from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging
from heat.common import profiler
from heat.common import wsgi
from heat import version
i18n.enable_lazy()
LOG = logging.getLogger('heat.api')
def main():
try:
logging.register_options(cfg.CONF)
cfg.CONF(project='heat', prog='heat-api',
version=version.version_info.version_string())
logging.setup(cfg.CONF, 'heat-api')
config.set_config_defaults()
messaging.setup()
app = config.load_paste_app()
port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'),
{'host': host, 'port': port})
profiler.setup('heat-api', host)
gmr.TextGuruMeditation.setup_autorun(version)
server = wsgi.Server('heat-api', cfg.CONF.heat_api)
server.start(app, default_port=port)
systemd.notify_once()
server.wait()
except RuntimeError as e:
msg = six.text_type(e)
sys.exit("ERROR: %s" % msg)
| jasondunsmore/heat | heat/cmd/api.py | Python | apache-2.0 | 2,024 |
import socket
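# Minimal UDP smoke test: send a single one-byte datagram (b's') to a listener
# assumed to be running on 127.0.0.1:17102. connect() on a SOCK_DGRAM socket
# only fixes the default destination; no handshake takes place.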
sockt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sockt.connect(('127.0.0.1', 17102))
sockt.send(b's')
sockt.close()
| haoozi/visulguide | dummytest.py | Python | gpl-2.0 | 139 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Abhijeet Kasurde <akasurde@redhat.com>
# Copyright: (c) 2018, Christian Kotte <christian.kotte@gmx.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_role_manager
short_description: Manage local roles on an ESXi host
description:
- This module can be used to manage local roles on an ESXi host.
version_added: 2.5
author:
- Abhijeet Kasurde (@Akasurde)
- Christian Kotte (@ckotte)
notes:
- Tested on ESXi 6.5
- Be sure that the ESXi user used for login has the appropriate rights to create / delete / edit roles
requirements:
- "python >= 2.6"
- PyVmomi
options:
local_role_name:
description:
- The local role name to be managed.
required: True
local_privilege_ids:
description:
- The list of privileges that role needs to have.
- Please see U(https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-ED56F3C4-77D0-49E3-88B6-B99B8B437B62.html)
default: []
state:
description:
- Indicate desired state of the role.
- If the role already exists when C(state=present), the role info is updated.
choices: ['present', 'absent']
default: present
force_remove:
description:
- If set to C(False) then prevents the role from being removed if any permissions are using it.
default: False
type: bool
action:
description:
- This parameter is only valid while updating an existing role with privileges.
- C(add) will add the privileges to the existing privilege list.
- C(remove) will remove the privileges from the existing privilege list.
- C(set) will replace the privileges of the existing privileges with user defined list of privileges.
default: set
choices: [ add, remove, set ]
version_added: 2.8
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add local role to ESXi
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
state: present
delegate_to: localhost
- name: Add local role with privileges to ESXi
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create', 'Folder.Delete']
state: present
delegate_to: localhost
- name: Remove local role from ESXi
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
state: absent
delegate_to: localhost
- name: Add a privilege to an existing local role
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create' ]
action: add
delegate_to: localhost
- name: Remove a privilege to an existing local role
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create' ]
action: remove
delegate_to: localhost
- name: Set a privilege to an existing local role
vmware_local_role_manager:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create' ]
action: set
delegate_to: localhost
'''
RETURN = r'''
role_name:
description: Name of local role
returned: always
type: str
role_id:
description: ESXi generated local role id
returned: always
type: int
privileges:
description: List of privileges
returned: always
type: list
privileges_previous:
description: List of privileges of role before the update
returned: on update
type: list
# NOTE: the following keys are deprecated from 2.11 onwards
local_role_name:
description: Name of local role
returned: always
type: str
new_privileges:
description: List of privileges
returned: always
type: list
old_privileges:
description: List of privileges of role before the update
returned: on update
type: list
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalRoleManager(PyVmomi):
"""Class to manage local roles"""
def __init__(self, module):
super(VMwareLocalRoleManager, self).__init__(module)
self.module = module
self.params = module.params
self.role_name = self.params['local_role_name']
self.state = self.params['state']
self.priv_ids = self.params['local_privilege_ids']
self.force = not self.params['force_remove']
self.current_role = None
self.action = self.params['action']
if self.content.authorizationManager is None:
self.module.fail_json(
msg="Failed to get local authorization manager settings.",
details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname']
)
def process_state(self):
"""Process the state of the local role"""
local_role_manager_states = {
'absent': {
'present': self.state_remove_role,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_role,
'absent': self.state_create_role,
}
}
try:
local_role_manager_states[self.state][self.check_local_role_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_role_manager_state(self):
"""Check local roles"""
auth_role = self.find_authorization_role()
if auth_role:
self.current_role = auth_role
return 'present'
return 'absent'
def find_authorization_role(self):
"""Find local role"""
desired_role = None
for role in self.content.authorizationManager.roleList:
if role.name == self.role_name:
desired_role = role
return desired_role
def state_create_role(self):
"""Create local role"""
role_id = None
results = dict()
results['role_name'] = self.role_name
results['privileges'] = self.priv_ids
# NOTE: the following code is deprecated from 2.11 onwards
results['local_role_name'] = self.role_name
results['new_privileges'] = self.priv_ids
if self.module.check_mode:
results['msg'] = "Role would be created"
else:
try:
role_id = self.content.authorizationManager.AddAuthorizationRole(
name=self.role_name,
privIds=self.priv_ids
)
results['role_id'] = role_id
results['msg'] = "Role created"
except vim.fault.AlreadyExists as already_exists:
self.module.fail_json(
msg="Failed to create role '%s' as the user specified role name already exists." %
self.role_name, details=already_exists.msg
)
except vim.fault.InvalidName as invalid_name:
self.module.fail_json(
msg="Failed to create a role %s as the user specified role name is empty" %
self.role_name, details=invalid_name.msg
)
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(
msg="Failed to create a role %s as the user specified privileges are unknown" %
                    self.role_name, details=invalid_argument.msg
)
self.module.exit_json(changed=True, result=results)
def state_remove_role(self):
"""Remove local role"""
results = dict()
results['role_name'] = self.role_name
results['role_id'] = self.current_role.roleId
# NOTE: the following code is deprecated from 2.11 onwards
results['local_role_name'] = self.role_name
if self.module.check_mode:
results['msg'] = "Role would be deleted"
else:
try:
self.content.authorizationManager.RemoveAuthorizationRole(
roleId=self.current_role.roleId,
failIfUsed=self.force
)
results['msg'] = "Role deleted"
except vim.fault.NotFound as not_found:
self.module.fail_json(
msg="Failed to remove a role %s as the user specified role name does not exist." %
self.role_name, details=not_found.msg
)
except vim.fault.RemoveFailed as remove_failed:
msg = "Failed to remove role '%s' as the user specified role name." % self.role_name
if self.force:
msg += " Use force_remove as True."
self.module.fail_json(msg=msg, details=remove_failed.msg)
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(
msg="Failed to remove a role %s as the user specified role is a system role" %
self.role_name, details=invalid_argument.msg
)
self.module.exit_json(changed=True, result=results)
def state_exit_unchanged(self):
"""Don't do anything"""
results = dict()
results['role_name'] = self.role_name
# NOTE: the following code is deprecated from 2.11 onwards
results['local_role_name'] = self.role_name
results['msg'] = "Role not present"
self.module.exit_json(changed=False, result=results)
def state_update_role(self):
"""Update local role"""
changed = False
changed_privileges = []
results = dict()
results['role_name'] = self.role_name
results['role_id'] = self.current_role.roleId
# NOTE: the following code is deprecated from 2.11 onwards
results['local_role_name'] = self.role_name
current_privileges = self.current_role.privilege
results['privileges'] = current_privileges
# NOTE: the following code is deprecated from 2.11 onwards
results['new_privileges'] = current_privileges
if self.action == 'add':
# Add to existing privileges
for priv in self.params['local_privilege_ids']:
if priv not in current_privileges:
changed_privileges.append(priv)
changed = True
if changed:
changed_privileges.extend(current_privileges)
elif self.action == 'set':
# Set given privileges
# Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read".
self.params['local_privilege_ids'].extend(['System.Anonymous', 'System.Read', 'System.View'])
changed_privileges = self.params['local_privilege_ids']
changes_applied = list(set(current_privileges) ^ set(changed_privileges))
if changes_applied:
changed = True
elif self.action == 'remove':
changed_privileges = list(current_privileges)
# Remove given privileges from existing privileges
for priv in self.params['local_privilege_ids']:
if priv in current_privileges:
changed = True
changed_privileges.remove(priv)
if changed:
results['privileges'] = changed_privileges
results['privileges_previous'] = current_privileges
# NOTE: the following code is deprecated from 2.11 onwards
results['new_privileges'] = changed_privileges
results['old_privileges'] = current_privileges
if self.module.check_mode:
results['msg'] = "Role privileges would be updated"
else:
try:
self.content.authorizationManager.UpdateAuthorizationRole(
roleId=self.current_role.roleId,
newName=self.current_role.name,
privIds=changed_privileges
)
results['msg'] = "Role privileges updated"
except vim.fault.NotFound as not_found:
self.module.fail_json(
msg="Failed to update role. Please check privileges provided for update", details=not_found.msg
)
except vim.fault.InvalidName as invalid_name:
self.module.fail_json(
msg="Failed to update role as role name is empty", details=invalid_name.msg
)
except vim.fault.AlreadyExists as already_exists:
self.module.fail_json(
msg="Failed to update role", details=already_exists.msg
)
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(
msg="Failed to update role as user specified role is system role which can not be changed",
details=invalid_argument.msg
)
except vim.fault.NoPermission as no_permission:
self.module.fail_json(
msg="Failed to update role as current session doesn't have any privilege to update specified role",
details=no_permission.msg
)
else:
results['msg'] = "Role priviledges are properly configured"
self.module.exit_json(changed=changed, result=results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_role_name=dict(required=True, type='str'),
local_privilege_ids=dict(default=[], type='list'),
force_remove=dict(default=False, type='bool'),
action=dict(type='str', default='set', choices=[
'add',
'set',
'remove',
]),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
vmware_local_role_manager = VMwareLocalRoleManager(module)
vmware_local_role_manager.process_state()
if __name__ == '__main__':
main()
| rosmo/ansible | lib/ansible/modules/cloud/vmware/vmware_local_role_manager.py | Python | gpl-3.0 | 15,582 |
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
import sys
__all__ = ['InceptionResNetV2', 'inceptionresnetv2']
pretrained_settings = {
'inceptionresnetv2': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
                              padding=padding, bias=False) # bias is redundant before BatchNorm
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_5b(nn.Module):
def __init__(self):
super(Mixed_5b, self).__init__()
self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(192, 48, kernel_size=1, stride=1),
BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(192, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(192, 64, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block35(nn.Module):
def __init__(self, scale=1.0):
super(Block35, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(320, 32, kernel_size=1, stride=1),
BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)
)
self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_6a(nn.Module):
def __init__(self):
super(Mixed_6a, self).__init__()
self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(320, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Block17(nn.Module):
def __init__(self, scale=1.0):
super(Block17, self).__init__()
self.scale = scale
self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 128, kernel_size=1, stride=1),
BasicConv2d(128, 160, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(160, 192, kernel_size=(7,1), stride=1, padding=(3,0))
)
self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
out = self.relu(out)
return out
class Mixed_7a(nn.Module):
def __init__(self):
super(Mixed_7a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 384, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=2)
)
self.branch2 = nn.Sequential(
BasicConv2d(1088, 256, kernel_size=1, stride=1),
BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),
BasicConv2d(288, 320, kernel_size=3, stride=2)
)
self.branch3 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Block8(nn.Module):
def __init__(self, scale=1.0, noReLU=False):
super(Block8, self).__init__()
self.scale = scale
self.noReLU = noReLU
self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(2080, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1,3), stride=1, padding=(0,1)),
BasicConv2d(224, 256, kernel_size=(3,1), stride=1, padding=(1,0))
)
self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
if not self.noReLU:
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
out = self.conv2d(out)
out = out * self.scale + x
if not self.noReLU:
out = self.relu(out)
return out
class InceptionResNetV2(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionResNetV2, self).__init__()
        # Special attributes
self.input_space = None
self.input_size = (299, 299, 3)
self.mean = None
self.std = None
# Modules
self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.maxpool_3a = nn.MaxPool2d(3, stride=2)
self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
self.maxpool_5a = nn.MaxPool2d(3, stride=2)
self.mixed_5b = Mixed_5b()
self.repeat = nn.Sequential(
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17),
Block35(scale=0.17)
)
self.mixed_6a = Mixed_6a()
self.repeat_1 = nn.Sequential(
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10),
Block17(scale=0.10)
)
self.mixed_7a = Mixed_7a()
self.repeat_2 = nn.Sequential(
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20),
Block8(scale=0.20)
)
self.block8 = Block8(noReLU=True)
self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)
self.last_linear = nn.Linear(1536, num_classes)
def features(self, input):
x = self.conv2d_1a(input)
x = self.conv2d_2a(x)
x = self.conv2d_2b(x)
x = self.maxpool_3a(x)
x = self.conv2d_3b(x)
x = self.conv2d_4a(x)
x = self.maxpool_5a(x)
x = self.mixed_5b(x)
x = self.repeat(x)
x = self.mixed_6a(x)
x = self.repeat_1(x)
x = self.mixed_7a(x)
x = self.repeat_2(x)
x = self.block8(x)
x = self.conv2d_7b(x)
return x
def logits(self, features):
x = self.avgpool_1a(features)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
def inceptionresnetv2(num_classes=1000, pretrained='imagenet'):
r"""InceptionResNetV2 model architecture from the
`"InceptionV4, Inception-ResNet..." <https://arxiv.org/abs/1602.07261>`_ paper.
"""
if pretrained:
settings = pretrained_settings['inceptionresnetv2'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = InceptionResNetV2(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(1536, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = InceptionResNetV2(num_classes=num_classes)
return model
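# --- Editor's note: hedged usage sketch, not part of the original file. ---
# It shows how the factory above is typically consumed for inference; the
# helper name and the dummy input are illustrative assumptions.
def _example_inference_sketch():
    # build an untrained 1000-class model (pretrained='imagenet' would load weights instead)
    model = inceptionresnetv2(num_classes=1000, pretrained=None)
    model.eval()
    batch = torch.randn(1, 3, 299, 299)  # matches the expected input_size (299, 299, 3)
    with torch.no_grad():
        logits = model(batch)  # equivalent to model.logits(model.features(batch))
    return logits.shape  # torch.Size([1, 1000])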
'''
TEST
Run this code with:
```
cd $HOME/pretrained-models.pytorch
python -m pretrainedmodels.inceptionresnetv2
```
'''
if __name__ == '__main__':
assert inceptionresnetv2(num_classes=10, pretrained=None)
print('success')
assert inceptionresnetv2(num_classes=1000, pretrained='imagenet')
print('success')
assert inceptionresnetv2(num_classes=1001, pretrained='imagenet+background')
print('success')
    # fail: the 'imagenet' settings require num_classes == 1000, so this call raises an AssertionError
assert inceptionresnetv2(num_classes=1001, pretrained='imagenet') | Cadene/pretrained-models.pytorch | pretrainedmodels/models/inceptionresnetv2.py | Python | bsd-3-clause | 12,055 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test example app."""
import os
import signal
import subprocess
import time
from os.path import abspath, dirname, join
import pytest
@pytest.fixture
def example_app():
"""Example app fixture."""
current_dir = os.getcwd()
project_dir = dirname(dirname(abspath(__file__)))
exampleapp_dir = join(project_dir, 'examples')
os.chdir(exampleapp_dir)
# Setup example
cmd = './app-setup.sh'
exit_status = subprocess.call(cmd, shell=True)
assert exit_status == 0
# Starting example web app
cmd = 'FLASK_APP=app.py FLASK_DEBUG=1 flask run'
webapp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
preexec_fn=os.setsid, shell=True)
time.sleep(5)
yield webapp
# Stop server
os.killpg(webapp.pid, signal.SIGTERM)
# Tear down example app
cmd = './app-teardown.sh'
subprocess.call(cmd, shell=True)
# Return to the original directory
os.chdir(current_dir)
def test_example_app_record_creation(example_app):
"""Test example app record creation."""
# Testing record creation
cmd = """echo '{"title": "Test title"}' | """
cmd += """FLASK_APP=app.py FLASK_DEBUG=1 """
cmd += """flask records create -i deadbeef-9fe4-43d3-a08f-38c2b309afba"""
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
assert 'deadbeef-9fe4-43d3-a08f-38c2b309afba' in str(output)
# Testing record retrieval via web
cmd = 'curl http://127.0.0.1:5000/deadbeef-9fe4-43d3-a08f-38c2b309afba'
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
assert 'Test title' in str(output)
# Testing record retrieval via shell
cmd = """echo "from invenio_records.api import Record;"""
cmd += """Record.get_record('deadbeef-9fe4-43d3-a08f-38c2b309afba')" | """
cmd += """FLASK_APP=app.py FLASK_DEBUG=1 """
cmd += """flask shell"""
output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
assert 'Test title' in str(output)
| tiborsimko/invenio-records | tests/test_example_app.py | Python | mit | 2,263 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mezzanine.utils.tests import TestCase
from organization.job.models import JobOffer, Candidacy, JobResponse
# from organization.job.admin import *
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core import urlresolvers
from django.contrib.auth import get_user_model as User
class URLTests(TestCase):
def setUp(self):
super(URLTests, self).setUp()
self.job_offer = JobOffer.objects.create(
title="django dev",
email="testing@email.fr",
type="internship",
content="python"
)
self.candidacy = Candidacy.objects.create(
title="research",
text_button_external="more"
)
def test_job_offer_detail_url(self):
response = self.client.get('/job-offer/' + self.job_offer.slug + "/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "python")
self.assertTemplateUsed(response, "job/job_offer_detail.html")
def test_basic_job_offer_url(self):
response = self.client.get('/job-offer/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "django-dev")
self.assertTemplateUsed(response, "job/job_offer_list.html")
def test_basic_candidacies_url(self):
response = self.client.get('/candidacies/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "research")
self.assertTemplateUsed(response, "job/candidacy_list.html")
def test_candidacies_autocomplete(self):
response = self.client.get('/candidacy-autocomplete/')
self.assertEqual(response.status_code, 200)
class JobOfferTests(TestCase):
def setUp(self):
super(JobOfferTests, self).setUp()
app = "organization_job"
model = "joboffer"
self.url = urlresolvers.reverse("admin:%s_%s_add" % (app, model))
self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
self.job_offer = JobOffer.objects.create(
email="test@test.fr",
type="internship"
)
self.job_response = JobResponse.objects.create(
first_name="jean",
last_name="dupont",
email="jean@dupont.fr",
message="I want this job",
curriculum_vitae=self.file,
cover_letter=self.file,
job_offer=self.job_offer
)
def test_job_offer_display_for_everyone(self):
self.client.logout()
response = self.client.get(self.job_offer.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
self.client.login(username='user', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
self.client.login(username='test', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
def test_job_offer_admin(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='user', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='test', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_job_offer_admin_creation(self):
self.client.login(username='test', password='test')
nmb = JobOffer.objects.count()
response = self.client.post(
self.url,
{
"title": 'title',
"status": 2,
"email": 'email@email.fr',
"type": 'internship',
'job_response-INITIAL_FORMS': '0',
'job_response-TOTAL_FORMS': '1'
}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(nmb+1, JobOffer.objects.count())
def test_job_offer_admin_edition(self):
self.client.logout()
response = self.client.get(self.job_offer.get_absolute_url())
self.assertNotContains(response, "editable")
self.client.login(username='user', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertNotContains(response, "editable")
self.client.login(username='test', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertContains(response, "editable")
def test_job_offer_creation(self):
self.assertTrue(isinstance(self.job_offer, JobOffer))
self.assertEqual(self.job_offer.email, "test@test.fr")
self.assertEqual(self.job_offer.type, "internship")
def test_job_offer_retrieval(self):
self.assertTrue(self.job_offer in JobOffer.objects.all())
self.assertTrue(self.job_offer in JobOffer.objects.filter(email="test@test.fr"))
self.assertTrue(self.job_offer in JobOffer.objects.filter(type="internship"))
def test_job_offer_update(self):
self.job_offer.email = "test@django.fr"
self.assertEqual(1, JobOffer.objects.filter(email="test@test.fr").count())
self.assertEqual(0, JobOffer.objects.filter(email="test@django.fr").count())
self.job_offer.save()
self.assertEqual(0, JobOffer.objects.filter(email="test@test.fr").count())
self.assertEqual(1, JobOffer.objects.filter(email="test@django.fr").count())
class JobResponseTests(TestCase):
def setUp(self):
super(JobResponseTests, self).setUp()
app = "organization_job"
model = "joboffer"
self.user = User().objects.create_user(username="user", password='test')
self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
self.job_offer = JobOffer.objects.create(
email="test@test.fr",
type="internship"
)
self.job_response = JobResponse.objects.create(
first_name="jean",
last_name="dupont",
email="jean@dupont.fr",
message="I want this job",
curriculum_vitae=self.file,
cover_letter=self.file,
job_offer=self.job_offer
)
self.url = urlresolvers.reverse(
"admin:%s_%s_change" % (app, model),
args=(self.job_offer.id,)
)
def test_job_response_fk_deletion(self):
self.job_offer.delete()
self.assertTrue(
self.job_response in JobResponse.objects.filter(
job_offer__isnull=True
)
)
def test_job_response_not_display_for_everyone(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='user', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='test', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "admin/change_form.html")
self.assertContains(response, "jean@dupont.fr")
def test_job_response_creation(self):
self.assertTrue(isinstance(self.job_response, JobResponse))
self.assertEqual(self.job_response.first_name, "jean")
self.assertEqual(self.job_response.last_name, "dupont")
self.assertEqual(self.job_response.email, "jean@dupont.fr")
self.assertEqual(self.job_response.message, "I want this job")
self.assertEqual(self.job_response.job_offer, self.job_offer)
def test_job_response_retrieval(self):
self.assertTrue(self.job_response in JobResponse.objects.all())
self.assertTrue(
self.job_response in JobResponse.objects.filter(first_name="jean")
)
self.assertTrue(
self.job_response in JobResponse.objects.filter(last_name="dupont")
)
self.assertTrue(
self.job_response in JobResponse.objects.filter(email="jean@dupont.fr")
)
self.assertTrue(
self.job_response in JobResponse.objects.filter(message="I want this job")
)
self.assertTrue(
self.job_response in JobResponse.objects.filter(job_offer=self.job_offer)
)
def test_job_response_update(self):
self.job_response.message = "I don't want this job"
self.assertEqual(
1,
JobResponse.objects.filter(message="I want this job").count()
)
self.assertEqual(
0,
JobResponse.objects.filter(message="I don't want this job").count()
)
self.job_response.save()
self.assertEqual(
0,
JobResponse.objects.filter(message="I want this job").count()
)
self.assertEqual(
1,
JobResponse.objects.filter(message="I don't want this job").count()
)
def test_job_response_deletion(self):
self.job_response.delete()
self.assertFalse(self.job_response in JobResponse.objects.all())
| Ircam-Web/mezzanine-organization | organization/job/test/tests.py | Python | agpl-3.0 | 10,376 |
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The solver module defines solvers for problems of the kind ``res = 0`` or
``∂inertia/∂t + res = 0``, where ``res`` is a
:class:`nutils.sample.Integral`. To demonstrate this consider the following
setup:
>>> from nutils import mesh, function, solver
>>> ns = function.Namespace()
>>> domain, ns.x = mesh.rectilinear([4,4])
>>> ns.basis = domain.basis('spline', degree=2)
>>> cons = domain.boundary['left,top'].project(0, onto=ns.basis, geometry=ns.x, ischeme='gauss4')
project > constrained 11/36 dofs, error 0.00e+00/area
>>> ns.u = 'basis_n ?lhs_n'
Function ``u`` represents an element from the discrete space but cannot be
evaluated yet, as we have not yet established values for ``?lhs``. It can,
however, be used to construct a residual functional ``res``. Aiming to solve
the Poisson problem ``u_,kk = f`` we define the residual functional ``res = v,k
u,k + v f`` and solve for ``res == 0`` using ``solve_linear``:
>>> res = domain.integral('(basis_n,i u_,i + basis_n) d:x' @ ns, degree=2)
>>> lhs = solver.solve_linear('lhs', residual=res, constrain=cons)
solve > solving 25 dof system to machine precision using arnoldi solver
solve > solver returned with residual ...
The coefficients ``lhs`` represent the solution to the Poisson problem.
In addition to ``solve_linear`` the solver module defines ``newton`` and
``pseudotime`` for solving nonlinear problems, as well as ``impliciteuler`` for
time dependent problems.
"""
from . import function, evaluable, cache, numeric, sample, types, util, matrix, warnings, sparse
import abc, numpy, itertools, functools, numbers, collections, math, treelog as log
## TYPE COERCION
argdict = types.frozendict[types.strictstr,types.frozenarray]
def integraltuple(arg):
if isinstance(arg, sample.Integral):
return arg,
for obj in arg:
if not isinstance(obj, sample.Integral):
raise TypeError('expected integral, got {}'.format(type(obj)))
return tuple(arg)
def optionalintegraltuple(arg):
if isinstance(arg, sample.Integral):
return arg,
for obj in arg:
if obj is not None and not isinstance(obj, sample.Integral):
raise TypeError('expected integral or None, got {}'.format(type(obj)))
return tuple(arg)
def arrayordict(arg):
return types.frozenarray(arg) if numeric.isarray(arg) else argdict(arg)
## DECORATORS
def single_or_multiple(f):
'''add support for legacy string target + array return value'''
@functools.wraps(f)
def wrapper(target, *args, **kwargs):
single = isinstance(target, str)
retval = f(tuple([target] if single else target), *args, **kwargs)
return retval[target] if single else retval
return wrapper
class iterable:
'''iterable equivalent of single_or_multiple'''
@classmethod
def single_or_multiple(cls, wrapped):
return type(wrapped.__name__, (cls,), dict(__wrapped__=wrapped, __doc__=cls.__doc__))
def __init__(self, target, *args, **kwargs):
self._target = target
self._single = isinstance(target, str)
self._wrapped = self.__wrapped__(tuple([target] if self._single else target), *args, **kwargs)
@property
def __nutils_hash__(self):
return types.nutils_hash(self._wrapped)
def __iter__(self):
return (retval[self._target] for retval in self._wrapped) if self._single else iter(self._wrapped)
class withsolve(iterable):
'''add a .solve method to (lhs,resnorm) iterators'''
def __iter__(self):
return ((retval[self._target], info) for retval, info in self._wrapped) if self._single else iter(self._wrapped)
def solve(self, tol=0., maxiter=float('inf')):
'''execute nonlinear solver, return lhs
Iterates over nonlinear solver until tolerance is reached. Example::
lhs = newton(target, residual).solve(tol=1e-5)
Parameters
----------
tol : :class:`float`
Target residual norm
maxiter : :class:`int`
Maximum number of iterations
Returns
-------
:class:`numpy.ndarray`
Coefficient vector that corresponds to a smaller than ``tol`` residual.
'''
lhs, info = self.solve_withinfo(tol=tol, maxiter=maxiter)
return lhs
@types.apply_annotations
@cache.function
def solve_withinfo(self, tol, maxiter=float('inf')):
'''execute nonlinear solver, return lhs and info
Like :func:`solve`, but return a 2-tuple of the solution and the
corresponding info object which holds information about the final residual
norm and other generator-dependent information.
'''
with log.iter.wrap(_progress(self.__class__.__name__, tol), self) as items:
i = 0
for lhs, info in items:
if info.resnorm <= tol:
break
if i > maxiter:
raise SolverError('failed to reach target tolerance')
i += 1
log.info('converged in {} steps to residual {:.1e}'.format(i, info.resnorm))
return lhs, info
## EXCEPTIONS
class SolverError(Exception): pass
## LINE SEARCH
class LineSearch(types.Immutable):
'''
Line search abstraction for gradient based optimization.
A line search object is a callable that takes four arguments: the current
residual and directional derivative, and the candidate residual and
directional derivative, with derivatives normalized to unit length; and
returns the optimal scaling and a boolean flag that marks whether the
candidate should be accepted.
'''
@abc.abstractmethod
def __call__(self, res0, dres0, res1, dres1):
raise NotImplementedError
class NormBased(LineSearch):
'''
Line search abstraction for Newton-like iterations, computing relaxation
values that correspond to greatest reduction of the residual norm.
Parameters
----------
minscale : :class:`float`
Minimum relaxation scaling per update. Must be strictly greater than
zero.
acceptscale : :class:`float`
    Relaxation scaling that is considered close enough to optimality to
    accept the current Newton update. Must lie between minscale and one.
maxscale : :class:`float`
Maximum relaxation scaling per update. Must be greater than one, and
therefore always coincides with acceptance, determining how fast
relaxation values rebound to one if not bounded by optimality.
'''
@types.apply_annotations
def __init__(self, minscale:float=.01, acceptscale:float=2/3, maxscale:float=2.):
assert 0 < minscale < acceptscale < 1 < maxscale
self.minscale = minscale
self.acceptscale = acceptscale
self.maxscale = maxscale
@classmethod
def legacy(cls, kwargs):
minscale, acceptscale = kwargs.pop('searchrange', (.01, 2/3))
maxscale = kwargs.pop('rebound', 2.)
return cls(minscale=minscale, acceptscale=acceptscale, maxscale=maxscale)
def __call__(self, res0, dres0, res1, dres1):
if not numpy.isfinite(res1).all():
log.info('non-finite residual')
return self.minscale, False
# To determine optimal relaxation we minimize a polynomial estimation for
# the residual norm: P(x) = p0 + q0 x + c x^2 + d x^3
p0 = res0@res0
q0 = 2*res0@dres0
p1 = res1@res1
q1 = 2*res1@dres1
if q0 >= 0:
raise SolverError('search vector does not reduce residual')
c = math.fsum([-3*p0, 3*p1, -2*q0, -q1])
d = math.fsum([2*p0, -2*p1, q0, q1])
# To minimize P we need to determine the roots for P'(x) = q0 + 2 c x + 3 d x^2
# For numerical stability we use Citardauq's formula: x = -q0 / (c +/- sqrt(D)),
# with D the discriminant
D = c**2 - 3 * q0 * d
# If D <= 0 we have at most one duplicate root, which we ignore. For D > 0,
# taking into account that q0 < 0, we distinguish three situations:
# - d > 0 => sqrt(D) > abs(c): one negative, one positive root
# - d = 0 => sqrt(D) = abs(c): one negative root
# - d < 0 => sqrt(D) < abs(c): two roots of same sign as c
scale = -q0 / (c + math.sqrt(D)) if D > 0 and (c > 0 or d > 0) else math.inf
if scale >= 1 and p1 > p0: # this should not happen, but just in case
log.info('failed to estimate scale factor')
return self.minscale, False
log.info('estimated residual minimum at {:.0f}% of update vector'.format(scale*100))
return min(max(scale, self.minscale), self.maxscale), scale >= self.acceptscale and p1 < p0
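# --- Editor's note: hedged illustration, not part of the original module. ---
# It demonstrates the LineSearch contract documented above: the callable takes
# the current and candidate residual vectors together with the directional
# derivatives along the (relaxed) update, and returns a scale factor plus an
# acceptance flag. All numbers are purely illustrative.
def _linesearch_contract_sketch():
  ls = NormBased()
  res0 = numpy.array([1., -2.])   # residual before the update
  dres0 = numpy.array([-1., 2.])  # jac @ dlhs; equals -res0 for an exact Newton direction
  res1 = numpy.array([.1, -.2])   # residual after applying the full update
  dres1 = numpy.array([-1., 2.])  # new jacobian times the same update (illustrative)
  scale, accept = ls(res0, dres0, res1, dres1)
  return scale, accept            # here roughly (1.2, True): the update is accepted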
class MedianBased(LineSearch, version=1):
'''
Line search abstraction for Newton-like iterations, computing relaxation
values such that half (or any other configurable quantile) of the residual
  vector has its optimal reduction beyond it. Unlike the :class:`NormBased`
approach this is invariant to constant scaling of the residual items.
Parameters
----------
minscale : :class:`float`
Minimum relaxation scaling per update. Must be strictly greater than
zero.
acceptscale : :class:`float`
    Relaxation scaling that is considered close enough to optimality to
    accept the current Newton update. Must lie between minscale and one.
maxscale : :class:`float`
Maximum relaxation scaling per update. Must be greater than one, and
therefore always coincides with acceptance, determining how fast
relaxation values rebound to one if not bounded by optimality.
quantile : :class:`float`
Fraction of the residual vector that is aimed to have its optimal
reduction at a smaller relaxation value. The default value of one half
corresponds to the median. A value close to zero means tighter control,
resulting in strong relaxation.
'''
@types.apply_annotations
def __init__(self, minscale:float=.01, acceptscale:float=2/3, maxscale:float=2., quantile:float=.5):
assert 0 < minscale < acceptscale < 1 < maxscale
assert 0 < quantile < 1
self.minscale = minscale
self.acceptscale = acceptscale
self.maxscale = maxscale
self.quantile = quantile
def __call__(self, res0, dres0, res1, dres1):
if not numpy.isfinite(res1).all():
log.info('non-finite residual')
return self.minscale, False
# To determine optimal relaxation we minimize a polynomial estimation for
# the squared residual: P(x) = p0 + q0 x + c x^2 + d x^3
dp = res1**2 - res0**2
q0 = 2*res0*dres0
q1 = 2*res1*dres1
mask = q0 <= 0 # ideally this mask is all true, but solver inaccuracies can result in some positive slopes
n = round(len(res0)*self.quantile) - (~mask).sum()
if n < 0:
raise SolverError('search vector fails to reduce more than {}-quantile of residual vector'.format(self.quantile))
c = 3*dp - 2*q0 - q1
d = -2*dp + q0 + q1
D = c**2 - 3 * q0 * d
mask &= D > 0
numer = -q0[mask]
denom = c[mask] + numpy.sqrt(D[mask])
mask = denom > 0
if n < mask.sum():
scales = numer[mask] / denom[mask]
scales.sort()
scale = scales[n]
else:
scale = numpy.inf
log.info('estimated {}-quantile at {:.0f}% of update vector'.format(self.quantile, scale*100))
return min(max(scale, self.minscale), self.maxscale), scale >= self.acceptscale
## SOLVERS
@single_or_multiple
@types.apply_annotations
@cache.function
def solve_linear(target, residual:integraltuple, *, constrain:arrayordict=None, lhs0:types.frozenarray[types.strictfloat]=None, arguments:argdict={}, **kwargs):
'''solve linear problem
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
residual : :class:`nutils.sample.Integral`
Residual integral, depends on ``target``
constrain : :class:`numpy.ndarray` with dtype :class:`float`
Defines the fixed entries of the coefficient vector
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
Returns
-------
:class:`numpy.ndarray`
Array of ``target`` values for which ``residual == 0``'''
solveargs = _strip(kwargs, 'lin')
if kwargs:
raise TypeError('unexpected keyword arguments: {}'.format(', '.join(kwargs)))
lhs0, constrain = _parse_lhs_cons(lhs0, constrain, target, _argshapes(residual), arguments)
jacobian = _derivative(residual, target)
if any(jac.contains(t) for t in target for jac in jacobian):
raise SolverError('problem is not linear')
lhs, vlhs = _redict(lhs0, target)
mask, vmask = _invert(constrain, target)
res, jac = _integrate_blocks(residual, jacobian, arguments=lhs, mask=mask)
vlhs[vmask] -= jac.solve(res, **solveargs)
return lhs
@withsolve.single_or_multiple
class newton(cache.Recursion, length=1):
'''iteratively solve nonlinear problem by gradient descent
Generates targets such that residual approaches 0 using Newton procedure with
line search based on the residual norm. Suitable to be used inside ``solve``.
An optimal relaxation value is computed based on the following cubic
assumption::
|res(lhs + r * dlhs)|^2 = A + B * r + C * r^2 + D * r^3
where ``A``, ``B``, ``C`` and ``D`` are determined based on the current
  residual and tangent, the new residual, and the new tangent. If the optimal
  relaxation value is found to be close to 1 then the Newton update is accepted.
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
residual : :class:`nutils.sample.Integral`
lhs0 : :class:`numpy.ndarray`
Coefficient vector, starting point of the iterative procedure.
relax0 : :class:`float`
Initial relaxation value.
constrain : :class:`numpy.ndarray` with dtype :class:`bool` or :class:`float`
Equal length to ``lhs0``, masks the free vector entries as ``False``
(boolean) or NaN (float). In the remaining positions the values of
``lhs0`` are returned unchanged (boolean) or overruled by the values in
`constrain` (float).
linesearch : :class:`nutils.solver.LineSearch`
Callable that defines relaxation logic.
failrelax : :class:`float`
Fail with exception if relaxation reaches this lower limit.
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
Yields
------
:class:`numpy.ndarray`
Coefficient vector that approximates residual==0 with increasing accuracy
'''
@types.apply_annotations
def __init__(self, target, residual:integraltuple, jacobian:integraltuple=None, lhs0:types.frozenarray[types.strictfloat]=None, relax0:float=1., constrain:arrayordict=None, linesearch=None, failrelax:types.strictfloat=1e-6, arguments:argdict={}, **kwargs):
super().__init__()
self.target = target
self.residual = residual
self.jacobian = _derivative(residual, target, jacobian)
self.lhs0, self.constrain = _parse_lhs_cons(lhs0, constrain, target, _argshapes(residual), arguments)
self.relax0 = relax0
self.linesearch = linesearch or NormBased.legacy(kwargs)
self.failrelax = failrelax
self.solveargs = _strip(kwargs, 'lin')
if kwargs:
raise TypeError('unexpected keyword arguments: {}'.format(', '.join(kwargs)))
self.solveargs.setdefault('rtol', 1e-3)
def _eval(self, lhs, mask):
return _integrate_blocks(self.residual, self.jacobian, arguments=lhs, mask=mask)
def resume(self, history):
mask, vmask = _invert(self.constrain, self.target)
if history:
lhs, info = history[-1]
lhs, vlhs = _redict(lhs, self.target)
res, jac = self._eval(lhs, mask)
assert numpy.linalg.norm(res) == info.resnorm
relax = info.relax
else:
lhs, vlhs = _redict(self.lhs0, self.target)
res, jac = self._eval(lhs, mask)
relax = self.relax0
yield lhs, types.attributes(resnorm=numpy.linalg.norm(res), relax=relax)
while True:
dlhs = -jac.solve_leniently(res, **self.solveargs) # compute new search vector
res0 = res
dres = jac@dlhs # == -res if dlhs was solved to infinite precision
vlhs[vmask] += relax * dlhs
res, jac = self._eval(lhs, mask)
scale, accept = self.linesearch(res0, relax*dres, res, relax*(jac@dlhs))
while not accept: # line search
assert scale < 1
oldrelax = relax
relax *= scale
if relax <= self.failrelax:
raise SolverError('stuck in local minimum')
vlhs[vmask] += (relax - oldrelax) * dlhs
res, jac = self._eval(lhs, mask)
scale, accept = self.linesearch(res0, relax*dres, res, relax*(jac@dlhs))
log.info('update accepted at relaxation', round(relax, 5))
relax = min(relax * scale, 1)
yield lhs, types.attributes(resnorm=numpy.linalg.norm(res), relax=relax)
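# --- Editor's note: hedged usage sketch, not part of the original module. ---
# `res` is any (possibly nonlinear) sample.Integral depending on '?lhs' and
# `cons` a constraints array or dict, both assumed to be set up as in the
# module docstring; tolerances are illustrative.
def _newton_usage_sketch(res, cons):
  # one-shot form: iterate until the residual norm drops below the tolerance
  lhs = newton('lhs', residual=res, constrain=cons).solve(tol=1e-10)
  # iterator form: exposes intermediate iterates and their residual norms
  for lhs, info in newton('lhs', residual=res, constrain=cons):
    if info.resnorm < 1e-10:
      break
  return lhs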
@withsolve.single_or_multiple
class minimize(cache.Recursion, length=1, version=3):
'''iteratively minimize nonlinear functional by gradient descent
Generates targets such that residual approaches 0 using Newton procedure with
line search based on the energy. Suitable to be used inside ``solve``.
An optimal relaxation value is computed based on the following assumption::
energy(lhs + r * dlhs) = A + B * r + C * r^2 + D * r^3 + E * r^4 + F * r^5
where ``A``, ``B``, ``C``, ``D``, ``E`` and ``F`` are determined based on the
  current and new energy, residual and tangent. If the optimal relaxation value
  is found to be close to 1 then the Newton update is accepted.
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
residual : :class:`nutils.sample.Integral`
lhs0 : :class:`numpy.ndarray`
Coefficient vector, starting point of the iterative procedure.
constrain : :class:`numpy.ndarray` with dtype :class:`bool` or :class:`float`
Equal length to ``lhs0``, masks the free vector entries as ``False``
(boolean) or NaN (float). In the remaining positions the values of
``lhs0`` are returned unchanged (boolean) or overruled by the values in
`constrain` (float).
rampup : :class:`float`
Value to increase the relaxation power by in case energy is decreasing.
rampdown : :class:`float`
Value to decrease the relaxation power by in case energy is increasing.
failrelax : :class:`float`
Fail with exception if relaxation reaches this lower limit.
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
Yields
------
:class:`numpy.ndarray`
Coefficient vector that approximates residual==0 with increasing accuracy
'''
@types.apply_annotations
def __init__(self, target, energy:sample.strictintegral, lhs0:types.frozenarray[types.strictfloat]=None, constrain:arrayordict=None, rampup:types.strictfloat=.5, rampdown:types.strictfloat=-1., failrelax:types.strictfloat=-10., arguments:argdict={}, **kwargs):
super().__init__()
if energy.shape != ():
raise ValueError('`energy` should be scalar')
self.target = target
self.energy = energy
self.residual = [energy.derivative(target) for target in self.target]
self.jacobian = _derivative(self.residual, target)
self.lhs0, self.constrain = _parse_lhs_cons(lhs0, constrain, target, energy.argshapes, arguments)
self.rampup = rampup
self.rampdown = rampdown
self.failrelax = failrelax
self.solveargs = _strip(kwargs, 'lin')
if kwargs:
raise TypeError('unexpected keyword arguments: {}'.format(', '.join(kwargs)))
def _eval(self, lhs, mask):
return _integrate_blocks(self.energy, self.residual, self.jacobian, arguments=lhs, mask=mask)
def resume(self, history):
mask, vmask = _invert(self.constrain, self.target)
if history:
lhs, info = history[-1]
lhs, vlhs = _redict(lhs, self.target)
nrg, res, jac = self._eval(lhs, mask)
assert nrg == info.energy
assert numpy.linalg.norm(res) == info.resnorm
relax = info.relax
else:
lhs, vlhs = _redict(self.lhs0, self.target)
nrg, res, jac = self._eval(lhs, mask)
relax = 0
yield lhs, types.attributes(resnorm=numpy.linalg.norm(res), energy=nrg, relax=relax)
while True:
nrg0 = nrg
dlhs = -jac.solve_leniently(res, **self.solveargs)
vlhs[vmask] += dlhs # baseline: vanilla Newton
# compute first two ritz values to determine approximate path of steepest descent
dlhsnorm = numpy.linalg.norm(dlhs)
k0 = dlhs / dlhsnorm
k1 = -res / dlhsnorm # = jac @ k0
a = k1 @ k0
k1 -= k0 * a # orthogonalize
c = numpy.linalg.norm(k1)
k1 /= c # normalize
b = k1 @ (jac @ k1)
# at this point k0 and k1 are orthonormal, and [k0 k1]^T jac [k0 k1] = [a c; c b]
D = numpy.hypot(b-a, 2*c)
L = numpy.array([a+b-D, a+b+D]) / 2 # 2nd order ritz values: eigenvalues of [a c; c b]
v0, v1 = res + dlhs * L[:,numpy.newaxis]
V = numpy.array([v1, -v0]).T / D # ritz vectors times dlhs -- note: V.dot(L) = -res, V.sum() = dlhs
log.info('spectrum: {:.1e}..{:.1e} ({}definite)'.format(*L, 'positive ' if L[0] > 0 else 'negative ' if L[-1] < 0 else 'in'))
eL = 0
for irelax in itertools.count(): # line search along steepest descent curve
r = numpy.exp(relax - numpy.log(D)) # = exp(relax) / D
eL0 = eL
eL = numpy.exp(-r*L)
vlhs[vmask] -= V.dot(eL - eL0)
nrg, res, jac = self._eval(lhs, mask)
slope = res.dot(V.dot(eL*L))
log.info('energy {:+.2e} / e{:+.1f} and {}creasing'.format(nrg - nrg0, relax, 'in' if slope > 0 else 'de'))
if numpy.isfinite(nrg) and numpy.isfinite(res).all() and nrg <= nrg0 and slope <= 0:
relax += self.rampup
break
relax += self.rampdown
if relax <= self.failrelax:
raise SolverError('stuck in local minimum')
yield lhs, types.attributes(resnorm=numpy.linalg.norm(res), energy=nrg, relax=relax)
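# --- Editor's note: hedged usage sketch, not part of the original module. ---
# `nrg` is assumed to be a scalar sample.Integral (an energy functional)
# depending on '?lhs'; the tolerance is illustrative.
def _minimize_usage_sketch(nrg, cons):
  return minimize('lhs', energy=nrg, constrain=cons).solve(tol=1e-10)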
@withsolve.single_or_multiple
class pseudotime(cache.Recursion, length=1):
'''iteratively solve nonlinear problem by pseudo time stepping
Generates targets such that residual approaches 0 using hybrid of Newton and
time stepping. Requires an inertia term and initial timestep. Suitable to be
used inside ``solve``.
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
residual : :class:`nutils.sample.Integral`
inertia : :class:`nutils.sample.Integral`
timestep : :class:`float`
Initial time step, will scale up as residual decreases
lhs0 : :class:`numpy.ndarray`
Coefficient vector, starting point of the iterative procedure.
constrain : :class:`numpy.ndarray` with dtype :class:`bool` or :class:`float`
Equal length to ``lhs0``, masks the free vector entries as ``False``
(boolean) or NaN (float). In the remaining positions the values of
``lhs0`` are returned unchanged (boolean) or overruled by the values in
`constrain` (float).
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
Yields
------
:class:`numpy.ndarray` with dtype :class:`float`
Tuple of coefficient vector and residual norm
'''
@types.apply_annotations
def __init__(self, target, residual:integraltuple, inertia:optionalintegraltuple, timestep:types.strictfloat, lhs0:types.frozenarray[types.strictfloat]=None, constrain:arrayordict=None, arguments:argdict={}, **kwargs):
super().__init__()
if target in arguments:
raise ValueError('`target` should not be defined in `arguments`')
if len(residual) != len(inertia):
      raise Exception('length of residual and inertia do not match')
for inert, res in zip(inertia, residual):
if inert and inert.shape != res.shape:
raise ValueError('expected `inertia` with shape {} but got {}'.format(res.shape, inert.shape))
self.target = target
self.timesteptarget = '_pseudotime_timestep'
dt = evaluable.Argument(self.timesteptarget, ())
self.residuals = residual
self.jacobians = _derivative([res + sample.Integral({smp: func/dt for smp, func in inert._integrands.items()} if inert else {}, shape=res.shape)
for res, inert in zip(residual, inertia)], target)
self.lhs0, self.constrain = _parse_lhs_cons(lhs0, constrain, target, _argshapes(residual+inertia), arguments)
self.timestep = timestep
self.solveargs = _strip(kwargs, 'lin')
if kwargs:
raise TypeError('unexpected keyword arguments: {}'.format(', '.join(kwargs)))
self.solveargs.setdefault('rtol', 1e-3)
def _eval(self, lhs, mask, timestep):
return _integrate_blocks(self.residuals, self.jacobians, arguments=dict({self.timesteptarget: timestep}, **lhs), mask=mask)
def resume(self, history):
mask, vmask = _invert(self.constrain, self.target)
if history:
lhs, info = history[-1]
lhs, vlhs = _redict(lhs, self.target)
resnorm0 = info.resnorm0
timestep = info.timestep
res, jac = self._eval(lhs, mask, timestep)
resnorm = numpy.linalg.norm(res)
assert resnorm == info.resnorm
else:
lhs, vlhs = _redict(self.lhs0, self.target)
timestep = self.timestep
res, jac = self._eval(lhs, mask, timestep)
resnorm = resnorm0 = numpy.linalg.norm(res)
yield lhs, types.attributes(resnorm=resnorm, timestep=timestep, resnorm0=resnorm0)
while True:
vlhs[vmask] -= jac.solve_leniently(res, **self.solveargs)
timestep = self.timestep * (resnorm0/resnorm)
log.info('timestep: {:.0e}'.format(timestep))
res, jac = self._eval(lhs, mask, timestep)
resnorm = numpy.linalg.norm(res)
yield lhs, types.attributes(resnorm=resnorm, timestep=timestep, resnorm0=resnorm0)
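# --- Editor's note: hedged usage sketch, not part of the original module. ---
# `res` and `inert` are assumed to be matching sample.Integral objects
# depending on '?lhs'; the initial timestep and tolerance are illustrative.
def _pseudotime_usage_sketch(res, inert, cons):
  return pseudotime('lhs', residual=res, inertia=inert, timestep=1e-2,
                    constrain=cons).solve(tol=1e-10)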
@iterable.single_or_multiple
class thetamethod(cache.Recursion, length=1, version=1):
'''solve time dependent problem using the theta method
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
residual : :class:`nutils.sample.Integral`
inertia : :class:`nutils.sample.Integral`
timestep : :class:`float`
    Time step.
lhs0 : :class:`numpy.ndarray`
Coefficient vector, starting point of the iterative procedure.
theta : :class:`float`
Theta value (theta=1 for implicit Euler, theta=0.5 for Crank-Nicolson)
residual0 : :class:`nutils.sample.Integral`
Optional additional residual component evaluated in previous timestep
constrain : :class:`numpy.ndarray` with dtype :class:`bool` or :class:`float`
Equal length to ``lhs0``, masks the free vector entries as ``False``
(boolean) or NaN (float). In the remaining positions the values of
``lhs0`` are returned unchanged (boolean) or overruled by the values in
`constrain` (float).
newtontol : :class:`float`
Residual tolerance of individual timesteps
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
timetarget : :class:`str`
Name of the :class:`nutils.function.Argument` that represents time.
Optional.
time0 : :class:`float`
    The initial time. Default: ``0.0``.
Yields
------
:class:`numpy.ndarray`
Coefficient vector for all timesteps after the initial condition.
'''
@types.apply_annotations
def __init__(self, target, residual:integraltuple, inertia:optionalintegraltuple, timestep:types.strictfloat, theta:types.strictfloat, lhs0:types.frozenarray[types.strictfloat]=None, target0:types.strictstr=None, constrain:arrayordict=None, newtontol:types.strictfloat=1e-10, arguments:argdict={}, newtonargs:types.frozendict={}, timetarget:types.strictstr='_thetamethod_time', time0:types.strictfloat=0., historysuffix:types.strictstr='0'):
super().__init__()
if len(residual) != len(inertia):
      raise Exception('length of residual and inertia do not match')
for inert, res in zip(inertia, residual):
if inert and inert.shape != res.shape:
raise ValueError('expected `inertia` with shape {} but got {}'.format(res.shape, inert.shape))
self.target = target
self.newtonargs = newtonargs
self.newtontol = newtontol
self.timestep = timestep
self.timetarget = timetarget
self.lhs0, self.constrain = _parse_lhs_cons(lhs0, constrain, target, _argshapes(residual+inertia), arguments)
self.lhs0[timetarget] = numpy.array(time0)
if target0 is None:
self.old_new = [(t+historysuffix, t) for t in target]
elif len(target) == 1:
warnings.deprecation('target0 is deprecated; use historysuffix instead (target0=target+historysuffix)')
self.old_new = [(target0, target[0])]
else:
raise Exception('target0 is not supported in combination with multiple targets; use historysuffix instead')
self.old_new.append((timetarget+historysuffix, timetarget))
subs0 = {new: evaluable.Argument(old, self.lhs0[new].shape) for old, new in self.old_new}
dt = evaluable.Argument(timetarget, ()) - subs0[timetarget]
self.residuals = [sample.Integral({smp: func * theta + evaluable.replace_arguments(func, subs0) * (1-theta) for smp, func in res._integrands.items()}, shape=res.shape)
+ sample.Integral({smp: (func - evaluable.replace_arguments(func, subs0)) / dt for smp, func in inert._integrands.items()} if inert else {}, shape=res.shape)
for res, inert in zip(residual, inertia)]
self.jacobians = _derivative(self.residuals, target)
def _step(self, lhs0, dt):
arguments = lhs0.copy()
arguments.update((old, lhs0[new]) for old, new in self.old_new)
arguments[self.timetarget] = lhs0[self.timetarget] + dt
try:
return newton(self.target, residual=self.residuals, jacobian=self.jacobians, constrain=self.constrain, arguments=arguments, **self.newtonargs).solve(tol=self.newtontol)
except (SolverError, matrix.MatrixError) as e:
log.error('error: {}; retrying with timestep {}'.format(e, dt/2))
return self._step(self._step(lhs0, dt/2), dt/2)
def resume(self, history):
if history:
lhs, = history
else:
lhs = self.lhs0
yield lhs
while True:
lhs = self._step(lhs, self.timestep)
yield lhs
impliciteuler = functools.partial(thetamethod, theta=1)
cranknicolson = functools.partial(thetamethod, theta=0.5)
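# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The theta-method solvers above are endless iterators over timesteps; a
# typical driver loop looks like this, with `res` and `inert` assumed as in
# the pseudotime sketch and an illustrative step count, timestep and Newton
# tolerance. The first iterate is the initial condition itself.
def _timestepping_sketch(res, inert, cons, nsteps=10):
  steps = impliciteuler('lhs', residual=res, inertia=inert, timestep=.1,
                        constrain=cons, newtontol=1e-10)
  for istep, lhs in enumerate(steps):
    if istep == nsteps:
      return lhs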
@log.withcontext
@single_or_multiple
@types.apply_annotations
@cache.function(version=1)
def optimize(target, functional:sample.strictintegral, *, tol:types.strictfloat=0., arguments:argdict={}, droptol:float=None, constrain:arrayordict=None, lhs0:types.frozenarray[types.strictfloat]=None, relax0:float=1., linesearch=None, failrelax:types.strictfloat=1e-6, **kwargs):
'''find the minimizer of a given functional
Parameters
----------
target : :class:`str`
Name of the target: a :class:`nutils.function.Argument` in ``residual``.
functional : scalar :class:`nutils.sample.Integral`
    The functional that should be minimized by varying ``target``
tol : :class:`float`
Target residual norm.
arguments : :class:`collections.abc.Mapping`
Defines the values for :class:`nutils.function.Argument` objects in
`residual`. The ``target`` should not be present in ``arguments``.
Optional.
droptol : :class:`float`
Threshold for leaving entries in the return value at NaN if they do not
contribute to the value of the functional.
constrain : :class:`numpy.ndarray` with dtype :class:`float`
Defines the fixed entries of the coefficient vector
lhs0 : :class:`numpy.ndarray`
Coefficient vector, starting point of the iterative procedure.
relax0 : :class:`float`
Initial relaxation value.
linesearch : :class:`nutils.solver.LineSearch`
Callable that defines relaxation logic.
failrelax : :class:`float`
Fail with exception if relaxation reaches this lower limit.
  Returns
  -------
  :class:`numpy.ndarray`
    Coefficient vector corresponding to the functional optimum
'''
if linesearch is None:
linesearch = NormBased.legacy(kwargs)
solveargs = _strip(kwargs, 'lin')
if kwargs:
raise TypeError('unexpected keyword arguments: {}'.format(', '.join(kwargs)))
if any(t not in functional.argshapes for t in target):
if not droptol:
raise ValueError('target {} does not occur in integrand; consider setting droptol>0'.format(', '.join(t for t in target if t not in functional.argshapes)))
target = [t for t in target if t in functional.argshapes]
if not target:
return {}
residual = [functional.derivative(t) for t in target]
jacobian = _derivative(residual, target)
lhs0, constrain = _parse_lhs_cons(lhs0, constrain, target, functional.argshapes, arguments)
mask, vmask = _invert(constrain, target)
lhs, vlhs = _redict(lhs0, target)
val, res, jac = _integrate_blocks(functional, residual, jacobian, arguments=lhs, mask=mask)
if droptol is not None:
supp = jac.rowsupp(droptol)
res = res[supp]
jac = jac.submatrix(supp, supp)
nan = numpy.zeros_like(vmask)
nan[vmask] = ~supp # return value is set to nan if dof is not supported and not constrained
vmask[vmask] = supp # dof is computed if it is supported and not constrained
assert vmask.sum() == len(res)
resnorm = numpy.linalg.norm(res)
if any(jacobian.contains(t) for jacobian in jacobian for t in target):
if tol <= 0:
raise ValueError('nonlinear optimization problem requires a nonzero "tol" argument')
solveargs.setdefault('rtol', 1e-3)
firstresnorm = resnorm
relax = relax0
accept = True
with log.context('newton {:.0f}%', 0) as reformat:
while not numpy.isfinite(resnorm) or resnorm > tol:
if accept:
reformat(100 * numpy.log(firstresnorm/resnorm) / numpy.log(firstresnorm/tol))
dlhs = -jac.solve_leniently(res, **solveargs)
res0 = res
dres = jac@dlhs # == -res0 if dlhs was solved to infinite precision
relax0 = 0
vlhs[vmask] += (relax - relax0) * dlhs
relax0 = relax # currently applied relaxation
val, res, jac = _integrate_blocks(functional, residual, jacobian, arguments=lhs, mask=mask)
resnorm = numpy.linalg.norm(res)
scale, accept = linesearch(res0, relax*dres, res, relax*(jac@dlhs))
relax = min(relax * scale, 1)
if relax <= failrelax:
raise SolverError('stuck in local minimum')
log.info('converged with residual {:.1e}'.format(resnorm))
elif resnorm > tol:
solveargs.setdefault('atol', tol)
dlhs = -jac.solve(res, **solveargs)
vlhs[vmask] += dlhs
val += (res + jac@dlhs/2).dot(dlhs)
if droptol is not None:
vlhs[nan] = numpy.nan
log.info('constrained {}/{} dofs'.format(len(vlhs)-nan.sum(), len(vlhs)))
log.info('optimum value {:.2e}'.format(val))
return lhs
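# --- Editor's note: hedged usage sketch, not part of the original module. ---
# A common use of optimize() is projection: minimizing a quadratic misfit
# functional. `sqr` is assumed to be a scalar sample.Integral depending on
# '?lhs'; for a quadratic (hence linear) problem tol may be omitted, and
# droptol marks unused dofs with NaN instead of raising an error.
def _optimize_usage_sketch(sqr):
  return optimize('lhs', sqr, droptol=1e-15)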
## HELPER FUNCTIONS
def _strip(kwargs, prefix):
return {key[len(prefix):]: kwargs.pop(key) for key in list(kwargs) if key.startswith(prefix)}
def _parse_lhs_cons(lhs0, constrain, targets, argshapes, arguments):
arguments = arguments.copy()
if lhs0 is not None:
if len(targets) != 1:
raise SolverError('lhs0 argument cannot be used in combination with multiple targets')
arguments[targets[0]] = lhs0
if numeric.isarray(constrain):
if len(targets) != 1:
raise SolverError('constrain argument must be a dictionary in combination with multiple targets')
constrain = {targets[0]: constrain}
elif constrain:
constrain = constrain.copy()
else:
constrain = {}
for target in targets:
if target not in argshapes:
raise SolverError('target does not occur in functional: {!r}'.format(target))
shape = argshapes[target]
if target not in arguments:
arguments[target] = numpy.zeros(shape)
elif arguments[target].shape != shape:
raise SolverError('invalid argument shape for {}: {} != {}'.format(target, arguments[target].shape, shape))
if target not in constrain:
constrain[target] = numpy.zeros(shape, dtype=bool)
elif constrain[target].shape != shape:
raise SolverError('invalid constrain shape for {}: {} != {}'.format(target, constrain[target].shape, shape))
if constrain[target].dtype != bool:
isnan = numpy.isnan(constrain[target])
arguments[target] = numpy.choose(isnan, [constrain[target], arguments[target]])
constrain[target] = ~isnan
return arguments, constrain
def _derivative(residual, target, jacobian=None):
argshapes = _argshapes(residual)
if jacobian is None:
jacobian = tuple(res.derivative(evaluable.Argument(t, argshapes[t])) for res in residual for t in target)
elif len(jacobian) != len(residual) * len(target):
raise ValueError('jacobian has incorrect length')
elif any(jacobian[i*len(target)+j].shape != res.shape + argshapes[t] for i, res in enumerate(residual) for j, t in enumerate(target)):
raise ValueError('jacobian has incorrect shape')
return jacobian
def _progress(name, tol):
'''helper function for iter.wrap'''
lhs, info = yield name
resnorm0 = info.resnorm
while True:
lhs, info = yield (name + ' {:.0f}%').format(100 * numpy.log(resnorm0/max(info.resnorm,tol)) / numpy.log(resnorm0/tol) if tol else 0 if info.resnorm else 100)
def _redict(lhs, targets):
'''copy argument dictionary referencing a newly allocated contiguous array'''
vlhs = numpy.empty(sum(lhs[target].size for target in targets))
lhs = lhs.copy()
offset = 0
for target in targets:
old = lhs[target]
nextoffset = offset + old.size
new = vlhs[offset:nextoffset].reshape(old.shape)
new[...] = old
new.flags.writeable = False
lhs[target] = new
offset = nextoffset
assert offset == len(vlhs)
return lhs, vlhs
def _invert(cons, targets):
'''invert constraints dictionary to tuple referencing a contiguous array'''
mask = []
vmask = numpy.empty(sum(cons[target].size for target in targets), dtype=bool)
offset = 0
for target in targets:
c = cons[target]
nextoffset = offset + c.size
mask.append(numpy.invert(c, out=vmask[offset:nextoffset].reshape(c.shape)))
offset = nextoffset
assert offset == len(vmask)
return tuple(mask), vmask
def _integrate_blocks(*blocks, arguments, mask):
'''helper function for blockwise integration'''
*scalars, residuals, jacobians = blocks
assert len(residuals) == len(mask)
assert len(jacobians) == len(mask)**2
data = iter(sample.eval_integrals_sparse(*(scalars + list(residuals) + list(jacobians)), **arguments))
nrg = [sparse.toarray(next(data)) for _ in range(len(scalars))]
res = [sparse.take(next(data), [m]) for m in mask]
jac = [[sparse.take(next(data), [mi, mj]) for mj in mask] for mi in mask]
assert not list(data)
return nrg + [sparse.toarray(sparse.block(res)), matrix.fromsparse(sparse.block(jac), inplace=True)]
def _argshapes(integrals):
'''merge argshapes of multiple integrals'''
argshapes = {}
for target, shape in (item for integral in integrals if integral for item in integral.argshapes.items()):
if target not in argshapes:
argshapes[target] = shape
elif argshapes[target] != shape:
raise ValueError('shapes do not match for target {!r}: {} != {}'.format(target, argshapes[target], shape))
return argshapes
# vim:sw=2:sts=2:et
| joostvanzwieten/nutils | nutils/solver.py | Python | mit | 40,683 |
#!/usr/bin/env python3
import logging
import socket
import sys
from zeroconf import ServiceInfo, Zeroconf, __version__
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
assert sys.argv[1:] == ['--debug']
logging.getLogger('zeroconf').setLevel(logging.DEBUG)
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
print(f"Multicast DNS Service Discovery for Python, version {__version__}")
r = Zeroconf()
print("1. Testing registration of a service...")
desc = {'version': '0.10', 'a': 'test value', 'b': 'another value'}
addresses = [socket.inet_aton("127.0.0.1")]
expected = {'127.0.0.1'}
if socket.has_ipv6:
addresses.append(socket.inet_pton(socket.AF_INET6, '::1'))
expected.add('::1')
info = ServiceInfo(
"_http._tcp.local.",
"My Service Name._http._tcp.local.",
addresses=addresses,
port=1234,
properties=desc,
)
print(" Registering service...")
r.register_service(info)
print(" Registration done.")
print("2. Testing query of service information...")
print(" Getting ZOE service: %s" % (r.get_service_info("_http._tcp.local.", "ZOE._http._tcp.local.")))
print(" Query done.")
print("3. Testing query of own service...")
queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.")
assert queried_info
assert set(queried_info.parsed_addresses()) == expected
print(f" Getting self: {queried_info}")
print(" Query done.")
print("4. Testing unregister of service information...")
r.unregister_service(info)
print(" Unregister done.")
r.close()
| jstasiak/python-zeroconf | examples/self_test.py | Python | lgpl-2.1 | 1,785 |
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""Tests for action_common."""
import unittest, os, sys, StringIO, copy
import common
from autotest_lib.cli import cli_mock, topic_common, action_common, rpc
from autotest_lib.frontend.afe.json_rpc import proxy
#
# List action
#
class atest_list_unittest(cli_mock.cli_unittest):
def test_check_for_wilcard_none(self):
orig_filters = {'name__in': ['item0', 'item1']}
orig_checks = {'name__in': ['item0', 'item1']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
def test_check_for_wilcard_none_list(self):
orig_filters = {'name__in': ['item0']}
orig_checks = {'name__in': ['item0']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
def test_check_for_wilcard_one_list(self):
filters = {'something__in': ['item*']}
checks = {'something__in': ['item*']}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__startswith': 'item'})
self.assertEqual(checks, {'something__startswith': None})
def test_check_for_wilcard_one_string(self):
filters = {'something__name': 'item*'}
checks = {'something__name': 'item*'}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__name__startswith': 'item'})
self.assertEqual(checks, {'something__name__startswith': None})
def test_check_for_wilcard_one_string_login(self):
filters = {'something__login': 'item*'}
checks = {'something__login': 'item*'}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__login__startswith': 'item'})
self.assertEqual(checks, {'something__login__startswith': None})
def test_check_for_wilcard_two(self):
orig_filters = {'something__in': ['item0*', 'item1*']}
orig_checks = {'something__in': ['item0*', 'item1*']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
self.god.stub_function(sys, 'exit')
sys.exit.expect_call(1).and_raises(cli_mock.ExitException)
self.god.mock_io()
self.assertRaises(cli_mock.ExitException,
mytest.check_for_wildcard, filters, checks)
(out, err) = self.god.unmock_io()
self.god.check_playback()
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
def _atest_list_execute(self, filters={}, check_results={}):
values = [{u'id': 180,
u'platform': 0,
u'name': u'label0',
u'invalid': 0,
u'kernel_config': u''},
{u'id': 338,
u'platform': 0,
u'name': u'label1',
u'invalid': 0,
u'kernel_config': u''}]
mytest = action_common.atest_list()
mytest.afe = rpc.afe_comm()
self.mock_rpcs([('get_labels',
filters,
True,
values)])
self.god.mock_io()
self.assertEqual(values,
mytest.execute(op='get_labels',
filters=filters,
check_results=check_results))
(out, err) = self.god.unmock_io()
self.god.check_playback()
return (out, err)
def test_atest_list_execute_no_filters(self):
self._atest_list_execute()
def test_atest_list_execute_filters_all_good(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1']
check_results['name__in'] = 'name'
(out, err) = self._atest_list_execute(filters, check_results)
self.assertEqual(err, '')
def test_atest_list_execute_filters_good_and_bad(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1', 'label2']
check_results['name__in'] = 'name'
(out, err) = self._atest_list_execute(filters, check_results)
self.assertWords(err, ['Unknown', 'label2'])
def test_atest_list_execute_items_good_and_bad_no_check(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1', 'label2']
check_results['name__in'] = None
(out, err) = self._atest_list_execute(filters, check_results)
self.assertEqual(err, '')
def test_atest_list_execute_filters_wildcard(self):
filters = {}
check_results = {}
filters['name__in'] = ['label*']
check_results['name__in'] = 'name'
values = [{u'id': 180,
u'platform': False,
u'name': u'label0',
u'invalid': False,
u'kernel_config': u''},
{u'id': 338,
u'platform': False,
u'name': u'label1',
u'invalid': False,
u'kernel_config': u''}]
mytest = action_common.atest_list()
mytest.afe = rpc.afe_comm()
self.mock_rpcs([('get_labels', {'name__startswith': 'label'},
True, values)])
self.god.mock_io()
self.assertEqual(values,
mytest.execute(op='get_labels',
filters=filters,
check_results=check_results))
(out, err) = self.god.unmock_io()
self.god.check_playback()
self.assertEqual(err, '')
#
# Creation & Deletion of a topic (ACL, label, user)
#
class atest_create_or_delete_unittest(cli_mock.cli_unittest):
def _create_cr_del(self, items):
def _items():
return items
crdel = action_common.atest_create_or_delete()
crdel.afe = rpc.afe_comm()
crdel.topic = crdel.usage_topic = 'label'
crdel.op_action = 'add'
crdel.get_items = _items
crdel.data['platform'] = False
crdel.data_item_key = 'name'
return crdel
def test_execute_create_one_topic(self):
acr = self._create_cr_del(['label0'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
True, 42)])
ret = acr.execute()
self.god.check_playback()
        self.assertEqual(['label0'], ret)
def test_execute_create_two_topics(self):
acr = self._create_cr_del(['label0', 'label1'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
True, 42),
('add_label',
{'name': 'label1', 'platform': False},
True, 43)])
ret = acr.execute()
self.god.check_playback()
self.assertEqualNoOrder(['label0', 'label1'], ret)
def test_execute_create_error(self):
acr = self._create_cr_del(['label0'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
False,
'''ValidationError:
{'name': 'This value must be unique (label0)'}''')])
ret = acr.execute()
self.god.check_playback()
self.assertEqualNoOrder([], ret)
#
# Adding or Removing users or hosts from a topic(ACL or label)
#
class atest_add_or_remove_unittest(cli_mock.cli_unittest):
def _create_add_remove(self, items, users=None, hosts=None):
def _items():
return [items]
addrm = action_common.atest_add_or_remove()
addrm.afe = rpc.afe_comm()
if users:
addrm.users = users
if hosts:
addrm.hosts = hosts
addrm.topic = 'acl_group'
addrm.msg_topic = 'ACL'
addrm.op_action = 'add'
addrm.msg_done = 'Added to'
addrm.get_items = _items
return addrm
def test__add_remove_uh_to_topic(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None)])
acl_addrm._add_remove_uh_to_topic('acl0', 'users')
self.god.check_playback()
def test__add_remove_uh_to_topic_raise(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.assertRaises(AttributeError,
acl_addrm._add_remove_uh_to_topic,
'acl0', 'hosts')
def test_execute_add_or_remove_uh_to_topic_acl_users(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqualNoOrder(['acl0'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_users_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqualNoOrder(['acl0'], execute_result['users'])
self.assertEqualNoOrder(['acl0'], execute_result['hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0', 'user1'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0'),
('acl_group_add_users',
{'id': 'acl0',
'users': ['user1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=['Added to ACL acl0', 'user1'],
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_u_partial_kill(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
acl_addrm.kill_on_failure = True
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0')])
sys.exit.expect_call(1).and_raises(cli_mock.ExitException)
self.god.mock_io()
self.assertRaises(cli_mock.ExitException, acl_addrm.execute)
(out, err) = self.god.unmock_io()
self.god.check_playback()
self._check_output(out=out, err=err,
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_good_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=['Added to ACL acl0 hosts:',
'host0', 'host1'],
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0', 'user1'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_good_users_bad_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host0, host1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=['Added to ACL acl0 users:',
'user0', 'user1'],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host0', 'host1'],
err_words_no = ['acl_group_add_users'])
def test_exe_add_or_remove_uh_to_topic_acl_good_u_bad_hosts_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=['Added to ACL acl0 users:',
'user0', 'user1', 'host0'],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host1'],
err_words_no = ['acl_group_add_users'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_bad_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host0, host1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host0', 'host1',
'acl_group_add_users',
'user0', 'user1'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_u_bad_h_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0'),
('acl_group_add_users',
{'id': 'acl0',
'users': ['user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=['Added to ACL acl0 user:',
'Added to ACL acl0 host:',
'user1', 'host0'],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host1',
'acl_group_add_users',
'user0'])
def test_execute_add_or_remove_to_topic_bad_acl_uh(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: acl_group matching '
'query does not exist.'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: acl_group matching '
'query does not exist.')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'acl_group_add_users'])
if __name__ == '__main__':
unittest.main()
| yochow/autotest | cli/action_common_unittest.py | Python | gpl-2.0 | 23,200 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
import os
import subprocess
from pex.interpreter import PythonInterpreter
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.collections import assert_single_element
from pants.util.contextutil import open_zip, temporary_dir
class PexBuildUtilIntegrationTest(PantsRunIntegrationTest):
binary_target_address = "testprojects/src/python/python_targets:test"
def test_ipex_gets_imprecise_constraint(self) -> None:
cur_interpreter_id = PythonInterpreter.get().identity
interpreter_name = cur_interpreter_id.requirement.name
major, minor, patch = cur_interpreter_id.version
# Pin the selected interpreter to the one used by pants to execute this test.
cur_interpreter_constraint = f"{interpreter_name}=={major}.{minor}.{patch}"
        # Validate that the .ipex file specifically matches the major and minor versions, but allows
# any patch version.
imprecise_constraint = f"{interpreter_name}=={major}.{minor}.*"
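        # For example (illustrative values, not fixtures from this test): running
        # under CPython 3.7.4 gives cur_interpreter_constraint == "CPython==3.7.4"
        # while the emitted .ipex should record "CPython==3.7.*".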
with temporary_dir() as tmp_dir:
self.do_command(
"--binary-py-generate-ipex",
"binary",
self.binary_target_address,
config={
"GLOBAL": {"pants_distdir": tmp_dir},
"python-setup": {"interpreter_constraints": [cur_interpreter_constraint]},
},
)
pex_path = os.path.join(tmp_dir, "test.ipex")
assert os.path.isfile(pex_path)
pex_execution_result = subprocess.run([pex_path], stdout=subprocess.PIPE, check=True)
assert pex_execution_result.stdout.decode() == "test!\n"
with open_zip(pex_path) as zf:
info = json.loads(zf.read("PEX-INFO"))
constraint = assert_single_element(info["interpreter_constraints"])
assert constraint == imprecise_constraint
| tdyas/pants | src/python/pants/python/pex_build_util_test_integration.py | Python | apache-2.0 | 2,074 |
"""bigint
Revision ID: 4d9c223a796d
Revises: ca7b124852b1
Create Date: 2019-06-21 20:35:48.936858
"""
# revision identifiers, used by Alembic.
revision = "4d9c223a796d"
down_revision = "ca7b124852b1"
from alembic import op
import sqlalchemy as sa
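# Both functions below apply the same mechanical change to every table:
# upgrade() widens all INTEGER id / foreign-key columns to BIGINT, and
# downgrade() narrows them back to INTEGER.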
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"author",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"authors_entries",
"author_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"authors_entries",
"entry_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"authors_entries",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"email",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('email_id_seq'::regclass)"),
)
op.alter_column(
"email",
"period_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"email",
"user_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"emails_authors",
"author_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"emails_authors",
"email_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"emails_authors",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"emails_entries",
"email_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"emails_entries",
"entry_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"emails_entries",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"entry",
"feed_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"entry",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"feed",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('feed_id_seq'::regclass)"),
)
op.alter_column(
"feed",
"user_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"feed_stats",
"feed_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"feed_stats",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"period",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('period_id_seq'::regclass)"),
)
op.alter_column(
"recommended",
"author_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"recommended",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"role",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('role_id_seq'::regclass)"),
)
op.alter_column(
"roles_users",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"roles_users",
"role_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"roles_users",
"user_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"subs_periods",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"subs_periods",
"period_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"subs_periods",
"subscription_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"subscription",
"author_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"subscription",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('subscription_id_seq'::regclass)"),
)
op.alter_column(
"subscription",
"user_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"userfeed",
"author_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"userfeed",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"userfeed",
"user_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
op.alter_column(
"users",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
existing_server_default=sa.text("nextval('users_id_seq'::regclass)"),
)
op.alter_column(
"websub_subscription",
"id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
autoincrement=True,
)
op.alter_column(
"websub_subscription",
"userfeed_id",
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"websub_subscription",
"userfeed_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"websub_subscription",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"users",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('users_id_seq'::regclass)"),
)
op.alter_column(
"userfeed",
"user_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"userfeed",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"userfeed",
"author_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"subscription",
"user_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"subscription",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('subscription_id_seq'::regclass)"),
)
op.alter_column(
"subscription",
"author_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"subs_periods",
"subscription_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"subs_periods",
"period_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"subs_periods",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"roles_users",
"user_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"roles_users",
"role_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"roles_users",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"role",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('role_id_seq'::regclass)"),
)
op.alter_column(
"recommended",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"recommended",
"author_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"period",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('period_id_seq'::regclass)"),
)
op.alter_column(
"feed_stats",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"feed_stats",
"feed_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"feed",
"user_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"feed",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('feed_id_seq'::regclass)"),
)
op.alter_column(
"entry",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"entry",
"feed_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"emails_entries",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"emails_entries",
"entry_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"emails_entries",
"email_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"emails_authors",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"emails_authors",
"email_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"emails_authors",
"author_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"email",
"user_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"email",
"period_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"email",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
existing_server_default=sa.text("nextval('email_id_seq'::regclass)"),
)
op.alter_column(
"authors_entries",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
op.alter_column(
"authors_entries",
"entry_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"authors_entries",
"author_id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True,
)
op.alter_column(
"author",
"id",
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
autoincrement=True,
)
# ### end Alembic commands ###
| DBeath/flask-feedrsub | migrations/versions/4d9c223a796d_bigint.py | Python | mit | 13,907 |
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import datetime
import re
from ..model import ModelView, ModelSQL, fields
from ..report import Report
from ..wizard import Wizard, StateView, StateAction, Button
from ..transaction import Transaction
from ..cache import Cache
from ..pool import Pool
from ..pyson import Bool, Eval
IDENTIFIER = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
class Model(ModelSQL, ModelView):
"Model"
_name = 'ir.model'
_description = __doc__
name = fields.Char('Model Description', translate=True, loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
model = fields.Char('Model Name', required=True,
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
info = fields.Text('Information',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
module = fields.Char('Module',
help="Module in which this model is defined", readonly=True)
fields = fields.One2Many('ir.model.field', 'model', 'Fields',
required=True)
def __init__(self):
super(Model, self).__init__()
self._sql_constraints += [
('model_uniq', 'UNIQUE(model)',
'The model must be unique!'),
]
self._constraints += [
('check_module', 'invalid_module'),
]
self._error_messages.update({
'invalid_module': 'Module Name must be a python identifier!',
})
self._order.insert(0, ('model', 'ASC'))
def check_module(self, ids):
'''
Check module
'''
for model in self.browse(ids):
if model.module and not IDENTIFIER.match(model.module):
return False
return True
def create(self, vals):
pool = Pool()
property_obj = pool.get('ir.property')
res = super(Model, self).create(vals)
# Restart the cache of models_get
property_obj.models_get.reset()
return res
def write(self, ids, vals):
pool = Pool()
property_obj = pool.get('ir.property')
res = super(Model, self).write(ids, vals)
# Restart the cache of models_get
property_obj.models_get.reset()
return res
def delete(self, ids):
pool = Pool()
property_obj = pool.get('ir.property')
res = super(Model, self).delete(ids)
# Restart the cache of models_get
property_obj.models_get.reset()
return res
Model()
class ModelField(ModelSQL, ModelView):
"Model field"
_name = 'ir.model.field'
_description = __doc__
name = fields.Char('Name', required=True,
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
relation = fields.Char('Model Relation',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
model = fields.Many2One('ir.model', 'Model', required=True,
select=True, ondelete='CASCADE',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
field_description = fields.Char('Field Description', translate=True,
loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
ttype = fields.Char('Field Type',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
groups = fields.Many2Many('ir.model.field-res.group', 'field_id',
'group_id', 'Groups')
help = fields.Text('Help', translate=True, loading='lazy',
states={
'readonly': Bool(Eval('module')),
},
depends=['module'])
module = fields.Char('Module',
help="Module in which this field is defined")
def __init__(self):
super(ModelField, self).__init__()
self._sql_constraints += [
('name_model_uniq', 'UNIQUE(name, model)',
'The field name in model must be unique!'),
]
self._constraints += [
('check_name', 'invalid_name'),
]
self._error_messages.update({
'invalid_name': 'Model Field Name must be a python identifier!',
})
self._order.insert(0, ('name', 'ASC'))
def default_name(self):
return 'No Name'
def default_field_description(self):
return 'No description available'
def check_name(self, ids):
'''
Check name
'''
for field in self.browse(ids):
if not IDENTIFIER.match(field.name):
return False
return True
def read(self, ids, fields_names=None):
pool = Pool()
translation_obj = pool.get('ir.translation')
to_delete = []
if Transaction().context.get('language'):
if fields_names is None:
fields_names = self._columns.keys()
if 'field_description' in fields_names \
or 'help' in fields_names:
if 'model' not in fields_names:
fields_names.append('model')
to_delete.append('model')
if 'name' not in fields_names:
fields_names.append('name')
to_delete.append('name')
int_id = False
if isinstance(ids, (int, long)):
int_id = True
ids = [ids]
res = super(ModelField, self).read(ids, fields_names=fields_names)
if (Transaction().context.get('language')
and ('field_description' in fields_names
or 'help' in fields_names)):
model_ids = set()
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_ids.add(rec['model'][0])
else:
model_ids.add(rec['model'])
model_ids = list(model_ids)
cursor = Transaction().cursor
cursor.execute('SELECT id, model FROM ir_model WHERE id IN ' \
'(' + ','.join(('%s',) * len(model_ids)) + ')', model_ids)
id2model = dict(cursor.fetchall())
trans_args = []
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_id = rec['model'][0]
else:
model_id = rec['model']
if 'field_description' in fields_names:
trans_args.append((id2model[model_id] + ',' + rec['name'],
'field', Transaction().language, None))
if 'help' in fields_names:
trans_args.append((id2model[model_id] + ',' + rec['name'],
'help', Transaction().language, None))
translation_obj.get_sources(trans_args)
for rec in res:
if isinstance(rec['model'], (list, tuple)):
model_id = rec['model'][0]
else:
model_id = rec['model']
if 'field_description' in fields_names:
res_trans = translation_obj.get_source(
id2model[model_id] + ',' + rec['name'],
'field', Transaction().language)
if res_trans:
rec['field_description'] = res_trans
if 'help' in fields_names:
res_trans = translation_obj.get_source(
id2model[model_id] + ',' + rec['name'],
'help', Transaction().language)
if res_trans:
rec['help'] = res_trans
if to_delete:
for rec in res:
for field in to_delete:
del rec[field]
if int_id:
res = res[0]
return res
ModelField()
class ModelAccess(ModelSQL, ModelView):
"Model access"
_name = 'ir.model.access'
_description = __doc__
_rec_name = 'model'
model = fields.Many2One('ir.model', 'Model', required=True,
ondelete="CASCADE")
group = fields.Many2One('res.group', 'Group',
ondelete="CASCADE")
perm_read = fields.Boolean('Read Access')
perm_write = fields.Boolean('Write Access')
perm_create = fields.Boolean('Create Access')
perm_delete = fields.Boolean('Delete Access')
description = fields.Text('Description')
def __init__(self):
super(ModelAccess, self).__init__()
self._sql_constraints += [
('model_group_uniq', 'UNIQUE("model", "group")',
'Only one record by model and group is allowed!'),
]
self._error_messages.update({
'read': 'You can not read this document! (%s)',
'write': 'You can not write in this document! (%s)',
'create': 'You can not create this kind of document! (%s)',
'delete': 'You can not delete this document! (%s)',
})
def check_xml_record(self, ids, values):
return True
def default_perm_read(self):
return False
def default_perm_write(self):
return False
def default_perm_create(self):
return False
def default_perm_delete(self):
return False
@Cache('ir_model_access.check')
def check(self, model_name, mode='read', raise_exception=True):
'''
Check access for model_name
:param model_name: the model name
:param mode: 'read', 'write', 'create' or 'delete'
:param raise_exception: raise an exception if the test failed
:return: a boolean
'''
assert mode in ['read', 'write', 'create', 'delete'], \
'Invalid access mode for security'
if Transaction().user == 0:
return True
pool = Pool()
ir_model_obj = pool.get('ir.model')
user_group_obj = pool.get('res.user-res.group')
cursor = Transaction().cursor
cursor.execute('SELECT MAX(CASE WHEN a.perm_%s THEN 1 ELSE 0 END) '
'FROM "%s" AS a '
'JOIN "%s" AS m '
'ON (a.model = m.id) '
'LEFT JOIN "%s" AS gu '
'ON (gu."group" = a."group") '
'WHERE m.model = %%s AND (gu."user" = %%s OR a."group" IS NULL)'
% (mode, self._table, ir_model_obj._table, user_group_obj._table),
(model_name, Transaction().user))
access, = cursor.fetchone()
if not access and access is not None:
if raise_exception:
self.raise_user_error(mode, model_name)
else:
return False
return True
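    # Illustrative call site (a sketch, not code from this module): callers look
    # the access model up in the pool and ask for a specific mode, e.g.
    #   Pool().get('ir.model.access').check('ir.ui.view', mode='write')
    # which raises a user error when an access rule exists for the model and
    # none of the current user's groups (nor the group-less default rule)
    # grants write access.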
def write(self, ids, vals):
res = super(ModelAccess, self).write(ids, vals)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
def create(self, vals):
res = super(ModelAccess, self).create(vals)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
def delete(self, ids):
res = super(ModelAccess, self).delete(ids)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
ModelAccess()
class ModelFieldAccess(ModelSQL, ModelView):
"Model Field Access"
_name = 'ir.model.field.access'
_description = __doc__
_rec_name = 'field'
field = fields.Many2One('ir.model.field', 'Field', required=True,
ondelete='CASCADE')
group = fields.Many2One('res.group', 'Group', ondelete='CASCADE')
perm_read = fields.Boolean('Read Access')
perm_write = fields.Boolean('Write Access')
description = fields.Text('Description')
def __init__(self):
super(ModelFieldAccess, self).__init__()
self._sql_constraints += [
('field_group_uniq', 'UNIQUE("field", "group")',
'Only one record by field and group is allowed!'),
]
self._error_messages.update({
'read': 'You can not read the field! (%s.%s)',
'write': 'You can not write on the field! (%s.%s)',
})
def check_xml_record(self, ids, values):
return True
def default_perm_read(self):
return False
def default_perm_write(self):
return False
@Cache('ir_model_field_access.check')
def check(self, model_name, fields, mode='read', raise_exception=True,
access=False):
'''
Check access for fields on model_name.
:param model_name: the model name
:param fields: a list of fields
:param mode: 'read' or 'write'
:param raise_exception: raise an exception if the test failed
        :param access: return a dictionary of access rights instead of a boolean
:return: a boolean
'''
assert mode in ('read', 'write'), 'Invalid access mode'
if Transaction().user == 0:
if access:
return dict((x, True) for x in fields)
return True
pool = Pool()
ir_model_obj = pool.get('ir.model')
ir_model_field_obj = pool.get('ir.model.field')
user_group_obj = pool.get('res.user-res.group')
cursor = Transaction().cursor
cursor.execute('SELECT f.name, '
'MAX(CASE WHEN a.perm_%s THEN 1 ELSE 0 END) '
'FROM "%s" AS a '
'JOIN "%s" AS f '
'ON (a.field = f.id) '
'JOIN "%s" AS m '
'ON (f.model = m.id) '
'LEFT JOIN "%s" AS gu '
'ON (gu."group" = a."group") '
'WHERE m.model = %%s AND (gu."user" = %%s OR a."group" IS NULL) '
'GROUP BY f.name'
% (mode, self._table, ir_model_field_obj._table,
ir_model_obj._table, user_group_obj._table), (model_name,
Transaction().user))
accesses = dict(cursor.fetchall())
if access:
return accesses
for field in fields:
if not accesses.get(field, True):
if raise_exception:
self.raise_user_error(mode, (model_name, field))
else:
return False
return True
def write(self, ids, vals):
res = super(ModelFieldAccess, self).write(ids, vals)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
def create(self, vals):
res = super(ModelFieldAccess, self).create(vals)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
def delete(self, ids):
res = super(ModelFieldAccess, self).delete(ids)
# Restart the cache
self.check.reset()
pool = Pool()
for _, model in pool.iterobject():
try:
model.fields_view_get.reset()
except Exception:
pass
return res
ModelFieldAccess()
class ModelButton(ModelSQL, ModelView):
"Model Button"
_name = 'ir.model.button'
_description = __doc__
name = fields.Char('Name', required=True, readonly=True)
model = fields.Many2One('ir.model', 'Model', required=True, readonly=True,
ondelete='CASCADE', select=True)
groups = fields.Many2Many('ir.model.button-res.group', 'button', 'group',
'Groups')
def __init__(self):
super(ModelButton, self).__init__()
self._sql_constraints += [
('name_model_uniq', 'UNIQUE(name, model)',
'The button name in model must be unique!'),
]
self._order.insert(0, ('model', 'ASC'))
def create(self, values):
result = super(ModelButton, self).create(values)
# Restart the cache for get_groups
self.get_groups.reset()
return result
def write(self, ids, values):
result = super(ModelButton, self).write(ids, values)
# Restart the cache for get_groups
self.get_groups.reset()
return result
def delete(self, ids):
result = super(ModelButton, self).delete(ids)
# Restart the cache for get_groups
self.get_groups.reset()
return result
@Cache('ir.model.button')
def get_groups(self, model, name):
'''
Return a set of group ids for the named button on the model.
'''
button_ids = self.search([
('model', '=', model),
('name', '=', name),
])
if not button_ids:
return set()
button_id, = button_ids
button = self.browse(button_id)
return set(g.id for g in button.groups)
ModelButton()
class ModelData(ModelSQL, ModelView):
"Model data"
_name = 'ir.model.data'
_description = __doc__
fs_id = fields.Char('Identifier on File System', required=True,
help="The id of the record as known on the file system.",
select=True)
model = fields.Char('Model', required=True, select=True)
module = fields.Char('Module', required=True, select=True)
db_id = fields.Integer('Resource ID',
help="The id of the record in the database.", select=True,
required=True)
date_update = fields.DateTime('Update Date')
date_init = fields.DateTime('Init Date')
values = fields.Text('Values')
inherit = fields.Boolean('Inherit')
noupdate = fields.Boolean('No Update')
def __init__(self):
super(ModelData, self).__init__()
self._sql_constraints = [
('fs_id_module_model_uniq', 'UNIQUE("fs_id", "module", "model")',
'The triple (fs_id, module, model) must be unique!'),
]
def default_date_init(self):
return datetime.datetime.now()
def default_inherit(self):
return False
def default_noupdate(self):
return False
def write(self, ids, values):
result = super(ModelData, self).write(ids, values)
# Restart the cache for get_id
self.get_id.reset()
return result
@Cache('ir_model_data.get_id')
def get_id(self, module, fs_id):
"""
Return for an fs_id the corresponding db_id.
:param module: the module name
:param fs_id: the id in the xml file
:return: the database id
"""
ids = self.search([
('module', '=', module),
('fs_id', '=', fs_id),
('inherit', '=', False),
], limit=1)
if not ids:
raise Exception("Reference to %s not found"
% ".".join([module, fs_id]))
return self.read(ids[0], ['db_id'])['db_id']
ModelData()
class PrintModelGraphStart(ModelView):
'Print Model Graph'
_name = 'ir.model.print_model_graph.start'
_description = __doc__
level = fields.Integer('Level', required=True)
filter = fields.Text('Filter', help="Entering a Python "
"Regular Expression will exclude matching models from the graph.")
def default_level(self):
return 1
PrintModelGraphStart()
class PrintModelGraph(Wizard):
_name = 'ir.model.print_model_graph'
start = StateView('ir.model.print_model_graph.start',
'ir.print_model_graph_start_view_form', [
Button('Cancel', 'end', 'tryton-cancel'),
Button('Print', 'print_', 'tryton-ok', default=True),
])
print_ = StateAction('ir.report_model_graph')
def transition_print_(self, session):
return 'end'
def do_print_(self, session, action):
return action, {
'id': Transaction().context.get('active_id'),
'ids': Transaction().context.get('active_ids'),
'level': session.start.level,
'filter': session.start.filter,
}
PrintModelGraph()
class ModelGraph(Report):
_name = 'ir.model.graph'
def execute(self, ids, data):
import pydot
pool = Pool()
model_obj = pool.get('ir.model')
action_report_obj = pool.get('ir.action.report')
if not data['filter']:
filter = None
else:
filter = re.compile(data['filter'], re.VERBOSE)
action_report_ids = action_report_obj.search([
('report_name', '=', self._name)
])
if not action_report_ids:
raise Exception('Error', 'Report (%s) not find!' % self._name)
action_report = action_report_obj.browse(action_report_ids[0])
models = model_obj.browse(ids)
graph = pydot.Dot(fontsize="8")
graph.set('center', '1')
graph.set('ratio', 'auto')
self.fill_graph(models, graph, level=data['level'], filter=filter)
data = graph.create(prog='dot', format='png')
return ('png', buffer(data), False, action_report.name)
def fill_graph(self, models, graph, level=1, filter=None):
'''
Fills a pydot graph with a models structure.
:param models: a BrowseRecordList of ir.model
:param graph: a pydot.Graph
        :param level: the depth to dive into model relationships
:param filter: a compiled regular expression object to filter specific
models
'''
import pydot
pool = Pool()
model_obj = pool.get('ir.model')
sub_models = set()
if level > 0:
for model in models:
for field in model.fields:
if field.name in ('create_uid', 'write_uid'):
continue
if field.relation and not graph.get_node(field.relation):
sub_models.add(field.relation)
if sub_models:
model_ids = model_obj.search([
('model', 'in', list(sub_models)),
])
sub_models = model_obj.browse(model_ids)
if set(sub_models) != set(models):
self.fill_graph(sub_models, graph, level=level - 1,
filter=filter)
for model in models:
if filter and re.search(filter, model.model):
continue
label = '{' + model.model + '\\n'
if model.fields:
label += '|'
for field in model.fields:
if field.name in ('create_uid', 'write_uid',
'create_date', 'write_date', 'id'):
continue
label += '+ ' + field.name + ': ' + field.ttype
if field.relation:
label += ' ' + field.relation
label += '\l'
label += '}'
if pydot.__version__ == '1.0.2':
# version 1.0.2 doesn't quote correctly label on Node object
label = '"' + label + '"'
node = pydot.Node(str(model.model), shape='record', label=label)
graph.add_node(node)
for field in model.fields:
if field.name in ('create_uid', 'write_uid'):
continue
if field.relation:
node_name = field.relation
if pydot.__version__ == '1.0.2':
# version 1.0.2 doesn't quote correctly node name
node_name = '"' + node_name + '"'
if not graph.get_node(node_name):
continue
args = {}
tail = model.model
head = field.relation
edge_model_name = model.model
edge_relation_name = field.relation
if pydot.__version__ == '1.0.2':
# version 1.0.2 doesn't quote correctly edge name
edge_model_name = '"' + edge_model_name + '"'
edge_relation_name = '"' + edge_relation_name + '"'
if field.ttype == 'many2one':
edge = graph.get_edge(edge_model_name,
edge_relation_name)
if edge:
continue
args['arrowhead'] = "normal"
elif field.ttype == 'one2many':
edge = graph.get_edge(edge_relation_name,
edge_model_name)
if edge:
continue
args['arrowhead'] = "normal"
tail = field.relation
head = model.model
elif field.ttype == 'many2many':
if graph.get_edge(edge_model_name, edge_relation_name):
continue
if graph.get_edge(edge_relation_name, edge_model_name):
continue
args['arrowtail'] = "inv"
args['arrowhead'] = "inv"
edge = pydot.Edge(str(tail), str(head), **args)
graph.add_edge(edge)
ModelGraph()
| mediafactory/tryton_core_daemon | trytond/ir/model.py | Python | gpl-3.0 | 26,062 |
import requests
from requests.utils import quote
import re
import json
from articledownloader import scrapers
from autologging import logged, traced
from csv import reader
from time import sleep
@logged
class ArticleDownloader:
def __init__(self, els_api_key=None, sleep_sec=1, timeout_sec=30):
'''
Initialize and set up API keys
:param els_api_key: API key for Elsevier (for Elsevier's API)
:type els_api_key: str
:param sleep_sec: Sleep time between API calls (default = 1s)
:type sleep_sec: int
:param timeout_sec: Max time before timeout (default = 30s)
:type timeout_sec: int
'''
self.els_api_key = els_api_key
self.sleep_sec = sleep_sec
self.timeout_sec = timeout_sec
@traced
def get_dois_from_search(self, query, rows=500, mailto="null@null.com"):
'''
Grabs a set of unique DOIs based on a search query using the CrossRef API
:param query: the search string
:type query: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param mailto: mailto address for API
    :type mailto: str
:returns: the unique set of DOIs as a list
:rtype: list
'''
dois = []
base_url = 'https://api.crossref.org/works?query='
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = base_url + query + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
dois.append(item["DOI"])
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + query + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
dois.append(item['DOI'])
return list(set(dois))
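  # Illustrative usage (a sketch; the query string and mailto address are
  # placeholder values, and the call performs live CrossRef requests):
  #   downloader = ArticleDownloader(els_api_key=None)
  #   dois = downloader.get_dois_from_search('perovskite solar cell', rows=200,
  #                                          mailto='you@example.com')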
@traced
def get_dois_from_journal_issn(self, issn, rows=500, pub_after=2000, mailto="null@null.com"):
'''
Grabs a set of unique DOIs based on a journal ISSN using the CrossRef API
:param issn: The ISSN of the journal
:type issn: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param pub_after: the minimum publication year for DOIs returned
:type pub_after: int
:param mailto: mailto address for API
    :type mailto: str
:returns: the unique set of DOIs as a list
:rtype: list
'''
dois = []
base_url = 'https://api.crossref.org/journals/' + issn + '/works?filter=from-pub-date:' + str(pub_after)
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = str(base_url) + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
dois.append(item["DOI"])
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
dois.append(item['DOI'])
return list(set(dois))
@traced
def get_metadata_from_doi(self, doi, mailto="null@null.com"):
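    '''
    Grabs metadata for a single DOI using the CrossRef API
    :param doi: DOI string for the article we want metadata for
    :type doi: str
    :param mailto: mailto address for API
    :type mailto: str
    :returns: a metadata record as a dict (or None if the record could not be parsed)
    :rtype: dict
    '''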
base_url = 'https://api.crossref.org/works/' + str(doi)
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
search_url = str(base_url)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
item = response["message"]
metadata_record = None
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_record = {
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
}
except:
pass
return metadata_record
@traced
def get_metadata_from_journal_issn(self, issn, rows=500, pub_after=2000, mailto="null@null.com"):
'''
Grabs metadata based on a journal ISSN using the CrossRef API
:param issn: The ISSN of the journal
:type issn: str
:param rows: the maximum number of DOIs to find
:type rows: int
:param pub_after: the minimum publication year for DOIs returned
:type pub_after: int
:param mailto: mailto address for API
    :type mailto: str
:returns: the metadata for the articles according to this ISSN
:rtype: list
'''
metadata_records = []
base_url = 'https://api.crossref.org/journals/' + issn + '/works?filter=from-pub-date:' + str(pub_after)
max_rows = 1000 #Defined by CrossRef API
headers = {
'Accept': 'application/json',
'User-agent': 'mailto:' + mailto
}
if rows <= max_rows: #No multi-query needed
search_url = str(base_url) + '&rows=' + str(rows)
response = requests.get(search_url, headers=headers, timeout=self.timeout_sec).json()
for item in response["message"]["items"]:
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_records.append({
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
})
except:
pass
else: #Need to split queries
cursor = "*"
keep_paging = True
while (keep_paging):
sleep(self.sleep_sec)
r = requests.get(base_url + "&rows=" + str(max_rows) + "&cursor=" + cursor,
headers=headers, timeout=self.timeout_sec)
cursor = quote(r.json()['message']['next-cursor'], safe='')
if len(r.json()['message']['items']) == 0:
keep_paging = False
for item in r.json()['message']['items']:
try:
if "volume" in item:
volume = item["volume"]
else:
volume = None
if "published-print" in item:
year = item['published-print']['date-parts'][0][0]
else:
year = None
if "issue" in item:
issue = item["issue"]
else:
issue = None
if "page" in item:
page = item["page"]
else:
page = None
metadata_records.append({
"doi": item["DOI"],
"issn": item["ISSN"][0],
"title": item["title"][0],
"prefix": item["prefix"],
"journal": item["container-title"][0],
"publisher": item["publisher"],
"volume": volume,
"issue": issue,
"page": page,
"year": year,
"num_references": item['references-count'],
"times_cited": item['is-referenced-by-count']
})
except:
pass
return metadata_records
@traced
def get_xml_from_doi(self, doi, writefile, mode):
'''
    Downloads and writes an XML article to a file, given a DOI and operating mode
:param doi: DOI string for the article we want to download
:type doi: str
:param writefile: file object to write to
:type writefile: file
:param mode: choose from {'elsevier' | 'aps'}, depending on how we wish to access the file
:type mode: str
:returns: True on successful write, False otherwise
:rtype: bool
'''
if mode == 'elsevier':
try:
xml_url='https://api.elsevier.com/content/article/doi/' + doi + '?view=FULL'
headers = {
'X-ELS-APIKEY': self.els_api_key,
'Accept': 'text/xml'
}
r = requests.get(xml_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
# API download limit exceeded
return False
return False
if mode == 'aps':
try:
xml_url='http://harvest.aps.org/v2/journals/articles/' + doi
headers = {
'Accept': 'text/xml'
}
r = requests.get(xml_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
# API download limit exceeded
return False
return False
return False
@traced
def get_html_from_doi(self, doi, writefile, mode):
'''
Downloads and writes an HTML article to a file, given a DOI and operating mode
:param doi: DOI string for the article we want to download
:type doi: str
:param writefile: file object to write to
:type writefile: file
:param mode: choose from {'elsevier' | 'springer' | 'acs' | 'ecs' | 'rsc' | 'nature' | 'wiley' | 'aaas' | 'emerald'}, depending on how we wish to access the file
:type mode: str
:returns: True on successful write, False otherwise
:rtype: bool
'''
if mode == 'springer':
base_url = 'http://link.springer.com/'
api_url = base_url + doi + '.html'
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'wiley':
base_url = 'http://onlinelibrary.wiley.com/doi/'
api_url = base_url + doi + '/full'
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'acs':
base_url = 'http://pubs.acs.org/doi/full/'
api_url = base_url + doi
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'emerald':
base_url = 'http://www.emeraldinsight.com/doi/full/'
api_url = base_url + doi
try:
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'rsc':
html_string = 'articlehtml'
download_url = 'https://doi.org/' + doi
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(download_url, headers=headers, timeout=self.timeout_sec)
url = r.url
url = url.encode('ascii')
url = url.split('/')
url = url[0] + '//' + url[2] + '/' + url[3] + '/' + url[4] + '/' + html_string + '/' + url[6] + '/' + url[7] + '/' + url[8]
r = requests.get(url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'nature':
download_url = 'https://doi.org/' + doi
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'aaas':
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
article_url = 'https://doi.org/' + doi
resp = requests.get(article_url, headers=headers, timeout=self.timeout_sec)
download_url = resp.url + '.full' #Capture fulltext from redirect
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'ecs':
headers = {
'Accept': 'text/html',
'User-agent': 'Mozilla/5.0'
}
article_url = 'https://doi.org/' + doi
resp = requests.get(article_url, headers=headers, timeout=self.timeout_sec)
download_url = resp.url + '.full' #Capture fulltext from redirect
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
return False
@traced
def get_pdf_from_doi(self, doi, writefile, mode):
'''
Downloads and writes a PDF article to a file, given a DOI and operating mode
:param doi: DOI string for the article we want to download
:type doi: str
:param writefile: file object to write to
:type writefile: file
:param mode: choose from {'crossref' | 'elsevier' | 'rsc' | 'springer' | 'ecs' | 'nature' | 'acs'}, depending on how we wish to access the file
:type mode: str
:returns: True on successful write, False otherwise
:rtype: bool
'''
if mode == 'crossref':
base_url = 'http://api.crossref.org/works/'
api_url = base_url + doi
headers = {
'Accept': 'application/json'
}
try:
response = json.loads(requests.get(api_url, headers=headers, timeout=self.timeout_sec).text)
pdf_url = response['message']['link'][0]['URL']
app_type = str(response['message']['link'][0]['content-type'])
if app_type in ['application/pdf', 'unspecified']:
headers['Accept'] = 'application/pdf'
r = requests.get(pdf_url, stream=True, headers=headers)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'elsevier':
try:
pdf_url='http://api.elsevier.com/content/article/doi:' + doi + '?view=FULL'
headers = {
'X-ELS-APIKEY': self.els_api_key,
'Accept': 'application/pdf'
}
r = requests.get(pdf_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
# API download limit exceeded
return False
return False
if mode == 'rsc':
scraper = scrapers.RSC()
scrape_url = 'https://doi.org/' + doi
download_url = None
r = requests.get(scrape_url, timeout=self.timeout_sec)
if r.status_code == 200:
scraper.feed(r.content)
if scraper.download_link is not None:
download_url = scraper.download_link
if download_url is not None:
headers = {
'Accept': 'application/pdf'
}
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'ecs':
scraper = scrapers.ECS()
scrape_url = 'https://doi.org/' + doi
download_url = None
r = requests.get(scrape_url, timeout=self.timeout_sec)
if r.status_code == 200:
scraper.feed(r.content)
if scraper.download_link is not None:
download_url = scraper.download_link
if download_url is not None:
headers = {
'Accept': 'application/pdf'
}
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'nature':
scraper = scrapers.Nature()
scrape_url = 'https://doi.org/' + doi
download_url = None
r = requests.get(scrape_url, timeout=self.timeout_sec)
if r.status_code == 200:
scraper.feed(r.content)
if scraper.download_link is not None:
download_url = scraper.download_link
if download_url is not None:
headers = {
'Accept': 'application/pdf'
}
r = requests.get(download_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
try:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'acs':
base_url = 'http://pubs.acs.org/doi/pdf/'
api_url = base_url + doi
try:
headers = {
'Accept': 'application/pdf',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
if mode == 'springer':
base_url = 'http://link.springer.com/content/pdf/'
api_url = base_url + doi
try:
headers = {
'Accept': 'application/pdf',
'User-agent': 'Mozilla/5.0'
}
r = requests.get(api_url, stream=True, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
for chunk in r.iter_content(2048):
writefile.write(chunk)
return True
except:
return False
return False
return False
@traced
def get_abstract_from_doi(self, doi, mode):
'''
Returns abstract as a unicode string given a DOI
:param doi: DOI string for the article we want to grab metadata for
:type doi: str
:param mode: Only supports 'elsevier' for now
:type mode: str
:returns: An abstract (or None on failure)
:rtype: unicode
'''
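    # Illustrative call (hypothetical DOI), reusing the `dl` instance sketched above:
    #
    #   abstract = dl.get_abstract_from_doi('10.1016/j.example.2017.01.001', 'elsevier')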
if mode == 'elsevier':
try:
url='http://api.elsevier.com/content/article/doi/' + doi + '?view=FULL'
headers = {
'X-ELS-APIKEY': self.els_api_key,
'Accept': 'application/json'
}
r = requests.get(url, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
abstract = unicode(json.loads(r.text)['full-text-retrieval-response']['coredata']['dc:description'])
return abstract
except:
# API download limit exceeded or no abstract exists
return None
return None
@traced
def get_title_from_doi(self, doi, mode):
'''
Returns title of an article as a unicode string given a DOI
:param doi: DOI string for the article we want to grab metadata for
:type doi: str
:param mode: Only supports 'crossref' for now
:type mode: str
:returns: A title (or None on failure)
:rtype: unicode
'''
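    # Illustrative call (hypothetical DOI):
    #
    #   title = dl.get_title_from_doi('10.1016/j.example.2017.01.001', 'crossref')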
if mode == 'crossref':
try:
url='http://api.crossref.org/works/' + doi
headers = {
'X-ELS-APIKEY': self.els_api_key,
'Accept': 'application/json'
}
r = requests.get(url, headers=headers, timeout=self.timeout_sec)
if r.status_code == 200:
title = unicode(r.json()['message']['title'][0])
return title
except:
# API download limit exceeded or no title exists
return None
return None
@traced
def load_queries_from_csv(self, csvf):
'''
Loads a list of queries from a CSV file
:param csvf: file object containing a CSV file with one query per line
:type csvf: file
:returns: a list of queries, processed to be insertable into REST API (GET) calls
:rtype: list
'''
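    # Illustrative call (hypothetical file name; only the first CSV column is read):
    #
    #   with open('queries.csv', 'rb') as csvf:
    #       queries = dl.load_queries_from_csv(csvf)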
csvf.seek(0)
csvreader = reader(csvf, delimiter=',')
queries = []
for line in csvreader:
#Build search query (assume 1st column is queries)
query = quote(line[0])
query = query.split()
query = '+'.join(query)
final_query = query
queries.append(final_query)
return queries
| olivettigroup/article-downloader | articledownloader/articledownloader.py | Python | mit | 23,239 |
from __future__ import absolute_import
from django.utils import timezone
from sentry.app import tsdb
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.event_frequency import EventFrequencyCondition, Interval
class EventFrequencyConditionTest(RuleTestCase):
rule_cls = EventFrequencyCondition
def test_one_minute(self):
event = self.get_event()
rule = self.get_rule({
'interval': Interval.ONE_MINUTE,
'value': '10',
})
self.assertDoesNotPass(rule, event)
tsdb.incr(tsdb.models.group, event.group_id, count=11)
rule.clear_cache(event)
rule = self.get_rule({
'interval': Interval.ONE_MINUTE,
'value': '10',
})
self.assertPasses(rule, event)
def test_one_hour(self):
event = self.get_event()
rule = self.get_rule({
'interval': Interval.ONE_HOUR,
'value': '10',
})
self.assertDoesNotPass(rule, event)
tsdb.incr(tsdb.models.group, event.group_id, count=11)
rule.clear_cache(event)
rule = self.get_rule({
'interval': Interval.ONE_HOUR,
'value': '10',
})
self.assertPasses(rule, event)
def test_doesnt_send_consecutive(self):
event = self.get_event()
rule = self.get_rule({
'interval': Interval.ONE_HOUR,
'value': '10',
})
tsdb.incr(tsdb.models.group, event.group_id, count=11)
rule = self.get_rule({
'interval': Interval.ONE_HOUR,
'value': '10',
})
self.assertPasses(rule, event)
self.assertDoesNotPass(rule, event, rule_last_active=timezone.now())
def test_more_than_zero(self):
event = self.get_event()
rule = self.get_rule({
'interval': Interval.ONE_MINUTE,
'value': '0',
})
self.assertDoesNotPass(rule, event)
tsdb.incr(tsdb.models.group, event.group_id, count=1)
rule.clear_cache(event)
rule = self.get_rule({
'interval': Interval.ONE_MINUTE,
'value': '0',
})
self.assertPasses(rule, event)
| wong2/sentry | tests/sentry/rules/conditions/test_event_frequency.py | Python | bsd-3-clause | 2,228 |
# todo: use minibatch size
BATCH_SIZE=250
import time
import tensorflow as tf
from tensorlog import simple
import bigexpt
def setup_tlog(maxD,factFile,trainFile,testFile):
tlog = simple.Compiler(db=factFile,prog="grid.ppr")
tlog.prog.db.markAsParameter('edge',2)
tlog.prog.maxDepth = maxD
trainData = tlog.load_small_dataset(trainFile)
testData = tlog.load_small_dataset(testFile)
return (tlog,trainData,testData)
# run timing experiment
def timingExpt(tlog,maxD,trainFile,minibatch):
print('depth',maxD,'minibatch',minibatch)
tlog.prog.maxDepth = maxD
dset = tlog.load_dataset(trainFile)
predicted_y = tlog.inference('path/io')
session = tf.Session()
session.run(tf.global_variables_initializer())
t0 = time.time()
for mode,(tx,ty) in tlog.minibatches(dset,batch_size=minibatch):
train_fd = {tlog.input_placeholder_name('path/io'):tx,
tlog.target_output_placeholder_name('path/io'):ty}
session.run(tlog.inference(mode), feed_dict=train_fd)
break
elapsed = time.time() - t0
print('learning takes',time.time()-t0,'sec')
print(tx.shape[0],'examples','time',elapsed,'qps',tx.shape[0]/elapsed)
return elapsed
def trainAndTest(tlog,trainDataFile,testDataFile,epochs):
mode = 'path/io'
trainData = tlog.load_dataset(trainDataFile)
testData = tlog.load_dataset(testDataFile)
predicted_y = tlog.inference(mode)
actual_y = tlog.target_output_placeholder(mode)
correct_predictions = tf.equal(tf.argmax(actual_y,1), tf.argmax(predicted_y,1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
unregularized_loss = tlog.loss(mode)
optimizer = tf.train.AdagradOptimizer(1.0)
train_step = optimizer.minimize(unregularized_loss)
session = tf.Session()
session.run(tf.global_variables_initializer())
t0 = time.time()
for i in range(epochs):
print('epoch',i+1,'elapsed',time.time()-t0)
for (mode,(tx,ty)) in tlog.minibatches(trainData):
train_fd = {tlog.input_placeholder_name(mode):tx, tlog.target_output_placeholder_name(mode):ty}
session.run(train_step,feed_dict=train_fd)
print('learning takes',time.time()-t0,'sec')
tot_test = 0
tot_acc = 0
i = 0
for (mode,(ux,uy)) in tlog.minibatches(testData):
i += 1
m = ux.shape[0] #examples
    test_fd = {tlog.input_placeholder_name(mode):ux, tlog.target_output_placeholder_name(mode):uy}  # feed the current test minibatch
acc = session.run(accuracy, feed_dict=test_fd)
print('minibatch acc for batch',i,acc)
tot_test += m
tot_acc += acc*m
acc = tot_acc/tot_test
print('weighted acc',acc)
return acc
def runMain():
(goal,n,maxD,epochsOrMinibatch) = bigexpt.getargs()
(factFile,trainFile,testFile) = bigexpt.genInputs(n)
(tlog,trainData,testData) = setup_tlog(maxD,factFile,trainFile,testFile)
print('tlog.prog.maxDepth',tlog.prog.maxDepth)
if goal=='time':
print(timingExpt(tlog,maxD,trainFile,epochsOrMinibatch))
elif goal=='acc':
print(trainAndTest(tlog,trainFile,testFile,epochsOrMinibatch))
else:
assert False,'bad goal %s' % goal
if __name__=="__main__":
runMain()
| TeamCohen/TensorLog | datasets/grid/bigtfexpt.py | Python | apache-2.0 | 3,109 |
# coding=utf-8
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import Queue
import threading
import datetime
import yaml
import time
import logging
import octoprint.util as util
import octoprint.util.gcodeInterpreter as gcodeInterpreter
from octoprint.settings import settings
from werkzeug.utils import secure_filename
SUPPORTED_EXTENSIONS=["gcode", "gco", "s3g", "x3g"]
class GcodeManager:
def __init__(self):
self._logger = logging.getLogger(__name__)
self._uploadFolder = settings().getBaseFolder("uploads")
self._callbacks = []
self._metadata = {}
self._metadataDirty = False
self._metadataFile = os.path.join(self._uploadFolder, "metadata.yaml")
self._metadataFileAccessMutex = threading.Lock()
self._metadataAnalyzer = MetadataAnalyzer(getPathCallback=self.getAbsolutePath, loadedCallback=self._onMetadataAnalysisFinished)
self._loadMetadata()
self._processAnalysisBacklog()
def _processAnalysisBacklog(self):
for osFile in os.listdir(self._uploadFolder):
filename = self._getBasicFilename(osFile)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
continue
fileData = self.getFileData(filename)
if fileData is not None and "gcodeAnalysis" in fileData.keys():
continue
self._metadataAnalyzer.addFileToBacklog(filename)
def _onMetadataAnalysisFinished(self, filename, gcode):
if filename is None or gcode is None:
return
basename = os.path.basename(filename)
absolutePath = self.getAbsolutePath(basename)
if absolutePath is None:
return
analysisResult = {}
dirty = False
if gcode.totalMoveTimeMinute:
analysisResult["estimatedPrintTime"] = util.getFormattedTimeDelta(datetime.timedelta(minutes=gcode.totalMoveTimeMinute))
dirty = True
if gcode.extrusionAmount:
analysisResult["filament"] = "%.2fm" % (gcode.extrusionAmount / 1000)
if gcode.calculateVolumeCm3():
analysisResult["filament"] += " / %.2fcm³" % gcode.calculateVolumeCm3()
dirty = True
if dirty:
metadata = self.getFileMetadata(basename)
metadata["gcodeAnalysis"] = analysisResult
self._metadata[basename] = metadata
self._metadataDirty = True
self._saveMetadata()
def _loadMetadata(self):
if os.path.exists(self._metadataFile) and os.path.isfile(self._metadataFile):
with self._metadataFileAccessMutex:
with open(self._metadataFile, "r") as f:
self._metadata = yaml.safe_load(f)
if self._metadata is None:
self._metadata = {}
def _saveMetadata(self, force=False):
if not self._metadataDirty and not force:
return
with self._metadataFileAccessMutex:
with open(self._metadataFile, "wb") as f:
yaml.safe_dump(self._metadata, f, default_flow_style=False, indent=" ", allow_unicode=True)
self._metadataDirty = False
self._loadMetadata()
self._sendUpdateTrigger("gcodeFiles")
def _getBasicFilename(self, filename):
if filename.startswith(self._uploadFolder):
return filename[len(self._uploadFolder + os.path.sep):]
else:
return filename
#~~ callback handling
def registerCallback(self, callback):
self._callbacks.append(callback)
def unregisterCallback(self, callback):
if callback in self._callbacks:
self._callbacks.remove(callback)
def _sendUpdateTrigger(self, type):
for callback in self._callbacks:
try: callback.sendUpdateTrigger(type)
except: pass
#~~ file handling
def addFile(self, file):
if not file:
return None
absolutePath = self.getAbsolutePath(file.filename, mustExist=False)
if absolutePath is None:
return None
basename = self._getBasicFilename(absolutePath)
if basename in self._metadata.keys():
# delete existing metadata entry, since the file is going to get overwritten
del self._metadata[basename]
self._metadataDirty = True
self._saveMetadata()
file.save(absolutePath)
self._metadataAnalyzer.addFileToQueue(basename)
return basename
def getFutureFilename(self, file):
if not file:
return None
absolutePath = self.getAbsolutePath(file.filename, mustExist=False)
if absolutePath is None:
return None
return self._getBasicFilename(absolutePath)
def removeFile(self, filename):
filename = self._getBasicFilename(filename)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
return
os.remove(absolutePath)
if filename in self._metadata.keys():
del self._metadata[filename]
self._metadataDirty = True
self._saveMetadata()
def getAbsolutePath(self, filename, mustExist=True):
"""
Returns the absolute path of the given filename in the gcode upload folder.
Ensures that the file
<ul>
      <li>has one of the supported extensions (".gcode", ".gco", ".s3g" or ".x3g")</li>
<li>exists and is a file (not a directory) if "mustExist" is set to True</li>
</ul>
@param filename the name of the file for which to determine the absolute path
@param mustExist if set to true, the method also checks if the file exists and is a file
@return the absolute path of the file or None if the file is not valid
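    Example (hypothetical names): with an upload folder of /data/uploads,
    getAbsolutePath("test.gcode") returns "/data/uploads/test.gcode", while
    getAbsolutePath("test.txt") returns None (unsupported extension), as does a
    non-existent file when mustExist is True.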
"""
filename = self._getBasicFilename(filename)
if not util.isAllowedFile(filename.lower(), set(SUPPORTED_EXTENSIONS)):
return None
secure = os.path.join(self._uploadFolder, secure_filename(self._getBasicFilename(filename)))
if mustExist and (not os.path.exists(secure) or not os.path.isfile(secure)):
return None
return secure
def getAllFileData(self):
files = []
for osFile in os.listdir(self._uploadFolder):
fileData = self.getFileData(osFile)
if fileData is not None:
files.append(fileData)
return files
def getFileData(self, filename):
filename = self._getBasicFilename(filename)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
return None
statResult = os.stat(absolutePath)
fileData = {
"name": filename,
"size": util.getFormattedSize(statResult.st_size),
"bytes": statResult.st_size,
"date": util.getFormattedDateTime(datetime.datetime.fromtimestamp(statResult.st_ctime))
}
# enrich with additional metadata from analysis if available
if filename in self._metadata.keys():
for key in self._metadata[filename].keys():
if key == "prints":
val = self._metadata[filename][key]
formattedLast = None
if val["last"] is not None:
formattedLast = {
"date": util.getFormattedDateTime(datetime.datetime.fromtimestamp(val["last"]["date"])),
"success": val["last"]["success"]
}
formattedPrints = {
"success": val["success"],
"failure": val["failure"],
"last": formattedLast
}
fileData["prints"] = formattedPrints
else:
fileData[key] = self._metadata[filename][key]
return fileData
def getFileMetadata(self, filename):
filename = self._getBasicFilename(filename)
if filename in self._metadata.keys():
return self._metadata[filename]
else:
return {
"prints": {
"success": 0,
"failure": 0,
"last": None
}
}
def setFileMetadata(self, filename, metadata):
filename = self._getBasicFilename(filename)
self._metadata[filename] = metadata
self._metadataDirty = True
#~~ print job data
def printSucceeded(self, filename):
filename = self._getBasicFilename(filename)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
return
metadata = self.getFileMetadata(filename)
metadata["prints"]["success"] += 1
metadata["prints"]["last"] = {
"date": time.time(),
"success": True
}
self.setFileMetadata(filename, metadata)
self._saveMetadata()
def printFailed(self, filename):
filename = self._getBasicFilename(filename)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
return
metadata = self.getFileMetadata(filename)
metadata["prints"]["failure"] += 1
metadata["prints"]["last"] = {
"date": time.time(),
"success": False
}
self.setFileMetadata(filename, metadata)
self._saveMetadata()
def changeLastPrintSuccess(self, filename, succeeded):
filename = self._getBasicFilename(filename)
absolutePath = self.getAbsolutePath(filename)
if absolutePath is None:
return
metadata = self.getFileMetadata(filename)
if metadata is None:
return
if "prints" in metadata.keys():
      if "last" in metadata["prints"].keys() and metadata["prints"]["last"] is not None:
currentSucceeded = metadata["prints"]["last"]["success"]
if currentSucceeded != succeeded:
metadata["prints"]["last"]["success"] = succeeded
if currentSucceeded:
# last print job was counted as success but actually failed
metadata["prints"]["success"] -= 1
metadata["prints"]["failure"] += 1
else:
# last print job was counted as a failure but actually succeeded
metadata["prints"]["success"] += 1
metadata["prints"]["failure"] -= 1
self.setFileMetadata(filename, metadata)
self._saveMetadata()
#~~ analysis control
def pauseAnalysis(self):
self._metadataAnalyzer.pause()
def resumeAnalysis(self):
self._metadataAnalyzer.resume()
class MetadataAnalyzer:
def __init__(self, getPathCallback, loadedCallback):
self._logger = logging.getLogger(__name__)
self._getPathCallback = getPathCallback
self._loadedCallback = loadedCallback
self._active = threading.Event()
self._active.set()
self._currentFile = None
self._currentProgress = None
self._queue = Queue.PriorityQueue()
self._gcode = None
self._worker = threading.Thread(target=self._work)
self._worker.daemon = True
self._worker.start()
def addFileToQueue(self, filename):
self._logger.debug("Adding file %s to analysis queue (high priority)" % filename)
self._queue.put((0, filename))
def addFileToBacklog(self, filename):
self._logger.debug("Adding file %s to analysis backlog (low priority)" % filename)
self._queue.put((100, filename))
def working(self):
return self.isActive() and not (self._queue.empty() and self._currentFile is None)
def isActive(self):
return self._active.is_set()
def pause(self):
self._logger.debug("Pausing Gcode analyzer")
self._active.clear()
if self._gcode is not None:
self._logger.debug("Aborting running analysis, will restart when Gcode analyzer is resumed")
self._gcode.abort()
def resume(self):
self._logger.debug("Resuming Gcode analyzer")
self._active.set()
def _work(self):
aborted = None
while True:
if aborted is not None:
filename = aborted
aborted = None
self._logger.debug("Got an aborted analysis job for file %s, processing this instead of first item in queue" % filename)
else:
(priority, filename) = self._queue.get()
self._logger.debug("Processing file %s from queue (priority %d)" % (filename, priority))
self._active.wait()
try:
self._analyzeGcode(filename)
self._queue.task_done()
except gcodeInterpreter.AnalysisAborted:
aborted = filename
self._logger.debug("Running analysis of file %s aborted" % filename)
def _analyzeGcode(self, filename):
path = self._getPathCallback(filename)
if path is None:
return
self._currentFile = filename
self._currentProgress = 0
try:
self._logger.debug("Starting analysis of file %s" % filename)
self._gcode = gcodeInterpreter.gcode()
self._gcode.progressCallback = self._onParsingProgress
self._gcode.load(path)
self._logger.debug("Analysis of file %s finished, notifying callback" % filename)
self._loadedCallback(self._currentFile, self._gcode)
finally:
self._gcode = None
self._currentProgress = None
self._currentFile = None
def _onParsingProgress(self, progress):
self._currentProgress = progress
| StealthMicro/OctoPi-Makerbot | octoprint/gcodefiles.py | Python | agpl-3.0 | 11,658 |
"""
test utils module
"""
import os
from glob import glob
import hvc.utils
this_file_with_path = __file__
this_file_just_path = os.path.split(this_file_with_path)[0]
def test_fetch(tmp_output_dir):
hvc.utils.fetch(
dataset_str="sober.repo1.gy6or6.032612", # one of the smaller .gz, ~31 MB
destination_path=str(tmp_output_dir),
)
compare_dir = os.path.join(
this_file_just_path, os.path.normpath("../data_for_tests/cbins/gy6or6/032612")
)
os.chdir(compare_dir)
test_data_032612 = glob("gy6or6*")
test_data_fetched = os.listdir(os.path.join(str(tmp_output_dir), "032612"))
for file in test_data_032612:
assert file in test_data_fetched
| NickleDave/hybrid-vocal-classifier | tests/unit_test/test_utils.py | Python | bsd-3-clause | 703 |
import time
import urllib
import simplejson as json
from httplib2 import Http
from urlparse import urljoin
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
API_VERSION = '1'
class Agency(object):
debug = False
endpoints = {
'agency': 'agencies.json',
'client': 'clients/%(id)s.json',
'clients': 'clients.json',
'project': 'clients/%(client)s/projects/%(id)s.json',
'projects': 'clients/%(client)s/projects.json',
'approval': 'clients/%(client)s/projects/%(project)s/approvals/%(id)s.json',
'approvals': 'clients/%(client)s/projects/%(project)s/approvals.json',
'page': 'clients/%(client)s/projects/%(project)s/pages/%(id)s.json',
'pages': 'clients/%(client)s/projects/%(project)s/pages.json',
        'revision': 'clients/%(client)s/projects/%(project)s/pages/%(page)s/revision/%(id)s.json',
'revisions': 'clients/%(client)s/projects/%(project)s/pages/%(page)s/revisions.json'
}
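    # Illustrative usage (hypothetical key and ids; the APIError raised in
    # _request below is assumed to be defined elsewhere in this package):
    #
    #   agency = Agency('my-api-key')
    #   clients = agency.get_clients()            # parsed JSON from clients.json
    #   project = agency.get_project(client=1, id=2)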
def __init__(self, api_key, api_version=API_VERSION, host='api.clientend.com'):
self.host = host
self.api_version = api_version
self.api_key = api_key
self.http = Http()
self.uri = 'http://%s/v%s' % (host, api_version)
if self.api_key is None:
raise ValueError('Must set API Key')
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return 'Agency (api_key=%s)' % (self.api_key)
def endpoint(self, name, **kwargs):
try:
endpoint = self.endpoints[name]
except KeyError:
raise Exception("No endpoint named '%s'" % name)
try:
endpoint = endpoint % kwargs
except KeyError, e:
raise TypeError("Missing required argument '%s'" % (e.args[0],))
return urljoin(urljoin(self.uri, 'v'+ self.api_version + '/'), endpoint)
# Clients
def get_clients(self):
endpoint = self.endpoint('clients')
return self._request(endpoint, 'GET')
def get_client(self, id):
endpoint = self.endpoint('client', id=id)
return self._request(endpoint, 'GET')
# Projects
def get_projects(self, client):
endpoint = self.endpoint('projects', client=client)
return self._request(endpoint, 'GET')
def get_project(self, client, id):
endpoint = self.endpoint('project', client=client, id=id)
return self._request(endpoint, 'GET')
# Approvals
def get_approvals(self, client, project):
endpoint = self.endpoint('approvals', client=client, project=project)
return self._request(endpoint, 'GET')
def get_approval(self, client, project, id):
endpoint = self.endpoint('approval', client=client, project=project, id=id)
return self._request(endpoint, 'GET')
# Projects
def get_pages(self, client, project):
endpoint = self.endpoint('pages', client=client, project=project)
return self._request(endpoint, 'GET')
def get_page(self, client, project, id):
endpoint = self.endpoint('page', client=client, project=project, id=id)
return self._request(endpoint, 'GET')
# Projects
def get_revisions(self, client, project, page):
endpoint = self.endpoint('revisions', client=client, project=project, page=page)
return self._request(endpoint, 'GET')
def get_revision(self, client, project, page, id):
endpoint = self.endpoint('revision', client=client, project=project, page=page, id=id)
return self._request(endpoint, 'GET')
def _request(self, endpoint, method, data=None):
body = None
if data is None:
data = { 'api_key': self.api_key }
else:
if isinstance(data, dict):
data['api_key'] = self.api_key
if method == 'GET' and isinstance(data, dict):
endpoint = endpoint + '?' + urllib.urlencode(data)
else:
if isinstance(data, dict):
body = urllib.urlencode(data)
else:
body = data
resp, content = self.http.request(endpoint, method, body=body)
if self.debug:
print resp
print content
if content:
try:
content = json.loads(content)
except ValueError:
return content
if resp['status'][0] != '2':
code = resp['status']
message = content
if isinstance(content, dict):
code = content['code']
message = content['message']
raise APIError(code, message, resp)
        return content
| dewski/clientend-python | clientend/__init__.py | Python | mit | 4,864 |
#!/usr/bin/python
# Entry point: starts the configured peer / super-peer client and server threads
from p2p.client import client
from p2p.super_client import super_client
from p2p.server import server
from p2p.super_server import super_server
from p2p.peer_client import peer_client
from threading import Thread
import time
import signal
import sys
from conf.params import *
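# The star import above is expected to supply the flags and intervals used in
# this script. A minimal, hypothetical conf/params.py (values are illustrative
# only) could look like:
#
#   super_peer_enable = False
#   peer_enable = True
#   super_client_interval = 30
#   client_interval = 10
#   peer_client_interval = 10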
def signal_handler(signal, frame):
print(" ")
print("Wait for some time as system is shutting down...")
if super_peer_enable:
super_client_thread.keepRunning = False
super_server_thread.keepRunning = False
if peer_enable:
server_thread.keepRunning = False
client_thread.keepRunning = False
else:
peer_client_thread.keepRunning = False
server_thread.keepRunning = False
client_thread.keepRunning = False
time.sleep(10)
sys.exit(0)
if __name__ == "__main__":
print("------------------------------")
print("Server Health Monitorig System")
print("02220 - Distributed Systems")
print("Project By:")
print("s135552 - Andrew Habib")
print("s135551 - Dheeraj Kumar Bansal")
print("------------------------------")
if super_peer_enable:
print("Running as a Super Peer")
if peer_enable:
print("Also Running as a Normal Peer")
else:
print("Running as a Normal Peer")
if super_peer_enable:
super_server_thread = super_server()
super_server_thread.start()
super_client_thread = super_client(super_client_interval)
super_client_thread.start()
if peer_enable:
client_thread = client(client_interval)
server_thread = server()
server_thread.start()
client_thread.start()
else:
peer_client_thread = peer_client(peer_client_interval)
peer_client_thread.start()
client_thread = client(client_interval)
server_thread = server()
server_thread.start()
client_thread.start()
signal.signal(signal.SIGINT, signal_handler)
while True:
time.sleep(1)
#signal.pause()
| cloudbansal/p2p_health | setup.py | Python | mit | 2,149 |