repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = may be null) |
---|---|---|---|---|
aaronorosen/horizon-congress | refs/heads/master | openstack_dashboard/dashboards/project/access_and_security/keypairs/urls.py | 5 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import views
urlpatterns = patterns('',
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^import/$', views.ImportView.as_view(), name='import'),
url(r'^(?P<keypair_name>[^/]+)/download/$', views.DownloadView.as_view(),
name='download'),
url(r'^(?P<keypair_name>[^/]+)/generate/$', views.GenerateView.as_view(),
name='generate'),
)
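# Illustrative usage (a sketch, not taken from this file): these named routes
# are normally resolved with Django's reverse().  The namespace prefix depends
# on how Horizon registers this panel, so the string below is an assumption:
#
#   from django.core.urlresolvers import reverse
#   reverse('horizon:project:access_and_security:keypairs:download',
#           kwargs={'keypair_name': 'my-key'})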
|
klingebj/regreg | refs/heads/master | code/regreg/atoms/block_norms.py | 1 | """
This module contains the implementation of block norms, i.e.
l1/l*, linf/l* norms. These are used in multiresponse LASSOs.
"""
import warnings
import numpy as np
import seminorms
from ..identity_quadratic import identity_quadratic
from ..problems.composite import smooth_conjugate
from ..objdoctemplates import objective_doc_templater
from ..doctemplates import (doc_template_user, doc_template_provider)
@objective_doc_templater()
class block_sum(seminorms.seminorm):
_doc_dict = {'linear':r' + \text{Tr}(\eta^T X)',
'constant':r' + \tau',
'objective': '',
'shape':r'p \times q',
'var':r'X'}
objective_template = r"""\|%(var)s\|_{1,h}"""
objective_vars = {'var': r'X + A'}
def __init__(self, atom_cls, shape,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
seminorms.seminorm.__init__(self,
shape,
quadratic=quadratic,
offset=offset,
initial=initial,
lagrange=lagrange,
bound=bound)
self.atom = atom_cls(shape[1:], lagrange=lagrange,
bound=bound,
offset=None,
quadratic=quadratic)
def seminorms(self, x, lagrange=None, check_feasibility=False):
value = np.empty(self.shape[0])
for i in range(self.shape[0]):
value[i] = self.atom.seminorm(x[i], lagrange=lagrange,
check_feasibility=False)
return value
def seminorm(self, x, check_feasibility=False,
lagrange=None):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
return lagrange * np.sum( \
self.seminorms(x, check_feasibility=check_feasibility,
lagrange=1.))
def constraint(self, x):
# XXX should we check feasibility here?
x = x.reshape(self.shape)
v = np.sum(self.seminorms(x, check_feasibility=False))
if v <= self.bound * (1 + self.tol):
return 0
return np.inf
def lagrange_prox(self, x, lipschitz=1, lagrange=None):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.lagrange_prox(self, x, lipschitz, lagrange)
v = np.empty(x.shape)
for i in xrange(self.shape[0]):
v[i] = self.atom.lagrange_prox(x[i], lipschitz=lipschitz,
lagrange=lagrange)
return v
def bound_prox(self, x, bound=None):
x = x.reshape(self.shape)
warnings.warn('bound_prox of block_sum requires a little thought -- should be like l1prox')
return 0 * x
def get_lagrange(self):
return self.atom.lagrange
def set_lagrange(self, lagrange):
self.atom.lagrange = lagrange
lagrange = property(get_lagrange, set_lagrange)
def get_bound(self):
return self.atom.bound
def set_bound(self, bound):
self.atom.bound = bound
bound = property(get_bound, set_bound)
@property
def conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = conjugate_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom_cls = conj_atom.__class__
atom = cls(atom_cls,
self.shape,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
@objective_doc_templater()
class block_max(block_sum):
objective_template = r"""\|%(var)s\|_{\infty,h}"""
objective_vars = {'var': r'X + A'}
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
return lagrange * np.max(self.seminorms(x,
lagrange=1.,
check_feasibility=check_feasibility))
def constraint(self, x, bound=None):
x = x.reshape(self.shape)
bound = seminorms.seminorm.constraint(self, x, bound=bound)
# XXX should we check feasibility here?
v = np.max(self.seminorms(x, lagrange=1., check_feasibility=False))
if v <= self.bound * (1 + self.tol):
return 0
return np.inf
def lagrange_prox(self, x, lipschitz=1, lagrange=None):
warnings.warn('lagrange_prox of block_max requires a little thought -- should be like l1prox')
return 0 * x
def bound_prox(self, x, bound=None):
x = x.reshape(self.shape)
bound = seminorms.seminorm.bound_prox(self, x,
bound=bound)
v = np.empty(x.shape)
for i in xrange(self.shape[0]):
v[i] = self.atom.bound_prox(x[i],
bound=bound)
return v
@objective_doc_templater()
class linf_l2(block_max):
objective_template = r"""\|%(var)s\|_{\infty,2}"""
objective_vars = {'var': r'X + A'}
def __init__(self, shape,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
block_max.__init__(self, seminorms.l2norm,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
def constraint(self, x):
x = x.reshape(self.shape)
norm_max = np.sqrt((x**2).sum(1)).max()
if norm_max <= self.bound * (1 + self.tol):
return 0
return np.inf
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
norm_max = np.sqrt((x**2).sum(1)).max()
return lagrange * norm_max
def bound_prox(self, x, bound=None):
x = x.reshape(self.shape)
norm = np.sqrt((x**2).sum(1))
bound = seminorms.seminorm.bound_prox(self, x,
bound=bound)
v = x.copy()
v[norm >= bound] *= bound / norm[norm >= bound][:,np.newaxis]
return v
@property
def conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = conjugate_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
@objective_doc_templater()
class linf_linf(linf_l2):
objective_template = r"""\|%(var)s\|_{\infty,\infty}"""
objective_vars = {'var': r'X + A'}
def __init__(self, shape,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
block_max.__init__(self, seminorms.l2norm,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
def constraint(self, x):
x = x.reshape(self.shape)
norm_max = np.fabs(x).max()
if norm_max <= self.bound * (1 + self.tol):
return 0
return np.inf
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
norm_max = np.fabs(x).max()
return lagrange * norm_max
def bound_prox(self, x, bound=None):
x = x.reshape(self.shape)
bound = seminorms.seminorm.bound_prox(self, x,
bound=bound)
# print 'bound', bound
return np.clip(x, -bound, bound)
@objective_doc_templater()
class l1_l2(block_sum):
objective_template = r"""\|%(var)s\|_{1,2}"""
objective_vars = {'var': r'X + A'}
def __init__(self, shape,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
block_sum.__init__(self, seminorms.l2norm,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
def lagrange_prox(self, x, lipschitz=1, lagrange=None):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.lagrange_prox(self, x, lipschitz, lagrange)
norm = np.sqrt((x**2).sum(1))
mult = np.maximum(norm - lagrange / lipschitz, 0) / norm
return x * mult[:, np.newaxis]
@property
def conjugate(self):
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = conjugate_block_pairs[self.__class__]
conj_atom = self.atom.conjugate
atom = cls(self.shape,
offset=offset,
lagrange=conj_atom.lagrange,
bound=conj_atom.bound,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
def constraint(self, x):
x = x.reshape(self.shape)
norm_sum = np.sqrt((x**2).sum(1)).sum()
if norm_sum <= self.bound * (1 + self.tol):
return 0
return np.inf
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
norm_sum = np.sum(np.sqrt((x**2).sum(1)))
return lagrange * norm_sum
@objective_doc_templater()
class l1_l1(l1_l2):
objective_template = r"""\|%(var)s\|_{1,1}"""
objective_vars = {'var': r'X + A'}
def __init__(self, shape,
lagrange=None,
bound=None,
offset=None,
quadratic=None,
initial=None):
block_sum.__init__(self, seminorms.l2norm,
shape,
lagrange=lagrange,
bound=bound,
offset=offset,
quadratic=quadratic,
initial=initial)
def lagrange_prox(self, x, lipschitz=1, lagrange=None):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.lagrange_prox(self, x, lipschitz, lagrange)
norm = np.fabs(x)
return np.maximum(norm - lagrange, 0) * np.sign(x)
def constraint(self, x):
x = x.reshape(self.shape)
norm_sum = np.fabs(x).sum()
if norm_sum <= self.bound * (1 + self.tol):
return 0
return np.inf
def seminorm(self, x, lagrange=None, check_feasibility=False):
x = x.reshape(self.shape)
lagrange = seminorms.seminorm.seminorm(self, x, lagrange=lagrange,
check_feasibility=check_feasibility)
norm_sum = np.fabs(x).sum()
return lagrange * norm_sum
conjugate_block_pairs = {}
for n1, n2 in [(block_max, block_sum),
(l1_l2, linf_l2),
(l1_l1, linf_linf)
]:
conjugate_block_pairs[n1] = n2
conjugate_block_pairs[n2] = n1
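# Illustrative usage (a sketch, not part of the original module): the l1/l2
# block norm defined above is the group-lasso penalty -- its seminorm is the
# sum of row-wise l2 norms and its Lagrange prox soft-thresholds each row.
# The shape and lagrange value below are arbitrary.
if __name__ == "__main__":
    X = np.arange(15.).reshape((5, 3))            # 5 groups (rows) of size 3
    atom = l1_l2(X.shape, lagrange=2.)
    print atom.seminorm(X)                        # 2 * sum of row-wise l2 norms
    print atom.lagrange_prox(X, lipschitz=1.)     # each row shrunk toward zero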
|
xuxiao/zulip | refs/heads/master | zerver/lib/socket.py | 121 | from __future__ import absolute_import
from django.conf import settings
from django.utils.importlib import import_module
from django.utils import timezone
from django.contrib.sessions.models import Session as djSession
import sockjs.tornado
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.lib.event_queue import get_client_descriptor
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.session_user import get_session_user
logger = logging.getLogger('zulip.socket')
djsession_engine = import_module(settings.SESSION_ENGINE)
def get_user_profile(session_id):
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone.now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return UserProfile.objects.get(pk=get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict()
def get_connection(id):
return connections.get(id)
def register_connection(id, conn):
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn):
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id):
return 'socket_req_status:%s' % (req_id,)
class SocketAuthError(Exception):
def __init__(self, msg):
self.msg = msg
class CloseErrorInfo(object):
def __init__(self, status_code, err_msg):
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
def on_open(self, info):
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout():
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise SocketAuthError('Unknown or missing session')
self.session.user_profile = user_profile
if msg['request']['csrf_token'] != self.csrf_token:
raise SocketAuthError('CSRF token does not match that in cookie')
if not 'queue_id' in msg['request']:
raise SocketAuthError("Missing 'queue_id' argument")
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise SocketAuthError('Bad event queue id: %s' % (queue_id,))
if user_profile.id != client.user_profile_id:
raise SocketAuthError("You are not the owner of the queue with id '%s'" % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {}
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry))
if len(status) == 0:
status['status'] = 'not_received'
if 'response' in status:
status['response'] = ujson.loads(status['response'])
results[str(inquiry)] = status
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg):
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg)
if self.did_close:
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except SocketAuthError as e:
response = {'result': 'error', 'msg': e.msg}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue="tornado_return",
log_data=log_data,
request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
fake_message_sender)
def on_close(self):
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
def fake_message_sender(event):
log_data = dict()
record_request_start_data(log_data)
req = event['request']
try:
sender = get_user_profile_by_id(event['server_meta']['user_id'])
client = get_client(req['client'])
msg_id = check_send_message(sender, client, req['type'],
extract_recipients(req['to']),
req['subject'], req['content'],
local_id=req.get('local_id', None),
sender_queue_id=req.get('queue_id', None))
resp = {"result": "success", "msg": "", "id": msg_id}
except JsonableError as e:
resp = {"result": "error", "msg": str(e)}
server_meta = event['server_meta']
server_meta.update({'worker_log_data': log_data,
'time_request_finished': time.time()})
result = {'response': resp, 'req_id': event['req_id'],
'server_meta': server_meta}
respond_send_message(result)
def respond_send_message(data):
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (settings.EXTERNAL_HOST,),
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router():
return sockjs_router
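# For reference (a sketch inferred from authenticate_client/on_message above;
# the field values are illustrative only): the first message a client sends
# over this SockJS connection is expected to look roughly like
#
#   {"req_id": "<client-chosen id>", "type": "auth",
#    "request": {"csrf_token": "<value of the CSRF cookie>",
#                "queue_id": "<previously registered event queue id>",
#                "status_inquiries": []}}
#
# Any later message with another "type" is treated as a service request and is
# forwarded to the message_sender queue once the connection is authenticated.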
|
xchenum/quantum | refs/heads/master | quantum/tests/unit/extensions/v2attributes.py | 2 | # Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
EXTENDED_ATTRIBUTES_2_0 = {
'networks': {
'v2attrs:something': {'allow_post': False,
'allow_put': False,
'is_visible': True},
'v2attrs:something_else': {'allow_post': True,
'allow_put': False,
'is_visible': False},
}
}
class V2attributes(object):
def get_name(self):
return "V2 Extended Attributes Example"
def get_alias(self):
return "v2attrs"
def get_description(self):
return "Demonstrates extended attributes on V2 core resources"
def get_namespace(self):
return "http://docs.openstack.org/ext/examples/v2attributes/api/v1.0"
def get_updated(self):
return "2012-07-18T10:00:00-00:00"
def get_extended_attributes(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
|
MIPS/external-chromium_org-tools-gyp | refs/heads/dev-mips-jb-kitkat | test/intermediate_dir/gyptest-intermediate-dir.py | 243 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that targets have independent INTERMEDIATE_DIRs.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('test.gyp', chdir='src')
test.build('test.gyp', 'target1', chdir='src')
# Check stuff exists.
intermediate_file1 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
shared_intermediate_file1 = test.read('src/shared_outfile.txt')
test.must_contain(shared_intermediate_file1, 'shared_target1')
test.run_gyp('test2.gyp', chdir='src')
# Force the shared intermediate to be rebuilt.
test.sleep()
test.touch('src/shared_infile.txt')
test.build('test2.gyp', 'target2', chdir='src')
# Check INTERMEDIATE_DIR file didn't get overwritten but SHARED_INTERMEDIATE_DIR
# file did.
intermediate_file2 = test.read('src/outfile.txt')
test.must_contain(intermediate_file1, 'target1')
test.must_contain(intermediate_file2, 'target2')
shared_intermediate_file2 = test.read('src/shared_outfile.txt')
if shared_intermediate_file1 != shared_intermediate_file2:
test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)
test.must_contain(shared_intermediate_file1, 'shared_target2')
test.must_contain(shared_intermediate_file2, 'shared_target2')
test.pass_test()
|
ruo91/letsencrypt | refs/heads/master | letsencrypt/proof_of_possession.py | 14 | """Proof of Possession Identifier Validation Challenge."""
import logging
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import zope.component
from acme import challenges
from acme import jose
from acme import other
from letsencrypt import interfaces
from letsencrypt.display import util as display_util
logger = logging.getLogger(__name__)
class ProofOfPossession(object): # pylint: disable=too-few-public-methods
"""Proof of Possession Identifier Validation Challenge.
Based on draft-barnes-acme, section 6.5.
:ivar installer: Installer object
:type installer: :class:`~letsencrypt.interfaces.IInstaller`
"""
def __init__(self, installer):
self.installer = installer
def perform(self, achall):
"""Perform the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
:returns: Response or None/False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if (achall.alg in [jose.HS256, jose.HS384, jose.HS512] or
not isinstance(achall.hints.jwk, achall.alg.kty)):
return None
for cert, key, _ in self.installer.get_all_certs_keys():
with open(cert) as cert_file:
cert_data = cert_file.read()
try:
cert_obj = x509.load_pem_x509_certificate(
cert_data, default_backend())
except ValueError:
try:
cert_obj = x509.load_der_x509_certificate(
cert_data, default_backend())
except ValueError:
logger.warn("Certificate is neither PER nor DER: %s", cert)
cert_key = achall.alg.kty(key=cert_obj.public_key())
if cert_key == achall.hints.jwk:
return self._gen_response(achall, key)
# Is there a different prompt we should give the user?
code, key = zope.component.getUtility(
interfaces.IDisplay).input(
"Path to private key for identifier: %s " % achall.domain)
if code != display_util.CANCEL:
return self._gen_response(achall, key)
# If we get here, the key wasn't found
return False
def _gen_response(self, achall, key_path): # pylint: disable=no-self-use
"""Create the response to the Proof of Possession Challenge.
:param achall: Proof of Possession Challenge
:type achall: :class:`letsencrypt.achallenges.ProofOfPossession`
:param str key_path: Path to the key corresponding to the hinted to
public key.
:returns: Response or False if the challenge cannot be completed
:rtype: :class:`acme.challenges.ProofOfPossessionResponse`
or False
"""
if os.path.isfile(key_path):
with open(key_path, 'rb') as key:
try:
# Needs to be changed if JWKES doesn't have a key attribute
jwk = achall.alg.kty.load(key.read())
sig = other.Signature.from_msg(achall.nonce, jwk.key,
alg=achall.alg)
except (IndexError, ValueError, TypeError, jose.errors.Error):
return False
return challenges.ProofOfPossessionResponse(nonce=achall.nonce,
signature=sig)
return False
|
parker-mar/BB10-WebWorks-Community-Samples | refs/heads/master | Ant-Cordova-Build-Script/tools/apache-ant-1.8.2/bin/runant.py | 124 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
runant.py
This script is a translation of the runant.pl written by Steve Loughran.
It runs ant with or without arguments; it should be quite portable (thanks to
the python os library)
This script has been tested with Python2.0/Win2K
created: 2001-04-11
author: Pierre Dittgen pierre.dittgen@criltelecom.com
Assumptions:
- the "java" executable/script is on the command path
"""
import os, os.path, string, sys
# Change it to 1 to get extra debug information
debug = 0
#######################################################################
# If ANT_HOME is not set default to script's parent directory
if os.environ.has_key('ANT_HOME'):
ANT_HOME = os.environ['ANT_HOME']
else:
ANT_HOME = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
# set ANT_LIB location
ANT_LIB = os.path.join(ANT_HOME, 'lib')
# set JAVACMD (check variables JAVACMD and JAVA_HOME)
JAVACMD = None
if not os.environ.has_key('JAVACMD'):
if os.environ.has_key('JAVA_HOME'):
if not os.path.exists(os.environ['JAVA_HOME']):
print "Warning: JAVA_HOME is not defined correctly."
else:
JAVACMD = os.path.join(os.environ['JAVA_HOME'], 'bin', 'java')
else:
print "Warning: JAVA_HOME not set."
else:
JAVACMD = os.environ['JAVACMD']
if not JAVACMD:
JAVACMD = 'java'
launcher_jar = os.path.join(ANT_LIB, 'ant-launcher.jar')
if not os.path.exists(launcher_jar):
print 'Warning: Unable to locate ant-launcher.jar. Expected to find it in %s' % \
ANT_LIB
# Build up standard classpath (LOCALCLASSPATH)
LOCALCLASSPATH = launcher_jar
if os.environ.has_key('LOCALCLASSPATH'):
LOCALCLASSPATH += os.pathsep + os.environ['LOCALCLASSPATH']
ANT_OPTS = ""
if os.environ.has_key('ANT_OPTS'):
ANT_OPTS = os.environ['ANT_OPTS']
OPTS = ""
if os.environ.has_key('JIKESPATH'):
OPTS = '-Djikes.class.path=\"%s\"' % os.environ['JIKESPATH']
ANT_ARGS = ""
if os.environ.has_key('ANT_ARGS'):
ANT_ARGS = os.environ['ANT_ARGS']
CLASSPATH = ""
if os.environ.has_key('CLASSPATH'):
CLASSPATH = "-lib " + os.environ['CLASSPATH']
# Builds the commandline
cmdline = ('%s %s -classpath %s -Dant.home=%s %s ' + \
'org.apache.tools.ant.launch.Launcher %s %s %s') \
% (JAVACMD, ANT_OPTS, LOCALCLASSPATH, ANT_HOME, OPTS, ANT_ARGS, \
CLASSPATH, string.join(sys.argv[1:], ' '))
if debug:
print '\n%s\n\n' % (cmdline)
sys.stdout.flush()
# Run the biniou!
os.system(cmdline)
|
diorcety/translate | refs/heads/master | translate/filters/decorators.py | 3 | # -*- coding: utf-8 -*-
#
# Copyright 2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Decorators to categorize pofilter checks."""
from functools import wraps
#: Quality checks' failure categories
class Category(object):
CRITICAL = 100
FUNCTIONAL = 60
COSMETIC = 30
EXTRACTION = 10
NO_CATEGORY = 0
def critical(f):
@wraps(f)
def critical_f(self, *args, **kwargs):
if f.__name__ not in self.categories:
self.categories[f.__name__] = Category.CRITICAL
return f(self, *args, **kwargs)
return critical_f
def functional(f):
@wraps(f)
def functional_f(self, *args, **kwargs):
if f.__name__ not in self.categories:
self.categories[f.__name__] = Category.FUNCTIONAL
return f(self, *args, **kwargs)
return functional_f
def cosmetic(f):
@wraps(f)
def cosmetic_f(self, *args, **kwargs):
if f.__name__ not in self.categories:
self.categories[f.__name__] = Category.COSMETIC
return f(self, *args, **kwargs)
return cosmetic_f
def extraction(f):
@wraps(f)
def extraction_f(self, *args, **kwargs):
if f.__name__ not in self.categories:
self.categories[f.__name__] = Category.EXTRACTION
return f(self, *args, **kwargs)
return extraction_f
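# Illustrative usage (a sketch, not part of the original module): the
# decorators above expect to wrap methods of an object that exposes a
# `categories` dict.  The ToyChecks class below is hypothetical and only shows
# how a decorated check registers its failure category the first time it runs.
if __name__ == "__main__":
    class ToyChecks(object):
        def __init__(self):
            self.categories = {}

        @critical
        def escapes(self, str1, str2):
            return str1.count("\\") == str2.count("\\")

    checker = ToyChecks()
    assert checker.escapes("a\\tb", "a\\tb")
    assert checker.categories["escapes"] == Category.CRITICAL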
|
dim0/ansible-commander | refs/heads/master | lib/main/migrations/0006_changes.py | 3 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Team.organization'
db.add_column(u'main_team', 'organization',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='teams', null=True, on_delete=models.SET_NULL, to=orm['main.Organization']),
keep_default=False)
# Removing M2M table for field organizations on 'Team'
db.delete_table('main_team_organizations')
def backwards(self, orm):
# Deleting field 'Team.organization'
db.delete_column(u'main_team', 'organization_id')
# Adding M2M table for field organizations on 'Team'
db.create_table(u'main_team_organizations', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('team', models.ForeignKey(orm['main.team'], null=False)),
('organization', models.ForeignKey(orm['main.organization'], null=False))
))
db.create_unique(u'main_team_organizations', ['team_id', 'organization_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'djcelery.taskmeta': {
'Meta': {'object_name': 'TaskMeta', 'db_table': "'celery_taskmeta'"},
'date_done': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta': ('djcelery.picklefield.PickledObjectField', [], {'default': 'None', 'null': 'True'}),
'result': ('djcelery.picklefield.PickledObjectField', [], {'default': 'None', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}),
'task_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'main.audittrail': {
'Meta': {'object_name': 'AuditTrail'},
'comment': ('django.db.models.fields.TextField', [], {}),
'delta': ('django.db.models.fields.TextField', [], {}),
'detail': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Tag']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
'main.credential': {
'Meta': {'object_name': 'Credential'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'credential_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credentials'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['main.Project']", 'blank': 'True', 'null': 'True'}),
'ssh_key_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'ssh_key_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096', 'blank': 'True'}),
'ssh_key_unlock': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'ssh_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'credential_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credentials'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Team']", 'blank': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credentials'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['auth.User']", 'blank': 'True', 'null': 'True'})
},
'main.group': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Group'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'group_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Inventory']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['main.Group']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'group_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'variable_data': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'group'", 'unique': 'True', 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.VariableData']", 'blank': 'True', 'null': 'True'})
},
'main.host': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Host'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'host_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts'", 'to': "orm['main.Inventory']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'host_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'variable_data': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'host'", 'unique': 'True', 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.VariableData']", 'blank': 'True', 'null': 'True'})
},
'main.inventory': {
'Meta': {'unique_together': "(('name', 'organization'),)", 'object_name': 'Inventory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'inventory_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventories'", 'to': "orm['main.Organization']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'inventory_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"})
},
'main.launchjob': {
'Meta': {'object_name': 'LaunchJob'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'launchjob_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'launchjob\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Inventory']", 'blank': 'True', 'null': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['main.Project']", 'blank': 'True', 'null': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'launchjob_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['auth.User']", 'blank': 'True', 'null': 'True'})
},
'main.launchjobstatus': {
'Meta': {'object_name': 'LaunchJobStatus'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'launchjobstatus_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'celery_task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_job_statuses'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': u"orm['djcelery.TaskMeta']", 'blank': 'True', 'null': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'launchjobstatus\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'launch_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_job_statuses'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.LaunchJob']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'result_stderr': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_stdout': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '20'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'launchjobstatus_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"})
},
'main.launchjobstatusevent': {
'Meta': {'object_name': 'LaunchJobStatusEvent'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'event_data': ('jsonfield.fields.JSONField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'launch_job_status': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'launch_job_status_events'", 'to': "orm['main.LaunchJobStatus']"})
},
'main.organization': {
'Meta': {'object_name': 'Organization'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_of_organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organization_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['main.Project']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organization_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.permission': {
'Meta': {'object_name': 'Permission'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'permission_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['main.Project']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'permission_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"})
},
u'main.project': {
'Meta': {'object_name': 'Project'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'project_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'project\', \'app_label\': u\'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_playbook': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['main.Inventory']"}),
'local_repository': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'scm_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'project_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"})
},
'main.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'main.team': {
'Meta': {'object_name': 'Team'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'team_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['main.Project']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'team_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.variabledata': {
'Meta': {'object_name': 'VariableData'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'audit_trail': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'variabledata_by_audit_trail'", 'blank': 'True', 'to': "orm['main.AuditTrail']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': '"{\'class\': \'variabledata\', \'app_label\': \'main\'}(class)s_created"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'variabledata_by_tag'", 'blank': 'True', 'to': "orm['main.Tag']"})
}
}
complete_apps = ['main'] |
oleiade/Elevator | refs/heads/master | elevator/utils/patterns.py | 2 | from collections import Sequence
# Enums beautiful python implementation
# Used like this :
# Numbers = enum('ZERO', 'ONE', 'TWO')
# >>> Numbers.ZERO
# 0
# >>> Numbers.ONE
# 1
# Found here: http://stackoverflow.com/questions/36932/whats-the-best-way-to-implement-an-enum-in-python
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
class Singleton(type):
def __init__(cls, name, bases, dict):
super(Singleton, cls).__init__(name, bases, dict)
cls.instance = None
def __call__(cls, *args, **kw):
if cls.instance is None:
cls.instance = super(Singleton, cls).__call__(*args, **kw)
return cls.instance
def __del__(cls, *args, **kw):
cls.instance = None  # release the cached instance
class DestructurationError(Exception):
pass
def destructurate(container):
try:
return container[0], container[1:]
    except (KeyError, AttributeError, TypeError):
raise DestructurationError("Can't destructurate a non-sequence container")
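# For example, destructurate([1, 2, 3]) returns (1, [2, 3]) and
# destructurate("abc") returns ('a', 'bc').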
|
yekeqiang/mypython | refs/heads/master | socketserver.py | 1 | #!/usr/bin/env python
# Basic SocketServer Example
from SocketServer import ThreadingMixIn, TCPServer, StreamRequestHandler
import time
class TimeRequestHandler(StreamRequestHandler):
def handle(self):
req = self.rfile.readline().strip()
if req == "asctime":
result = time.asctime()
elif req == "seconds":
result = str(int(time.time()))
elif req == "rfc822":
result = time.strftime(
"%a, %d %b %Y %H %H:%M:%S +0000", time.gmtime())
else:
result = """Unhandled request. Send a line with one of the following words:
asctime -- for human-readable time
seconds -- seconds since the Unix Epoch
rfc822 -- date/time in format used for mail and news posts"""
        self.wfile.write(result + "\n")
class TimeServer(ThreadingMixIn, TCPServer):
allow_reuse_address = 1
serveraddr = ('', 8765)
srvr = TimeServer(serveraddr, TimeRequestHandler)
srvr.serve_forever()
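# Minimal client sketch (illustrative only; assumes the server above is
# listening on localhost:8765):
#
#   import socket
#   s = socket.create_connection(('localhost', 8765))
#   s.sendall('asctime\n')
#   print s.recv(1024)
#   s.close()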
|
Geode/geonode | refs/heads/master | geonode/geoserver/tests.py | 12 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import base64
import json
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from guardian.shortcuts import assign_perm, get_anonymous_user
from geonode.geoserver.helpers import OGC_Servers_Handler
from geonode.base.populate_test_data import create_models
from geonode.layers.populate_layers_data import create_layer_data
from geonode.layers.models import Layer
class LayerTests(TestCase):
fixtures = ['initial_data.json', 'bobby']
def setUp(self):
self.user = 'admin'
self.passwd = 'admin'
create_models(type='layer')
create_layer_data()
def test_style_manager(self):
"""
Ensures the layer_style_manage route returns a 200.
"""
layer = Layer.objects.all()[0]
bob = get_user_model().objects.get(username='bobby')
assign_perm('change_layer_style', bob, layer)
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.get(reverse('layer_style_manage', args=(layer.typename,)))
self.assertEqual(response.status_code, 200)
def test_feature_edit_check(self):
"""Verify that the feature_edit_check view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].typename
Layer.objects.all()[0].set_default_permissions()
invalid_layer_typename = "n0ch@nc3"
        # Test that an invalid layer.typename is handled properly
response = self.client.post(
reverse(
'feature_edit_check',
args=(
invalid_layer_typename,
)))
self.assertEquals(response.status_code, 404)
# First test un-authenticated
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
# Test that the method returns 401 because it's not a datastore
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], False)
layer = Layer.objects.all()[0]
layer.storeType = "dataStore"
layer.save()
# Test that the method returns authorized=True if it's a datastore
if settings.OGC_SERVER['default']['DATASTORE']:
# The check was moved from the template into the view
response = self.client.post(
reverse(
'feature_edit_check',
args=(
valid_layer_typename,
)))
response_json = json.loads(response.content)
self.assertEquals(response_json['authorized'], True)
def test_layer_acls(self):
""" Verify that the layer_acls view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = '%s:%s' % ('bobby', 'bob')
invalid_uname_pw = '%s:%s' % ('n0t', 'v@l1d')
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
bob = get_user_model().objects.get(username='bobby')
layer_ca = Layer.objects.get(typename='geonode:CA')
assign_perm('change_layer_data', bob, layer_ca)
# Test that requesting when supplying the geoserver credentials returns
# the expected json
expected_result = {
u'email': u'bobby@bob.com',
u'fullname': u'bobby',
u'is_anonymous': False,
u'is_superuser': False,
u'name': u'bobby',
u'ro': [u'geonode:layer2',
u'geonode:mylayer',
u'geonode:foo',
u'geonode:whatever',
u'geonode:fooey',
u'geonode:quux',
u'geonode:fleem'],
u'rw': [u'geonode:CA']
}
response = self.client.get(reverse('layer_acls'), **valid_auth_headers)
response_json = json.loads(response.content)
# 'ro' and 'rw' are unsorted collections
self.assertEquals(sorted(expected_result), sorted(response_json))
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = self.client.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_acls'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
# TODO Lots more to do here once jj0hns0n understands the ACL system
# better
def test_resolve_user(self):
"""Verify that the resolve_user view is behaving as expected
"""
# Test that HTTP_AUTHORIZATION in request.META is working properly
valid_uname_pw = "%s:%s" % ('admin', 'admin')
invalid_uname_pw = "%s:%s" % ("n0t", "v@l1d")
valid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' + base64.b64encode(valid_uname_pw),
}
invalid_auth_headers = {
'HTTP_AUTHORIZATION': 'basic ' +
base64.b64encode(invalid_uname_pw),
}
response = self.client.get(reverse('layer_resolve_user'), **valid_auth_headers)
response_json = json.loads(response.content)
self.assertEquals({'geoserver': False,
'superuser': True,
'user': 'admin',
'fullname': 'admin',
'email': ''},
response_json)
# Test that requesting when supplying invalid credentials returns the
# appropriate error code
response = self.client.get(reverse('layer_acls'), **invalid_auth_headers)
self.assertEquals(response.status_code, 401)
# Test logging in using Djangos normal auth system
self.client.login(username='admin', password='admin')
# Basic check that the returned content is at least valid json
response = self.client.get(reverse('layer_resolve_user'))
response_json = json.loads(response.content)
self.assertEquals('admin', response_json['user'])
self.assertEquals('admin', response_json['fullname'])
self.assertEquals('', response_json['email'])
class UtilsTests(TestCase):
def setUp(self):
self.OGC_DEFAULT_SETTINGS = {
'default': {
'BACKEND': 'geonode.geoserver',
'LOCATION': 'http://localhost:8080/geoserver/',
'USER': 'admin',
'PASSWORD': 'geoserver',
'MAPFISH_PRINT_ENABLED': True,
'PRINT_NG_ENABLED': True,
'GEONODE_SECURITY_ENABLED': True,
'GEOGIG_ENABLED': False,
'WMST_ENABLED': False,
'BACKEND_WRITE_ENABLED': True,
'WPS_ENABLED': False,
'DATASTORE': str(),
'GEOGIG_DATASTORE_DIR': str(),
}
}
self.UPLOADER_DEFAULT_SETTINGS = {
'BACKEND': 'geonode.rest',
'OPTIONS': {
'TIME_ENABLED': False,
'MOSAIC_ENABLED': False,
'GEOGIG_ENABLED': False}}
self.DATABASE_DEFAULT_SETTINGS = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'development.db'}}
def test_ogc_server_settings(self):
"""
Tests the OGC Servers Handler class.
"""
with override_settings(OGC_SERVER=self.OGC_DEFAULT_SETTINGS, UPLOADER=self.UPLOADER_DEFAULT_SETTINGS):
OGC_SERVER = self.OGC_DEFAULT_SETTINGS.copy()
OGC_SERVER.update(
{'PUBLIC_LOCATION': 'http://localhost:8080/geoserver/'})
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
default = OGC_SERVER.get('default')
self.assertEqual(ogc_settings.server, default)
self.assertEqual(ogc_settings.BACKEND, default.get('BACKEND'))
self.assertEqual(ogc_settings.LOCATION, default.get('LOCATION'))
self.assertEqual(
ogc_settings.PUBLIC_LOCATION,
default.get('PUBLIC_LOCATION'))
self.assertEqual(ogc_settings.USER, default.get('USER'))
self.assertEqual(ogc_settings.PASSWORD, default.get('PASSWORD'))
self.assertEqual(ogc_settings.DATASTORE, str())
self.assertEqual(ogc_settings.credentials, ('admin', 'geoserver'))
self.assertTrue(ogc_settings.MAPFISH_PRINT_ENABLED)
self.assertTrue(ogc_settings.PRINT_NG_ENABLED)
self.assertTrue(ogc_settings.GEONODE_SECURITY_ENABLED)
self.assertFalse(ogc_settings.GEOGIG_ENABLED)
self.assertFalse(ogc_settings.WMST_ENABLED)
self.assertTrue(ogc_settings.BACKEND_WRITE_ENABLED)
self.assertFalse(ogc_settings.WPS_ENABLED)
def test_ogc_server_defaults(self):
"""
Tests that OGC_SERVER_SETTINGS are built if they do not exist in the settings.
"""
OGC_SERVER = {'default': dict()}
defaults = self.OGC_DEFAULT_SETTINGS.get('default')
ogc_settings = OGC_Servers_Handler(OGC_SERVER)['default']
self.assertEqual(ogc_settings.server, defaults)
self.assertEqual(ogc_settings.rest, defaults['LOCATION'] + 'rest')
self.assertEqual(ogc_settings.ows, defaults['LOCATION'] + 'ows')
# Make sure we get None vs a KeyError when the key does not exist
self.assertIsNone(ogc_settings.SFDSDFDSF)
def test_importer_configuration(self):
"""
Tests that the OGC_Servers_Handler throws an ImproperlyConfigured exception when using the importer
backend without a vector database and a datastore configured.
"""
database_settings = self.DATABASE_DEFAULT_SETTINGS.copy()
ogc_server_settings = self.OGC_DEFAULT_SETTINGS.copy()
uploader_settings = self.UPLOADER_DEFAULT_SETTINGS.copy()
uploader_settings['BACKEND'] = 'geonode.importer'
self.assertTrue(['geonode_imports' not in database_settings.keys()])
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
# Test the importer backend without specifying a datastore or
# corresponding database.
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
ogc_server_settings['default']['DATASTORE'] = 'geonode_imports'
# Test the importer backend with a datastore but no corresponding
# database.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
with self.assertRaises(ImproperlyConfigured):
OGC_Servers_Handler(ogc_server_settings)['default']
database_settings['geonode_imports'] = database_settings[
'default'].copy()
database_settings['geonode_imports'].update(
{'NAME': 'geonode_imports'})
# Test the importer backend with a datastore and a corresponding
# database, no exceptions should be thrown.
with self.settings(UPLOADER=uploader_settings, OGC_SERVER=ogc_server_settings, DATABASES=database_settings):
OGC_Servers_Handler(ogc_server_settings)['default']
class SecurityTest(TestCase):
"""
Tests for the Geonode security app.
"""
def setUp(self):
self.admin, created = get_user_model().objects.get_or_create(
username='admin', password='admin', is_superuser=True)
def test_login_middleware(self):
"""
Tests the Geonode login required authentication middleware.
"""
from geonode.security.middleware import LoginRequiredMiddleware
middleware = LoginRequiredMiddleware()
white_list = [
reverse('account_ajax_login'),
reverse('account_confirm_email', kwargs=dict(key='test')),
reverse('account_login'),
reverse('account_password_reset'),
reverse('forgot_username'),
reverse('layer_acls'),
reverse('layer_resolve_user'),
]
black_list = [
reverse('account_signup'),
reverse('document_browse'),
reverse('maps_browse'),
reverse('layer_browse'),
reverse('layer_detail', kwargs=dict(layername='geonode:Test')),
reverse('layer_remove', kwargs=dict(layername='geonode:Test')),
reverse('profile_browse'),
]
request = HttpRequest()
request.user = get_anonymous_user()
        # Requests should be redirected to the `redirect_to` path when an un-authenticated user attempts to visit
# a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertEqual(response.status_code, 302)
self.assertTrue(
response.get('Location').startswith(
middleware.redirect_to))
# The middleware should return None when an un-authenticated user
# attempts to visit a white-listed url.
for path in white_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(
response,
msg="Middleware activated for white listed path: {0}".format(path))
self.client.login(username='admin', password='admin')
self.assertTrue(self.admin.is_authenticated())
request.user = self.admin
# The middleware should return None when an authenticated user attempts
# to visit a black-listed url.
for path in black_list:
request.path = path
response = middleware.process_request(request)
self.assertIsNone(response)
|
ahMarrone/solar_radiation_model | refs/heads/master | models/stats.py | 4 | # This file contains a fraction of the scipy library to simplify a complex
# installation process inside the Makefile. The entire source code of this
# file can be downloaded from the next URL:
# https://github.com/scipy/scipy/blob/v0.13.0/scipy/stats/stats.py
import numpy as np
from numpy import array
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the median along a flattened version of the array.
Returns
-------
score : float (or sequence of floats)
Score at percentile.
See Also
--------
percentileofscore
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function
a = np.asarray(a)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
if per == 0:
return a.min(axis=axis)
elif per == 100:
return a.max(axis=axis)
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
return [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval |
marhar/cx_OracleTools | refs/heads/master | cx_PyGenLib/cx_Utils.py | 1 | """Defines a number of utility functions."""
import cx_Exceptions
import cx_Logging
import glob
import os
import sys
if sys.platform == "win32":
import _winreg
class CommandExecutionFailed(cx_Exceptions.BaseException):
message = "Execution of command %(command)s failed with exit code " \
"%(exitCode)s."
def ExecuteOSCommands(*commands):
"""Execute OS commands, raising an error if any return errors."""
for command in commands:
cx_Logging.Debug("executing command %s", command)
exitCode = os.system(command)
if exitCode != 0:
raise CommandExecutionFailed(command = command,
exitCode = exitCode)
def FilesInDirectory(*entries):
"""Return a list of all of the files found in the directory. If the entry
is actually a file, it is returned instead."""
files = []
while entries:
newEntries = []
for entry in entries:
if os.path.isdir(entry):
newEntries += [os.path.join(entry, f) \
for f in os.listdir(entry)]
else:
files.append(entry)
entries = newEntries
return files
def FilesHierarchy(rootDir, namesToIgnore=[]):
"""Return a list of relative file names starting at rootDir.
The returned list of file names will contain relative path
information; that is, the portion of their full path less rootDir.
Files or directories that appear in namesToIgnore are ignored.
Note that this comparison is not case sensitive: if "foo.txt" is
specified then "FOO.TXT" will not appear in the final result. As
well, if "bar" is specified then any files in the directory "Bar"
will not appear in the final result.
"""
def Visit(args, dirname, names):
"""Append all legitimate files in dirname to the files list."""
rootDir, files, ignored = args
for name in names[:]:
if name.lower() in ignored:
names.remove(name)
else:
file = os.path.join(dirname, name)
if os.path.isfile(file):
files.append(PathRemainder(rootDir, file))
rootDir = os.path.normpath(rootDir)
files = []
ignored = [name.lower() for name in namesToIgnore]
os.path.walk(rootDir, Visit, (rootDir, files, ignored))
return files
def InlineIf(expr, trueValue, falseValue = None):
"""Method used for performing a simple if clause in an expression."""
if expr:
return trueValue
else:
return falseValue
def PathRemainder(path1, path2, caseSensitive=False, ignoreDriveLetters=True):
"""Return the right-hand part of path2 that is not in path1.
The matching directories are removed, one by one, starting from the left
side of the two paths. Once a mismatch is encountered, or either path
runs out, the remainder of path2 is returned. It is possible for an empty
string to be returned if path2 is equal to path1, or if path2 is shorter
than path1.
"""
if ignoreDriveLetters:
p1 = os.path.splitdrive(path1)[1]
p2 = os.path.splitdrive(path2)[1]
else:
p1 = path1
p2 = path2
while p1 and p2:
head1, tail1 = SplitFirst(p1)
head2, tail2 = SplitFirst(p2)
if not caseSensitive:
head1 = head1.lower()
head2 = head2.lower()
if head1 == head2:
p1 = tail1
p2 = tail2
else:
break
return p2
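# For example, assuming a POSIX os.sep of "/":
#   PathRemainder("/usr/local", "/usr/local/lib/python") -> "lib/python"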
def PerformDiff(sourceDir, targetDir):
"""Perform a diff between two directories and return the results as a set
of three lists: new, modified and removed."""
newFiles = []
modifiedFiles = []
removedFiles = []
command = 'diff --recursive --brief "%s" "%s"' % (sourceDir, targetDir)
pipe = os.popen(command)
for line in pipe.readlines():
if line.startswith("Only"):
fileOrDir = os.path.join(*line[8:-1].split(": "))
if fileOrDir.startswith(sourceDir):
removedFiles += FilesInDirectory(fileOrDir)
else:
newFiles += FilesInDirectory(fileOrDir)
else:
modifiedFiles.append(line[line.find(" and "):][5:-8])
status = pipe.close()
if status is not None:
if not newFiles and not removedFiles and not modifiedFiles:
raise "Command %s failed." % command
return (newFiles, modifiedFiles, removedFiles)
def SplitFirst(path):
"""Return a tuple containing the first directory and the rest of path.
This is similar to os.path.split(), except that in this function the
(head, tail) tuple has the first directory in head, while the remainder of
path is in tail. As with os.path.split(), os.path.join(head, tail) will
produce path.
"""
pos = path.find(os.sep)
if pos == -1:
# No separator found, assume path is a directory.
head = path
tail = ""
else:
head = path[:pos]
tail = path[pos + len(os.sep):]
return (head, tail)
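# For example, with os.sep == "/": SplitFirst("usr/local/lib") -> ("usr", "local/lib")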
def Touch(fileName):
"""Update the modification date of the file, or create it if necessary."""
if os.path.exists(fileName):
os.utime(fileName, None)
else:
file(fileName, "w")
def TransformText(text, method, openDelim = "{", closeDelim = "}"):
"""Transform the text containing the given delimiters and return the
transformed value. The method will be called whenever text is found
between the given delimiters but not if the text contains another
opening delimiter. Whatever is returned by the method will replace the
text between the delimiters and the delimiters as well."""
lastPos = 0
results = []
while True:
startPos = text.find(openDelim, lastPos)
if startPos < 0:
break
endPos = text.find(closeDelim, startPos)
if endPos < 0:
break
textToReplace = text[startPos:endPos + 1]
textInDelimiters = textToReplace[1:-1]
if openDelim in textInDelimiters:
results.append(text[lastPos:startPos + 1])
lastPos = startPos + 1
continue
results.append(text[lastPos:startPos])
results.append(method(textToReplace, textInDelimiters))
lastPos = endPos + 1
results.append(text[lastPos:])
return "".join(results)
def WriteFile(fileName, contents=""):
"""Create or replace a file with the given contents.
If the file already exists then it is replaced. If the file has been set
Read-only, its permissions are changed first and then changed back after
the file has been written.
"""
writeable = True
if os.path.exists(fileName):
writeable = os.access(fileName, os.W_OK)
if not writeable:
permissions = os.stat(fileName).st_mode
os.chmod(fileName, permissions | os.W_OK << 6)
if type(contents) == type([]):
contents = reduce(lambda x, y: x + y, contents)
open(fileName, "w+").write(contents)
if not writeable:
os.chmod(fileName, permissions)
|
fran-penedo/dreal | refs/heads/master | tools/proofcheck/split.py | 16 | #!/usr/bin/python
import sys
import os
import shutil
trace_name = sys.argv[1]
base_name, ext = os.path.splitext(trace_name)
input = open(trace_name)
counter = 0
def touch(fname, times=None):
with file(fname, 'a'):
os.utime(fname, times)
def get_newname():
global counter
counter = counter + 1
return base_name + "_" + str(counter) + ".trace"
# Create a init out
output = sys.stdout
#print "Split " + trace_name,
for line in input:
if line.startswith("Precision:"):
output = open(get_newname(), "w")
output.write(line)
#print " into " + str(counter) + " traces."
if counter == 1:
touch(trace_name)
os.remove(base_name + "_1.trace")
else:
os.remove(trace_name)
|
KousikaGanesh/purchaseandInventory | refs/heads/master | openerp/addons/web_linkedin/web_linkedin.py | 25 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import urllib2
from urlparse import urlparse, urlunparse
import openerp
from openerp.osv import fields, osv
class Binary(openerp.addons.web.http.Controller):
_cp_path = "/web_linkedin/binary"
@openerp.addons.web.http.jsonrequest
def url2binary(self, req, url):
"""Used exclusively to load images from LinkedIn profiles, must not be used for anything else."""
req.session.assert_valid(force=True)
_scheme, _netloc, path, params, query, fragment = urlparse(url)
# media.linkedin.com is the master domain for LinkedIn media (replicated to CDNs),
# so forcing it should always work and prevents abusing this method to load arbitrary URLs
url = urlunparse(('http', 'media.licdn.com', path, params, query, fragment))
bfile = urllib2.urlopen(url)
return base64.b64encode(bfile.read())
class web_linkedin_settings(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'api_key': fields.char(string="API Key", size=50),
'server_domain': fields.char(size=100),
}
def get_default_linkedin(self, cr, uid, fields, context=None):
key = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.linkedin.apikey") or ""
dom = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
return {'api_key': key, 'server_domain': dom,}
def set_linkedin(self, cr, uid, ids, context=None):
key = self.browse(cr, uid, ids[0], context)["api_key"] or ""
self.pool.get("ir.config_parameter").set_param(cr, uid, "web.linkedin.apikey", key)
|
microsoft/LightGBM | refs/heads/master | examples/python-guide/sklearn_example.py | 1 | # coding: utf-8
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
print('Loading data...')
# load or create your dataset
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmse_test = mean_squared_error(y_test, y_pred) ** 0.5
print(f'The RMSE of prediction is: {rmse_test}')
# feature importances
print(f'Feature importances: {list(gbm.feature_importances_)}')
# self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_true, y_pred):
return 'RMSLE', np.sqrt(np.mean(np.power(np.log1p(y_pred) - np.log1p(y_true), 2))), False
print('Starting training with custom eval function...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=rmsle,
early_stopping_rounds=5)
# another self-defined eval metric
# f(y_true: array, y_pred: array) -> name: string, eval_result: float, is_higher_better: bool
# Relative Absolute Error (RAE)
def rae(y_true, y_pred):
return 'RAE', np.sum(np.abs(y_pred - y_true)) / np.sum(np.abs(np.mean(y_true) - y_true)), False
print('Starting training with multiple custom eval functions...')
# train
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric=[rmsle, rae],
early_stopping_rounds=5)
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
rmsle_test = rmsle(y_test, y_pred)[1]
rae_test = rae(y_test, y_pred)[1]
print(f'The RMSLE of prediction is: {rmsle_test}')
print(f'The RAE of prediction is: {rae_test}')
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print(f'Best parameters found by grid search are: {gbm.best_params_}')
|
ChameleonCloud/horizon | refs/heads/chameleoncloud/train | openstack_dashboard/dashboards/admin/flavors/panel.py | 7 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Flavors(horizon.Panel):
name = _("Flavors")
slug = 'flavors'
permissions = ('openstack.services.compute',)
policy_rules = (("compute", "context_is_admin"),)
|
indictranstech/erpnext | refs/heads/develop | erpnext/hr/doctype/training_result/test_training_result.py | 51 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Training Result')
class TestTrainingResult(unittest.TestCase):
pass
|
pombredanne/pulp | refs/heads/master | nodes/child/pulp_node/__init__.py | 56 | # Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) |
olemis/zebra | refs/heads/master | tests/io/jsonrpclib/config.py | 188 | import sys
class LocalClasses(dict):
def add(self, cls):
self[cls.__name__] = cls
class Config(object):
"""
This is pretty much used exclusively for the 'jsonclass'
functionality... set use_jsonclass to False to turn it off.
You can change serialize_method and ignore_attribute, or use
the local_classes.add(class) to include "local" classes.
"""
use_jsonclass = True
# Change to False to keep __jsonclass__ entries raw.
serialize_method = '_serialize'
# The serialize_method should be a string that references the
# method on a custom class object which is responsible for
# returning a tuple of the constructor arguments and a dict of
# attributes.
ignore_attribute = '_ignore'
# The ignore attribute should be a string that references the
# attribute on a custom class object which holds strings and / or
# references of the attributes the class translator should ignore.
classes = LocalClasses()
# The list of classes to use for jsonclass translation.
version = 2.0
# Version of the JSON-RPC spec to support
user_agent = 'jsonrpclib/0.1 (Python %s)' % \
'.'.join([str(ver) for ver in sys.version_info[0:3]])
# User agent to use for calls.
_instance = None
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = cls()
return cls._instance
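# Illustrative usage (MyType stands in for any local class you want the
# jsonclass translator to recognise; it is not defined in this module):
#
#   config = Config.instance()
#   config.classes.add(MyType)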
|
levibostian/myBlanky | refs/heads/master | googleAppEngine/google/appengine/api/yaml_object.py | 13 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Builder for mapping YAML documents to object instances.
ObjectBuilder is responsible for mapping a YAML document to classes defined
using the validation mechanism (see google.appengine.api.validation.py).
"""
from google.appengine.api import validation
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_errors
import yaml
class _ObjectMapper(object):
"""Wrapper used for mapping attributes from a yaml file to an object.
This wrapper is required because objects do not know what property they are
  associated with at creation time, and therefore can not be instantiated
with the correct class until they are mapped to their parents.
"""
def __init__(self):
"""Object mapper starts off with empty value."""
self.value = None
self.seen = set()
def set_value(self, value):
"""Set value of instance to map to.
Args:
value: Instance that this mapper maps to.
"""
self.value = value
def see(self, key):
if key in self.seen:
raise yaml_errors.DuplicateAttribute("Duplicate attribute '%s'." % key)
self.seen.add(key)
class _ObjectSequencer(object):
"""Wrapper used for building sequences from a yaml file to a list.
This wrapper is required because objects do not know what property they are
  associated with at creation time, and therefore can not be instantiated
with the correct class until they are mapped to their parents.
"""
def __init__(self):
"""Object sequencer starts off with empty value."""
self.value = []
self.constructor = None
def set_constructor(self, constructor):
"""Set object used for constructing new sequence instances.
Args:
constructor: Callable which can accept no arguments. Must return
an instance of the appropriate class for the container.
"""
self.constructor = constructor
class ObjectBuilder(yaml_builder.Builder):
"""Builder used for constructing validated objects.
Given a class that implements validation.ValidatedBase, it will parse a YAML
document and attempt to build an instance of the class.
ObjectBuilder will only map YAML fields that are accepted by the
ValidatedBase's GetValidator function.
Lists are mapped to validated. Repeated attributes and maps are mapped to
validated.Type properties.
For a YAML map to be compatible with a class, the class must have a
constructor that can be called with no parameters. If the provided type
does not have such a constructor a parse time error will occur.
"""
def __init__(self, default_class):
"""Initialize validated object builder.
Args:
default_class: Class that is instantiated upon the detection of a new
document. An instance of this class will act as the document itself.
"""
self.default_class = default_class
def _GetRepeated(self, attribute):
"""Get the ultimate type of a repeated validator.
Looks for an instance of validation.Repeated, returning its constructor.
Args:
attribute: Repeated validator attribute to find type for.
Returns:
      The expected class of the Type validator, otherwise object.
"""
if isinstance(attribute, validation.Optional):
attribute = attribute.validator
if isinstance(attribute, validation.Repeated):
return attribute.constructor
return object
def BuildDocument(self):
"""Instantiate new root validated object.
Returns:
New instance of validated object.
"""
return self.default_class()
def BuildMapping(self, top_value):
"""New instance of object mapper for opening map scope.
Args:
top_value: Parent of nested object.
Returns:
New instance of object mapper.
"""
result = _ObjectMapper()
if isinstance(top_value, self.default_class):
result.value = top_value
return result
def EndMapping(self, top_value, mapping):
"""When leaving scope, makes sure new object is initialized.
This method is mainly for picking up on any missing required attributes.
Args:
top_value: Parent of closing mapping object.
mapping: _ObjectMapper instance that is leaving scope.
"""
try:
mapping.value.CheckInitialized()
except validation.ValidationError:
raise
except Exception, e:
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
raise validation.ValidationError(error_str, e)
def BuildSequence(self, top_value):
"""New instance of object sequence.
Args:
top_value: Object that contains the new sequence.
Returns:
A new _ObjectSequencer instance.
"""
return _ObjectSequencer()
def MapTo(self, subject, key, value):
"""Map key-value pair to an objects attribute.
Args:
subject: _ObjectMapper of object that will receive new attribute.
key: Key of attribute.
value: Value of new attribute.
Raises:
UnexpectedAttribute when the key is not a validated attribute of
the subject value class.
"""
assert isinstance(subject.value, validation.ValidatedBase)
try:
attribute = subject.value.GetValidator(key)
except validation.ValidationError, err:
raise yaml_errors.UnexpectedAttribute(err)
if isinstance(value, _ObjectMapper):
value.set_value(attribute.expected_type())
value = value.value
elif isinstance(value, _ObjectSequencer):
value.set_constructor(self._GetRepeated(attribute))
value = value.value
subject.see(key)
try:
subject.value.Set(key, value)
except validation.ValidationError, e:
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
try:
value_str = str(value)
except Exception:
value_str = '<unknown>'
e.message = ("Unable to assign value '%s' to attribute '%s':\n%s" %
(value_str, key, error_str))
raise e
except Exception, e:
try:
error_str = str(e)
except Exception:
error_str = '<unknown>'
try:
value_str = str(value)
except Exception:
value_str = '<unknown>'
message = ("Unable to assign value '%s' to attribute '%s':\n%s" %
(value_str, key, error_str))
raise validation.ValidationError(message, e)
def AppendTo(self, subject, value):
"""Append a value to a sequence.
Args:
subject: _ObjectSequence that is receiving new value.
value: Value that is being appended to sequence.
"""
if isinstance(value, _ObjectMapper):
value.set_value(subject.constructor())
subject.value.append(value.value)
else:
subject.value.append(value)
def BuildObjects(default_class, stream, loader=yaml.loader.SafeLoader):
"""Build objects from stream.
Handles the basic case of loading all the objects from a stream.
Args:
default_class: Class that is instantiated upon the detection of a new
document. An instance of this class will act as the document itself.
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work with the YAML parser.
    loader: Used for dependency injection.
Returns:
List of default_class instances parsed from the stream.
"""
builder = ObjectBuilder(default_class)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(stream, loader)
return handler.GetResults()
def BuildSingleObject(default_class, stream, loader=yaml.loader.SafeLoader):
"""Build object from stream.
Handles the basic case of loading a single object from a stream.
Args:
default_class: Class that is instantiated upon the detection of a new
document. An instance of this class will act as the document itself.
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work with the YAML parser.
    loader: Used for dependency injection.
"""
definitions = BuildObjects(default_class, stream, loader)
if len(definitions) < 1:
raise yaml_errors.EmptyConfigurationFile()
if len(definitions) > 1:
raise yaml_errors.MultipleConfigurationFile()
return definitions[0]
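# Rough usage sketch (AppInfo is a hypothetical example class, not part of this
# module): given a validated class such as
#
#   class AppInfo(validation.Validated):
#     ATTRIBUTES = {'application': validation.Type(str)}
#
# a single YAML document can be mapped onto it with
#
#   info = BuildSingleObject(AppInfo, 'application: my-app\n')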
|
Madril/env | refs/heads/master | emacs.d/pycomplete.py | 5 |
"""
Python dot expression completion using Pymacs.
This almost certainly needs work, but if you add
(require 'pycomplete)
to your .xemacs/init.el file (untried w/ GNU Emacs so far) and have Pymacs
installed, when you hit M-TAB it will try to complete the dot expression
before point. For example, given this import at the top of the file:
import time
typing "time.cl" then hitting M-TAB should complete "time.clock".
This is unlikely to be done the way Emacs completion ought to be done, but
it's a start. Perhaps someone with more Emacs mojo can take this stuff and
do it right.
See pycomplete.el for the Emacs Lisp side of things.
"""
import sys
import os.path
try:
x = set
except NameError:
from sets import Set as set
else:
del x
def get_all_completions(s, imports=None):
"""Return contextual completion of s (string of >= zero chars).
If given, imports is a list of import statements to be executed first.
"""
locald = {}
if imports is not None:
for stmt in imports:
try:
exec stmt in globals(), locald
except TypeError:
raise TypeError, "invalid type: %s" % stmt
dots = s.split(".")
if not s or len(dots) == 1:
keys = set()
keys.update(locald.keys())
keys.update(globals().keys())
import __builtin__
keys.update(dir(__builtin__))
keys = list(keys)
keys.sort()
if s:
return [k for k in keys if k.startswith(s)]
else:
return keys
sym = None
for i in range(1, len(dots)):
s = ".".join(dots[:i])
try:
sym = eval(s, globals(), locald)
except NameError:
try:
sym = __import__(s, globals(), locald, [])
except ImportError:
return []
if sym is not None:
s = dots[-1]
return [k for k in dir(sym) if k.startswith(s)]
def pycomplete(s, imports=None):
completions = get_all_completions(s, imports)
dots = s.split(".")
return os.path.commonprefix([k[len(dots[-1]):] for k in completions])
if __name__ == "__main__":
print "<empty> ->", pycomplete("")
print "sys.get ->", pycomplete("sys.get")
print "sy ->", pycomplete("sy")
print "sy (sys in context) ->", pycomplete("sy", imports=["import sys"])
print "foo. ->", pycomplete("foo.")
print "Enc (email * imported) ->",
print pycomplete("Enc", imports=["from email import *"])
print "E (email * imported) ->",
print pycomplete("E", imports=["from email import *"])
print "Enc ->", pycomplete("Enc")
print "E ->", pycomplete("E")
# Local Variables :
# pymacs-auto-reload : t
# End :
|
Belgabor/django | refs/heads/master | django/contrib/auth/management/__init__.py | 96 | """
Creates permissions for all installed apps that need permissions.
"""
from django.db.models import get_models, signals
from django.contrib.auth import models as auth_app
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
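# For example, a model whose object_name is "Article" (verbose_name "article")
# yields ('add_article', u'Can add article'), ('change_article',
# u'Can change article') and ('delete_article', u'Can delete article'),
# plus any custom permissions declared in the model's Meta.permissions.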
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
app_models = get_models(app)
if not app_models:
return
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
for codename, name in _get_all_permissions(klass._meta):
p, created = Permission.objects.get_or_create(codename=codename, content_type__pk=ctype.id,
defaults={'name': name, 'content_type': ctype})
if created and verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.contrib.auth.models import User
from django.core.management import call_command
if User in created_models and kwargs.get('interactive', True):
msg = "\nYou just installed Django's auth system, which means you don't have " \
"any superusers defined.\nWould you like to create one now? (yes/no): "
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
|
dmsul/econtools | refs/heads/master | econtools/util/gentools.py | 1 | from typing import Union, Generator, Optional, Any, Iterable
import string
import pandas as pd
def force_df(s: Union[pd.DataFrame, pd.Series],
name: Optional[str]=None) -> pd.DataFrame:
"""
Forces a Series to a DataFrame. DataFrames are returned unaffected. Other
objects raise `ValueError`.
"""
# Check if DF or Series
if isinstance(s, pd.core.frame.DataFrame):
return s
elif not isinstance(s, pd.core.series.Series):
raise ValueError("`s` is a {}".format(type(s)))
else:
return s.to_frame(name)
def force_list(x: Any) -> list:
"""If type not `list`, pass to `force_interable`, then convert to list."""
if isinstance(x, list):
return x
else:
return list(force_iterable(x))
def force_iterable(x) -> Iterable:
"""If not iterable, wrap in tuple"""
if hasattr(x, '__iter__') and type(x) is not str:
return x
else:
return (x,)
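# For example: force_list('abc') -> ['abc'], force_list((1, 2)) -> [1, 2],
# and force_iterable(5) -> (5,).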
def generate_chunks(
iterable: Union[list, tuple],
chunk_size: int) -> Generator[Union[list, tuple], None, None]:
"""Go through `iterable` one chunk at a time."""
length = len(iterable)
N_chunks = length // chunk_size
runs = 0
while runs <= N_chunks:
i, j = chunk_size*runs, chunk_size*(runs + 1)
if runs < N_chunks:
yield iterable[i:j]
elif i < length:
yield iterable[i:]
else:
pass # Don't return an empty last list
runs += 1
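# For example: list(generate_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]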
def int2base(x: int, base: int) -> str:
"""
Convert decimal x >= 0 to base <= 62
Alpha values are case sensitive, with lowercase being higher value than
upper case.
"""
base_alphabet = _base62_alphabet()
if base > len(base_alphabet):
raise ValueError("Max base is 62. Passed base was {}".format(base))
new_base = ''
while x > 0:
x, i = divmod(x, base)
new_base = base_alphabet[i] + new_base
return new_base
def base2int(x: str, base: int) -> int:
"""
Convert x >= 0 of base `base` to decimal.
Alpha values are case sensitive, with lowercase being higher value than
upper case.
"""
base62_alphabet = _base62_alphabet()
if base > len(base62_alphabet):
raise ValueError("Max base is 62. Passed base was {}".format(base))
base_alphabet = base62_alphabet[:base]
base10 = 0
for place, value in enumerate(x[::-1]):
values_base10 = base_alphabet.find(value)
if values_base10 < 0:
err_str = "Value `{}` is not a valid digit for base {}"
raise ValueError(err_str.format(value, base))
base10 += values_base10 * base ** place
return base10
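# Round-trip sketch: int2base(255, 16) == 'FF', base2int('FF', 16) == 255, and
# int2base(61, 62) == 'z' (lowercase letters carry the highest digit values).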
def _base62_alphabet() -> str:
return string.digits + string.ascii_uppercase + string.ascii_lowercase
|
Phil-LiDAR2-Geonode/pl2-geonode | refs/heads/master | geonode/base/forms.py | 11 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from autocomplete_light.contrib.taggit_field import TaggitField, TaggitWidget
from django import forms
from django.utils.translation import ugettext as _
from mptt.forms import TreeNodeMultipleChoiceField
from bootstrap3_datetime.widgets import DateTimePicker
from modeltranslation.forms import TranslationModelForm
from geonode.base.models import TopicCategory, Region
from geonode.people.models import Profile
class CategoryChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return '<span class="has-popover" data-container="body" data-toggle="popover" data-placement="top" ' \
'data-content="' + obj.description + '" trigger="hover">' + obj.gn_description + '</span>'
class CategoryForm(forms.Form):
category_choice_field = CategoryChoiceField(required=False,
label='*' + _('Category'),
empty_label=None,
queryset=TopicCategory.objects.extra(order_by=['description']))
def clean(self):
cleaned_data = self.data
ccf_data = cleaned_data.get("category_choice_field")
if not ccf_data:
msg = _("Category is required.")
self._errors = self.error_class([msg])
# Always return the full collection of cleaned data.
return cleaned_data
class ResourceBaseForm(TranslationModelForm):
"""Base form for metadata, should be inherited by childres classes of ResourceBase"""
owner = forms.ModelChoiceField(
empty_label="Owner",
label="Owner",
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
_date_widget_options = {
"icon_attrs": {"class": "fa fa-calendar"},
"attrs": {"class": "form-control input-sm"},
"format": "%Y-%m-%d %I:%M %p",
# Options for the datetimepickers are not set here on purpose.
# They are set in the metadata_form_js.html template because
# bootstrap-datetimepicker uses jquery for its initialization
# and we need to ensure it is available before trying to
# instantiate a new datetimepicker. This could probably be improved.
"options": False,
}
date = forms.DateTimeField(
localize=True,
input_formats=['%Y-%m-%d %I:%M %p'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_start = forms.DateTimeField(
required=False,
localize=True,
input_formats=['%Y-%m-%d %I:%M %p'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_end = forms.DateTimeField(
required=False,
localize=True,
input_formats=['%Y-%m-%d %I:%M %p'],
widget=DateTimePicker(**_date_widget_options)
)
poc = forms.ModelChoiceField(
empty_label="Person outside GeoNode (fill form)",
label="Point Of Contact",
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
metadata_author = forms.ModelChoiceField(
empty_label="Person outside GeoNode (fill form)",
label="Metadata Author",
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
keywords = TaggitField(
required=False,
help_text=_("A space or comma-separated list of keywords"),
widget=TaggitWidget('TagAutocomplete'))
regions = TreeNodeMultipleChoiceField(
required=False,
queryset=Region.objects.all(),
level_indicator=u'___')
regions.widget.attrs = {"size": 20}
def __init__(self, *args, **kwargs):
super(ResourceBaseForm, self).__init__(*args, **kwargs)
for field in self.fields:
help_text = self.fields[field].help_text
self.fields[field].help_text = None
if help_text != '':
self.fields[field].widget.attrs.update(
{
'class': 'has-popover',
'data-content': help_text,
'data-placement': 'right',
'data-container': 'body',
'data-html': 'true'})
class Meta:
exclude = (
'contacts',
'name',
'uuid',
'bbox_x0',
'bbox_x1',
'bbox_y0',
'bbox_y1',
'srid',
'category',
'csw_typename',
'csw_schema',
'csw_mdsource',
'csw_type',
'csw_wkt_geometry',
'metadata_uploaded',
'metadata_xml',
'csw_anytext',
'popular_count',
'share_count',
'thumbnail',
'charset',
'rating',
'detail_url'
)
|
cysuncn/python | refs/heads/master | spark/crm/PROC_A_SUBJECT_D004031.py | 1 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D004031').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#dates needed for processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#first day of the month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
ACRM_A_TARGET_D004011 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004011/*')
ACRM_A_TARGET_D004011.registerTempTable("ACRM_A_TARGET_D004011")
ACRM_A_TARGET_D004010 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004010/*')
ACRM_A_TARGET_D004010.registerTempTable("ACRM_A_TARGET_D004010")
ACRM_A_TARGET_D004012 = sqlContext.read.parquet(hdfs+'/ACRM_A_TARGET_D004012/*')
ACRM_A_TARGET_D004012.registerTempTable("ACRM_A_TARGET_D004012")
#Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,'' AS ORG_ID
,'D004031 ' AS INDEX_CODE
,CASE WHEN A.INDEX_VALUE < B.INDEX_VALUE
AND B.INDEX_VALUE < C.INDEX_VALUE THEN 0 WHEN A.INDEX_VALUE > B.INDEX_VALUE
AND B.INDEX_VALUE > C.INDEX_VALUE THEN 2 ELSE 1 END AS INDEX_VALUE
,SUBSTR(V_DT, 1, 7) AS YEAR_MONTH
,V_DT AS ETL_DATE
,A.CUST_TYPE AS CUST_TYPE
,A.FR_ID AS FR_ID
       FROM ACRM_A_TARGET_D004010 A --signed up for electronic channels two months ago
      INNER JOIN ACRM_A_TARGET_D004011 B --held electronic channels one month ago
ON A.CUST_ID = B.CUST_ID
AND A.FR_ID = B.FR_ID
      INNER JOIN ACRM_A_TARGET_D004012 C --holds electronic channels in the current month
ON A.CUST_ID = C.CUST_ID
AND A.FR_ID = C.FR_ID
WHERE A.CUST_TYPE = '1' """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_TARGET_D004031 = sqlContext.sql(sql)
ACRM_A_TARGET_D004031.registerTempTable("ACRM_A_TARGET_D004031")
dfn="ACRM_A_TARGET_D004031/"+V_DT+".parquet"
ACRM_A_TARGET_D004031.cache()
nrows = ACRM_A_TARGET_D004031.count()
ACRM_A_TARGET_D004031.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D004031.unpersist()
ACRM_A_TARGET_D004011.unpersist()
ACRM_A_TARGET_D004010.unpersist()
ACRM_A_TARGET_D004012.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D004031/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D004031 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
|
rockyzhengwu/mlscratch | refs/heads/master | mxent/maxent.py | 1 | #!/usr/bin/env python
# encoding: utf-8
import sys;
import math;
from collections import defaultdict
class MaxEnt:
def __init__(self):
        self._samples = []; #sample set; each element is a tuple [y, x1, x2, ..., xn]
        self._Y = set([]); #label set, i.e. the distinct values of y
        self._numXY = defaultdict(int); #key is the (xi, yi) pair, value is count(xi, yi)
        self._N = 0; #number of samples
        self._n = 0; #total number of (xi, yi) feature pairs
        self._xyID = {}; #sequential ID assigned to each (x, y) pair; key is (xi, yi), value is the ID
        self._C = 0; #largest number of features in any sample, used in the IIS parameter update
        self._ep_ = []; #feature expectations under the empirical distribution
        self._ep = []; #feature expectations under the model distribution
        self._w = []; #weights of the n features
        self._lastw = []; #weights from the previous iteration
        self._EPS = 0.01; #convergence threshold
def load_data(self, filename):
for line in open(filename, "r"):
sample = line.strip().split("\t");
if len(sample) < 2: #至少:标签+一个特征
continue;
y = sample[0];
X = sample[1:];
self._samples.append(sample); #labe + features
self._Y.add(y); #label
            for x in set(X): #set() removes duplicates from X
self._numXY[(x, y)] += 1;
def _initparams(self):
self._N = len(self._samples);
self._n = len(self._numXY);
self._C = max([len(sample) - 1 for sample in self._samples]);
self._w = [0.0] * self._n;
self._lastw = self._w[:];
self._sample_ep();
def _convergence(self):
for w, lw in zip(self._w, self._lastw):
if math.fabs(w - lw) >= self._EPS:
return False;
return True;
def _sample_ep(self):
self._ep_ = [0.0] * self._n;
        #see equation (20) for the computation
for i, xy in enumerate(self._numXY):
self._ep_[i] = self._numXY[xy] * 1.0 / self._N;
self._xyID[xy] = i;
def _zx(self, X):
        #calculate Z(X); see equation (15)
ZX = 0.0;
for y in self._Y:
sum = 0.0;
for x in X:
if (x, y) in self._numXY:
sum += self._w[self._xyID[(x, y)]];
ZX += math.exp(sum);
return ZX;
def _pyx(self, X):
        #calculate p(y|x); see equation (22)
ZX = self._zx(X);
results = [];
for y in self._Y:
sum = 0.0;
for x in X:
                if (x, y) in self._numXY: #this membership test plays the role of the indicator function
sum += self._w[self._xyID[(x, y)]];
pyx = 1.0 / ZX * math.exp(sum);
results.append((y, pyx));
return results;
def _model_ep(self):
self._ep = [0.0] * self._n;
        #see equation (21)
for sample in self._samples:
X = sample[1:];
pyx = self._pyx(X);
for y, p in pyx:
for x in X:
if (x, y) in self._numXY:
self._ep[self._xyID[(x, y)]] += p * 1.0 / self._N;
def train(self, maxiter = 1000):
self._initparams();
for i in range(0, maxiter):
print("Iter:%d..."%i)
            self._lastw = self._w[:]; #save the previous iteration's weights
self._model_ep();
            #update the weight of each feature
for i, w in enumerate(self._w):
                #see equation (19)
self._w[i] += 1.0 / self._C * math.log(self._ep_[i] / self._ep[i]);
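                #The line above is the IIS update step: delta_i = (1/C) * log(E_empirical[f_i] / E_model[f_i])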
print(self._w)
            #check for convergence
if self._convergence():
break;
def predict(self, input):
X = input.strip().split("\t");
prob = self._pyx(X)
return prob;
if __name__ == "__main__":
maxent = MaxEnt();
maxent.load_data('data.txt');
maxent.train();
print(maxent.predict("sunny\thot\thigh\tFALSE"))
print(maxent.predict("overcast\thot\thigh\tFALSE"))
print(maxent.predict("sunny\tcool\thigh\tTRUE"))
sys.exit(0);
|
Samsung/skia | refs/heads/dev/m36_1985 | third_party/externals/gyp/test/win/gyptest-link-embed-manifest.py | 244 | #!/usr/bin/env python
# Copyright (c) 2013 Yandex LLC. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure manifests are embedded in binaries properly. Handling of
AdditionalManifestFiles is tested too.
"""
import TestGyp
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
Returns None is there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('embed-manifest.gyp', chdir=CHDIR)
test.build('embed-manifest.gyp', test.ALL, chdir=CHDIR)
# The following binaries must contain a manifest embedded.
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_exe.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_exe_inc.exe', chdir=CHDIR), 1))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_dll.dll', chdir=CHDIR), 2))
test.fail_test(not extract_manifest(test.built_file_path(
'test_manifest_dll_inc.dll', chdir=CHDIR), 2))
# Must contain the Win7 support GUID, but not the Vista one (from
# extra2.manifest).
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra1.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' in
extract_manifest(test.built_file_path('test_manifest_extra1.exe',
chdir=CHDIR), 1))
# Must contain both.
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra2.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
extract_manifest(test.built_file_path('test_manifest_extra2.exe',
chdir=CHDIR), 1))
# Same as extra2, but using list syntax instead.
test.fail_test(
'35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
chdir=CHDIR), 1))
test.fail_test(
'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
chdir=CHDIR), 1))
# Test that incremental linking doesn't force manifest embedding.
test.fail_test(extract_manifest(test.built_file_path(
'test_manifest_exe_inc_no_embed.exe', chdir=CHDIR), 1))
test.pass_test()
|
mdaniel/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/DunderAll/m1.py | 24 | __all__ = ["m1m1"]
__all__ += ["m1m2"]
def m1m1():
pass
def m1m2():
pass
def m1m3():
pass |
jianlirong/incubator-hawq | refs/heads/master | tools/bin/pythonSrc/pexpect-4.2/pexpect/pty_spawn.py | 16 | import os
import sys
import time
import pty
import tty
import errno
import signal
from contextlib import contextmanager
import ptyprocess
from ptyprocess.ptyprocess import use_native_pty_fork
from .exceptions import ExceptionPexpect, EOF, TIMEOUT
from .spawnbase import SpawnBase
from .utils import which, split_command_line, select_ignore_interrupts
@contextmanager
def _wrap_ptyprocess_err():
"""Turn ptyprocess errors into our own ExceptionPexpect errors"""
try:
yield
except ptyprocess.PtyProcessError as e:
raise ExceptionPexpect(*e.args)
PY3 = (sys.version_info[0] >= 3)
class spawn(SpawnBase):
'''This is the main class interface for Pexpect. Use this class to start
and control child applications. '''
# This is purely informational now - changing it has no effect
use_native_pty_fork = use_native_pty_fork
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
ignore_sighup=False, echo=True, preexec_fn=None,
encoding=None, codec_errors='strict', dimensions=None):
'''This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn('/usr/bin/ftp')
child = pexpect.spawn('/usr/bin/ssh user@example.com')
child = pexpect.spawn('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn('/usr/bin/ftp', [])
child = pexpect.spawn('/usr/bin/ssh', ['user@example.com'])
child = pexpect.spawn('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
common mistake. If you want to run a command and pipe it through
another command then you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
When the keyword argument *searchwindowsize* is None (default), the
full buffer is searched at each iteration of receiving incoming data.
The default number of bytes scanned at each iteration is very large
and may be reduced to collaterally reduce search cost. After
:meth:`~.expect` returns, the full buffer attribute remains up to
size *maxread* irrespective of *searchwindowsize* value.
When the keyword argument ``timeout`` is specified as a number,
(default: *30*), then :class:`TIMEOUT` will be raised after the value
specified has elapsed, in seconds, for any of the :meth:`~.expect`
family of method calls. When None, TIMEOUT will not be raised, and
:meth:`~.expect` may block indefinitely until match.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = open('mylog.txt','wb')
child.logfile = fout
Example log to stdout::
# In Python 2:
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
# In Python 3, spawnu should be used to give str to stdout:
child = pexpect.spawnu('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
You will need to pass an encoding to spawn in the above code if you are
using Python 3.
To separately log output sent to the child use logfile_send::
child.logfile_send = fout
If ``ignore_sighup`` is True, the child process will ignore SIGHUP
signals. The default is False from Pexpect 4.0, meaning that SIGHUP
will be handled normally by the child.
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to None to return to the old behavior.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None::
child = pexpect.spawn('some_command')
child.close()
print(child.exitstatus, child.signalstatus)
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
        os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG.
The echo attribute may be set to False to disable echoing of input.
As a pseudo-terminal, all input echoed by the "keyboard" (send()
or sendline()) will be repeated to output. For many cases, it is
not desirable to have echo enabled, and it may be later disabled
using setecho(False) followed by waitnoecho(). However, for some
platforms such as Solaris, this is not possible, and should be
disabled immediately on spawn.
If preexec_fn is given, it will be called in the child process before
launching the given command. This is useful to e.g. reset inherited
signal handlers.
The dimensions attribute specifies the size of the pseudo-terminal as
seen by the subprocess, and is specified as a two-entry tuple (rows,
columns). If this is unspecified, the defaults in ptyprocess will apply.
'''
super(spawn, self).__init__(timeout=timeout, maxread=maxread, searchwindowsize=searchwindowsize,
logfile=logfile, encoding=encoding, codec_errors=codec_errors)
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.cwd = cwd
self.env = env
self.echo = echo
self.ignore_sighup = ignore_sighup
self.__irix_hack = sys.platform.lower().startswith('irix')
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn(command, args, preexec_fn, dimensions)
def __str__(self):
'''This returns a human-readable string that represents the state of
the object. '''
s = []
s.append(repr(self))
s.append('command: ' + str(self.command))
s.append('args: %r' % (self.args,))
s.append('buffer (last 100 chars): %r' % (
self.buffer[-100:] if self.buffer else self.buffer,))
s.append('before (last 100 chars): %r' % (
self.before[-100:] if self.before else self.before,))
s.append('after: %r' % (self.after,))
s.append('match: %r' % (self.match,))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
if hasattr(self, 'ptyproc'):
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self, command, args=[], preexec_fn=None, dimensions=None):
'''This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. '''
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if isinstance(command, type(0)):
raise ExceptionPexpect('Command is an int type. ' +
'If this is a file descriptor then maybe you want to ' +
'use fdpexpect.fdspawn which takes an existing ' +
'file descriptor instead of a command string.')
if not isinstance(args, type([])):
raise TypeError('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
# Make a shallow copy of the args list.
self.args = args[:]
self.args.insert(0, command)
self.command = command
command_with_path = which(self.command, env=self.env)
if command_with_path is None:
raise ExceptionPexpect('The command was not found or was not ' +
'executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join(self.args) + '>'
assert self.pid is None, 'The pid member must be None.'
assert self.command is not None, 'The command member must not be None.'
kwargs = {'echo': self.echo, 'preexec_fn': preexec_fn}
if self.ignore_sighup:
def preexec_wrapper():
"Set SIGHUP to be ignored, then call the real preexec_fn"
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if preexec_fn is not None:
preexec_fn()
kwargs['preexec_fn'] = preexec_wrapper
if dimensions is not None:
kwargs['dimensions'] = dimensions
if self.encoding is not None:
# Encode command line using the specified encoding
self.args = [a if isinstance(a, bytes) else a.encode(self.encoding)
for a in self.args]
self.ptyproc = self._spawnpty(self.args, env=self.env,
cwd=self.cwd, **kwargs)
self.pid = self.ptyproc.pid
self.child_fd = self.ptyproc.fd
self.terminated = False
self.closed = False
def _spawnpty(self, args, **kwargs):
'''Spawn a pty and return an instance of PtyProcess.'''
return ptyprocess.PtyProcess.spawn(args, **kwargs)
def close(self, force=True):
'''This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). '''
self.flush()
self.ptyproc.close(force=force)
self.isalive() # Update exit status from ptyproc
self.child_fd = -1
def isatty(self):
'''This returns True if the file descriptor is open and connected to a
tty(-like) device, else False.
On SVR4-style platforms implementing streams, such as SunOS and HP-UX,
the child pty may not appear as a terminal device. This means
methods such as setecho(), setwinsize(), getwinsize() may raise an
IOError. '''
return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
'''This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn('ssh user@example.com')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
If timeout==None then this method to block until ECHO flag is False.
'''
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho(self):
'''This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho().
Not supported on platforms where ``isatty()`` returns False. '''
return self.ptyproc.getecho()
def setecho(self, state):
'''This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat') # Echo is on by default.
p.sendline('1234') # We expect see this twice from the child...
p.expect(['1234']) # ... once from the tty echo...
p.expect(['1234']) # ... and again from cat itself.
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will set this only once (echoed by cat).
p.sendline('wxyz') # We will set this only once (echoed by cat)
p.expect(['abcd'])
p.expect(['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline('1234')
p.setecho(False) # Turn off tty echo
p.sendline('abcd') # We will set this only once (echoed by cat).
p.sendline('wxyz') # We will set this only once (echoed by cat)
p.expect(['1234'])
p.expect(['1234'])
p.expect(['abcd'])
p.expect(['wxyz'])
Not supported on platforms where ``isatty()`` returns False.
'''
return self.ptyproc.setecho(state)
def read_nonblocking(self, size=1, timeout=-1):
'''This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a logfile is specified, a
copy is written to that log.
If timeout is None then the read may block indefinitely.
If timeout is -1 then the self.timeout value is used. If timeout is 0
then the child is polled and if there is no data immediately ready
then this will raise a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. '''
if self.closed:
raise ValueError('I/O operation on closed file.')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
# timeout of 0 means "poll"
r, w, e = select_ignore_interrupts([self.child_fd], [], [], 0)
if not r:
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
elif self.__irix_hack:
# Irix takes a long time before it realizes a child was terminated.
# FIXME So does this mean Irix systems are forced to always have
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
r, w, e = select_ignore_interrupts([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF('End Of File (EOF). Slow platform.')
r, w, e = select_ignore_interrupts([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
# then finally admit that they are not alive.
self.flag_eof = True
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
if self.child_fd in r:
return super(spawn, self).read_nonblocking(size)
raise ExceptionPexpect('Reached an unexpected state.') # pragma: no cover
def write(self, s):
'''This is similar to send() except that there is no return value.
'''
self.send(s)
def writelines(self, sequence):
'''This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
'''
for s in sequence:
self.write(s)
def send(self, s):
'''Sends string ``s`` to the child process, returning the number of
bytes written. If a logfile is specified, a copy is written to that
log.
The default terminal input mode is canonical processing unless set
otherwise by the child process. This allows backspace and other line
processing to be performed prior to transmitting to the receiving
program. As this is buffered, there is a limited size of such buffer.
On Linux systems, this is 4096 (defined by N_TTY_BUF_SIZE). All
other systems honor the POSIX.1 definition PC_MAX_CANON -- 1024
on OSX, 256 on OpenSolaris, and 1920 on FreeBSD.
This value may be discovered using fpathconf(3)::
>>> from os import fpathconf
>>> print(fpathconf(0, 'PC_MAX_CANON'))
256
On such a system, only 256 bytes may be received per line. Any
subsequent bytes received will be discarded. BEL (``'\a'``) is then
sent to output if IMAXBEL (termios.h) is set by the tty driver.
This is usually enabled by default. Linux does not honor this as
an option -- it behaves as though it is always set on.
Canonical input processing may be disabled altogether by executing
a shell, then stty(1), before executing the final program::
>>> bash = pexpect.spawn('/bin/bash', echo=False)
>>> bash.sendline('stty -icanon')
>>> bash.sendline('base64')
>>> bash.sendline('x' * 5000)
'''
if self.delaybeforesend is not None:
time.sleep(self.delaybeforesend)
s = self._coerce_send_string(s)
self._log(s, 'send')
b = self._encoder.encode(s, final=False)
return os.write(self.child_fd, b)
def sendline(self, s=''):
'''Wraps send(), sending string ``s`` to child process, with
``os.linesep`` automatically appended. Returns number of bytes
written. Only a limited number of bytes may be sent for each
line in the default terminal mode, see docstring of :meth:`send`.
'''
s = self._coerce_send_string(s)
return self.send(s + self.linesep)
def _log_control(self, s):
"""Write control characters to the appropriate log files"""
if self.encoding is not None:
s = s.decode(self.encoding, 'replace')
self._log(s, 'send')
def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
n, byte = self.ptyproc.sendcontrol(char)
self._log_control(byte)
return n
def sendeof(self):
'''This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. '''
n, byte = self.ptyproc.sendeof()
self._log_control(byte)
def sendintr(self):
'''This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. '''
n, byte = self.ptyproc.sendintr()
self._log_control(byte)
@property
def flag_eof(self):
return self.ptyproc.flag_eof
@flag_eof.setter
def flag_eof(self, value):
self.ptyproc.flag_eof = value
def eof(self):
'''This returns True if the EOF exception was ever raised.
'''
return self.flag_eof
def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
'''This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but, the child is
technically still alive until its output is read by the parent.
This method is non-blocking if :meth:`wait` has already been called
previously or :meth:`isalive` method returns False. It simply returns
the previously determined exit status.
'''
ptyproc = self.ptyproc
with _wrap_ptyprocess_err():
# exception may occur if "Is some other process attempting
# "job control with our child pid?"
exitstatus = ptyproc.wait()
self.status = ptyproc.status
self.exitstatus = ptyproc.exitstatus
self.signalstatus = ptyproc.signalstatus
self.terminated = True
return exitstatus
def isalive(self):
'''This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. '''
ptyproc = self.ptyproc
with _wrap_ptyprocess_err():
alive = ptyproc.isalive()
if not alive:
self.status = ptyproc.status
self.exitstatus = ptyproc.exitstatus
self.signalstatus = ptyproc.signalstatus
self.terminated = True
return alive
def kill(self, sig):
'''This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. '''
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def getwinsize(self):
'''This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). '''
return self.ptyproc.getwinsize()
def setwinsize(self, rows, cols):
'''This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. '''
return self.ptyproc.setwinsize(rows, cols)
def interact(self, escape_character=chr(29),
input_filter=None, output_filter=None):
'''This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
        simply echoes the child stdout and child stderr to the real stdout and
        it echoes the real stdin to the child stdin. When the user types the
escape_character this method will return None. The escape_character
will not be transmitted. The default for escape_character is
entered as ``Ctrl - ]``, the very same as BSD telnet. To prevent
escaping, escape_character may be set to None.
If a logfile is specified, then the data sent and received from the
child process in interact mode is duplicated to the given log.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
# Note this 'p' global and used in sigwinch_passthrough.
p = pexpect.spawn('/bin/bash')
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
'''
# Flush the buffer.
self.write_to_stdout(self.buffer)
self.stdout.flush()
self.buffer = self.string_type()
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
if escape_character is not None and PY3:
escape_character = escape_character.encode('latin-1')
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
'''This is used by the interact() method.
'''
while data != b'' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
'''This is used by the interact() method.
'''
return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
input_filter=None, output_filter=None):
'''This is used by the interact() method.
'''
while self.isalive():
r, w, e = select_ignore_interrupts([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
try:
data = self.__interact_read(self.child_fd)
except OSError as err:
if err.args[0] == errno.EIO:
# Linux-style EOF
break
raise
if data == b'':
# BSD-style EOF
break
if output_filter:
data = output_filter(data)
self._log(data, 'read')
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter:
data = input_filter(data)
i = -1
if escape_character is not None:
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
if data:
self._log(data, 'send')
self.__interact_writen(self.child_fd, data)
break
self._log(data, 'send')
self.__interact_writen(self.child_fd, data)
def spawnu(*args, **kwargs):
"""Deprecated: pass encoding to spawn() instead."""
kwargs.setdefault('encoding', 'utf-8')
return spawn(*args, **kwargs)
|
abingham/ycmd | refs/heads/master | ycmd/tests/bindings/cpp_bindings_general_test.py | 1 | # Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from ycmd.utils import ToCppStringCompatible as ToCppStr
from ycmd.completers.cpp.clang_completer import ConvertCompletionData
from ycmd.responses import BuildDiagnosticData
from ycmd.tests.bindings import PathToTestFile
from ycmd.tests.test_utils import ClangOnly, TemporaryTestDir
from ycmd.tests.clang import TemporaryClangProject
from nose.tools import eq_
from hamcrest import ( assert_that,
contains,
contains_inanyorder,
contains_string,
has_entries,
has_properties )
import ycm_core
import os
def CppBindings_FilterAndSortCandidates_test():
candidates = [ 'foo1', 'foo2', 'foo3' ]
query = ToCppStr( 'oo' )
candidate_property = ToCppStr( '' )
result_full = ycm_core.FilterAndSortCandidates( candidates,
candidate_property,
query )
result_2 = ycm_core.FilterAndSortCandidates( candidates,
candidate_property,
query,
2 )
del candidates
del query
del candidate_property
assert_that( result_full, contains( 'foo1', 'foo2', 'foo3' ) )
assert_that( result_2, contains( 'foo1', 'foo2' ) )
def CppBindings_IdentifierCompleter_test():
identifier_completer = ycm_core.IdentifierCompleter()
identifiers = ycm_core.StringVector()
identifiers.append( ToCppStr( 'foo' ) )
identifiers.append( ToCppStr( 'bar' ) )
identifiers.append( ToCppStr( 'baz' ) )
identifier_completer.AddIdentifiersToDatabase( identifiers,
ToCppStr( 'foo' ),
ToCppStr( 'file' ) )
del identifiers
query_fo_10 = identifier_completer.CandidatesForQueryAndType(
ToCppStr( 'fo' ), ToCppStr( 'foo' ), 10 )
query_fo = identifier_completer.CandidatesForQueryAndType(
ToCppStr( 'fo' ), ToCppStr( 'foo' ) )
query_a = identifier_completer.CandidatesForQueryAndType(
ToCppStr( 'a' ), ToCppStr( 'foo' ) )
assert_that( query_fo_10, contains( 'foo' ) )
assert_that( query_fo, contains( 'foo' ) )
assert_that( query_a, contains( 'bar', 'baz' ) )
identifiers = ycm_core.StringVector()
identifiers.append( ToCppStr( 'oof' ) )
identifiers.append( ToCppStr( 'rab' ) )
identifiers.append( ToCppStr( 'zab' ) )
identifier_completer.ClearForFileAndAddIdentifiersToDatabase(
identifiers, ToCppStr( 'foo' ), ToCppStr( 'file' ) )
query_a_10 = identifier_completer.CandidatesForQueryAndType(
ToCppStr( 'a' ), ToCppStr( 'foo' ) )
assert_that( query_a_10, contains( 'rab', 'zab' ) )
@ClangOnly
def CppBindings_UnsavedFile_test():
unsaved_file = ycm_core.UnsavedFile()
filename = ToCppStr( 'foo' )
contents = ToCppStr( 'bar\\n' )
length = len( contents )
unsaved_file.filename_ = filename
unsaved_file.contents_ = contents
unsaved_file.length_ = length
del filename
del contents
del length
assert_that( unsaved_file, has_properties( {
'filename_': 'foo',
'contents_': 'bar\\n',
'length_': len( 'bar\\n' )
} ) )
@ClangOnly
def CppBindings_DeclarationLocation_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = clang_completer.GetDeclarationLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_DefinitionOrDeclarationLocation_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = ( clang_completer.
GetDefinitionOrDeclarationLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_DefinitionLocation_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
location = clang_completer.GetDefinitionLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that( location,
has_properties( { 'line_number_': 2,
'column_number_': 5,
'filename_': PathToTestFile( 'foo.c' ) } ) )
@ClangOnly
def CppBindings_Candidates_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 11
column = 6
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
candidates = ( clang_completer
.CandidatesForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
candidates = [ ConvertCompletionData( x ) for x in candidates ]
assert_that( candidates, contains_inanyorder(
has_entries( {
'detailed_info': 'float b\n',
'extra_menu_info': 'float',
'insertion_text': 'b',
'kind': 'MEMBER',
'menu_text': 'b'
} ),
has_entries( {
'detailed_info': 'int a\n',
'extra_menu_info': 'int',
'insertion_text': 'a',
'kind': 'MEMBER',
'menu_text': 'a'
} )
) )
@ClangOnly
def CppBindings_GetType_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
type_at_cursor = clang_completer.GetTypeAtLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
eq_( 'int ()', type_at_cursor )
@ClangOnly
def CppBindings_GetParent_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 17
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
enclosing_function = ( clang_completer
.GetEnclosingFunctionAtLocation( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse ) )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
eq_( 'bar', enclosing_function )
@ClangOnly
def CppBindings_FixIt_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 3
column = 5
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
fixits = clang_completer.GetFixItsForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
fixits,
contains( has_properties( {
'text': ( PathToTestFile( 'foo.c' ) +
':3:16: error: expected \';\' at end of declaration' ),
'location': has_properties( {
'line_number_': 3,
'column_number_': 16,
'filename_': PathToTestFile( 'foo.c' )
} ),
'chunks': contains( has_properties( {
'replacement_text': ';',
'range': has_properties( {
'start_': has_properties( {
'line_number_': 3,
'column_number_': 16,
} ),
'end_': has_properties( {
'line_number_': 3,
'column_number_': 16,
} ),
} )
} ) ),
} ) ) )
@ClangOnly
def CppBindings_Docs_test():
translation_unit = ToCppStr( PathToTestFile( 'foo.c' ) )
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
line = 9
column = 16
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
docs = clang_completer.GetDocsForLocationInFile( translation_unit,
filename,
line,
column,
unsaved_file_vector,
flags,
reparse )
del translation_unit
del filename
del line
del column
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
docs,
has_properties( {
'comment_xml': '<Function file="' + PathToTestFile( 'foo.c' ) + '"'
' line="2" column="5"><Name>foooo</Name><USR>c:@F@foooo#'
'</USR><Declaration>int foooo()</Declaration><Abstract>'
'<Para> Foo</Para></Abstract></Function>',
'brief_comment': 'Foo',
'raw_comment': '/// Foo',
'canonical_type': 'int ()',
'display_name': 'foooo' } ) )
@ClangOnly
def CppBindings_Diags_test():
filename = ToCppStr( PathToTestFile( 'foo.c' ) )
unsaved_file_vector = ycm_core.UnsavedFileVector()
flags = ycm_core.StringVector()
flags.append( ToCppStr( '-xc++' ) )
reparse = True
clang_completer = ycm_core.ClangCompleter()
diag_vector = clang_completer.UpdateTranslationUnit( filename,
unsaved_file_vector,
flags )
diags = [ BuildDiagnosticData( x ) for x in diag_vector ]
del diag_vector
del filename
del unsaved_file_vector
del flags
del clang_completer
del reparse
assert_that(
diags,
contains(
has_entries( {
'kind': 'ERROR',
'text': contains_string( 'expected \';\' at end of declaration' ),
'ranges': contains(),
'location': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
'location_extent': has_entries( {
'start': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
'end': has_entries( {
'line_num': 3,
'column_num': 16,
} ),
} ),
} ) ) )
@ClangOnly
def CppBindings_CompilationDatabase_test():
with TemporaryTestDir() as tmp_dir:
compile_commands = [
{
'directory': tmp_dir,
'command': 'clang++ -x c++ -I. -I/absolute/path -Wall',
'file': os.path.join( tmp_dir, 'test.cc' ),
},
]
with TemporaryClangProject( tmp_dir, compile_commands ):
db = ycm_core.CompilationDatabase( tmp_dir )
db_successful = db.DatabaseSuccessfullyLoaded()
db_busy = db.AlreadyGettingFlags()
db_dir = db.database_directory
compilation_info = db.GetCompilationInfoForFile(
compile_commands[ 0 ][ 'file' ] )
del db
del compile_commands
eq_( db_successful, True )
eq_( db_busy, False )
eq_( db_dir, tmp_dir )
assert_that( compilation_info,
has_properties( {
'compiler_working_dir_': tmp_dir,
'compiler_flags_': contains( 'clang++',
'-x',
'c++',
'-I.',
'-I/absolute/path',
'-Wall' )
} ) )
|
jcftang/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py | 48 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_vmsnapshot
short_description: Manages VM snapshots on Apache CloudStack based clouds.
description:
- Create, remove and revert VM from snapshots.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Unique Name of the snapshot. In CloudStack terms display name.
required: true
aliases: ['display_name']
vm:
description:
- Name of the virtual machine.
required: true
description:
description:
- Description of the snapshot.
required: false
default: null
snapshot_memory:
description:
- Snapshot memory if set to true.
required: false
default: false
zone:
description:
- Name of the zone in which the VM is in. If not set, default zone is used.
required: false
default: null
project:
description:
- Name of the project the VM is assigned to.
required: false
default: null
state:
description:
- State of the snapshot.
required: false
default: 'present'
choices: [ 'present', 'absent', 'revert' ]
domain:
description:
- Domain the VM snapshot is related to.
required: false
default: null
account:
description:
- Account the VM snapshot is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a VM snapshot of disk and memory before an upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
snapshot_memory: yes
# Revert a VM to a snapshot after a failed upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: revert
# Remove a VM snapshot after successful upgrade
- local_action:
module: cs_vmsnapshot
name: Snapshot before upgrade
vm: web-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the snapshot.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the snapshot.
returned: success
type: string
sample: snapshot before update
display_name:
description: Display name of the snapshot.
returned: success
type: string
sample: snapshot before update
created:
description: date of the snapshot.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
current:
description: true if snapshot is current
returned: success
type: boolean
sample: True
state:
description: state of the vm snapshot
returned: success
type: string
sample: Allocated
type:
description: type of vm snapshot
returned: success
type: string
sample: DiskAndMemory
description:
description: description of vm snapshot
returned: success
type: string
sample: snapshot brought to you by Ansible
domain:
  description: Domain the vm snapshot is related to.
returned: success
type: string
sample: example domain
account:
description: Account the vm snapshot is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the vm snapshot is related to.
returned: success
type: string
sample: Production
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVmSnapshot, self).__init__(module)
self.returns = {
'type': 'type',
'current': 'current',
}
def get_snapshot(self):
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
args['name'] = self.module.params.get('name')
snapshots = self.cs.listVMSnapshot(**args)
if snapshots:
return snapshots['vmSnapshot'][0]
return None
def create_snapshot(self):
snapshot = self.get_snapshot()
if not snapshot:
self.result['changed'] = True
args = {}
args['virtualmachineid'] = self.get_vm('id')
args['name'] = self.module.params.get('name')
args['description'] = self.module.params.get('description')
args['snapshotmemory'] = self.module.params.get('snapshot_memory')
if not self.module.check_mode:
res = self.cs.createVMSnapshot(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
snapshot = self.poll_job(res, 'vmsnapshot')
return snapshot
def remove_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
def revert_vm_to_snapshot(self):
snapshot = self.get_snapshot()
if snapshot:
self.result['changed'] = True
if snapshot['state'] != "Ready":
self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
if not self.module.check_mode:
res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
poll_async = self.module.params.get('poll_async')
if res and poll_async:
res = self.poll_job(res, 'vmsnapshot')
return snapshot
self.module.fail_json(msg="snapshot not found, could not revert VM")
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True, aliases=['display_name']),
vm = dict(required=True),
description = dict(default=None),
zone = dict(default=None),
snapshot_memory = dict(type='bool', default=False),
state = dict(choices=['present', 'absent', 'revert'], default='present'),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['icmp_type', 'icmp_code'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
try:
acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
state = module.params.get('state')
if state in ['revert']:
snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
elif state in ['absent']:
snapshot = acs_vmsnapshot.remove_snapshot()
else:
snapshot = acs_vmsnapshot.create_snapshot()
result = acs_vmsnapshot.get_result(snapshot)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
littlstar/chromium.src | refs/heads/nw | tools/telemetry/telemetry/web_perf/metrics/rendering_stats.py | 4 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from operator import attrgetter
from telemetry.page import page_test
from telemetry.web_perf.metrics import rendering_frame
# These are LatencyInfo component names indicating the various components
# that the input event has travelled through.
# This is when the input event first reaches chrome.
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
# This is when the input event was originally created by OS.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
# This is when the input event was sent from browser to renderer.
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
# This is when an input event is turned into a scroll update.
BEGIN_SCROLL_UPDATE_COMP_NAME = (
'INPUT_EVENT_LATENCY_BEGIN_SCROLL_UPDATE_MAIN_COMPONENT')
# This is when a scroll update is forwarded to the main thread.
FORWARD_SCROLL_UPDATE_COMP_NAME = (
'INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT')
# This is when the input event has reached swap buffer.
END_COMP_NAME = 'INPUT_EVENT_LATENCY_TERMINATED_FRAME_SWAP_COMPONENT'
# Name for a main thread scroll update latency event.
SCROLL_UPDATE_EVENT_NAME = 'InputLatency:ScrollUpdate'
# Name for a gesture scroll update latency event.
GESTURE_SCROLL_UPDATE_EVENT_NAME = 'InputLatency:GestureScrollUpdate'
class NotEnoughFramesError(page_test.MeasurementFailure):
def __init__(self, frame_count):
super(NotEnoughFramesError, self).__init__(
'Only %i frame timestamps were collected ' % frame_count +
'(at least two are required).\n'
'Issues that have caused this in the past:\n' +
'- Browser bugs that prevents the page from redrawing\n' +
'- Bugs in the synthetic gesture code\n' +
'- Page and benchmark out of sync (e.g. clicked element was renamed)\n' +
'- Pages that render extremely slow\n' +
'- Pages that can\'t be scrolled')
def GetInputLatencyEvents(process, timeline_range):
"""Get input events' LatencyInfo from the process's trace buffer that are
within the timeline_range.
Input events dump their LatencyInfo into trace buffer as async trace event
with name "InputLatency". The trace event has a memeber 'data' containing
its latency history.
"""
input_events = []
if not process:
return input_events
for event in process.IterAllAsyncSlicesOfName('InputLatency'):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'data' in ss.args:
input_events.append(ss)
return input_events
def ComputeInputEventLatencies(input_events):
""" Compute input event latencies.
Input event latency is the time from when the input event is created to
when its resulted page is swap buffered.
  Input events on different platforms use different LatencyInfo components to
  record their creation timestamp. We go through the following component list
to find the creation timestamp:
1. INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT -- when event is created in OS
2. INPUT_EVENT_LATENCY_UI_COMPONENT -- when event reaches Chrome
3. INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT -- when event reaches RenderWidget
If the latency starts with a
INPUT_EVENT_LATENCY_BEGIN_SCROLL_UPDATE_MAIN_COMPONENT component, then it is
classified as a scroll update instead of a normal input latency measure.
Returns:
A list sorted by increasing start time of latencies which are tuples of
(input_event_name, latency_in_ms).
"""
input_event_latencies = []
for event in input_events:
data = event.args['data']
if END_COMP_NAME in data:
end_time = data[END_COMP_NAME]['time']
if ORIGINAL_COMP_NAME in data:
start_time = data[ORIGINAL_COMP_NAME]['time']
elif UI_COMP_NAME in data:
start_time = data[UI_COMP_NAME]['time']
elif BEGIN_COMP_NAME in data:
start_time = data[BEGIN_COMP_NAME]['time']
elif BEGIN_SCROLL_UPDATE_COMP_NAME in data:
start_time = data[BEGIN_SCROLL_UPDATE_COMP_NAME]['time']
else:
        raise ValueError('LatencyInfo has no begin component')
latency = (end_time - start_time) / 1000.0
input_event_latencies.append((start_time, event.name, latency))
input_event_latencies.sort()
return [(name, latency) for _, name, latency in input_event_latencies]
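# Illustrative sketch of the 'data' layout assumed above (timestamps are made up,
# not taken from a real trace): each component maps to {'time': <microseconds>},
# e.g. {ORIGINAL_COMP_NAME: {'time': 1000}, END_COMP_NAME: {'time': 6000}}
# yields an input event latency of (6000 - 1000) / 1000.0 = 5.0 ms.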
def HasRenderingStats(process):
""" Returns True if the process contains at least one
BenchmarkInstrumentation::*RenderingStats event with a frame.
"""
if not process:
return False
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::MainThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::ImplThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
return False
class RenderingStats(object):
def __init__(self, renderer_process, browser_process, timeline_ranges):
"""
Utility class for extracting rendering statistics from the timeline (or
    other logging facilities), and providing them in a common format to classes
that compute benchmark metrics from this data.
Stats are lists of lists of numbers. The outer list stores one list per
timeline range.
All *_time values are measured in milliseconds.
"""
assert(len(timeline_ranges) > 0)
# Find the top level process with rendering stats (browser or renderer).
if HasRenderingStats(browser_process):
timestamp_process = browser_process
else:
timestamp_process = renderer_process
self.frame_timestamps = []
self.frame_times = []
self.paint_times = []
self.painted_pixel_counts = []
self.record_times = []
self.recorded_pixel_counts = []
self.rasterize_times = []
self.rasterized_pixel_counts = []
self.approximated_pixel_percentages = []
    # End-to-end latency for an input event - from when the input event is
    # generated to when its resulting page is swap buffered.
self.input_event_latency = []
self.frame_queueing_durations = []
# Latency from when a scroll update is sent to the main thread until the
# resulting frame is swapped.
self.scroll_update_latency = []
# Latency for a GestureScrollUpdate input event.
self.gesture_scroll_update_latency = []
for timeline_range in timeline_ranges:
self.frame_timestamps.append([])
self.frame_times.append([])
self.paint_times.append([])
self.painted_pixel_counts.append([])
self.record_times.append([])
self.recorded_pixel_counts.append([])
self.rasterize_times.append([])
self.rasterized_pixel_counts.append([])
self.approximated_pixel_percentages.append([])
self.input_event_latency.append([])
self.scroll_update_latency.append([])
self.gesture_scroll_update_latency.append([])
if timeline_range.is_empty:
continue
self._InitFrameTimestampsFromTimeline(timestamp_process, timeline_range)
self._InitMainThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitImplThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitInputLatencyStatsFromTimeline(
browser_process, renderer_process, timeline_range)
self._InitFrameQueueingDurationsFromTimeline(
renderer_process, timeline_range)
# Check if we have collected at least 2 frames in every range. Otherwise we
# can't compute any meaningful metrics.
for segment in self.frame_timestamps:
if len(segment) < 2:
raise NotEnoughFramesError(len(segment))
def _InitInputLatencyStatsFromTimeline(
self, browser_process, renderer_process, timeline_range):
latency_events = GetInputLatencyEvents(browser_process, timeline_range)
# Plugin input event's latency slice is generated in renderer process.
latency_events.extend(GetInputLatencyEvents(renderer_process,
timeline_range))
input_event_latencies = ComputeInputEventLatencies(latency_events)
self.input_event_latency[-1] = [
latency for name, latency in input_event_latencies]
self.scroll_update_latency[-1] = [
latency for name, latency in input_event_latencies
if name == SCROLL_UPDATE_EVENT_NAME]
self.gesture_scroll_update_latency[-1] = [
latency for name, latency in input_event_latencies
if name == GESTURE_SCROLL_UPDATE_EVENT_NAME]
def _GatherEvents(self, event_name, process, timeline_range):
events = []
for event in process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
return events
def _AddFrameTimestamp(self, event):
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if len(self.frame_timestamps[-1]) >= 2:
self.frame_times[-1].append(round(self.frame_timestamps[-1][-1] -
self.frame_timestamps[-1][-2], 2))
def _InitFrameTimestampsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::MainThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
self._AddFrameTimestamp(event)
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
self._AddFrameTimestamp(event)
def _InitMainThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::MainThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
self.paint_times[-1].append(1000.0 * data['paint_time'])
self.painted_pixel_counts[-1].append(data['painted_pixel_count'])
self.record_times[-1].append(1000.0 * data['record_time'])
self.recorded_pixel_counts[-1].append(data['recorded_pixel_count'])
def _InitImplThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
self.rasterize_times[-1].append(1000.0 * data['rasterize_time'])
self.rasterized_pixel_counts[-1].append(data['rasterized_pixel_count'])
if data.get('visible_content_area', 0):
self.approximated_pixel_percentages[-1].append(
round(float(data['approximated_visible_content_area']) /
float(data['visible_content_area']) * 100.0, 3))
else:
self.approximated_pixel_percentages[-1].append(0.0)
def _InitFrameQueueingDurationsFromTimeline(self, process, timeline_range):
try:
events = rendering_frame.GetFrameEventsInsideRange(process,
timeline_range)
new_frame_queueing_durations = [e.queueing_duration for e in events]
self.frame_queueing_durations.append(new_frame_queueing_durations)
except rendering_frame.NoBeginFrameIdException:
logging.warning('Current chrome version does not support the queueing '
'delay metric.')
|
demon-ru/iml-crm | refs/heads/master | addons/purchase_requisition/__openerp__.py | 61 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Requisitions',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Purchase Management',
'images': ['images/purchase_requisitions.jpeg'],
'website': 'http://www.openerp.com',
'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object regroups purchase orders and allows you to
easily keep track of and order all your purchase orders.
""",
'depends' : ['purchase'],
'demo': ['purchase_requisition_demo.xml'],
'data': ['views/purchase_requisition.xml',
'security/purchase_tender.xml',
'wizard/purchase_requisition_partner_view.xml',
'wizard/bid_line_qty_view.xml',
'purchase_requisition_data.xml',
'purchase_requisition_view.xml',
'purchase_requisition_report.xml',
'purchase_requisition_workflow.xml',
'security/ir.model.access.csv','purchase_requisition_sequence.xml',
'views/report_purchaserequisition.xml',
],
'auto_install': False,
'test': [
'test/purchase_requisition_users.yml',
'test/purchase_requisition_demo.yml',
'test/cancel_purchase_requisition.yml',
'test/purchase_requisition.yml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
partofthething/home-assistant | refs/heads/dev | homeassistant/components/smarthab/config_flow.py | 9 | """SmartHab configuration flow."""
import logging
import pysmarthab
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
# pylint: disable=unused-import
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
class SmartHabConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""SmartHab config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_EMAIL, default=user_input.get(CONF_EMAIL, "")
): str,
vol.Required(CONF_PASSWORD): str,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, None)
username = user_input[CONF_EMAIL]
password = user_input[CONF_PASSWORD]
# Check if already configured
if self.unique_id is None:
await self.async_set_unique_id(username)
self._abort_if_unique_id_configured()
# Setup connection with SmartHab API
hub = pysmarthab.SmartHab()
try:
await hub.async_login(username, password)
# Verify that passed in configuration works
if hub.is_logged_in():
return self.async_create_entry(
title=username, data={CONF_EMAIL: username, CONF_PASSWORD: password}
)
errors["base"] = "invalid_auth"
except pysmarthab.RequestFailedException:
_LOGGER.exception("Error while trying to reach SmartHab API")
errors["base"] = "service"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected error during login")
errors["base"] = "unknown"
return self._show_setup_form(user_input, errors)
async def async_step_import(self, import_info):
"""Handle import from legacy config."""
return await self.async_step_user(import_info)
|
robertkowalski/node-gyp | refs/heads/master | gyp/setup.py | 2462 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
|
morian/blacknet | refs/heads/master | blacknet/updater.py | 1 | import codecs
import csv
import os
import shutil
import sys
import tempfile
import zipfile
from .config import BlacknetConfig, BlacknetConfigurationInterface
from .database import BlacknetDatabase
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GEOLITE_CSV_URL = "https://geolite.maxmind.com/download/geoip/database/GeoLiteCity_CSV/GeoLiteCity-latest.zip"
def utf8_ensure(csv_file):
for line in csv_file:
# Those lines are 'unicode' in python2
if not isinstance(line, str):
line = line.strip().encode('utf-8')
yield line
class BlacknetGeoUpdater(BlacknetConfigurationInterface):
""" Blacknet geolocation database updater """
def __init__(self, cfg_file=None):
""" load configuration file and database parameters """
self.__database = None
self.__dirname = None
self.__filepath = {}
self.__test_mode = None
config = BlacknetConfig()
config.load(cfg_file)
BlacknetConfigurationInterface.__init__(self, config, 'server')
self.__database = BlacknetDatabase(config)
def __del__(self):
if not self.test_mode:
dirname = self.__dirname
else:
# That's the ZipDir (extracted)
dirname = os.path.join(self.dirname, 'geolitecity')
if dirname:
shutil.rmtree(dirname)
self.__dirname = None
@property
def test_mode(self):
if self.__test_mode is None:
if self.has_config('test_mode'):
self.__test_mode = bool(self.get_config('test_mode'))
else:
self.__test_mode = False
return self.__test_mode
@property
def dirname(self):
if self.__dirname is None:
if self.test_mode:
self.__dirname = os.path.join('tests', 'geo-updater')
else:
self.__dirname = tempfile.mkdtemp()
return self.__dirname
def log(self, message):
sys.stdout.write("%s\n" % message)
def fetch_zip(self):
if not self.test_mode:
zip_file = os.path.join(self.dirname, 'geolitecity.zip')
zipf = open(zip_file, 'wb')
res = urlopen(GEOLITE_CSV_URL)
content = res.read()
zipf.write(content)
zipf.close()
self.log("[+] Fetched zipfile successfully")
def extract_zip(self):
zip_dir = os.path.join(self.dirname, 'geolitecity')
if not os.path.exists(zip_dir):
os.mkdir(zip_dir)
zip_file = os.path.join(self.dirname, 'geolitecity.zip')
zip_ref = zipfile.ZipFile(zip_file, 'r')
for item in zip_ref.namelist():
filepath = zip_ref.extract(item, zip_dir)
filename = os.path.basename(filepath)
if filename == 'GeoLiteCity-Blocks.csv':
self.__filepath['blocks'] = filepath
elif filename == 'GeoLiteCity-Location.csv':
self.__filepath['locations'] = filepath
# Unknown file?
self.log("[+] Extracted file %s" % item)
zip_ref.close()
def csv_blocks_import(self):
block_file = self.__filepath['blocks']
block_f = codecs.open(block_file, 'r', 'latin1')
cursor = self.__database.cursor()
cursor.truncate('blocks')
self.log("[+] Trimmed blocks table")
line_count = 0
csv_data = csv.reader(utf8_ensure(block_f))
for row in csv_data:
line_count += 1
if line_count < 3:
continue
cursor.insert_block(row)
block_f.close()
self.log("[+] Updated blocks table (%u entries)" % (line_count - 2))
def csv_locations_import(self):
block_file = self.__filepath['locations']
block_f = codecs.open(block_file, 'r', 'latin1')
cursor = self.__database.cursor()
cursor.truncate('locations')
self.log("[+] Trimmed locations table")
line_count = 0
csv_data = csv.reader(utf8_ensure(block_f))
for row in csv_data:
line_count += 1
if line_count < 3:
continue
cursor.insert_location(row)
block_f.close()
self.log("[+] Updated locations table (%u entries)" % (line_count - 2))
def csv_to_database(self):
self.csv_blocks_import()
self.csv_locations_import()
def update(self):
self.fetch_zip()
self.extract_zip()
self.csv_to_database()
self.log("[+] Update Complete")
if not self.test_mode:
self.log("[!] We *STRONGLY* suggest running \"blacknet-scrubber --full-check --fix\" to update gelocation positions.")
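# Example usage sketch (the configuration path below is hypothetical):
#   updater = BlacknetGeoUpdater('/etc/blacknet/server.cfg')
#   updater.update()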
|
matbu/ansible-modules-extras | refs/heads/devel | packaging/os/pkgng.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- Name of package to install/remove.
required: true
state:
description:
- State of the package.
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- Use local package base instead of fetching an updated one.
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- A comma-separated list of keyvalue-pairs of the form
C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- For pkgng versions before 1.1.4, specify packagesite to use
for downloading packages. If not specified, use settings from
C(/usr/local/etc/pkg.conf).
            - For newer pkgng versions, specify the name of a repository
configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- Can not be used together with I(chroot) option.
required: false
chroot:
version_added: "2.1"
description:
- Pkg will chroot in the specified environment.
- Can not be used together with I(rootdir) option.
required: false
autoremove:
version_added: "2.2"
description:
- Remove automatically installed packages which are no longer needed.
required: false
choices: [ "yes", "no" ]
default: no
author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that already in cache packages won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng:
name: foo
state: present
# Annotate package foo and bar
- pkgng:
name: foo,bar
annotation: '+test1=baz,-test2,:test3=foobar'
# Remove packages foo and bar
- pkgng:
name: foo,bar
state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, pkgng_path, name, dir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
if rc == 0:
return True
return False
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
version = [int(x) for x in re.split(r'[\._]', out)]
i = 0
new_pkgng = True
while compare_version[i] == version[i]:
i += 1
if i == min(len(compare_version), len(version)):
break
else:
if compare_version[i] > version[i]:
new_pkgng = False
return not new_pkgng
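# Worked example for pkgng_older_than (the `pkg -v` output is hypothetical):
# if `pkg -v` reports "1.3.8", version becomes [1, 3, 8]; compared element-wise
# against compare_version=[1, 1, 4] the function returns False (not older),
# while "1.0.9" would yield [1, 0, 9] and return True (older than 1.1.4).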
def remove_packages(module, pkgng_path, packages, dir_arg):
remove_c = 0
    # Using a for loop so that, in case of an error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
install_c = 0
# as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
# in /usr/local/etc/pkg/repos
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
if old_pkgng:
pkgsite = "PACKAGESITE=%s" % (pkgsite)
else:
pkgsite = "-r %s" % (pkgsite)
batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts,
# setting them to their default values.
if not module.check_mode and not cached:
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
if query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
if old_pkgng:
rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
else:
rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package))
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, dir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
            module.fail_json(msg="could not annotate %s: %s"
% (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
module.fail_json(
            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
% (package, tag, _value, value))
return False
else:
# Annotation exists, nothing to do
return False
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
% (pkgng_path, dir_arg, package, tag))
if rc != 0:
            module.fail_json(msg="could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not _value:
# No such tag
        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
% (package, tag))
elif _value == value:
# No change in value
return False
else:
        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
            module.fail_json(msg="could not change annotation to %s: %s"
% (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
annotations = map(lambda _annotation:
re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
_annotation).groupdict(),
re.split(r',', annotation))
operation = {
'+': annotation_add,
'-': annotation_delete,
':': annotation_modify
}
for package in packages:
for _annotation in annotations:
if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
def autoremove_packages(module, pkgng_path, dir_arg):
rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
autoremove_c = 0
match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
if match:
autoremove_c = int(match.group(1))
if autoremove_c == 0:
return False, "no package(s) to autoremove"
if not module.check_mode:
rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
return True, "autoremoved %d package(s)" % (autoremove_c)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"], required=False),
name = dict(aliases=["pkg"], required=True, type='list'),
cached = dict(default=False, type='bool'),
annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False),
rootdir = dict(default="", required=False, type='path'),
chroot = dict(default="", required=False, type='path'),
autoremove = dict(default=False, type='bool')),
supports_check_mode = True,
mutually_exclusive =[["rootdir", "chroot"]])
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"]
changed = False
msgs = []
dir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
dir_arg = "--rootdir %s" % (p["rootdir"])
if p["chroot"] != "":
dir_arg = '--chroot %s' % (p["chroot"])
if p["state"] == "present":
_changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
_changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["autoremove"]:
_changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
_changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
if __name__ == '__main__':
main()
|
AI-Innovation/cs231n_ass1 | refs/heads/master | cs231n/vis_utils.py | 65 | from math import sqrt, ceil
import numpy as np
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in xrange(grid_size):
x0, x1 = 0, W
for x in xrange(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
# grid[y0:y1, x0:x1] = Xs[next_idx]
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
# grid_max = np.max(grid)
# grid_min = np.min(grid)
# grid = ubound * (grid - grid_min) / (grid_max - grid_min)
return grid
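# Example usage sketch (random data; assumes matplotlib.pyplot is available as
# plt in the calling code):
#   Xs = np.random.rand(16, 32, 32, 3)        # N=16 images of 32x32 RGB
#   grid = visualize_grid(Xs, ubound=1.0, padding=2)
#   plt.imshow(grid); plt.axis('off')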
def vis_grid(Xs):
""" visualize a grid of images """
(N, H, W, C) = Xs.shape
A = int(ceil(sqrt(N)))
G = np.ones((A*H+A, A*W+A, C), Xs.dtype)
G *= np.min(Xs)
n = 0
for y in range(A):
for x in range(A):
if n < N:
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = Xs[n,:,:,:]
n += 1
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
def vis_nn(rows):
""" visualize array of arrays of images """
N = len(rows)
D = len(rows[0])
H,W,C = rows[0][0].shape
Xs = rows[0][0]
G = np.ones((N*H+N, D*W+D, C), Xs.dtype)
for y in range(N):
for x in range(D):
G[y*H+y:(y+1)*H+y, x*W+x:(x+1)*W+x, :] = rows[y][x]
# normalize to [0,1]
maxg = G.max()
ming = G.min()
G = (G - ming)/(maxg-ming)
return G
|
imanolarrieta/RL | refs/heads/master | examples/gridworld/ucrl.py | 1 |
"""
Agent Tutorial for RLPy
=================================
Assumes you have created the SARSA0.py agent according to the tutorial and
placed it in the current working directory.
Tests the agent on the GridWorld domain.
"""
__author__ = "Robert H. Klein"
from rlpy.Domains import GridWorld
from rlpy.Agents import UCRL
from rlpy.Representations import Tabular
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import os
def make_experiment(exp_id=1, path="./Results/Tests/gridworld-ucrl"):
"""
Each file specifying an experimental setup should contain a
make_experiment function which returns an instance of the Experiment
class with everything set up.
    @param exp_id: number used to seed the random number generators
@param path: output directory where logs and results are stored
"""
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
## Domain:
maze = os.path.join(GridWorld.default_map_dir, '4x5.txt')
domain = GridWorld(maze, noise=0.1)
opt["domain"] = domain
## Representation
# discretization only needed for continuous state spaces, discarded otherwise
representation = Tabular(domain, discretization=20)
## Policy
policy = eGreedy(representation, epsilon=0.2)
## Agent
opt["agent"] = UCRL(representation=representation, policy=policy,
discount_factor=domain.discount_factor,
initial_learn_rate=0.1)
opt["checks_per_policy"] = 100
opt["max_steps"] = 10000
opt["num_policy_checks"] = 10
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
experiment = make_experiment(1)
experiment.run(visualize_steps=False, # should each learning step be shown?
visualize_learning=False, # show policy / value function?
visualize_performance=1) # show performance runs?
experiment.plot()
experiment.save()
|
daishi4u/J7_Afterburner | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
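# Example dispatch sketch (synthetic buffer, for illustration only): 144 bytes
# selects PebsEvent, 176 bytes selects PebsNHM, any other size falls back to
# PerfEvent.
#   raw = struct.pack('Q' * 18, *range(18))   # 18 * 8 = 144 bytes
#   ev = create_event('cycles', 'bash', 'libc-2.19.so', 'main', raw)
#   ev.show()                                 # prints the PMU event summary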
|
MartyMacGyver/DirTreeDigest | refs/heads/master | python3/dirtreedigest/comparator.py | 1 | """
Copyright (c) 2017-2021 Martin F. Falatic
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from collections import defaultdict
from datetime import datetime
from enum import Enum
import dirtreedigest.digester as dtdigester
import dirtreedigest.utils as dtutils
class DiffType(Enum):
M_UNDEF = 0 # Undefined (error case) # noqa: E221
M_NONE = 1 # Diff file Name, Diff Data ("no match" anywhere) # noqa: E221
M_SFSD = 2 # Same Full name, Same Data (Opposite only) # noqa: E221
M_SFDD = 3 # Same Full name, Diff Data (Opposite only) # noqa: E221
M_SNSD = 4 # Same file Name, Same Data (Opposite and Same sides) # noqa: E221
M_DNSD = 5 # Diff file Name, Same Data (Opposite and Same sides) # noqa: E221
class Comparator(object):
""" Digest blob comparator and supporting functions """
elements_l = []
elements_r = []
files_by_name_l = {}
files_by_name_r = {}
basepath_l = ''
basepath_r = ''
best_digest = None
control_data = None
def __init__(self, control_data):
self.logger = logging.getLogger('comparator')
self.control_data = control_data
def choose_best_digest_for_compare(self, elems1, elems2):
best = -1
        if not (len(elems1) > 0 and len(elems2) > 0):
self.logger.error('Cannot choose a digest - not found')
return None
        if 'digests' not in elems1[0] or 'digests' not in elems2[0]:
self.logger.error('Cannot choose a digest - not found')
return None
s1 = set(elems1[0]['digests'])
s2 = set(elems2[0]['digests'])
for digest in (s1 & s2):
if digest in dtdigester.DIGEST_PRIORITY:
best = max(best, dtdigester.DIGEST_PRIORITY.index(digest))
if best < 0:
self.logger.error('Cannot choose a digest - none in common')
return None
best_name = dtdigester.DIGEST_PRIORITY[best]
return best_name
def slice_data(self, elements):
files_by_name = {elem['full_name']: elem for elem in elements if elem['type'] == 'F'}
files_by_digest = defaultdict(list)
for elem in elements:
if elem['type'] == 'F':
cmp_digest = elem['digests'][self.best_digest] #TODO: Mix in file size here?
files_by_digest[cmp_digest].append(elem)
return (files_by_name, files_by_digest)
def compare_by_full_names(self, name_same):
elems_changed = []
for name in name_same:
digest_l = self.files_by_name_l[name]['digests'][self.best_digest]
digest_r = self.files_by_name_r[name]['digests'][self.best_digest]
self.files_by_name_l[name]['match'] = [self.files_by_digest_r[digest_l]]
self.files_by_name_r[name]['match'] = [self.files_by_digest_l[digest_r]]
if digest_l == digest_r:
self.files_by_name_l[name]['status'] = 'same'
self.files_by_name_r[name]['status'] = 'same'
if not self.control_data['notimestamps'] and self.files_by_name_l[name]['mtime'] != self.files_by_name_r[name]['mtime']:
time_l = datetime.fromtimestamp(int("0x"+self.files_by_name_l[name]['mtime'], 16))
time_r = datetime.fromtimestamp(int("0x"+self.files_by_name_r[name]['mtime'], 16))
self.logger.info("SAME-T: %ss: \"%s\"", int((time_r - time_l).total_seconds()), name)
else:
self.files_by_name_l[name]['status'] = 'changed'
self.files_by_name_r[name]['status'] = 'changed'
if not self.control_data['notimestamps'] and self.files_by_name_l[name]['mtime'] == self.files_by_name_r[name]['mtime']:
self.logger.info("MOD-T : \"%s\"", name)
elems_changed.append(self.files_by_name_r[name])
return (elems_changed)
def check_lhs(self, name_diff_l):
elems_moved = []
elems_deleted = []
for name in name_diff_l:
digest_l = self.files_by_name_l[name]['digests'][self.best_digest]
# print("checking", name)
if digest_l in self.files_by_digest_r:
# print(name, digest_l)
# matched_elems = [elem['full_name'] for elem in self.files_by_digest_r[digest_l]]
# matched_names = ','.join(matched_elems)
# print("< MOVED {} == {}".format(name, matched_names))
self.files_by_name_l[name]['status'] = 'moved'
self.files_by_name_l[name]['match'] = self.files_by_digest_r[digest_l]
elems_moved.append(self.files_by_name_l[name])
else:
# print("< DELETED {}".format(name))
self.files_by_name_l[name]['status'] = 'deleted'
elems_deleted.append(self.files_by_name_l[name])
return (elems_moved, elems_deleted)
def check_rhs(self, name_diff_r):
elems_copied = []
elems_added = []
for name in name_diff_r:
digest_r = self.files_by_name_r[name]['digests'][self.best_digest]
# print("checking", name)
if digest_r in self.files_by_digest_l:
# print(name, digest_r)
# matched_elems = [elem['full_name'] for elem in self.files_by_digest_l[digest_r]]
# matched_names = ','.join(matched_elems)
# print("> COPIED {} == {}".format(name, matched_names))
self.files_by_name_r[name]['status'] = 'copied'
for elem in self.files_by_digest_l[digest_r]:
if elem['file_name'] == self.files_by_name_r[name]['file_name']:
self.logger.info("------: Found likely source: \"%s\"", self.files_by_name_r[name]['full_name'])
break
self.files_by_name_r[name]['match'] = self.files_by_digest_l[digest_r]
elems_copied.append(self.files_by_name_r[name])
else:
# print("> ADDED {}".format(name))
self.files_by_name_r[name]['status'] = 'added'
elems_added.append(self.files_by_name_r[name])
return (elems_copied, elems_added)
def compare(self, file_l, file_r):
""" Main entry: compare two dirtreedigest reports """
(self.basepath_l, self.elements_l) = dtutils.read_dtd_report(file_l, self.logger)
(self.basepath_r, self.elements_r) = dtutils.read_dtd_report(file_r, self.logger)
self.logger.info("Root L: %s", self.basepath_l)
self.logger.info("Root R: %s", self.basepath_r)
self.best_digest = self.choose_best_digest_for_compare(self.elements_l, self.elements_r)
if self.best_digest is None:
return None
self.logger.info("BestDG: %s", self.best_digest)
(self.files_by_name_l, self.files_by_digest_l) = self.slice_data(self.elements_l)
(self.files_by_name_r, self.files_by_digest_r) = self.slice_data(self.elements_r)
name_set_l = set(self.files_by_name_l)
name_set_r = set(self.files_by_name_r)
name_diff_l = name_set_l - name_set_r
name_diff_r = name_set_r - name_set_l
name_same = name_set_r & name_set_l
(elems_changed) = self.compare_by_full_names(name_same)
(elems_moved, elems_deleted) = self.check_lhs(name_diff_l)
(elems_copied, elems_added) = self.check_rhs(name_diff_r)
for elem in sorted(elems_changed, key=lambda k: k['full_name']):
self.logger.info(f"MOD : \"{elem['full_name']}\"")
for elem in sorted(elems_added, key=lambda k: k['full_name']):
self.logger.info(f"ADD : \"{elem['full_name']}\"")
for elem in sorted(elems_deleted, key=lambda k: k['full_name']):
self.logger.info(f"DEL : \"{elem['full_name']}\"")
for elem in sorted(elems_copied, key=lambda k: k['full_name']):
self.logger.info(f"COPY : \"{elem['full_name']}\" == \"{'---'}\"")
for elem in sorted(elems_moved, key=lambda k: k['full_name']):
self.logger.info(f"MOVE : \"{elem['full_name']}\" == \"{'---'}\"")
self.logger.info("ElemsL: %d", len(self.elements_l))
self.logger.info("ElemsR: %d", len(self.elements_r))
self.logger.info("FilesL: %d", len(self.files_by_name_l))
self.logger.info("FilesR: %d", len(self.files_by_name_r))
self.logger.info(" Both: %d", len(name_same))
self.logger.info("Only L: %d", len(name_diff_l))
self.logger.info("Only R: %d", len(name_diff_r))
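# Example usage sketch (report file names and config contents are hypothetical):
#   config = {'notimestamps': False}
#   comparator = Comparator(config)
#   comparator.compare('digests_before.dtd', 'digests_after.dtd')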
|
Snazz2001/BDA_py_demos | refs/heads/master | demos_ch11/demo11_1.py | 19 | """Bayesian data analysis
Chapter 11, demo 1
Gibbs sampling demonstration
"""
from __future__ import division
import threading
import numpy as np
import scipy.io # For importing a matlab file
from scipy import linalg, stats
import matplotlib as mpl
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=1.5,
markersize=8)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Parameters of a Normal distribution used as a toy target distribution
y1 = 0
y2 = 0
r = 0.8
S = np.array([[1.0, r], [r, 1.0]])
# Starting value of the chain
t1 = -2.5
t2 = 2.5
# Number of iterations.
M = 2*1000
# N.B. In this implementation one iteration updates only one parameter, so a
# complete iteration updating both parameters takes two basic iterations. This
# implementation was used to make it easy to plot the Gibbs sampler's
# zig-zagging. You can also implement this by saving only the final state of
# each complete iteration that updates all parameters.
# ====== Gibbs sampling here
# Allocate memory for the samples
tt = np.empty((M,2))
tt[0] = [t1, t2] # Save starting point
# For demonstration load pre-computed values
# Replace this with your algorithm!
# tt is a M x 2 array, with M samples of both theta_1 and theta_2
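# A minimal Gibbs implementation sketch for this bivariate normal (kept as a
# comment so the precomputed values loaded below are still used; the conditional
# mean and scale match the ones used in the plotting code further down):
#   for i in range(1, M):
#       t1, t2 = tt[i-1]
#       if i % 2 == 1:  # update theta_1 given theta_2
#           t1 = np.random.normal(y1 + r*(t2 - y2), np.sqrt(1 - r**2))
#       else:           # update theta_2 given theta_1
#           t2 = np.random.normal(y2 + r*(t1 - y1), np.sqrt(1 - r**2))
#       tt[i] = [t1, t2]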
res_path = '../utilities_and_data/demo11_2.mat'
res = scipy.io.loadmat(res_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat(res_path)
[('tt', (2001, 2), 'double')]
'''
tt = res['tt']
# ====== The rest is just for illustration
# Grid
Y1 = np.linspace(-4.5, 4.5, 150)
Y2 = np.linspace(-4.5, 4.5, 150)
# Plot 90% HPD.
# In the 2D case the contour of the 90% HPD is an ellipse whose semimajor
# axes can be computed from the eigenvalues of the covariance
# matrix, scaled by a value selected so that the ellipse matches the
# density at the edge of the 90% HPD. The angle of the ellipse could be
# computed from the eigenvectors, but since the marginals are the same
# we know that the angle is 45 degrees.
q = np.sort(np.sqrt(linalg.eigh(S, eigvals_only=True)) * 2.147)
el = mpl.patches.Ellipse(
xy = (y1,y2),
width = 2 * q[1],
height = 2 * q[0],
angle = 45,
facecolor = 'none',
edgecolor = '#e41a1c'
)
el_legend = mpl.lines.Line2D([], [], color='#e41a1c', linewidth=1)
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, aspect='equal')
ax.add_artist(el)
samp_legend, = ax.plot(
tt[0,0], tt[0,1], 'o', markerfacecolor='none', markeredgecolor='#377eb8')
ax.set_xlim([-4.5, 4.5])
ax.set_ylim([-4.5, 4.5])
ax.set_xlabel(r'$\theta_1$', fontsize=18)
ax.set_ylabel(r'$\theta_2$', fontsize=18)
htext = ax.set_title('Gibbs sampling\npress any key to continue...',
fontsize=18)
ax.legend((el_legend, samp_legend), ('90% HPD', 'Starting point'), numpoints=1,
loc='lower right')
pdfline_legend = mpl.lines.Line2D([], [], color='#377eb8')
chain_legend = mpl.lines.Line2D(
[], [], color='#377eb8', marker='o',
markerfacecolor='none', markeredgecolor='#377eb8'
)
burnchain_legend = mpl.lines.Line2D(
[], [], color='m', marker='o',
markerfacecolor='none', markeredgecolor='m'
)
# function for interactively updating the figure
def update_figure(event):
if icontainer.stage == 0 and icontainer.i < 7 and icontainer.drawdist:
i = icontainer.i
icontainer.drawdist = False
# Remove previous lines
for l in icontainer.remove_lines:
ax.lines.remove(l)
icontainer.remove_lines = []
if i % 2 == 0:
line = ax.axhline(y=tt[i,1], linestyle='--', color='k')
icontainer.remove_lines.append(line)
line, = ax.plot(
Y1,
tt[i,1] + stats.norm.pdf(
Y1,
loc = y1 + r*(tt[i,1] - y2),
scale = np.sqrt((1 - r**2))
),
color = '#377eb8'
)
icontainer.remove_lines.append(line)
if i == 0:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Starting point',
r'Conditional density given $\theta_2$'
),
numpoints=1,
loc='lower right'
)
else:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_2$'
),
loc='lower right'
)
else:
line = ax.axvline(x=tt[i,0], linestyle='--', color='k')
icontainer.remove_lines.append(line)
line, = ax.plot(
tt[i,0] + stats.norm.pdf(
Y2,
loc = y2 + r*(tt[i,0] - y1),
scale = np.sqrt((1 - r**2))
),
Y2,
color = '#377eb8'
)
icontainer.remove_lines.append(line)
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_1$'
),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 0 and icontainer.i < 7 and not icontainer.drawdist:
icontainer.i += 1
i = icontainer.i
if i == 6:
icontainer.stage += 1
icontainer.drawdist = True
sampi, = ax.plot(tt[i,0], tt[i,1], 'o', markerfacecolor='none',
markeredgecolor='#377eb8')
icontainer.samps.append(sampi)
if i == 1:
ax.legend(
(el_legend, samp_legend, pdfline_legend),
( '90% HPD',
'Samples from the chain',
r'Conditional density given $\theta_2$'
),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 1:
icontainer.stage += 1
for l in icontainer.remove_lines:
ax.lines.remove(l)
icontainer.remove_lines = []
ax.legend(
(el_legend, samp_legend),
('90% HPD', 'Samples from the chain'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 2:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
line, = ax.plot(
tt[:icontainer.i+1,0], tt[:icontainer.i+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(
tt[:icontainer.i+1:2,0], tt[:icontainer.i+1:2,1],
'o', markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend((el_legend, chain_legend), ('90% HPD', 'Markov chain'),
loc='lower right')
fig.canvas.draw()
elif icontainer.stage == 3:
icontainer.stage += 1
# modify helper text
htext.set_text('Gibbs sampling\npress `q` to skip animation')
# start the timer
anim_thread.start()
elif icontainer.stage == 4 and event.key == 'q':
# stop the animation
stop_anim.set()
elif icontainer.stage == 5:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
# remove helper text
icontainer.itertext.remove()
line, = ax.plot(tt[:burnin,0], tt[:burnin,1], color='m')
icontainer.samps.append(line)
line, = ax.plot(tt[:burnin:2,0], tt[:burnin:2,1], 'o',
markerfacecolor='none', markeredgecolor='m')
icontainer.samps.append(line)
line, = ax.plot(
tt[burnin:nanim+1,0], tt[burnin:nanim+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(tt[burnin:nanim+1:2,0], tt[burnin:nanim+1:2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend(
(el_legend, chain_legend, burnchain_legend),
('90% HPD', 'Markov chain', 'burn-in'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 6:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
line, = ax.plot(tt[burnin:nanim+1:2,0], tt[burnin:nanim+1:2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
ax.legend(
(el_legend, samp_legend),
('90% HPD', 'samples from the chain after burn-in'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 7:
icontainer.stage += 1
for s in icontainer.samps:
ax.lines.remove(s)
icontainer.samps = []
points = ax.scatter(
tt[burnin::2,0], tt[burnin::2,1], 10, alpha=0.5, color='#377eb8')
icontainer.samps.append(points)
ax.legend(
(el_legend, points),
('90% HPD', '950 samples from the chain'),
loc='lower right'
)
fig.canvas.draw()
elif icontainer.stage == 8:
icontainer.stage += 1
fig.clear()
indexes = np.arange(burnin,M,2)
samps = tt[indexes]
ax1 = fig.add_subplot(3,1,1)
ax1.axhline(y=0, linewidth=1, color='gray')
line1, line2, = ax1.plot(indexes/2, samps, linewidth=1)
ax1.legend((line1, line2), (r'$\theta_1$', r'$\theta_2$'))
ax1.set_xlabel('iteration')
ax1.set_title('trends')
ax1.set_xlim([burnin/2, 1000])
ax2 = fig.add_subplot(3,1,2)
ax2.axhline(y=0, linewidth=1, color='gray')
ax2.plot(
indexes/2,
np.cumsum(samps, axis=0)/np.arange(1,len(samps)+1)[:,None],
linewidth=1.5
)
ax2.set_xlabel('iteration')
ax2.set_title('cumulative average')
ax2.set_xlim([burnin/2, 1000])
ax3 = fig.add_subplot(3,1,3)
maxlag = 20
sampsc = samps - np.mean(samps, axis=0)
acorlags = np.arange(maxlag+1)
ax3.axhline(y=0, linewidth=1, color='gray')
for i in [0,1]:
t = np.correlate(sampsc[:,i], sampsc[:,i], 'full')
t = t[-len(sampsc):-len(sampsc)+maxlag+1] / t[-len(sampsc)]
ax3.plot(acorlags, t)
ax3.set_xlabel('lag')
ax3.set_title('estimate of the autocorrelation function')
fig.suptitle('Gibbs sampling - press any key to continue...',
fontsize=18)
fig.subplots_adjust(hspace=0.6)
fig.canvas.draw()
elif icontainer.stage == 9:
icontainer.stage += 1
fig.clear()
indexes = np.arange(burnin,M,2)
samps = tt[indexes]
nsamps = np.arange(1,len(samps)+1)
ax1 = fig.add_subplot(1,1,1)
ax1.axhline(y=0, linewidth=1, color='gray')
line1, line2, = ax1.plot(
indexes/2,
np.cumsum(samps, axis=0)/nsamps[:,None],
linewidth=1.5
)
er1, = ax1.plot(
indexes/2, 1.96/np.sqrt(nsamps/4), 'k--', linewidth=1)
ax1.plot(indexes/2, -1.96/np.sqrt(nsamps/4), 'k--', linewidth=1)
er2, = ax1.plot(
indexes/2, 1.96/np.sqrt(nsamps), 'k:', linewidth=1)
ax1.plot(indexes/2, -1.96/np.sqrt(nsamps), 'k:', linewidth=1)
ax1.set_xlabel('iteration')
ax1.set_title('Gibbs sampling\ncumulative average')
ax1.legend(
(line1, line2, er1, er2),
(r'$\theta_1$', r'$\theta_2$',
'95% interval for MCMC error',
'95% interval for independent MC'
)
)
ax1.set_xlim([burnin/2, 1000])
ax1.set_ylim([-2, 2])
fig.canvas.draw()
# function for performing the figure animation in thread
def animation():
icontainer.itertext = ax.text(-4, 4, '', fontsize=18)
delay0 = 0.4
delayk = 0.85
while icontainer.i < nanim:
icontainer.i += 1
i = icontainer.i
icontainer.itertext.set_text('iter {}'.format(i//2))
# show next sample
line, = ax.plot(tt[i-1:i+1,0], tt[i-1:i+1,1], color='#377eb8')
icontainer.samps.append(line)
if i % 2 == 0:
line, = ax.plot(
tt[i,0], tt[i,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
# update figure
fig.canvas.draw()
if i < nanim and (i < 16 or i % 2 == 0):
# wait animation delay time or until animation is cancelled
stop_anim.wait(delay0)
delay0 *= delayk
if stop_anim.isSet():
# animation cancelled
break
# skip the rest if the figure does not exist anymore
if not plt.fignum_exists(fig.number):
return
# advance stage
icontainer.stage += 1
# modify helper text
htext.set_text('Gibbs sampling\npress any key to continue...')
# plot the rest of the samples
if i < nanim:
icontainer.itertext.set_text('iter {}'.format(nanim//2))
line, = ax.plot(tt[i:nanim+1,0], tt[i:nanim+1,1], color='#377eb8')
icontainer.samps.append(line)
line, = ax.plot(tt[nanim:i-1:-2,0], tt[nanim:i-1:-2,1], 'o',
markerfacecolor='none', markeredgecolor='#377eb8')
icontainer.samps.append(line)
icontainer.i = nanim
fig.canvas.draw()
# animation related variables
stop_anim = threading.Event()
anim_thread = threading.Thread(target=animation)
nanim = 200
burnin = 50
# store the information of the current stage of the figure
class icontainer(object):
stage = 0
i = 0
drawdist = True
remove_lines = []
samps = [samp_legend]
itertext = None
# set figure to react to keypress events
fig.canvas.mpl_connect('key_press_event', update_figure)
# start blocking figure
plt.show()
|
hgl888/chromium-crosswalk | refs/heads/master | tools/telemetry/telemetry/internal/backends/chrome_inspector/websocket.py | 25 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import socket
from telemetry.core import util
util.AddDirToPythonPath(
util.GetTelemetryDir(), 'third_party', 'websocket-client')
# pylint: disable=import-error, unused-import
from websocket import create_connection as _create_connection
from websocket import WebSocketException
from websocket import WebSocketTimeoutException
def create_connection(*args, **kwargs):
sockopt = kwargs.get('sockopt', [])
# By default, we set SO_REUSEADDR on all websockets used by Telemetry.
# This prevents spurious address in use errors on Windows.
#
# TODO(tonyg): We may want to set SO_NODELAY here as well.
sockopt.append((socket.SOL_SOCKET, socket.SO_REUSEADDR, 1))
kwargs['sockopt'] = sockopt
return _create_connection(*args, **kwargs)
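# Example usage sketch (the DevTools URL and message below are hypothetical):
#   ws = create_connection('ws://127.0.0.1:9222/devtools/page/1')
#   ws.send('{"id": 1, "method": "Page.enable"}')
#   reply = ws.recv()
#   ws.close()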
|
zzcclp/spark | refs/heads/master | sql/hive/src/test/resources/data/scripts/doubleescapedtab.py | 131 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
for line in sys.stdin:
print("1\\\\\\t2")
print("1\\\\\\\\t2")
|
drodri/common | refs/heads/develop | settings/cppsettings.py | 5 | from biicode.common.settings.smart_serial import smart_serialize, smart_deserialize
from biicode.common.exception import ConfigurationFileError
import os
from biicode.common.settings.loader import yaml_dumps
class CPPSettings(object):
""" Store specific C++ settings
"""
def __init__(self):
self.generator = None
self.cross_build = None
self.toolchain = None
def __nonzero__(self):
return True if self.generator or self.cross_build or self.toolchain else False
def serialize(self):
serial = smart_serialize(self)
return serial
@classmethod
def deserialize(cls, data):
try:
d = smart_deserialize(cls, data)
except ValueError as error:
raise ConfigurationFileError("Error parsing settings.bii %s %s" % (os.linesep, error))
return d
def __eq__(self, other):
if self is other:
return True
return isinstance(other, self.__class__) \
and self.generator == other.generator \
and self.cross_build == other.cross_build \
and self.toolchain == other.toolchain
def __ne__(self, other):
return not self.__eq__(other)
smart_serial = {'generator': ('generator', None, None),
'cross_build': ('cross_build', None, None),
'toolchain': ('toolchain', None, None)}
def __repr__(self):
return yaml_dumps(self)
serialize_dict = serialize
deserialize_dict = deserialize
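# Example round-trip sketch (the field value is hypothetical; relies on the
# smart_serial helpers imported above):
#   settings = CPPSettings()
#   settings.generator = 'Unix Makefiles'
#   data = settings.serialize()
#   restored = CPPSettings.deserialize(data)
#   # restored should compare equal to settings via __eq__ above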
|
leonardowolf/bookfree | refs/heads/master | flask/lib/python2.7/ntpath.py | 4 | /usr/lib/python2.7/ntpath.py |
bcornwellmott/frappe | refs/heads/develop | frappe/custom/doctype/customize_form/customize_form.py | 5 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter,
thus providing a better UI from the user's perspective
"""
import frappe
import frappe.translate
from frappe import _
from frappe.utils import cint
from frappe.model.document import Document
from frappe.model import no_value_fields
from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
doctype_properties = {
'search_fields': 'Data',
'title_field': 'Data',
'image_field': 'Data',
'sort_field': 'Data',
'sort_order': 'Data',
'default_print_format': 'Data',
'read_only_onload': 'Check',
'allow_copy': 'Check',
'istable': 'Check',
'quick_entry': 'Check',
'editable_grid': 'Check',
'max_attachments': 'Int',
'image_view': 'Check',
'track_changes': 'Check',
}
docfield_properties = {
'idx': 'Int',
'label': 'Data',
'fieldtype': 'Select',
'options': 'Text',
'permlevel': 'Int',
'width': 'Data',
'print_width': 'Data',
'reqd': 'Check',
'unique': 'Check',
'ignore_user_permissions': 'Check',
'in_list_view': 'Check',
'in_standard_filter': 'Check',
'in_global_search': 'Check',
'bold': 'Check',
'hidden': 'Check',
'collapsible': 'Check',
'collapsible_depends_on': 'Data',
'print_hide': 'Check',
'print_hide_if_no_value': 'Check',
'report_hide': 'Check',
'allow_on_submit': 'Check',
'depends_on': 'Data',
'description': 'Text',
'default': 'Text',
'precision': 'Select',
'read_only': 'Check',
'length': 'Int',
'columns': 'Int',
'remember_last_selected_value': 'Check',
'allow_bulk_edit': 'Check',
}
allowed_fieldtype_change = (('Currency', 'Float', 'Percent'), ('Small Text', 'Data'),
('Text', 'Data'), ('Text', 'Text Editor', 'Code', 'Signature'), ('Data', 'Select'),
('Text', 'Small Text'))
allowed_fieldtype_for_options_change = ('Read Only', 'HTML', 'Select',)
class CustomizeForm(Document):
def on_update(self):
frappe.db.sql("delete from tabSingles where doctype='Customize Form'")
frappe.db.sql("delete from `tabCustomize Form Field`")
def fetch_to_customize(self):
self.clear_existing_doc()
if not self.doc_type:
return
meta = frappe.get_meta(self.doc_type)
# doctype properties
for property in doctype_properties:
self.set(property, meta.get(property))
for d in meta.get("fields"):
new_d = {"fieldname": d.fieldname, "is_custom_field": d.get("is_custom_field"), "name": d.name}
for property in docfield_properties:
new_d[property] = d.get(property)
self.append("fields", new_d)
# load custom translation
translation = self.get_name_translation()
self.label = translation.target_name if translation else ''
# NOTE doc is sent to clientside by run_method
def get_name_translation(self):
'''Get translation object if exists of current doctype name in the default language'''
return frappe.get_value('Translation',
{'source_name': self.doc_type, 'language': frappe.local.lang or 'en'},
['name', 'target_name'], as_dict=True)
def set_name_translation(self):
'''Create, update custom translation for this doctype'''
current = self.get_name_translation()
if current:
			if self.label and current.target_name != self.label:
frappe.db.set_value('Translation', current.name, 'target_name', self.label)
frappe.translate.clear_cache()
else:
# clear translation
frappe.delete_doc('Translation', current.name)
else:
if self.label:
frappe.get_doc(dict(doctype='Translation',
source_name=self.doc_type,
target_name=self.label,
language_code=frappe.local.lang or 'en')).insert()
def clear_existing_doc(self):
doc_type = self.doc_type
for fieldname in self.meta.get_valid_columns():
self.set(fieldname, None)
for df in self.meta.get_table_fields():
self.set(df.fieldname, [])
self.doc_type = doc_type
self.name = "Customize Form"
def save_customization(self):
if not self.doc_type:
return
self.flags.update_db = False
self.set_property_setters()
self.update_custom_fields()
self.set_name_translation()
validate_fields_for_doctype(self.doc_type)
if self.flags.update_db:
from frappe.model.db_schema import updatedb
updatedb(self.doc_type)
if not hasattr(self, 'hide_success') or not self.hide_success:
frappe.msgprint(_("{0} updated").format(_(self.doc_type)))
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
def set_property_setters(self):
meta = frappe.get_meta(self.doc_type)
# doctype property setters
for property in doctype_properties:
if self.get(property) != meta.get(property):
self.make_property_setter(property=property, value=self.get(property),
property_type=doctype_properties[property])
for df in self.get("fields"):
if df.get("__islocal"):
continue
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not meta_df or meta_df[0].get("is_custom_field"):
continue
for property in docfield_properties:
if property != "idx" and df.get(property) != meta_df[0].get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
elif property == "allow_on_submit" and df.get(property):
frappe.msgprint(_("Row {0}: Not allowed to enable Allow on Submit for standard fields")\
.format(df.idx))
continue
elif property == "in_list_view" and df.get(property) \
and df.fieldtype!="Attach Image" and df.fieldtype in no_value_fields:
frappe.msgprint(_("'In List View' not allowed for type {0} in row {1}")
.format(df.fieldtype, df.idx))
continue
elif property == "precision" and cint(df.get("precision")) > 6 \
and cint(df.get("precision")) > cint(meta_df[0].get("precision")):
self.flags.update_db = True
elif property == "unique":
self.flags.update_db = True
elif (property == "read_only" and cint(df.get("read_only"))==0
and frappe.db.get_value("DocField", {"parent": self.doc_type, "fieldname": df.fieldname}, "read_only")==1):
# if docfield has read_only checked and user is trying to make it editable, don't allow it
frappe.msgprint(_("You cannot unset 'Read Only' for field {0}").format(df.label))
continue
elif property == "options" and df.get("fieldtype") not in allowed_fieldtype_for_options_change:
frappe.msgprint(_("You can't set 'Options' for field {0}").format(df.label))
continue
self.make_property_setter(property=property, value=df.get(property),
property_type=docfield_properties[property], fieldname=df.fieldname)
def update_custom_fields(self):
for i, df in enumerate(self.get("fields")):
if df.get("is_custom_field"):
if not frappe.db.exists('Custom Field', {'dt': self.doc_type, 'fieldname': df.fieldname}):
self.add_custom_field(df, i)
self.flags.update_db = True
else:
self.update_in_custom_field(df, i)
self.delete_custom_fields()
def add_custom_field(self, df, i):
d = frappe.new_doc("Custom Field")
d.dt = self.doc_type
for property in docfield_properties:
d.set(property, df.get(property))
if i!=0:
d.insert_after = self.fields[i-1].fieldname
d.idx = i
d.insert()
df.fieldname = d.fieldname
def update_in_custom_field(self, df, i):
meta = frappe.get_meta(self.doc_type)
meta_df = meta.get("fields", {"fieldname": df.fieldname})
if not (meta_df and meta_df[0].get("is_custom_field")):
# not a custom field
return
custom_field = frappe.get_doc("Custom Field", meta_df[0].name)
changed = False
for property in docfield_properties:
if df.get(property) != custom_field.get(property):
if property == "fieldtype":
self.validate_fieldtype_change(df, meta_df[0].get(property), df.get(property))
custom_field.set(property, df.get(property))
changed = True
# check and update `insert_after` property
if i!=0:
insert_after = self.fields[i-1].fieldname
if custom_field.insert_after != insert_after:
custom_field.insert_after = insert_after
custom_field.idx = i
changed = True
if changed:
custom_field.db_update()
self.flags.update_db = True
#custom_field.save()
def delete_custom_fields(self):
meta = frappe.get_meta(self.doc_type)
fields_to_remove = (set([df.fieldname for df in meta.get("fields")])
- set(df.fieldname for df in self.get("fields")))
for fieldname in fields_to_remove:
df = meta.get("fields", {"fieldname": fieldname})[0]
if df.get("is_custom_field"):
frappe.delete_doc("Custom Field", df.name)
def make_property_setter(self, property, value, property_type, fieldname=None):
self.delete_existing_property_setter(property, fieldname)
property_value = self.get_existing_property_value(property, fieldname)
if property_value==value:
return
# create a new property setter
		# ignore validation because it will be done at the end
frappe.make_property_setter({
"doctype": self.doc_type,
"doctype_or_field": "DocField" if fieldname else "DocType",
"fieldname": fieldname,
"property": property,
"value": value,
"property_type": property_type
}, ignore_validate=True)
def delete_existing_property_setter(self, property, fieldname=None):
# first delete existing property setter
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.doc_type,
"property": property, "field_name['']": fieldname or ''})
if existing_property_setter:
frappe.db.sql("delete from `tabProperty Setter` where name=%s", existing_property_setter)
def get_existing_property_value(self, property_name, fieldname=None):
# check if there is any need to make property setter!
if fieldname:
property_value = frappe.db.get_value("DocField", {"parent": self.doc_type,
"fieldname": fieldname}, property_name)
else:
try:
property_value = frappe.db.get_value("DocType", self.doc_type, property_name)
except Exception as e:
if e.args[0]==1054:
property_value = None
else:
raise
return property_value
def validate_fieldtype_change(self, df, old_value, new_value):
allowed = False
for allowed_changes in allowed_fieldtype_change:
if (old_value in allowed_changes and new_value in allowed_changes):
allowed = True
break
if not allowed:
frappe.throw(_("Fieldtype cannot be changed from {0} to {1} in row {2}").format(old_value, new_value, df.idx))
def reset_to_defaults(self):
if not self.doc_type:
return
frappe.db.sql("""delete from `tabProperty Setter` where doc_type=%s
and ifnull(field_name, '')!='naming_series'""", self.doc_type)
frappe.clear_cache(doctype=self.doc_type)
self.fetch_to_customize()
|
SimtterCom/gyp | refs/heads/master | test/relative/gyptest-default.py | 336 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simplest-possible build of a "Hello, world!" program
using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default', formats=['msvs'])
# Run from down in foo.
test.run_gyp('a.gyp', chdir='foo/a')
sln = test.workpath('foo/a/a.sln')
sln_data = open(sln, 'rb').read()
vcproj = sln_data.count('b.vcproj')
vcxproj = sln_data.count('b.vcxproj')
if (vcproj, vcxproj) not in [(1, 0), (0, 1)]:
test.fail_test()
test.pass_test()
|
Ju2ender/CSharp-Exercise | refs/heads/master | shadowsocks/shadowsocks/daemon.py | 386 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
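# Illustrative shape of the config dict consumed by daemon_exec() below.
# The keys are taken from this file; the values are made-up examples:
#   config = {'daemon': 'start',   # or 'stop' / 'restart'
#             'pid-file': '/var/run/shadowsocks.pid',
#             'log-file': '/var/log/shadowsocks.log'}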
def daemon_exec(config):
if 'daemon' in config:
if os.name != 'posix':
raise Exception('daemon mode is only supported on Unix')
command = config['daemon']
if not command:
command = 'start'
pid_file = config['pid-file']
log_file = config['log-file']
if command == 'start':
daemon_start(pid_file, log_file)
elif command == 'stop':
daemon_stop(pid_file)
# always exit after daemon_stop
sys.exit(0)
elif command == 'restart':
daemon_stop(pid_file)
daemon_start(pid_file, log_file)
else:
raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
import fcntl
import stat
try:
fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
stat.S_IRUSR | stat.S_IWUSR)
except OSError as e:
shell.print_exception(e)
return -1
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert flags != -1
flags |= fcntl.FD_CLOEXEC
r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
assert r != -1
# There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
# via fcntl.fcntl. So use lockf instead
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
except IOError:
r = os.read(fd, 32)
if r:
logging.error('already started at pid %s' % common.to_str(r))
else:
logging.error('already started')
os.close(fd)
return -1
os.ftruncate(fd, 0)
os.write(fd, common.to_bytes(str(pid)))
return 0
def freopen(f, mode, stream):
oldf = open(f, mode)
oldfd = oldf.fileno()
newfd = stream.fileno()
os.close(newfd)
os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
def handle_exit(signum, _):
if signum == signal.SIGTERM:
sys.exit(0)
sys.exit(1)
signal.signal(signal.SIGINT, handle_exit)
signal.signal(signal.SIGTERM, handle_exit)
# fork only once because we are sure parent will exit
pid = os.fork()
assert pid != -1
if pid > 0:
# parent waits for its child
time.sleep(5)
sys.exit(0)
# child signals its parent to exit
ppid = os.getppid()
pid = os.getpid()
if write_pid_file(pid_file, pid) != 0:
os.kill(ppid, signal.SIGINT)
sys.exit(1)
os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)  # ignore SIGHUP so the daemon is not killed on terminal hangup
print('started')
os.kill(ppid, signal.SIGTERM)
sys.stdin.close()
try:
freopen(log_file, 'a', sys.stdout)
freopen(log_file, 'a', sys.stderr)
except IOError as e:
shell.print_exception(e)
sys.exit(1)
def daemon_stop(pid_file):
import errno
try:
with open(pid_file) as f:
buf = f.read()
pid = common.to_str(buf)
if not buf:
logging.error('not running')
except IOError as e:
shell.print_exception(e)
if e.errno == errno.ENOENT:
# always exit 0 if we are sure daemon is not running
logging.error('not running')
return
sys.exit(1)
pid = int(pid)
if pid > 0:
try:
os.kill(pid, signal.SIGTERM)
except OSError as e:
if e.errno == errno.ESRCH:
logging.error('not running')
# always exit 0 if we are sure daemon is not running
return
shell.print_exception(e)
sys.exit(1)
else:
logging.error('pid is not positive: %d', pid)
# sleep for maximum 10s
for i in range(0, 200):
try:
# query for the pid
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
time.sleep(0.05)
else:
logging.error('timed out when stopping pid %d', pid)
sys.exit(1)
print('stopped')
os.unlink(pid_file)
def set_user(username):
if username is None:
return
import pwd
import grp
try:
pwrec = pwd.getpwnam(username)
except KeyError:
logging.error('user not found: %s' % username)
raise
user = pwrec[0]
uid = pwrec[2]
gid = pwrec[3]
cur_uid = os.getuid()
if uid == cur_uid:
return
if cur_uid != 0:
logging.error('can not set user as nonroot user')
# will raise later
# inspired by supervisor
if hasattr(os, 'setgroups'):
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
groups.insert(0, gid)
os.setgroups(groups)
os.setgid(gid)
os.setuid(uid)
|
ck1125/sikuli | refs/heads/master | sikuli-ide/resources/scripts/clean-dot-sikuli.py | 3 | import os
import re
import glob
import sys
def grep(string,list):
expr = re.compile(string)
return filter(expr.search,list)
if locals().has_key('bundle_path'):
path = bundle_path
#path = sys.argv[1]
f_py = glob.glob(path + "/*.py")
pngFilter = re.compile("\"([^\"]+\.png)\"", re.I)
goodFiles = []
for py in f_py:
src = open(py, "r")
for line in src:
m = pngFilter.findall(line)
if m:
goodFiles += m
src.close()
for png in glob.glob(path + "/*.png"):
if not os.path.basename(png) in goodFiles:
os.remove(png)
|
bobobox/ansible | refs/heads/devel | lib/ansible/modules/packaging/os/apt_key.py | 18 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: apt_key
author: "Jayson Vantuyl & others (@jvantuyl)"
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
- "If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead."
data:
required: false
default: none
description:
- keyfile contents to add to the keyring
file:
required: false
default: none
description:
- path to a keyfile on the remote server to add to the keyring
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key:
keyserver: keyserver.ubuntu.com
id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key:
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: present
# Add an Apt signing key, will not download if present
- apt_key:
id: 473041FA
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key:
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: absent
# Remove an Apt-specific signing key; a leading 0x is valid
- apt_key:
id: 0x473041FA
state: absent
# Add a key from a file on the Ansible server. Use armored file since utf-8 string is expected. Must be of "PGP PUBLIC KEY BLOCK" type.
- apt_key:
data: "{{ lookup('file', 'apt.asc') }}"
state: present
# Add an Apt signing key to a specific keyring file
- apt_key:
id: 473041FA
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
keyring: /etc/apt/trusted.gpg.d/debian.gpg
# Add Apt signing key on remote server to keyring
- apt_key:
id: 473041FA
file: /tmp/apt.gpg
state: present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
apt_key_bin = None
def find_needed_binaries(module):
global apt_key_bin
apt_key_bin = module.get_bin_path('apt-key', required=True)
### FIXME: Is there a reason that gpg and grep are checked? Is it just
# cruft or does the apt .deb package not require them (and if they're not
# installed, /usr/bin/apt-key fails?)
module.get_bin_path('gpg', required=True)
module.get_bin_path('grep', required=True)
def parse_key_id(key_id):
"""validate the key_id and break it into segments
:arg key_id: The key_id as supplied by the user. A valid key_id will be
8, 16, or more hexadecimal chars with an optional leading ``0x``.
:returns: The portion of key_id suitable for apt-key del, the portion
suitable for comparisons with --list-public-keys, and the portion that
can be used with --recv-key. If key_id is long enough, these will be
the last 8 characters of key_id, the last 16 characters, and all of
key_id. If key_id is not long enough, some of the values will be the
same.
* apt-key del <= 1.10 has a bug with key_id != 8 chars
* apt-key adv --list-public-keys prints 16 chars
* apt-key adv --recv-key can take more chars
"""
# Make sure the key_id is valid hexadecimal
int(key_id, 16)
key_id = key_id.upper()
if key_id.startswith('0X'):
key_id = key_id[2:]
key_id_len = len(key_id)
if (key_id_len != 8 and key_id_len != 16) and key_id_len <= 16:
raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')
short_key_id = key_id[-8:]
fingerprint = key_id
if key_id_len > 16:
fingerprint = key_id[-16:]
return short_key_id, fingerprint, key_id
def all_keys(module, keyring, short_format):
if keyring:
cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
else:
cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
(rc, out, err) = module.run_command(cmd)
results = []
lines = to_native(out).split('\n')
for line in lines:
if line.startswith("pub") or line.startswith("sub"):
tokens = line.split()
code = tokens[1]
(len_type, real_code) = code.split("/")
results.append(real_code)
if short_format:
results = shorten_key_ids(results)
return results
def shorten_key_ids(key_id_list):
"""
Takes a list of key ids, and converts them to the 'short' format,
by reducing them to their last 8 characters.
"""
short = []
for key in key_id_list:
short.append(key[-8:])
return short
def download_key(module, url):
# FIXME: move get_url code to common, allow for in-memory D/L, support proxies
# and reuse here
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
rsp, info = fetch_url(module, url)
if info['status'] != 200:
module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
return rsp.read()
except Exception:
module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyring, keyserver, key_id):
if keyring:
cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
else:
cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
for retry in range(5):
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
(rc, out, err) = module.run_command(cmd, environ_update=lang_env)
if rc == 0:
break
else:
# Out of retries
if rc == 2 and 'not found on keyserver' in out:
msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg)
else:
msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
if keyring:
cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
else:
cmd = "%s add -" % apt_key_bin
(rc, out, err) = module.run_command(cmd, data=data, check_rc=True, binary_data=True)
else:
if keyring:
cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
else:
cmd = "%s add %s" % (apt_key_bin, keyfile)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def remove_key(module, key_id, keyring):
# FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
if keyring:
cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
else:
cmd = '%s del %s' % (apt_key_bin, key_id)
(rc, out, err) = module.run_command(cmd, check_rc=True)
return True
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(required=False, default=None),
url=dict(required=False),
data=dict(required=False),
file=dict(required=False, type='path'),
key=dict(required=False),
keyring=dict(required=False, type='path'),
validate_certs=dict(default='yes', type='bool'),
keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True,
        mutually_exclusive=(('file', 'keyserver', 'data', 'url'),),
)
key_id = module.params['id']
url = module.params['url']
data = module.params['data']
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
keyserver = module.params['keyserver']
changed = False
fingerprint = short_key_id = key_id
short_format = False
if key_id:
try:
short_key_id, fingerprint, key_id = parse_key_id(key_id)
except ValueError:
module.fail_json(msg='Invalid key_id', id=key_id)
if len(fingerprint) == 8:
short_format = True
find_needed_binaries(module)
keys = all_keys(module, keyring, short_format)
return_values = {}
if state == 'present':
if fingerprint and fingerprint in keys:
module.exit_json(changed=False)
elif fingerprint and fingerprint not in keys and module.check_mode:
### TODO: Someday we could go further -- write keys out to
# a temporary file and then extract the key id from there via gpg
# to decide if the key is installed or not.
module.exit_json(changed=True)
else:
if not filename and not data and not keyserver:
data = download_key(module, url)
if filename:
add_key(module, filename, keyring)
elif keyserver:
import_key(module, keyring, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed = False
keys2 = all_keys(module, keyring, short_format)
if len(keys) != len(keys2):
changed=True
if fingerprint and fingerprint not in keys2:
module.fail_json(msg="key does not seem to have been added", id=key_id)
module.exit_json(changed=changed)
elif state == 'absent':
if not key_id:
module.fail_json(msg="key is required")
if fingerprint in keys:
if module.check_mode:
module.exit_json(changed=True)
# we use the "short" id: key_id[-8:], short_format=True
# it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
if remove_key(module, short_key_id, keyring):
keys = all_keys(module, keyring, short_format)
if fingerprint in keys:
module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id)
changed = True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
module.fail_json(msg="error removing key_id", **return_values)
module.exit_json(changed=changed, **return_values)
if __name__ == '__main__':
main()
|
gauribhoite/personfinder | refs/heads/master | env/google_appengine/lib/django-1.4/django/conf/locale/es/formats.py | 100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
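# e.g. (illustrative) DATE_FORMAT above renders 31 Dec 2009 as "31 de diciembre de 2009"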
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
)
TIME_INPUT_FORMATS = (
# '14:30:59', '14:30'
'%H:%M:%S', '%H:%M'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
opepin/container-transform | refs/heads/develop | container_transform/client.py | 5 | import click
from .converter import Converter
from .schema import InputTransformationTypes, OutputTransformationTypes
from .version import __version__
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument(
'input_file',
default='/dev/stdin',
type=click.Path(exists=False, file_okay=True, dir_okay=False),
)
@click.option(
'--input-type',
'input_type',
type=click.Choice([v.value.lower() for v in list(InputTransformationTypes)]),
default=InputTransformationTypes.COMPOSE.value,
)
@click.option(
'--output-type',
'output_type',
type=click.Choice([v.value.lower() for v in list(OutputTransformationTypes)]),
default=OutputTransformationTypes.ECS.value,
)
@click.option('-v/--no-verbose', default=True, help='Expand/minify json output')
@click.option('-q', default=False, is_flag=True, help='Silence error messages')
@click.version_option(__version__)
def transform(input_file, input_type, output_type, v, q):
"""
container-transform is a small utility to transform various docker
container formats to one another.
Default input type is compose, default output type is ECS
Default is to read from STDIN if no INPUT_FILE is provided
"""
converter = Converter(input_file, input_type, output_type)
output = converter.convert(v)
click.echo(click.style(output, fg='green'))
if not q:
for message in converter.messages:
click.echo(click.style(message, fg='red', bold=True), err=True)
|
JohnDenker/brython | refs/heads/master | www/src/Lib/test/test_tempfile.py | 23 | # tempfile.py unit tests.
import tempfile
import errno
import io
import os
import signal
import sys
import re
import warnings
import contextlib
import unittest
from test import support
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform.startswith('openbsd'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class BaseTestCase(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
class TestExports(BaseTestCase):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
class TestRandomNameSequence(BaseTestCase):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
for s in r:
i += 1
if i == 20:
break
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
# bypass the normal exit handlers- leave those to
# the parent.
os._exit(0)
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except EnvironmentError:
pass
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
class TestCandidateTempdirList(BaseTestCase):
"""Test the internal function _candidate_tempdir_list."""
def test_nonempty_list(self):
# _candidate_tempdir_list returns a nonempty list of strings
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertIsInstance(c, str)
def test_wanted_dirs(self):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertIn(dirname, cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertIn(dirname, cand)
# Not practical to try to verify the presence of OS-specific
# paths in this list.
# We test _get_default_tempdir some more by testing gettempdir.
class TestGetDefaultTempdir(BaseTestCase):
"""Test _get_default_tempdir()."""
def test_no_files_left_behind(self):
# use a private empty directory
with tempfile.TemporaryDirectory() as our_temp_directory:
# force _get_default_tempdir() to consider our empty directory
def our_candidate_list():
return [our_temp_directory]
with support.swap_attr(tempfile, "_candidate_tempdir_list",
our_candidate_list):
# verify our directory is empty after _get_default_tempdir()
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
def raise_OSError(*args, **kwargs):
raise OSError()
with support.swap_attr(io, "open", raise_OSError):
# test again with failing io.open()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
open = io.open
def bad_writer(*args, **kwargs):
fp = open(*args, **kwargs)
fp.write = raise_OSError
return fp
with support.swap_attr(io, "open", bad_writer):
# test again with failing write()
with self.assertRaises(FileNotFoundError):
tempfile._get_default_tempdir()
self.assertEqual(os.listdir(our_temp_directory), [])
class TestGetCandidateNames(BaseTestCase):
"""Test the internal function _get_candidate_names."""
def test_retval(self):
# _get_candidate_names returns a _RandomNameSequence object
obj = tempfile._get_candidate_names()
self.assertIsInstance(obj, tempfile._RandomNameSequence)
def test_same_thing(self):
# _get_candidate_names always returns the same object
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
@contextlib.contextmanager
def _inside_empty_temp_dir():
dir = tempfile.mkdtemp()
try:
with support.swap_attr(tempfile, 'tempdir', dir):
yield
finally:
support.rmtree(dir)
def _mock_candidate_names(*names):
return support.swap_attr(tempfile,
'_get_candidate_names',
lambda: iter(names))
class TestMkstempInner(BaseTestCase):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
file = self.mkstemped(dir, pre, suf, bin)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if not has_spawnl:
return # ugh, can't use SkipTest.
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
# _mkstemp_inner can create files in text mode
if not has_textmode:
return # ugh, can't use SkipTest.
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
def default_mkstemp_inner(self):
return tempfile._mkstemp_inner(tempfile.gettempdir(),
tempfile.template,
'',
tempfile._bin_openflags)
def test_collision_with_existing_file(self):
# _mkstemp_inner tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
(fd1, name1) = self.default_mkstemp_inner()
os.close(fd1)
self.assertTrue(name1.endswith('aaa'))
(fd2, name2) = self.default_mkstemp_inner()
os.close(fd2)
self.assertTrue(name2.endswith('bbb'))
def test_collision_with_existing_directory(self):
# _mkstemp_inner tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('aaa'))
(fd, name) = self.default_mkstemp_inner()
os.close(fd)
self.assertTrue(name.endswith('bbb'))
class TestGetTempPrefix(BaseTestCase):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertIsInstance(p, str)
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
fd = os.open(p, os.O_RDWR | os.O_CREAT)
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
class TestGetTempDir(BaseTestCase):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
class TestMkstemp(BaseTestCase):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
class TestMkdtemp(BaseTestCase):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform in ('win32', 'os2emx'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
def test_collision_with_existing_file(self):
# mkdtemp tries another name when a file with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
file = tempfile.NamedTemporaryFile(delete=False)
file.close()
self.assertTrue(file.name.endswith('aaa'))
dir = tempfile.mkdtemp()
self.assertTrue(dir.endswith('bbb'))
def test_collision_with_existing_directory(self):
# mkdtemp tries another name when a directory with
# the chosen name already exists
with _inside_empty_temp_dir(), \
_mock_candidate_names('aaa', 'aaa', 'bbb'):
dir1 = tempfile.mkdtemp()
self.assertTrue(dir1.endswith('aaa'))
dir2 = tempfile.mkdtemp()
self.assertTrue(dir2.endswith('bbb'))
class TestMktemp(BaseTestCase):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
super().setUp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
super().tearDown()
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
file = self.mktemped(self.dir, pre, suf)
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class TestNamedTemporaryFile(BaseTestCase):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
class TestSpooledTemporaryFile(BaseTestCase):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple times within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
# A SpooledTemporaryFile that is written late in the file will extend
# when that occurs
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
f.close()
f.close()
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
f.close()
f.close()
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_properties(self):
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+b')
self.assertIsNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
f.write(b'x')
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'rb+')
self.assertIsNotNone(f.name)
with self.assertRaises(AttributeError):
f.newlines
with self.assertRaises(AttributeError):
f.encoding
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertEqual(f.newlines, os.linesep)
self.assertIsNotNone(f.encoding)
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNone(f.name)
self.assertIsNone(f.newlines)
self.assertIsNone(f.encoding)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
self.assertEqual(f.mode, 'w+')
self.assertIsNotNone(f.name)
self.assertIsNotNone(f.newlines)
self.assertEqual(f.encoding, 'utf-8')
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_truncate_with_size_parameter(self):
# A SpooledTemporaryFile can be truncated to zero size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.seek(0)
f.truncate()
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'')
# A SpooledTemporaryFile can be truncated to a specific size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(4)
self.assertFalse(f._rolled)
self.assertEqual(f._file.getvalue(), b'abcd')
# A SpooledTemporaryFile rolls over if truncated to large size
f = tempfile.SpooledTemporaryFile(max_size=10)
f.write(b'abcdefg\n')
f.truncate(20)
self.assertTrue(f._rolled)
if has_stat:
self.assertEqual(os.fstat(f.fileno()).st_size, 20)
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
class TestTemporaryFile(BaseTestCase):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
tempfile.TemporaryFile()
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
# cleanup
f.close()
os.rmdir(dir)
raise
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
f.close()
f.close()
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
# Helper for test_del_on_shutdown
class NulledModules:
def __init__(self, *modules):
self.refs = [mod.__dict__ for mod in modules]
self.contents = [ref.copy() for ref in self.refs]
def __enter__(self):
for d in self.refs:
for key in d:
d[key] = None
def __exit__(self, *exc_info):
for d, c in zip(self.refs, self.contents):
d.clear()
d.update(c)
class TestTemporaryDirectory(BaseTestCase):
"""Test TemporaryDirectory()."""
def do_create(self, dir=None, pre="", suf="", recurse=1):
if dir is None:
dir = tempfile.gettempdir()
tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
self.nameCheck(tmp.name, dir, pre, suf)
# Create a subdirectory and some files
if recurse:
self.do_create(tmp.name, pre, suf, recurse-1)
with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
f.write(b"Hello world!")
return tmp
def test_mkdtemp_failure(self):
# Check no additional exception if mkdtemp fails
# Previously would raise AttributeError instead
# (noted as part of Issue #10188)
with tempfile.TemporaryDirectory() as nonexistent:
pass
with self.assertRaises(FileNotFoundError) as cm:
tempfile.TemporaryDirectory(dir=nonexistent)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_explicit_cleanup(self):
# A TemporaryDirectory is deleted when cleaned up
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
self.assertTrue(os.path.exists(d.name),
"TemporaryDirectory %s does not exist" % d.name)
d.cleanup()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
finally:
os.rmdir(dir)
@support.skip_unless_symlink
def test_cleanup_with_symlink_to_a_directory(self):
# cleanup() should not follow symlinks to directories (issue #12464)
d1 = self.do_create()
d2 = self.do_create()
# Symlink d1/foo -> d2
os.symlink(d2.name, os.path.join(d1.name, "foo"))
# This call to cleanup() should not follow the "foo" symlink
d1.cleanup()
self.assertFalse(os.path.exists(d1.name),
"TemporaryDirectory %s exists after cleanup" % d1.name)
self.assertTrue(os.path.exists(d2.name),
"Directory pointed to by a symlink was deleted")
self.assertEqual(os.listdir(d2.name), ['test.txt'],
"Contents of the directory pointed to by a symlink "
"were deleted")
d2.cleanup()
@support.cpython_only
def test_del_on_collection(self):
# A TemporaryDirectory is deleted when garbage collected
dir = tempfile.mkdtemp()
try:
d = self.do_create(dir=dir)
name = d.name
del d # Rely on refcounting to invoke __del__
self.assertFalse(os.path.exists(name),
"TemporaryDirectory %s exists after __del__" % name)
finally:
os.rmdir(dir)
@unittest.expectedFailure # See issue #10188
def test_del_on_shutdown(self):
# A TemporaryDirectory may be cleaned up during shutdown
# Make sure it works with the relevant modules nulled out
with self.do_create() as dir:
d = self.do_create(dir=dir)
# Mimic the nulling out of modules that
# occurs during system shutdown
modules = [os, os.path]
if has_stat:
modules.append(stat)
# Currently broken, so suppress the warning
# that is otherwise emitted on stdout
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
# Currently broken, so stop spurious exception by
# indicating the object has already been closed
d._closed = True
# And this assert will fail, as expected by the
# unittest decorator...
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after cleanup" % d.name)
def test_warnings_on_cleanup(self):
# Two kinds of warning on shutdown
# Issue 10888: may write to stderr if modules are nulled out
# ResourceWarning will be triggered by __del__
with self.do_create() as dir:
if os.sep != '\\':
# Embed a backslash in order to make sure string escaping
# in the displayed error message is dealt with correctly
suffix = '\\check_backslash_handling'
else:
suffix = ''
d = self.do_create(dir=dir, suf=suffix)
            # Check for the Issue 10888 message
modules = [os, os.path]
if has_stat:
modules.append(stat)
with support.captured_stderr() as err:
with NulledModules(*modules):
d.cleanup()
message = err.getvalue().replace('\\\\', '\\')
self.assertIn("while cleaning up", message)
self.assertIn(d.name, message)
# Check for the resource warning
with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
warnings.filterwarnings("always", category=ResourceWarning)
d.__del__()
self.assertFalse(os.path.exists(d.name),
"TemporaryDirectory %s exists after __del__" % d.name)
def test_multiple_close(self):
# Can be cleaned-up many times without error
d = self.do_create()
d.cleanup()
d.cleanup()
d.cleanup()
def test_context_manager(self):
# Can be used as a context manager
d = self.do_create()
with d as name:
self.assertTrue(os.path.exists(name))
self.assertEqual(name, d.name)
self.assertFalse(os.path.exists(name))
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
Alwnikrotikz/cortex-vfx | refs/heads/master | test/IECore/BoolVectorData.py | 12 | ##########################################################################
#
# Copyright (c) 2007-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
from IECore import *
import random
import os
class BoolVectorDataTest( unittest.TestCase ) :
def test( self ) :
trueFalse = [ True, False ]
random.seed( 0 )
for i in range( 0, 100 ) :
s = random.randint( 0, 100 )
b = BoolVectorData( s )
self.assertEqual( s, len( b ) )
for j in range( 0, len( b ) ) :
self.assertEqual( b[j], False )
v = random.choice( trueFalse )
b[j] = v
self.assertEqual( b[j], v )
bb = b.copy()
self.assertEqual( b, bb )
ObjectWriter( b, "test/boolVector.cob" ).write()
bbb = ObjectReader( "test/boolVector.cob" ).read()
self.assertEqual( b, bbb )
def testStrAndRepr( self ) :
self.assertEqual( str( BoolVectorData( [True, False] ) ), "1 0" )
self.assertEqual( repr( BoolVectorData( [False, True] ) ), "IECore.BoolVectorData( [ 0, 1 ] )" )
def testHasBase( self ) :
self.failIf( BoolVectorData.hasBase() )
def tearDown( self ) :
if os.path.isfile( "test/boolVector.cob" ):
os.remove( "test/boolVector.cob" )
if __name__ == "__main__":
unittest.main()
|
CERT-BDF/Cortex-Analyzers | refs/heads/master | analyzers/URLhaus/URLhaus_analyzer.py | 1 | #!/usr/bin/env python3
from cortexutils.analyzer import Analyzer
from URLhaus_client import URLhausClient
class URLhausAnalyzer(Analyzer):
def __init__(self):
Analyzer.__init__(self)
def run(self):
data = self.get_data()
if not data:
self.error('No observable or file given.')
results = {}
if self.data_type == 'url':
results = URLhausClient.search_url(data)
elif self.data_type in ['domain', 'ip']:
results = URLhausClient.search_host(data)
elif self.data_type == 'hash':
if len(data) in [32, 64]:
results = URLhausClient.search_payload(data)
else:
self.error('Only sha256 and md5 supported by URLhaus.')
else:
self.error('Datatype not supported.')
results.update({
'data_type': self.data_type
})
self.report(results)
def summary(self, raw):
taxonomies = []
namespace = "URLhaus"
if raw['query_status'] == 'no_results':
taxonomies.append(self.build_taxonomy(
'info',
namespace,
'Search',
'No results'
))
else:
if self.data_type == 'url':
taxonomies.append(self.build_taxonomy(
'malicious',
namespace,
'Threat',
raw['threat']
))
elif self.data_type in ['domain', 'ip']:
threat_types = []
for url in raw['urls']:
if url['threat'] not in threat_types:
threat_types.append(url['threat'])
taxonomies.append(self.build_taxonomy(
'malicious',
namespace,
'Threat' if len(threat_types) == 1 else 'Threats',
','.join(threat_types)
))
elif self.data_type == 'hash':
taxonomies.append(self.build_taxonomy(
'malicious',
namespace,
'Signature',
raw['signature'] if raw['signature'] and raw['signature'] != 'null' else 'Unknown'
))
return {"taxonomies": taxonomies}
if __name__ == '__main__':
URLhausAnalyzer().run()
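# --- Illustrative invocation (editor's sketch, not part of the analyzer) ---
# Cortex normally hands analyzers a JSON job on standard input; a minimal,
# hand-written job for a URL observable might look like the command below.
# The observable value is invented and the field names ("dataType", "data")
# follow the usual cortexutils conventions.
#
#   echo '{"dataType": "url", "data": "http://example.com/suspicious"}' \
#       | python3 URLhaus_analyzer.py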
|
kikusu/chainer | refs/heads/master | chainer/dataset/dataset_mixin.py | 1 | class DatasetMixin(object):
"""Default implementation of dataset indexing.
DatasetMixin provides the :meth:`__getitem__` operator. The default
    implementation uses :meth:`get_example` to extract each example, and
combines the results into a list. This mixin makes it easy to implement a
new dataset that does not support efficient slicing.
Dataset implementation using DatasetMixin still has to provide the
:meth:`__len__` operator explicitly.
"""
def __getitem__(self, index):
"""Returns an example or a sequence of examples.
It implements the standard Python indexing. It uses the
:meth:`get_example` method by default, but it may be overridden by the
implementation to, for example, improve the slicing performance.
"""
if isinstance(index, slice):
current, stop, step = index.indices(len(self))
ret = []
while current < stop and step > 0 or current > stop and step < 0:
ret.append(self.get_example(current))
current += step
return ret
else:
return self.get_example(index)
def __len__(self):
"""Returns the number of data points."""
raise NotImplementedError
def get_example(self, i):
"""Returns the i-th example.
Implementations should override it. It should raise :class:`IndexError`
if the index is invalid.
Args:
i (int): The index of the example.
Returns:
The i-th example.
"""
raise NotImplementedError
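# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal DatasetMixin subclass: it only implements __len__ and get_example,
# and inherits integer indexing and slicing from __getitem__ above. The class
# name and the squared-integer data are invented for the example.
class _SquaresDataset(DatasetMixin):
    def __init__(self, n):
        self._n = n
    def __len__(self):
        return self._n
    def get_example(self, i):
        if not 0 <= i < self._n:
            raise IndexError('dataset index out of range')
        return i * i
# _SquaresDataset(5)[2] returns 4, and _SquaresDataset(5)[1:4] returns [1, 4, 9].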
|
oneconvergence/group-based-policy | refs/heads/oneconvergence_service_node_driver | gbpservice/neutron/db/migration/alembic_migrations/versions/3791adbf0045_sc_shared_attribute.py | 4 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# revision identifiers, used by Alembic.
revision = '3791adbf0045'
down_revision = '2f3834ea746b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'sc_nodes',
sa.Column('shared', sa.Boolean)
)
op.add_column(
'sc_specs',
sa.Column('shared', sa.Boolean)
)
def downgrade():
op.drop_column('sc_nodes', 'shared')
op.drop_column('sc_specs', 'shared')
|
MrNuggles/HeyBoet-Telegram-Bot | refs/heads/master | temboo/Library/Foursquare/Users/UpdatePhoto.py | 5 | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdatePhoto
# Updates the user's profile photo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdatePhoto(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdatePhoto Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdatePhoto, self).__init__(temboo_session, '/Library/Foursquare/Users/UpdatePhoto')
def new_input_set(self):
return UpdatePhotoInputSet()
def _make_result_set(self, result, path):
return UpdatePhotoResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdatePhotoChoreographyExecution(session, exec_id, path)
class UpdatePhotoInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdatePhoto
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_ContentType(self, value):
"""
Set the value of the ContentType input for this Choreo. ((required, string) The content type of the image. Valid types are: image/jpeg, image/gif, or image/png.)
"""
super(UpdatePhotoInputSet, self)._set_input('ContentType', value)
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API Oauth token string.)
"""
super(UpdatePhotoInputSet, self)._set_input('OauthToken', value)
def set_Photo(self, value):
"""
Set the value of the Photo input for this Choreo. ((conditional, string) The Base64-encoded contents of the image you want to upload. Total Image size (before encoding) must be under 100KB.)
"""
super(UpdatePhotoInputSet, self)._set_input('Photo', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(UpdatePhotoInputSet, self)._set_input('ResponseFormat', value)
class UpdatePhotoResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdatePhoto Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class UpdatePhotoChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdatePhotoResultSet(response, path)
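# --- Illustrative usage (editor's sketch; not verified against this repository) ---
# A typical choreo execution builds an input set, fills in the documented
# inputs, and runs it against a session. The credentials, token, and photo
# data below are placeholders, and TembooSession/execute_with_results are
# assumed from the standard Temboo Python SDK pattern.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = UpdatePhoto(session)
#   inputs = choreo.new_input_set()
#   inputs.set_OauthToken('FOURSQUARE_OAUTH_TOKEN')
#   inputs.set_ContentType('image/jpeg')
#   inputs.set_Photo('BASE64_ENCODED_IMAGE_UNDER_100KB')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())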
|
magic0704/neutron | refs/heads/master | neutron/tests/unit/agent/linux/test_bridge_lib.py | 17 | # Copyright 2015 Intel Corporation.
# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import utils
from neutron.tests import base
class BridgeLibTest(base.BaseTestCase):
"""A test suite to exercise the bridge libraries """
_NAMESPACE = 'test-namespace'
_BR_NAME = 'test-br'
_IF_NAME = 'test-if'
def setUp(self):
super(BridgeLibTest, self).setUp()
self.execute = mock.patch.object(
utils, "execute", spec=utils.execute).start()
def _verify_bridge_mock(self, cmd, namespace=None):
if namespace is not None:
cmd = ['ip', 'netns', 'exec', namespace] + cmd
self.execute.assert_called_once_with(cmd, run_as_root=True,
log_fail_as_error=True)
self.execute.reset_mock()
def _test_br(self, namespace=None):
br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace)
self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME], namespace)
br.addif(self._IF_NAME)
self._verify_bridge_mock(
['brctl', 'addif', self._BR_NAME, self._IF_NAME], namespace)
br.delif(self._IF_NAME)
self._verify_bridge_mock(
['brctl', 'delif', self._BR_NAME, self._IF_NAME], namespace)
br.delbr()
self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME], namespace)
def test_addbr_with_namespace(self):
self._test_br(self._NAMESPACE)
def test_addbr_without_namespace(self):
self._test_br()
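# --- Illustrative usage (editor's sketch, not part of the test module) ---
# The production calls exercised by _test_br() above look like this when used
# directly; the bridge and interface names are invented, and the underlying
# brctl commands require root privileges.
#
#   from neutron.agent.linux import bridge_lib
#
#   br = bridge_lib.BridgeDevice.addbr('demo-br')
#   br.addif('demo-if0')
#   br.delif('demo-if0')
#   br.delbr()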
|
ProfMobius/ThinLauncher | refs/heads/master | surfaces/StatusBar.py | 1 | import pygame
from Constants import *
class StatusBar(pygame.Surface):
def __init__(self, w, h):
super(StatusBar, self).__init__((w, h), pygame.SRCALPHA)
def redraw(self, screen, x, y):
self.fill((150, 150, 150, 150))
screen.blit(self, self.get_rect(x=x, y=y))
|
jorge2703/scikit-learn | refs/heads/master | sklearn/preprocessing/imputation.py | 208 | # Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import as_float_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Break ties: copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <imputation>`.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
# In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different in
                # recent versions of numpy, which we want to mimic
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
# is equal to the frequency of the most frequent valid element
# See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Copy just once
X = as_float_array(X, copy=self.copy, force_all_finite=False)
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', force_all_finite=False,
copy=False)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', force_all_finite=False,
copy=False)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = astype(valid_statistics[indexes], X.dtype,
copy=False)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
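# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Mean imputation over columns of a small dense array; the numbers are
# invented for the example, and fit_transform() comes from TransformerMixin.
if __name__ == '__main__':
    X_demo = np.array([[1., 2.], [np.nan, 3.], [7., 6.]])
    imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
    print(imputer.fit_transform(X_demo))
    # The NaN in the first column is replaced by the mean of the observed
    # values in that column, (1. + 7.) / 2 == 4.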
|
bittner/django-allauth | refs/heads/master | allauth/socialaccount/providers/robinhood/urls.py | 12 | from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import RobinhoodProvider
urlpatterns = default_urlpatterns(RobinhoodProvider)
|
hengyicai/OnlineAggregationUCAS | refs/heads/master | dev/audit-release/audit_release.py | 8 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Audits binary and maven artifacts for a Spark release.
# Requires GPG and Maven.
# usage:
# python audit_release.py
import os
import re
import shutil
import subprocess
import sys
import time
import urllib2
# Note: The following variables must be set before use!
RELEASE_URL = "http://people.apache.org/~andrewor14/spark-1.1.1-rc1/"
RELEASE_KEY = "XXXXXXXX" # Your 8-digit hex
RELEASE_REPOSITORY = "https://repository.apache.org/content/repositories/orgapachespark-1033"
RELEASE_VERSION = "1.1.1"
SCALA_VERSION = "2.10.4"
SCALA_BINARY_VERSION = "2.10"
# Do not set these
LOG_FILE_NAME = "spark_audit_%s" % time.strftime("%h_%m_%Y_%I_%M_%S")
LOG_FILE = open(LOG_FILE_NAME, 'w')
WORK_DIR = "/tmp/audit_%s" % int(time.time())
MAVEN_CMD = "mvn"
GPG_CMD = "gpg"
SBT_CMD = "sbt -Dsbt.log.noformat=true"
# Track failures to print them at the end
failures = []
# Log a message. Use sparingly because this flushes every write.
def log(msg):
LOG_FILE.write(msg + "\n")
LOG_FILE.flush()
def log_and_print(msg):
print msg
log(msg)
# Prompt the user to delete the scratch directory used
def clean_work_files():
response = raw_input("OK to delete scratch directory '%s'? (y/N) " % WORK_DIR)
if response == "y":
shutil.rmtree(WORK_DIR)
# Run the given command and log its output to the log file
def run_cmd(cmd, exit_on_failure=True):
log("Running command: %s" % cmd)
ret = subprocess.call(cmd, shell=True, stdout=LOG_FILE, stderr=LOG_FILE)
if ret != 0 and exit_on_failure:
log_and_print("Command failed: %s" % cmd)
clean_work_files()
sys.exit(-1)
return ret
def run_cmd_with_output(cmd):
log_and_print("Running command: %s" % cmd)
return subprocess.check_output(cmd, shell=True, stderr=LOG_FILE)
# Test if the given condition is successful
# If so, print the pass message; otherwise print the failure message
def test(cond, msg):
return passed(msg) if cond else failed(msg)
def passed(msg):
log_and_print("[PASSED] %s" % msg)
def failed(msg):
failures.append(msg)
log_and_print("[**FAILED**] %s" % msg)
def get_url(url):
return urllib2.urlopen(url).read()
# If the path exists, prompt the user to delete it
# If the resource is not deleted, abort
def ensure_path_not_present(path):
full_path = os.path.expanduser(path)
if os.path.exists(full_path):
print "Found %s locally." % full_path
response = raw_input("This can interfere with testing published artifacts. OK to delete? (y/N) ")
if response == "y":
shutil.rmtree(full_path)
else:
print "Abort."
sys.exit(-1)
log_and_print("|-------- Starting Spark audit tests for release %s --------|" % RELEASE_VERSION)
log_and_print("Log output can be found in %s" % LOG_FILE_NAME)
original_dir = os.getcwd()
# For each of these modules, we'll test an 'empty' application in sbt and
# maven that links against them. This will catch issues with messed up
# dependencies within those projects.
modules = [
"spark-core", "spark-bagel", "spark-mllib", "spark-streaming", "spark-repl",
"spark-graphx", "spark-streaming-flume", "spark-streaming-kafka",
"spark-streaming-mqtt", "spark-streaming-twitter", "spark-streaming-zeromq",
"spark-catalyst", "spark-sql", "spark-hive", "spark-streaming-kinesis-asl"
]
modules = map(lambda m: "%s_%s" % (m, SCALA_BINARY_VERSION), modules)
# Check for directories that might interfere with tests
local_ivy_spark = "~/.ivy2/local/org.apache.spark"
cache_ivy_spark = "~/.ivy2/cache/org.apache.spark"
local_maven_kafka = "~/.m2/repository/org/apache/kafka"
local_maven_spark = "~/.m2/repository/org/apache/spark"
map(ensure_path_not_present, [local_ivy_spark, cache_ivy_spark, local_maven_kafka, local_maven_spark])
# SBT build tests
log_and_print("==== Building SBT modules ====")
os.chdir("blank_sbt_build")
os.environ["SPARK_VERSION"] = RELEASE_VERSION
os.environ["SCALA_VERSION"] = SCALA_VERSION
os.environ["SPARK_RELEASE_REPOSITORY"] = RELEASE_REPOSITORY
os.environ["SPARK_AUDIT_MASTER"] = "local"
for module in modules:
log("==== Building module %s in SBT ====" % module)
os.environ["SPARK_MODULE"] = module
ret = run_cmd("%s clean update" % SBT_CMD, exit_on_failure=False)
test(ret == 0, "SBT build against '%s' module" % module)
os.chdir(original_dir)
# SBT application tests
log_and_print("==== Building SBT applications ====")
for app in ["sbt_app_core", "sbt_app_graphx", "sbt_app_streaming", "sbt_app_sql", "sbt_app_hive", "sbt_app_kinesis"]:
log("==== Building application %s in SBT ====" % app)
os.chdir(app)
ret = run_cmd("%s clean run" % SBT_CMD, exit_on_failure=False)
test(ret == 0, "SBT application (%s)" % app)
os.chdir(original_dir)
# Maven build tests
os.chdir("blank_maven_build")
log_and_print("==== Building Maven modules ====")
for module in modules:
log("==== Building module %s in maven ====" % module)
cmd = ('%s --update-snapshots -Dspark.release.repository="%s" -Dspark.version="%s" '
'-Dspark.module="%s" clean compile' %
(MAVEN_CMD, RELEASE_REPOSITORY, RELEASE_VERSION, module))
ret = run_cmd(cmd, exit_on_failure=False)
test(ret == 0, "maven build against '%s' module" % module)
os.chdir(original_dir)
# Maven application tests
log_and_print("==== Building Maven applications ====")
os.chdir("maven_app_core")
mvn_exec_cmd = ('%s --update-snapshots -Dspark.release.repository="%s" -Dspark.version="%s" '
'-Dscala.binary.version="%s" clean compile '
'exec:java -Dexec.mainClass="SimpleApp"' %
(MAVEN_CMD, RELEASE_REPOSITORY, RELEASE_VERSION, SCALA_BINARY_VERSION))
ret = run_cmd(mvn_exec_cmd, exit_on_failure=False)
test(ret == 0, "maven application (core)")
os.chdir(original_dir)
# Binary artifact tests
if os.path.exists(WORK_DIR):
print "Working directory '%s' already exists" % WORK_DIR
sys.exit(-1)
os.mkdir(WORK_DIR)
os.chdir(WORK_DIR)
index_page = get_url(RELEASE_URL)
artifact_regex = re.compile("<a href=\"(.*.tgz)\">")
artifacts = artifact_regex.findall(index_page)
# Verify artifact integrity
for artifact in artifacts:
log_and_print("==== Verifying download integrity for artifact: %s ====" % artifact)
artifact_url = "%s/%s" % (RELEASE_URL, artifact)
key_file = "%s.asc" % artifact
run_cmd("wget %s" % artifact_url)
run_cmd("wget %s/%s" % (RELEASE_URL, key_file))
run_cmd("wget %s%s" % (artifact_url, ".sha"))
# Verify signature
run_cmd("%s --keyserver pgp.mit.edu --recv-key %s" % (GPG_CMD, RELEASE_KEY))
run_cmd("%s %s" % (GPG_CMD, key_file))
passed("Artifact signature verified.")
# Verify md5
my_md5 = run_cmd_with_output("%s --print-md MD5 %s" % (GPG_CMD, artifact)).strip()
release_md5 = get_url("%s.md5" % artifact_url).strip()
test(my_md5 == release_md5, "Artifact MD5 verified.")
# Verify sha
my_sha = run_cmd_with_output("%s --print-md SHA512 %s" % (GPG_CMD, artifact)).strip()
release_sha = get_url("%s.sha" % artifact_url).strip()
test(my_sha == release_sha, "Artifact SHA verified.")
# Verify Apache required files
dir_name = artifact.replace(".tgz", "")
run_cmd("tar xvzf %s" % artifact)
base_files = os.listdir(dir_name)
test("CHANGES.txt" in base_files, "Tarball contains CHANGES.txt file")
test("NOTICE" in base_files, "Tarball contains NOTICE file")
test("LICENSE" in base_files, "Tarball contains LICENSE file")
os.chdir(WORK_DIR)
# Report result
log_and_print("\n")
if len(failures) == 0:
log_and_print("*** ALL TESTS PASSED ***")
else:
log_and_print("XXXXX SOME TESTS DID NOT PASS XXXXX")
for f in failures:
log_and_print(" %s" % f)
os.chdir(original_dir)
# Clean up
clean_work_files()
log_and_print("|-------- Spark release audit complete --------|")
|
alexlo03/ansible | refs/heads/devel | lib/ansible/modules/network/netvisor/pn_trunk.py | 32 | #!/usr/bin/python
""" PN CLI trunk-create/trunk-delete/trunk-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to create/delete/modify a trunk.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
      - Specify the LACP timeout as slow (30 seconds) or fast (4 seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
      - Specify the LACP fallback mode as bundle or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
pn_pause:
description:
- Specify if pause frames are sent.
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
pn_host:
description:
- Host facing port control setting.
"""
EXAMPLES = """
- name: create trunk
pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: delete trunk
pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
"""
This method checks for idempotency using the trunk-show command.
If a trunk with given name exists, return TRUNK_EXISTS as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: TRUNK_EXISTS
"""
name = module.params['pn_name']
show = cli + ' trunk-show format switch,name no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global TRUNK_EXISTS
if name in out:
TRUNK_EXISTS = True
else:
TRUNK_EXISTS = False
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
This method gets appropriate command name for the state specified. It
returns the command name for the specified state.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'trunk-create'
if state == 'absent':
command = 'trunk-delete'
if state == 'update':
command = 'trunk-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_ports=dict(type='str'),
pn_speed=dict(type='str',
choices=['disable', '10m', '100m', '1g', '2.5g',
'10g', '40g']),
pn_egress_rate_limit=dict(type='str'),
pn_jumbo=dict(type='bool'),
pn_lacp_mode=dict(type='str', choices=[
'off', 'passive', 'active']),
pn_lacp_priority=dict(type='int'),
pn_lacp_timeout=dict(type='str'),
pn_lacp_fallback=dict(type='str', choices=[
'bundle', 'individual']),
pn_lacp_fallback_timeout=dict(type='str'),
pn_edge_switch=dict(type='bool'),
pn_pause=dict(type='bool'),
pn_description=dict(type='str'),
pn_loopback=dict(type='bool'),
pn_mirror_receive=dict(type='bool'),
pn_unknown_ucast_level=dict(type='str'),
pn_unknown_mcast_level=dict(type='str'),
pn_broadcast_level=dict(type='str'),
pn_port_macaddr=dict(type='str'),
pn_loopvlans=dict(type='str'),
pn_routing=dict(type='bool'),
pn_host=dict(type='bool')
),
required_if=(
["state", "present", ["pn_name", "pn_ports"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
ports = module.params['pn_ports']
speed = module.params['pn_speed']
egress_rate_limit = module.params['pn_egress_rate_limit']
jumbo = module.params['pn_jumbo']
lacp_mode = module.params['pn_lacp_mode']
lacp_priority = module.params['pn_lacp_priority']
lacp_timeout = module.params['pn_lacp_timeout']
lacp_fallback = module.params['pn_lacp_fallback']
lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
edge_switch = module.params['pn_edge_switch']
pause = module.params['pn_pause']
description = module.params['pn_description']
loopback = module.params['pn_loopback']
mirror_receive = module.params['pn_mirror_receive']
unknown_ucast_level = module.params['pn_unknown_ucast_level']
unknown_mcast_level = module.params['pn_unknown_mcast_level']
broadcast_level = module.params['pn_broadcast_level']
port_macaddr = module.params['pn_port_macaddr']
loopvlans = module.params['pn_loopvlans']
routing = module.params['pn_routing']
host = module.params['pn_host']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'trunk-delete':
check_cli(module, cli)
if TRUNK_EXISTS is False:
module.exit_json(
skipped=True,
msg='Trunk with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'trunk-create':
check_cli(module, cli)
if TRUNK_EXISTS is True:
module.exit_json(
skipped=True,
msg='Trunk with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
# Appending options
if ports:
cli += ' ports ' + ports
if speed:
cli += ' speed ' + speed
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if jumbo is True:
cli += ' jumbo '
if jumbo is False:
cli += ' no-jumbo '
if lacp_mode:
cli += ' lacp-mode ' + lacp_mode
if lacp_priority:
            cli += ' lacp-priority ' + str(lacp_priority)
if lacp_timeout:
cli += ' lacp-timeout ' + lacp_timeout
if lacp_fallback:
cli += ' lacp-fallback ' + lacp_fallback
if lacp_fallback_timeout:
cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
if edge_switch is True:
cli += ' edge-switch '
if edge_switch is False:
cli += ' no-edge-switch '
if pause is True:
cli += ' pause '
if pause is False:
cli += ' no-pause '
if description:
cli += ' description ' + description
if loopback is True:
cli += ' loopback '
if loopback is False:
cli += ' no-loopback '
if mirror_receive is True:
cli += ' mirror-receive-only '
if mirror_receive is False:
cli += ' no-mirror-receive-only '
if unknown_ucast_level:
cli += ' unknown-ucast-level ' + unknown_ucast_level
if unknown_mcast_level:
cli += ' unknown-mcast-level ' + unknown_mcast_level
if broadcast_level:
cli += ' broadcast-level ' + broadcast_level
if port_macaddr:
cli += ' port-mac-address ' + port_macaddr
if loopvlans:
cli += ' loopvlans ' + loopvlans
if routing is True:
cli += ' routing '
if routing is False:
cli += ' no-routing '
if host is True:
cli += ' host-enable '
if host is False:
cli += ' host-disable '
run_cli(module, cli)
if __name__ == '__main__':
main()
|
heimdalerp/heimdalerp | refs/heads/master | common/validators.py | 2 | import datetime
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def date_is_future(value):
if isinstance(value, datetime.date):
if value <= datetime.date.today():
raise ValidationError(
_("The date entered must be greater than today.")
)
elif isinstance(value, datetime.datetime):
        if value.date() <= datetime.date.today():
raise ValidationError(
_("The date entered must be greater than today.")
)
else:
raise ValidationError(
_("The value entered isn't a valid type of date or datetime.")
)
def date_is_present_or_future(value):
if isinstance(value, datetime.date):
if value < datetime.date.today():
raise ValidationError(
_("The date entered must be today or lesser.")
)
elif isinstance(value, datetime.datetime):
        if value.date() < datetime.date.today():
raise ValidationError(
_("The date entered must be today or greater.")
)
else:
raise ValidationError(
_("The value entered isn't a valid type of date or datetime.")
)
def date_is_past(value):
if isinstance(value, datetime.date):
if value >= datetime.date.today():
raise ValidationError(
_("The date entered must be today or lesser.")
)
elif isinstance(value, datetime.datetime):
        if value.date() >= datetime.date.today():
raise ValidationError(
_("The date entered must be lesser than today.")
)
else:
raise ValidationError(
_("The value entered isn't a valid type of date or datetime.")
)
def date_is_present_or_past(value):
if isinstance(value, datetime.date):
if value > datetime.date.today():
raise ValidationError(
_("The date entered must be today or lesser.")
)
elif isinstance(value, datetime.datetime):
        if value.date() > datetime.date.today():
raise ValidationError(
_("The date entered must be today or lesser.")
)
else:
raise ValidationError(
_("The value entered isn't a valid type of date or datetime.")
)
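# --- Illustrative usage (editor's sketch, not part of the original module) ---
# These callables are meant to be wired into Django field definitions; the
# model and field names below are invented, and the import path assumes the
# app layout used in this repository (common/validators.py).
#
#   from django.db import models
#   from common.validators import date_is_future
#
#   class Reservation(models.Model):
#       starts_on = models.DateField(validators=[date_is_future])
#
# Model.full_clean() (or a ModelForm) then raises ValidationError whenever
# starts_on is today or earlier, per date_is_future() above.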
|
nanolearningllc/edx-platform-cypress | refs/heads/master | openedx/core/djangoapps/util/testing.py | 107 | """ Mixins for setting up particular course structures (such as split tests or cohorted content) """
from datetime import datetime
from pytz import UTC
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.partitions.partitions import UserPartition, Group
from student.tests.factories import CourseEnrollmentFactory, UserFactory
class ContentGroupTestCase(ModuleStoreTestCase):
"""
Sets up discussion modules visible to content groups 'Alpha' and
'Beta', as well as a module visible to all students. Creates a
staff user, users with access to Alpha/Beta (by way of cohorts),
and a non-cohorted user with no special access.
"""
def setUp(self):
super(ContentGroupTestCase, self).setUp()
self.course = CourseFactory.create(
org='org', number='number', run='run',
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime(2012, 2, 3, tzinfo=UTC),
user_partitions=[
UserPartition(
0,
'Content Group Configuration',
'',
[Group(1, 'Alpha'), Group(2, 'Beta')],
scheme_id='cohort'
)
],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
},
cohort_config={'cohorted': True},
discussion_topics={}
)
self.staff_user = UserFactory.create(is_staff=True)
self.alpha_user = UserFactory.create()
self.beta_user = UserFactory.create()
self.non_cohorted_user = UserFactory.create()
for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
alpha_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Alpha',
users=[self.alpha_user]
)
beta_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Beta',
users=[self.beta_user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=alpha_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[0].id
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=beta_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[1].id
)
self.alpha_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='alpha_group_discussion',
discussion_target='Visible to Alpha',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.beta_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='beta_group_discussion',
discussion_target='Visible to Beta',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
self.global_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='global_group_discussion',
discussion_target='Visible to Everyone'
)
self.course = self.store.get_item(self.course.location)
class TestConditionalContent(ModuleStoreTestCase):
"""
Construct a course with graded problems that exist within a split test.
"""
TEST_SECTION_NAME = 'Problem'
def setUp(self):
"""
Set up a course with graded problems within a split test.
Course hierarchy is as follows (modeled after how split tests
are created in studio):
-> course
-> chapter
-> sequential (graded)
-> vertical
-> split_test
-> vertical (Group A)
-> problem
-> vertical (Group B)
-> problem
"""
super(TestConditionalContent, self).setUp()
# Create user partitions
self.user_partition_group_a = 0
self.user_partition_group_b = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_a, 'Group A'),
Group(self.user_partition_group_b, 'Group B')
]
)
# Create course with group configurations and grading policy
self.course = CourseFactory.create(
user_partitions=[self.partition],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": 1.0
}]
}
)
chapter = ItemFactory.create(parent_location=self.course.location,
display_name='Chapter')
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=self.TEST_SECTION_NAME)
# Create users and partition them
self.student_a = UserFactory.create(username='student_a', email='student_a@example.com')
CourseEnrollmentFactory.create(user=self.student_a, course_id=self.course.id)
self.student_b = UserFactory.create(username='student_b', email='student_b@example.com')
CourseEnrollmentFactory.create(user=self.student_b, course_id=self.course.id)
UserCourseTagFactory(
user=self.student_a,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id),
value=str(self.user_partition_group_a)
)
UserCourseTagFactory(
user=self.student_b,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id),
value=str(self.user_partition_group_b)
)
# Create a vertical to contain our split test
problem_vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
display_name='Problem Unit'
)
# Create the split test and child vertical containers
vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a')
vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b')
self.split_test = ItemFactory.create(
parent_location=problem_vertical.location,
category='split_test',
display_name='Split Test',
user_partition_id=self.partition.id,
group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])}
)
self.vertical_a = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group A problem container',
location=vertical_a_url
)
self.vertical_b = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group B problem container',
location=vertical_b_url
)
|
scorpionis/docklet | refs/heads/master | client/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/filepost.py | 713 | from __future__ import absolute_import
import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
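# --- Illustrative usage (editor's sketch, not part of urllib3) ---
# Encoding one plain field and one file-style (filename, data, mime type)
# tuple; the field names and payload bytes are invented for the example.
if __name__ == '__main__':
    demo_fields = {
        'comment': 'hello',
        'attachment': ('report.txt', b'file contents', 'text/plain'),
    }
    demo_body, demo_content_type = encode_multipart_formdata(demo_fields)
    # demo_content_type reads 'multipart/form-data; boundary=<32 hex chars>'
    # and demo_body holds the ready-to-send bytes for an HTTP POST.
    print(demo_content_type)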
|
j-mracek/dnf | refs/heads/master | tests/test_base.py | 3 | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import binascii
import itertools
import re
import hawkey
import libdnf.transaction
import rpm
import dnf
import dnf.exceptions
import dnf.package
import dnf.subject
import dnf.transaction
import tests.support
from tests.support import mock
class BaseTest(tests.support.TestCase):
@staticmethod
def _setup_packages(history):
pkg = tests.support.MockPackage('pepper-20-0.x86_64')
pkg._force_swdb_repoid = "main"
history.rpm.add_install(pkg)
history.beg("", [], [])
for tsi in history._swdb.getItems():
if tsi.getState() == libdnf.transaction.TransactionItemState_UNKNOWN:
tsi.setState(libdnf.transaction.TransactionItemState_DONE)
history.end("")
history.close()
def test_instance(self):
base = tests.support.MockBase()
self.assertIsNotNone(base)
base.close()
@mock.patch('dnf.rpm.detect_releasever', lambda x: 'x')
@mock.patch('dnf.util.am_i_root', lambda: True)
def test_default_config_root(self):
base = dnf.Base()
self.assertIsNotNone(base.conf)
self.assertIsNotNone(base.conf.cachedir)
reg = re.compile('/var/cache/dnf')
self.assertIsNotNone(reg.match(base.conf.cachedir))
base.close()
@mock.patch('dnf.rpm.detect_releasever', lambda x: 'x')
@mock.patch('dnf.util.am_i_root', lambda: False)
def test_default_config_user(self):
base = dnf.Base()
self.assertIsNotNone(base.conf)
self.assertIsNotNone(base.conf.cachedir)
reg = re.compile('/var/tmp/dnf-[a-zA-Z0-9_-]+')
self.assertIsNotNone(reg.match(base.conf.cachedir))
base.close()
def test_reset(self):
base = tests.support.MockBase('main')
base.reset(sack=True, repos=False)
self.assertIsNone(base._sack)
self.assertLength(base.repos, 1)
base.close()
@mock.patch('dnf.rpm.transaction.TransactionWrapper')
def test_ts(self, mock_ts):
base = dnf.Base()
self.assertEqual(base._priv_ts, None)
ts = base._ts
# check the setup is correct
ts.setFlags.call_args.assert_called_with(0)
flags = ts.setProbFilter.call_args[0][0]
self.assertTrue(flags & rpm.RPMPROB_FILTER_OLDPACKAGE)
self.assertFalse(flags & rpm.RPMPROB_FILTER_REPLACEPKG)
# check file conflicts are reported:
self.assertFalse(flags & rpm.RPMPROB_FILTER_REPLACENEWFILES)
# check we can close the connection
del base._ts
self.assertEqual(base._priv_ts, None)
ts.close.assert_called_once_with()
base.close()
def test_iter_userinstalled(self):
"""Test iter_userinstalled with a package installed by the user."""
base = tests.support.MockBase()
self._setup_packages(base.history)
base._sack = tests.support.mock_sack('main')
pkg, = base.sack.query().installed().filter(name='pepper')
# reason and repo are set in _setup_packages() already
self.assertEqual(base.history.user_installed(pkg), True)
self.assertEqual(base.history.repo(pkg), 'main')
base.close()
def test_iter_userinstalled_badfromrepo(self):
"""Test iter_userinstalled with a package installed from a bad repository."""
base = tests.support.MockBase()
base._sack = tests.support.mock_sack('main')
self._setup_packages(base.history)
history = base.history
pkg = tests.support.MockPackage('pepper-20-0.x86_64')
pkg._force_swdb_repoid = "anakonda"
history.rpm.add_install(pkg)
history.beg("", [], [])
for tsi in history._swdb.getItems():
if tsi.getState() == libdnf.transaction.TransactionItemState_UNKNOWN:
tsi.setState(libdnf.transaction.TransactionItemState_DONE)
history.end("")
history.close()
pkg, = base.sack.query().installed().filter(name='pepper')
self.assertEqual(base.history.user_installed(pkg), True)
self.assertEqual(base.history.repo(pkg), 'anakonda')
base.close()
def test_iter_userinstalled_badreason(self):
"""Test iter_userinstalled with a package installed for a wrong reason."""
base = tests.support.MockBase()
base._sack = tests.support.mock_sack('main')
self._setup_packages(base.history)
history = base.history
pkg = tests.support.MockPackage('pepper-20-0.x86_64')
pkg._force_swdb_repoid = "main"
history.rpm.add_install(pkg, reason=libdnf.transaction.TransactionItemReason_DEPENDENCY)
history.beg("", [], [])
for tsi in history._swdb.getItems():
if tsi.getState() == libdnf.transaction.TransactionItemState_UNKNOWN:
tsi.setState(libdnf.transaction.TransactionItemState_DONE)
history.end("")
history.close()
pkg, = base.sack.query().installed().filter(name='pepper')
self.assertEqual(base.history.user_installed(pkg), False)
self.assertEqual(base.history.repo(pkg), 'main')
base.close()
def test_translate_comps_pkg_types(self):
base = tests.support.MockBase()
num = base._translate_comps_pkg_types(('mandatory', 'optional'))
self.assertEqual(num, 12)
base.close()
class MockBaseTest(tests.support.DnfBaseTestCase):
"""Test the Base methods that need a Sack."""
REPOS = ["main"]
def test_add_remote_rpms(self):
pkgs = self.base.add_remote_rpms([tests.support.TOUR_50_PKG_PATH])
self.assertIsInstance(pkgs[0], dnf.package.Package)
self.assertEqual(pkgs[0].name, 'tour')
class BuildTransactionTest(tests.support.DnfBaseTestCase):
REPOS = ["updates"]
def test_resolve(self):
self.base.upgrade("pepper")
self.assertTrue(self.base.resolve())
self.base._ds_callback.assert_has_calls([
mock.call.start(),
mock.call.pkg_added(mock.ANY, 'ud'),
mock.call.pkg_added(mock.ANY, 'u')
])
self.assertLength(self.base.transaction, 2)
# verify transaction test helpers
HASH = "68e9ded8ea25137c964a638f12e9987c"
def mock_sack_fn():
return (lambda base: tests.support.TestSack(tests.support.REPO_DIR, base))
@property
def ret_pkgid(self):
return self.name
class VerifyTransactionTest(tests.support.DnfBaseTestCase):
REPOS = ["main"]
INIT_TRANSACTION = True
@mock.patch('dnf.sack._build_sack', new_callable=mock_sack_fn)
@mock.patch('dnf.package.Package._pkgid', ret_pkgid) # neutralize @property
def test_verify_transaction(self, unused_build_sack):
# we don't simulate the transaction itself here, just "install" what is
# already there and "remove" what is not.
tsis = []
new_pkg = self.base.sack.query().available().filter(name="pepper")[1]
new_pkg._chksum = (hawkey.CHKSUM_MD5, binascii.unhexlify(HASH))
new_pkg.repo = mock.Mock()
new_pkg._force_swdb_repoid = "main"
self.history.rpm.add_install(new_pkg)
removed_pkg = self.base.sack.query().available().filter(name="mrkite")[0]
removed_pkg._force_swdb_repoid = "main"
self.history.rpm.add_remove(removed_pkg)
self._swdb_commit(tsis)
self.base._verify_transaction()
pkg = self.base.history.package_data(new_pkg)
self.assertEqual(pkg.ui_from_repo(), '@main')
self.assertEqual(pkg.action_name, "Install")
self.assertEqual(pkg.get_reason(), libdnf.transaction.TransactionItemReason_USER)
class InstallReasonTest(tests.support.ResultTestCase):
REPOS = ["main"]
def test_reason(self):
self.base.install("mrkite")
self.base.resolve()
new_pkgs = self.base._transaction._get_items(dnf.transaction.PKG_INSTALL)
pkg_reasons = [(tsi.name, tsi.reason) for tsi in new_pkgs]
self.assertCountEqual([
("mrkite", libdnf.transaction.TransactionItemReason_USER),
("trampoline", libdnf.transaction.TransactionItemReason_DEPENDENCY)],
pkg_reasons
)
class InstalledMatchingTest(tests.support.ResultTestCase):
REPOS = ["main"]
def test_query_matching(self):
subj = dnf.subject.Subject("pepper")
query = subj.get_best_query(self.sack)
inst, avail = self.base._query_matches_installed(query)
self.assertCountEqual(['pepper-20-0.x86_64'], map(str, inst))
self.assertCountEqual(['pepper-20-0.src'], map(str, itertools.chain.from_iterable(avail)))
def test_selector_matching(self):
subj = dnf.subject.Subject("pepper")
sltr = subj.get_best_selector(self.sack)
inst = self.base._sltr_matches_installed(sltr)
self.assertCountEqual(['pepper-20-0.x86_64'], map(str, inst))
class CompsTest(tests.support.DnfBaseTestCase):
# Also see test_comps.py
REPOS = ["main"]
COMPS = True
# prevent creating the gen/ directory:
@mock.patch('dnf.yum.misc.repo_gen_decompress', lambda x, y: x)
def test_read_comps(self):
self.assertLength(self.base.comps.groups, tests.support.TOTAL_GROUPS)
def test_read_comps_disabled(self):
self.base.repos['main'].enablegroups = False
self.assertEmpty(self.base.read_comps())
class Goal2TransactionTest(tests.support.DnfBaseTestCase):
REPOS = ["main", "updates"]
def test_upgrade(self):
self.base.upgrade("hole")
self.assertTrue(self.base._run_hawkey_goal(self.goal, allow_erasing=False))
ts = self.base._goal2transaction(self.goal)
self.assertLength(ts, 3)
tsis = list(ts)
tsi = tsis[0]
self.assertEqual(str(tsi.pkg), "hole-2-1.x86_64")
self.assertEqual(tsi.action, libdnf.transaction.TransactionItemAction_UPGRADE)
tsi = tsis[1]
self.assertEqual(str(tsi.pkg), "hole-1-1.x86_64")
self.assertEqual(tsi.action, libdnf.transaction.TransactionItemAction_UPGRADED)
tsi = tsis[2]
self.assertEqual(str(tsi.pkg), "tour-5-0.noarch")
self.assertEqual(tsi.action, libdnf.transaction.TransactionItemAction_OBSOLETED)
|
daviwesley/OWASP-ZSC | refs/heads/master | lib/generator/windows_x64/exc.py | 20 | #!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
def run(filename):
return 'N'
|
acsproj/acscore | refs/heads/master | test/file_length_test.py | 1 | import unittest
from unittest.mock import patch
from unittest.mock import mock_open
from acscore import metrics
class FileLengthTest(unittest.TestCase):
def setUp(self):
self.file_length = metrics.FileLength()
self.data = {'5': 10, '35': 5, '100': 4, '105': 6}
def test_count(self):
        with patch('acscore.metric.file_length.open', mock_open(read_data='hello\n\n\n\nworld\n')):
result = self.file_length.count('')
self.assertEqual({'5': 1}, result)
def test_discretize(self):
result = self.file_length.discretize(self.data)
expected = {
'From0To40': 0.6,
'From41To200': 0.4,
'From201To600': 0.0,
'From601To1500': 0.0,
'From1501To5000': 0.0,
'From5000ToInf': 0.0,
}
self.assertEqual(expected, result)
def test_inspect(self):
# Test with ok file
discrete = self.file_length.discretize(self.data)
result1 = self.file_length.inspect(discrete, {'10': 1})
self.assertEqual({}, result1)
# Test with too long file
result2 = self.file_length.inspect(discrete, {'1000': 1})
expected = {
metrics.FileLength.TOO_MANY_LINES: {
'message': metrics.FileLength.inspections[metrics.FileLength.TOO_MANY_LINES]
}
}
self.assertEqual(expected, result2)
|
passiweinberger/passiweinberger.github.io | refs/heads/master | _site/presentations/IBM_Lab_Lecture/node_modules/utf8/tests/generate-test-data.py | 1788 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
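# Worked example of the surrogate-pair formulae above (comment added for
# clarity): for U+1F4A9, codePoint - 0x10000 = 0xF4A9, so
# highSurrogate = 0xF4A9 // 0x400 + 0xD800 = 0xD83D and
# lowSurrogate  = 0xF4A9 %  0x400 + 0xDC00 = 0xDCA9,
# i.e. unisymbol(0x1F4A9) returns the UTF-16 pair D83D DCA9.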
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
JohannesFeldmann/pism | refs/heads/pik | examples/searise-greenland/future_forcing/average.py | 5 | #!/usr/bin/env python
from numpy import arange, squeeze, zeros, shape
from sys import exit
try:
from netCDF4 import Dataset as NC
except:
from netCDF3 import Dataset as NC
## prepare the output file:
def prepare_file(fname, x, y):
print " preparing file '%s'..." % fname
nc = NC(fname, "w",format="NETCDF3_CLASSIC")
nc.set_fill_off()
nc.createDimension("time", None)
nc.createDimension("x", size=x.size)
nc.createDimension("y", size=y.size)
t_var = nc.createVariable("time", 'f', dimensions=("time",))
x_var = nc.createVariable("x", 'f', dimensions=("x",))
x_var[:] = x
y_var = nc.createVariable("y", 'f', dimensions=("y",))
y_var[:] = y
# put dimensions metadata in a dictionary:
# name : (unit, long_name, standard_name)
attributes = {"x" : ("m", "X-coordinate in Cartesian system", "projection_x_coordinate"),
"y" : ("m", "Y-coordinate in Cartesian system", "projection_y_coordinate"),
"time" : ("years since 2004-1-1", "time", "time")
}
for each in list(attributes.keys()):
var = nc.variables[each]
var.units = attributes[each][0]
var.long_name = attributes[each][1]
var.standard_name = attributes[each][2]
ps = nc.createVariable("mapping", 'b')
ps.grid_mapping_name = "polar_stereographic"
ps.straight_vertical_longitude_from_pole = -39.0
ps.latitude_of_projection_origin = 90.0
ps.standard_parallel = 71.0
nc.Conventions = "CF-1.3"
nc.createDimension("nv", size=2)
t_bounds = nc.createVariable('time_bounds', 'f4', ('time', 'nv'))
nc.variables['time'].bounds = 'time_bounds'
return (nc, t_var, t_bounds)
input_temp = "air_temp.nc"
input_precip = "precipitation.nc"
## read air temperatures, and get space/time grid info from this file:
nc_temp = NC(input_temp, 'r')
temp_in = nc_temp.variables['air_temp']
x = nc_temp.variables['x'][:]
y = nc_temp.variables['y'][:]
N = len(nc_temp.dimensions['time'])
years = N/12 # number of years covered
print " found N = %d frames covering %d years in file %s" % (N, years, input_temp)
## read precipitation:
nc_precip = NC(input_precip, 'r')
precip_in = nc_precip.variables['precipitation']
if len(nc_precip.dimensions['time']) != N:
print "ERROR: number of frames in precipitation file '%s' does not match that in air temperature file '%s'" \
        % (input_precip, input_temp)
exit(1)
else:
print " found same number of frames in file %s" % input_precip
output_temp = "ar4_temp_anomaly.nc"
output_precip = "ar4_precip_anomaly.nc"
nc_temp_out, t_temp, t_temp_bounds = prepare_file(output_temp, x, y)
nc_precip_out, t_precip, t_precip_bounds = prepare_file(output_precip, x, y)
temp = nc_temp_out.createVariable("air_temp_anomaly", 'f', dimensions=("time", "y", "x"))
temp.units = "Kelvin"
temp.long_name = \
"mean annual air temperature at 2m above the surface (as anomaly from first year)"
temp.mapping = "mapping"
temp.description = "AR4 temperature anomaly"
precip = nc_precip_out.createVariable("precipitation_anomaly", 'f', dimensions=("time", "y", "x"))
precip.units = "m year-1"
precip.long_name = \
"mean annual ice-equivalent precipitation rate (as anomaly from first year)"
precip.mapping = "mapping"
precip.description = "AR4 precipitation anomaly"
print " averaging monthly temperature data to give annual mean"
for year in arange(years):
t_temp[year] = year + 0.5
t_temp_bounds[year,0] = year
t_temp_bounds[year,1] = year + 1
temp[year,:,:] = zeros((y.size, x.size))
for month in arange(12):
j = 12 * year + month
#print " [year=%d,j=%d]" % (year,j)
temp[year,:,:] += temp_in[j,:,:]
temp[year,:,:] /= 12.0
print " averaging monthly precipitation data to give annual mean"
for year in arange(years):
t_precip[year] = year + 0.5
t_precip_bounds[year,0] = year
t_precip_bounds[year,1] = year + 1
precip[year,:,:] = zeros((y.size, x.size))
for month in arange(12):
j = 12 * year + month
precip[year,:,:] += precip_in[j,:,:]
precip[year,:,:] /= 12.0
# convert to anomalies by subtracting-off first year averages:
print " converting annual mean temperature to 'air_temp_anomaly'"
temp_0 = temp[0,:,:].copy()
for year in arange(years):
temp[year,:,:] -= temp_0
print " converting annual mean precipitation to 'precipitation_anomaly'"
precip_0 = precip[0,:,:].copy()
for year in arange(years):
precip[year,:,:] -= precip_0
## warning: earlier closure of these input files causes problems; why?
nc_temp.close()
nc_precip.close()
nc_temp_out.close()
nc_precip_out.close()
print "done"
|
hanvo/MusicCloud | refs/heads/master | Crawler/Install Files/pygame/build/lib.linux-x86_64-2.7/pygame/tests/run_tests__tests/everything/magic_tag_test.py | 36 | __tags__ = ['magic']
if __name__ == '__main__':
import sys
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import unittest
else:
from test import test_utils
from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
def test_get_focused(self):
self.assert_(True)
def test_get_mods(self):
self.assert_(True)
def test_get_pressed(self):
self.assert_(True)
def test_name(self):
self.assert_(True)
def test_set_mods(self):
self.assert_(True)
if __name__ == '__main__':
unittest.main()
|
k3nnyfr/s2a_fr-nsis | refs/heads/master | s2a/Python/Lib/bdb.py | 144 | """Debugger basics"""
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
"""Exception to give up completely"""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self, skip=None):
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
self.frame_returning = None
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
return filename
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
canonic = os.path.normcase(canonic)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self._set_stopinfo(None, None)
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
if event == 'c_call':
return self.trace_dispatch
if event == 'c_exception':
return self.trace_dispatch
if event == 'c_return':
return self.trace_dispatch
print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame.f_back # (CT) Note that this may also be None!
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
try:
self.frame_returning = frame
self.user_return(frame, arg)
finally:
self.frame_returning = None
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def is_skipped_module(self, module_name):
for pattern in self.skip:
if fnmatch.fnmatch(module_name, pattern):
return True
return False
def stop_here(self, frame):
# (CT) stopframe may now also be None, see dispatch_call.
# (CT) the former test for None is therefore removed from here.
if self.skip and \
self.is_skipped_module(frame.f_globals.get('__name__')):
return False
if frame is self.stopframe:
if self.stoplineno == -1:
return False
return frame.f_lineno >= self.stoplineno
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return True
frame = frame.f_back
return False
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not filename in self.breaks:
return False
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
# The line itself has no breakpoint, but maybe the line is the
# first line of a function with breakpoint set by function name.
lineno = frame.f_code.co_firstlineno
if not lineno in self.breaks[filename]:
return False
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return True
else:
return False
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
    def user_exception(self, frame, exc_info):
        """This method is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        exc_type, exc_value, exc_traceback = exc_info
        pass
def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
self.stopframe = stopframe
self.returnframe = returnframe
self.quitting = 0
# stoplineno >= 0 means: stop at line >= the stoplineno
# stoplineno -1 means: don't stop at all
self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_until(self, frame): #the name "until" is borrowed from gdb
"""Stop when the line with the line no greater than the current one is
reached or when returning from current frame"""
self._set_stopinfo(frame, frame, frame.f_lineno+1)
def set_step(self):
"""Stop after one line of code."""
# Issue #13183: pdb skips frames after hitting a breakpoint and running
# step commands.
# Restore the trace function in the caller (that may not have been set
# for performance reasons) when returning from the current frame.
if self.frame_returning:
caller_frame = self.frame_returning.f_back
if caller_frame and not caller_frame.f_trace:
caller_frame.f_trace = self.trace_dispatch
self._set_stopinfo(None, None)
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self._set_stopinfo(frame, None)
def set_return(self, frame):
"""Stop when returning from the given frame."""
self._set_stopinfo(frame.f_back, frame)
def set_trace(self, frame=None):
"""Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
if frame is None:
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self._set_stopinfo(self.botframe, None, -1)
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
frame = sys._getframe().f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
    # error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
def _prune_breaks(self, filename, lineno):
if (filename, lineno) not in Breakpoint.bplist:
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not filename in self.breaks:
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
if f is None:
i = max(0, len(stack) - 1)
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = '%s(%r)' % (filename, lineno)
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if '__args__' in frame.f_locals:
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if '__return__' in frame.f_locals:
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno, frame.f_globals)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond=None, funcname=None):
self.funcname = funcname
# Needed if funcname is not None.
self.func_first_executable_line = None
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if (file, line) in self.bplist:
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self, out=None):
if out is None:
out = sys.stdout
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes '
else:
disp = disp + 'no '
print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print >>out, '\tstop only if %s' % (self.cond,)
if self.ignore:
print >>out, '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print >>out, ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
if not checkfuncname(b, frame):
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
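# Note added for clarity (not in the original module): for a temporary,
# unconditional breakpoint with ignore == 0, effective() returns (bp, 1) and
# break_here() then removes it via do_clear(); if evaluating a condition
# raises, effective() returns (bp, 0) so a temporary breakpoint is kept as a
# hint to the user.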
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
|
qtproject/pyside-pyside | refs/heads/dev | tests/QtQml/bug_557.py | 1 | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from helper import adjust_filename
from PySide2.QtCore import QUrl
from PySide2.QtGui import QGuiApplication
from PySide2.QtQml import QQmlEngine, QQmlComponent
app = QGuiApplication(sys.argv)
engine = QQmlEngine()
component = QQmlComponent(engine)
# This should segfault if the QDeclarativeComponent has not QQmlEngine
component.loadUrl(QUrl.fromLocalFile(adjust_filename('foo.qml', __file__)))
|
aquarimeter/aquarimeter | refs/heads/master | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 714 | # urllib3/contrib/ntlmpool.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
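# Illustrative usage sketch (comment only, not part of the original module;
# the host, domain and credentials below are made up). The pool performs the
# NTLM handshake in _new_conn() and is then used like a regular
# HTTPSConnectionPool:
#
#   pool = NTLMConnectionPool('EXAMPLE\\alice', 's3cret',
#                             authurl='/any-ntlm-protected-url',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/reports/latest')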
|
scigghia/account-invoicing | refs/heads/8.0 | account_invoice_supplierinfo_update_discount/__openerp__.py | 6 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Account Invoice - Discount Supplier Info Update',
'summary': 'In the supplier invoice, automatically update all products '
'whose discount on the line is different from '
'the supplier discount',
'version': '8.0.1.0.0',
'category': 'Accounting & Finance',
'website': 'http://odoo-community.org',
'author': 'GRAP,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'depends': [
'account_invoice_supplierinfo_update',
'product_supplierinfo_discount',
],
'installable': True,
'auto_install': True,
'data': [
'wizard/wizard_update_invoice_supplierinfo.xml',
],
}
|
nacc/autotest | refs/heads/master | client/tests/kvm/tests/pci_hotplug.py | 1 | import re
from autotest.client.shared import error
from autotest.client.virt import virt_utils, virt_vm, aexpect
def run_pci_hotplug(test, params, env):
"""
Test hotplug of PCI devices.
(Elements between [] are configurable test parameters)
    1) PCI add a device (NIC / block)
2) Compare output of monitor command 'info pci'.
3) Compare output of guest command [reference_cmd].
4) Verify whether pci_model is shown in [pci_find_cmd].
5) Check whether the newly added PCI device works fine.
6) PCI delete the device, verify whether could remove the PCI device.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
# Modprobe the module if specified in config file
module = params.get("modprobe_module")
if module:
session.cmd("modprobe %s" % module)
# Get output of command 'info pci' as reference
info_pci_ref = vm.monitor.info("pci")
# Get output of command as reference
reference = session.cmd_output(params.get("reference_cmd"))
tested_model = params.get("pci_model")
test_type = params.get("pci_type")
image_format = params.get("image_format_stg")
# Probe qemu to verify what is the supported syntax for PCI hotplug
cmd_output = vm.monitor.cmd("?")
if len(re.findall("\ndevice_add", cmd_output)) > 0:
cmd_type = "device_add"
elif len(re.findall("\npci_add", cmd_output)) > 0:
cmd_type = "pci_add"
else:
        raise error.TestError("Unknown version of qemu")
# Determine syntax of drive hotplug
# __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6
if len(re.findall("\n__com.redhat_drive_add", cmd_output)) > 0:
drive_cmd_type = "__com.redhat_drive_add"
# drive_add == qemu-kvm-0.13 onwards
elif len(re.findall("\ndrive_add", cmd_output)) > 0:
drive_cmd_type = "drive_add"
else:
        raise error.TestError("Unknown version of qemu")
# Probe qemu for a list of supported devices
devices_support = vm.monitor.cmd("%s ?" % cmd_type)
if cmd_type == "pci_add":
if test_type == "nic":
pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % tested_model
elif test_type == "block":
image_params = params.object_params("stg")
image_filename = virt_utils.get_image_filename(image_params,
test.bindir)
pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" %
(image_filename, tested_model))
# Execute pci_add (should be replaced by a proper monitor method call)
add_output = vm.monitor.cmd(pci_add_cmd)
if not "OK domain" in add_output:
raise error.TestFail("Add PCI device failed. "
"Monitor command is: %s, Output: %r" %
(pci_add_cmd, add_output))
after_add = vm.monitor.info("pci")
elif cmd_type == "device_add":
driver_id = test_type + "-" + virt_utils.generate_random_id()
device_id = test_type + "-" + virt_utils.generate_random_id()
if test_type == "nic":
if tested_model == "virtio":
tested_model = "virtio-net-pci"
pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
tested_model)
elif test_type == "block":
image_params = params.object_params("stg")
image_filename = virt_utils.get_image_filename(image_params,
test.bindir)
controller_model = None
if tested_model == "virtio":
tested_model = "virtio-blk-pci"
if tested_model == "scsi":
tested_model = "scsi-disk"
controller_model = "lsi53c895a"
if len(re.findall(controller_model, devices_support)) == 0:
raise error.TestError("scsi controller device (%s) not "
"supported by qemu" %
controller_model)
if controller_model is not None:
controller_id = "controller-" + device_id
controller_add_cmd = ("device_add %s,id=%s" %
(controller_model, controller_id))
vm.monitor.cmd(controller_add_cmd)
if drive_cmd_type == "drive_add":
driver_add_cmd = ("drive_add auto "
"file=%s,if=none,id=%s,format=%s" %
(image_filename, driver_id, image_format))
elif drive_cmd_type == "__com.redhat_drive_add":
driver_add_cmd = ("__com.redhat_drive_add "
"file=%s,format=%s,id=%s" %
(image_filename, image_format, driver_id))
pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
(device_id, tested_model, driver_id))
vm.monitor.cmd(driver_add_cmd)
# Check if the device is support in qemu
if len(re.findall(tested_model, devices_support)) > 0:
add_output = vm.monitor.cmd(pci_add_cmd)
else:
raise error.TestError("%s doesn't support device: %s" %
(cmd_type, tested_model))
after_add = vm.monitor.info("pci")
if not device_id in after_add:
raise error.TestFail("Add device failed. Monitor command is: %s"
". Output: %r" % (pci_add_cmd, add_output))
# Define a helper function to delete the device
def pci_del(ignore_failure=False):
if cmd_type == "pci_add":
result_domain, bus, slot, function = add_output.split(',')
domain = int(result_domain.split()[2])
bus = int(bus.split()[1])
slot = int(slot.split()[1])
pci_addr = "%x:%x:%x" % (domain, bus, slot)
cmd = "pci_del pci_addr=%s" % pci_addr
elif cmd_type == "device_add":
cmd = "device_del %s" % device_id
# This should be replaced by a proper monitor method call
vm.monitor.cmd(cmd)
def device_removed():
after_del = vm.monitor.info("pci")
return after_del != after_add
if (not virt_utils.wait_for(device_removed, 10, 0, 1)
and not ignore_failure):
raise error.TestFail("Failed to hot remove PCI device: %s. "
"Monitor command: %s" %
(tested_model, cmd))
try:
# Compare the output of 'info pci'
if after_add == info_pci_ref:
raise error.TestFail("No new PCI device shown after executing "
"monitor command: 'info pci'")
# Define a helper function to compare the output
def new_shown():
o = session.cmd_output(params.get("reference_cmd"))
return o != reference
secs = int(params.get("wait_secs_for_hook_up"))
if not virt_utils.wait_for(new_shown, 30, secs, 3):
raise error.TestFail("No new device shown in output of command "
"executed inside the guest: %s" %
params.get("reference_cmd"))
# Define a helper function to catch PCI device string
def find_pci():
o = session.cmd_output(params.get("find_pci_cmd"))
return params.get("match_string") in o
if not virt_utils.wait_for(find_pci, 30, 3, 3):
raise error.TestFail("PCI %s %s device not found in guest. "
"Command was: %s" %
(tested_model, test_type,
params.get("find_pci_cmd")))
# Test the newly added device
try:
session.cmd(params.get("pci_test_cmd"))
except aexpect.ShellError, e:
raise error.TestFail("Check for %s device failed after PCI "
"hotplug. Output: %r" % (test_type, e.output))
session.close()
except Exception:
pci_del(ignore_failure=True)
raise
else:
pci_del()
|
orlandi/connectomicsPerspectivesPaper | refs/heads/master | participants_codes/lukasz8000/DEMO/layers.py | 2 | import numpy as np
import cPickle
import gzip
import os
import sys
import time
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import theano.printing
import theano.tensor.shared_randomstreams
from logisticRegression import LogisticRegression
def ReLU(x):
y = T.maximum(0.0, x)
return(y)
#### sigmoid
def Sigmoid(x):
y = T.nnet.sigmoid(x)
return(y)
#### tanh
def Tanh(x):
y = T.tanh(x)
return(y)
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out,
activation, W=None, b=None,
use_bias=False):
self.input = input
self.activation = activation
if W is None:
W_values = np.asarray(0.01 * rng.standard_normal(
size=(n_in, n_out)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W')
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W = W
self.b = b
if use_bias:
lin_output = T.dot(input, self.W) + self.b
else:
lin_output = T.dot(input, self.W)
self.output = (lin_output if activation is None else activation(lin_output))
# parameters of the model
if use_bias:
self.params = [self.W, self.b]
else:
self.params = [self.W]
class HiddenLayer2d(object):
def __init__(self, rng, input, n_in, n_in2, n_out,
activation, W=None, b=None,
use_bias=False):
self.input = input
self.activation = activation
if W is None:
W_values = np.asarray(0.01 * rng.standard_normal(
size=(n_out, n_in, 1, n_in2)), dtype=theano.config.floatX)
W = theano.shared(value=W_values, name='W')
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W = W
self.b = b
if use_bias:
lin_output = T.dot(input, self.W,) + self.b
else:
lin_output = T.tensordot(input, self.W, axes = [[1,2,3],[1,2,3]])
self.output = (lin_output if activation is None else activation(lin_output))
# parameters of the model
if use_bias:
self.params = [self.W, self.b]
else:
self.params = [self.W]
def _dropout_from_layer(rng, layer, p):
"""p is the probablity of dropping a unit
"""
srng = theano.tensor.shared_randomstreams.RandomStreams(
rng.randint(999999))
# p=1-p because 1's indicate keep and p is prob of dropping
mask = srng.binomial(n=1, p=1-p, size=layer.shape)
# The cast is important because
# int * float32 = float64 which pulls things off the gpu
output = layer * T.cast(mask, theano.config.floatX)
return output
class DropoutHiddenLayer(HiddenLayer):
def __init__(self, rng, input, n_in, n_out,
activation, dropout_rate, use_bias, W=None, b=None):
super(DropoutHiddenLayer, self).__init__(
rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b,
activation=activation, use_bias=use_bias)
self.output = _dropout_from_layer(rng, self.output, p=dropout_rate)
class ConvolutionalLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(4, 4), activation=T.tanh, fac = 0, W=None, b=None):
"""
Allocate a ConvolutionalLayer with shared variable internal parameters.
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape
        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps, filter height, filter width)
        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps, image height, image width)
        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
self.filter_shape = filter_shape
self.image_shape = image_shape
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
# initialize weights with random weights
W_bound = 1.5 * np.sqrt(6. / (fan_in + fan_out))
initt =rng.uniform(low=-W_bound, high=W_bound, size=filter_shape)
if fac == 1:
mask = rng.binomial(n=1, p= 1 - 0.2, size=filter_shape)
initt = initt * mask
self.mm = np.asarray(mask, dtype=theano.config.floatX)
self.W = W
if W is None:
self.W = theano.shared(np.asarray(initt, dtype=theano.config.floatX),
borrow=True)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = b
if b is None:
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
self.conv_out = conv.conv2d(input=input, filters=self.W,
filter_shape=filter_shape, image_shape=image_shape)
pooled_out = downsample.max_pool_2d(input=self.conv_out, ds=poolsize, ignore_border=True)
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map width & height
self.output = activation(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.params = [self.W, self.b]
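# Illustrative instantiation sketch for ConvolutionalLayer (comment only, not
# part of the original file; the shapes below are made up). filter_shape is
# (n_filters, n_input_maps, filter_h, filter_w) and image_shape is
# (batch, n_input_maps, img_h, img_w); the n_input_maps entries must match:
#
#   rng = np.random.RandomState(1234)
#   x = T.tensor4('x')
#   conv1 = ConvolutionalLayer(rng, x,
#                              filter_shape=(16, 1, 5, 5),
#                              image_shape=(32, 1, 100, 100),
#                              poolsize=(4, 4), activation=T.tanh)
#   # conv1.output has shape (32, 16, 24, 24): (100 - 5 + 1) // 4 = 24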
class ConvolutionalHiddenSoftmax(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(4, 4), activation=T.tanh, fac = 0, W=None, b=None, WSoft = None, bSoft = None):
assert image_shape[1] == filter_shape[1]
self.input = input
self.filter_shape = filter_shape
self.image_shape = image_shape
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
# initialize weights with random weights
W_bound = 1.5 * np.sqrt(6. / (fan_in + fan_out))
initt =rng.uniform(low=-W_bound, high=W_bound, size=filter_shape)
if fac == 1:
mask = rng.binomial(n=1, p= 1 - 0.2, size=filter_shape)
initt = initt * mask
self.mm = np.asarray(mask, dtype=theano.config.floatX)
self.W = W
if W is None:
self.W = theano.shared(np.asarray(initt, dtype=theano.config.floatX),
borrow=True)
# the bias is a 1D tensor -- one bias per output feature map
b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = b
if b is None:
self.b = theano.shared(value=b_values, borrow=True)
# convolve input feature maps with filters
self.conv_out = conv.conv2d(input=input, filters=self.W, filter_shape=filter_shape, image_shape=image_shape)
pooled_out = downsample.max_pool_2d(input=self.conv_out, ds=poolsize, ignore_border=True)
self.WSoft=WSoft
self.bSoft=bSoft
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map width & height
# T.nnet.softmax(T.dot(input, self.WSoft) + self.bSoft)
self.outputH = activation(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')).flatten(3)
#self.output, updates = theano.map(lambda x: T.nnet.softmax(T.tensordot(x, self.WSoft) + self.bSoft), self.outputH.dimshuffle(0,2,1))
o, updates = theano.scan(fn = lambda x: T.nnet.softmax(T.dot(x, WSoft) + bSoft),
outputs_info=None,
sequences=[self.outputH.dimshuffle(0,2,1)],
)
self.output = o |
da-mkay/subsynco | refs/heads/master | src/subsynco/utils/textfile.py | 1 | #!/usr/bin/env python
'''
SubSynco - a tool for synchronizing subtitle files
Copyright (C) 2015 da-mkay
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
if sys.platform == 'win32':
# python-magic would require Cygwin on windows, so we fall back to
# chardet.
import chardet
else:
import magic
import codecs
import re
class TextFile(object):
@staticmethod
def detect_encoding(file_):
blob = open(file_).read()
# utf8 files with BOM are not correctly detected by
# magic/chardet --> check manually for BOM
if blob.startswith(codecs.BOM_UTF8):
return 'utf-8-sig'
# Detect charset using chardet on windows and magic on other
# platforms.
if sys.platform == 'win32':
encoding = chardet.detect(blob)['encoding']
if encoding is None:
return None
encoding = encoding.replace('-', '_').lower()
encoding = TextFile._fix_chardet_iso_8859_7(blob, encoding)
else:
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
encoding = m.buffer(blob).replace('-', '_').lower()
# Try to fix wrong detected encodings
encoding = TextFile._fix_latin1_latin2(blob, encoding)
if encoding in TextFile.get_available_encodings():
return encoding
return None
@staticmethod
def _fix_chardet_iso_8859_7(blob, detected_encoding):
"""Check if the iso-8859-7 (greek) detected by chardet should be
latin1.
        chardet does not support latin1 which is often used for German
        texts. chardet often detects iso-8859-7 instead. Here we check
        if iso-8859-7 was detected and if so we search for typical
        German character-sequences. If enough are found we assume the
        encoding should be latin1.
Returns 'latin1' or the passed detected_encoding.
"""
if detected_encoding in ['iso8859_7', 'iso-8859-7', 'greek', 'greek8',
'iso_8859_7']:
# Frequent german character-sequences:
part_exists = {
'der': 0,
'sch': 0,
'er': 0,
'en': 0,
}
pat_german = re.compile(r'(der|sch|er\b|en\b)', re.IGNORECASE)
for s in pat_german.findall(blob):
if len(s) == 1:
part_exists[s] = 1
else:
part_exists[s.lower()] = 1
score = sum(part_exists.values())
if score > 1:
detected_encoding = 'latin1'
return detected_encoding
@staticmethod
def _fix_latin1_latin2(blob, detected_encoding):
"""Check if the detected latin1/latin2 should be cp1252.
If latin1 or latin2 was detected we check for bytes in the range
from 0x7F to 0x9F. These are undefined for latin1/latin2 but
they exist in cp1252 which is a superset of latin1. If any of
these characters is found, we assume the encoding should be
cp1252.
Returns 'cp1252' or the passed detected_encoding.
"""
if detected_encoding in ['latin_1', 'iso_8859_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin',
'latin1', 'L1', 'iso_8859_2', 'iso8859_2',
'iso-8859-2', 'latin2', 'L2']:
unsupported_chars = map(chr, [128, 130, 131, 132, 133, 134, 135,
136, 137, 138, 139, 140, 142, 145,
146, 147, 148, 149, 150, 151, 152,
153, 154, 155, 156, 158, 159])
for char in unsupported_chars:
if blob.find(char) >= 0:
detected_encoding = 'cp1252'
break
return detected_encoding
@staticmethod
def get_available_encodings():
return ['utf8', 'maccyrillic', 'chinese', 'mskanji', 's_jis', 'cp1140',
'euc_jp', 'cp932', 'cp424', 'iso_2022_jp_2004', 'ibm1140',
'eucjis2004', 'iso_2022_jp', 'iso_8859_16', 'utf_7', 'macgreek',
'cp500', 'eucjp', 'iso_2022_jp_1', '932', 'ibm1026', 'latin3',
'936', 'mac_turkish', 'big5hkscs', 'uhc', 'ksc5601', 'ibm424',
'mac_latin2', 'euc_jis_2004', 'ibm500', 'cp936', 'cp862', 'latin10',
'iso2022_jp_3', 'iso2022_jp_2', 'iso2022_jp_1', 'iso_2022_kr',
'maccentraleurope', 'eucjisx0213', 'gbk', 'ibm857', 'iso8859_7',
'ibm855', 'euckr', 'l2', 'ibm852', 'ibm850', 'cp950', 'ibm858',
'utf_16be', '862', 'iso2022_jp_2004', 'latin', 'gb18030_2000',
'sjis', 'iso_2022_jp_2', 'ebcdic_cp_he', 'ibm437', 'csbig5',
'cp1361', 'maciceland', 'csptcp154', 'big5', 'sjis2004',
'cyrillic_asian', 'l6', 'iso2022jp', 'l7', 'euc_jisx0213', 'l10',
'l4', 'macturkish', 'korean', 'shiftjisx0213', 'l5', 'u32',
'mac_iceland', 'unicode_1_1_utf_7', 'shift_jisx0213', 'ms950',
'utf_32le', 'l3', 'gb2312_1980', 'iso2022_jp', 'hzgb', 'sjisx0213',
'ms1361', 'csiso58gb231280', 'l1', 'iso_ir_58', 'u16', 'ms932',
's_jisx0213', 'iso8859_4', 'ksx1001', 'euc_kr', 'ks_c_5601', 'u8',
'ibm039', 'johab', 'greek8', 'iso8859_6', 'ptcp154', 'iso2022kr',
'utf_32_be', 'ms949', 'ibm037', 'ms_kanji', 'cp850', 'shift_jis',
'cp852', 'cp855', 'l8', 'cp857', 'cp856', 'cp775', 'iso2022jp_ext',
'l9', 'jisx0213', 'hkscs', 'latin_1', 'us_ascii', 'iso_2022_jp_ext',
'cp1026', 'cp_is', 'cp1252', 'iso2022jp_1', 'iso2022jp_3',
'iso2022jp_2', 'shiftjis', 'utf_32', 'ujis', 'mac_cyrillic',
'maclatin2', 'csiso2022kr', 'iso8859_16', '855', '857', '850',
'ks_c_5601_1987', '852', 'ms936', 'u7', 'iso_8859_8', '858',
'utf_16_be', 'cp1258', 'windows_1258', 'utf_16_le', 'windows_1254',
'windows_1255', 'big5_tw', 'windows_1257', 'windows_1250',
'windows_1251', 'windows_1252', 'windows_1253', 'hz', 'utf_8',
'csshiftjis', 'ibm869', 'ibm866', 'mac_greek', 'ibm864', 'ibm865',
'ibm862', 'ibm863', 'ibm860', 'ibm861', 'utf_8_sig', 'iso_8859_1',
'ks_x_1001', 'cp949', 'pt154', 'windows_1256', 'utf32', '869',
'utf', 'cp_gr', 'hz_gb_2312', '861', '860', '863', 'cp737', '865',
'sjis_2004', '866', 'u_jis', 'iso8859_9', 'iso8859_8', 'iso8859_5',
'iso2022_kr', 'cp875', 'cp874', 'iso8859_1', 'iso8859_3',
'iso8859_2', 'gb18030', 'cp819', 'iso_8859_9', 'euccn',
'iso_8859_7', 'iso_8859_6', 'iso_8859_5', 'iso_8859_4',
'iso_8859_3', 'iso_8859_2', 'cp1006', 'gb2312', 'shift_jis_2004',
'utf_32_le', 'eucgb2312_cn', 'hebrew', 'arabic', 'ascii',
'mac_roman', 'iso8859_15', 'iso8859_14', 'hz_gb', 'iso8859_10',
'iso8859_13', 'cp720', '950', 'koi8_u', 'utf16', 'utf_16', 'cp869',
'iso_8859_15', 'iso_8859_14', 'iso_8859_13', 'iso2022jp_2004',
'iso_8859_10', 'cp860', 'cp861', 'ebcdic_cp_be', 'cp863', 'cp864',
'cp865', 'cp866', 'cp154', 'iso_2022_jp_3', 'shiftjis2004', '646',
'ebcdic_cp_ch', 'cp1255', 'cp1254', 'cp1257', 'cp1256', 'cp1251',
'cp1250', 'cp1253', '437', 'cp437', 'ibm775', 'big5_hkscs',
'csiso2022jp', 'gb2312_80', 'latin4', 'latin5', 'latin6', 'latin7',
'latin1', 'latin2', '949', 'macroman', 'utf_16le', 'cyrillic',
'latin8', 'latin9', 'koi8_r', 'greek', '8859', 'cp037', 'euc_cn',
'iso2022_jp_ext', 'utf_32be', 'cp858']
@staticmethod
def get_available_encodings_with_title():
return [
[_('ascii [English]'), 'ascii'],
#[_('646 [English]'), 'ascii'],
#[_('us-ascii [English]'), 'ascii'],
[_('big5 [Traditional Chinese]'), 'big5'],
#[_('big5-tw [Traditional Chinese]'), 'big5'],
#[_('csbig5 [Traditional Chinese]'), 'big5'],
#[_('big5hkscs [Traditional Chinese]'), 'big5hkscs'],
[_('big5-hkscs [Traditional Chinese]'), 'big5hkscs'],
#[_('hkscs [Traditional Chinese]'), 'big5hkscs'],
#[_('cp037 [English]'), 'cp037'],
[_('IBM037 [English]'), 'cp037'],
#[_('IBM039 [English]'), 'cp037'],
#[_('cp424 [Hebrew]'), 'cp424'],
#[_('EBCDIC-CP-HE [Hebrew]'), 'cp424'],
[_('IBM424 [Hebrew]'), 'cp424'],
#[_('cp437 [English]'), 'cp437'],
#[_('437 [English]'), 'cp437'],
[_('IBM437 [English]'), 'cp437'],
#[_('cp500 [Western Europe]'), 'cp500'],
#[_('EBCDIC-CP-BE [Western Europe]'), 'cp500'],
#[_('EBCDIC-CP-CH [Western Europe]'), 'cp500'],
[_('IBM500 [Western Europe]'), 'cp500'],
[_('cp720 [Arabic]'), 'cp720'],
[_('cp737 [Greek]'), 'cp737'],
#[_('cp775 [Baltic languages]'), 'cp775'],
[_('IBM775 [Baltic languages]'), 'cp775'],
#[_('cp850 [Western Europe]'), 'cp850'],
#[_('850 [Western Europe]'), 'cp850'],
[_('IBM850 [Western Europe]'), 'cp850'],
#[_('cp852 [Central and Eastern Europe]'), 'cp852'],
#[_('852 [Central and Eastern Europe]'), 'cp852'],
[_('IBM852 [Central and Eastern Europe]'), 'cp852'],
#[_('cp855 [Bulgarian, Byelorussian, Macedonian, Russian,
#Serbian]'), 'cp855'],
#[_('855 [Bulgarian, Byelorussian, Macedonian, Russian,
#Serbian]'), 'cp855'],
[_('IBM855 [Bulgarian, Byelorussian, Macedonian, Russian, Serbian]'
), 'cp855'],
[_('cp856 [Hebrew]'), 'cp856'],
#[_('cp857 [Turkish]'), 'cp857'],
#[_('857 [Turkish]'), 'cp857'],
[_('IBM857 [Turkish]'), 'cp857'],
#[_('cp858 [Western Europe]'), 'cp858'],
#[_('858 [Western Europe]'), 'cp858'],
[_('IBM858 [Western Europe]'), 'cp858'],
#[_('cp860 [Portuguese]'), 'cp860'],
#[_('860 [Portuguese]'), 'cp860'],
[_('IBM860 [Portuguese]'), 'cp860'],
#[_('cp861 [Icelandic]'), 'cp861'],
#[_('861 [Icelandic]'), 'cp861'],
#[_('CP-IS [Icelandic]'), 'cp861'],
[_('IBM861 [Icelandic]'), 'cp861'],
#[_('cp862 [Hebrew]'), 'cp862'],
#[_('862 [Hebrew]'), 'cp862'],
[_('IBM862 [Hebrew]'), 'cp862'],
#[_('cp863 [Canadian]'), 'cp863'],
#[_('863 [Canadian]'), 'cp863'],
[_('IBM863 [Canadian]'), 'cp863'],
#[_('cp864 [Arabic]'), 'cp864'],
[_('IBM864 [Arabic]'), 'cp864'],
#[_('cp865 [Danish, Norwegian]'), 'cp865'],
#[_('865 [Danish, Norwegian]'), 'cp865'],
[_('IBM865 [Danish, Norwegian]'), 'cp865'],
#[_('cp866 [Russian]'), 'cp866'],
#[_('866 [Russian]'), 'cp866'],
[_('IBM866 [Russian]'), 'cp866'],
#[_('cp869 [Greek]'), 'cp869'],
#[_('869 [Greek]'), 'cp869'],
#[_('CP-GR [Greek]'), 'cp869'],
[_('IBM869 [Greek]'), 'cp869'],
[_('cp874 [Thai]'), 'cp874'],
[_('cp875 [Greek]'), 'cp875'],
#[_('cp932 [Japanese]'), 'cp932'],
#[_('932 [Japanese]'), 'cp932'],
#[_('ms932 [Japanese]'), 'cp932'],
#[_('mskanji [Japanese]'), 'cp932'],
[_('ms-kanji [Japanese]'), 'cp932'],
#[_('cp949 [Korean]'), 'cp949'],
#[_('949 [Korean]'), 'cp949'],
[_('ms949 [Korean]'), 'cp949'],
#[_('uhc [Korean]'), 'cp949'],
#[_('cp950 [Traditional Chinese]'), 'cp950'],
#[_('950 [Traditional Chinese]'), 'cp950'],
[_('ms950 [Traditional Chinese]'), 'cp950'],
[_('cp1006 [Urdu]'), 'cp1006'],
#[_('cp1026 [Turkish]'), 'cp1026'],
[_('ibm1026 [Turkish]'), 'cp1026'],
#[_('cp1140 [Western Europe]'), 'cp1140'],
[_('ibm1140 [Western Europe]'), 'cp1140'],
#[_('cp1250 [Central and Eastern Europe]'), 'cp1250'],
[_('windows-1250 [Central and Eastern Europe]'), 'cp1250'],
#[_('cp1251 [Bulgarian, Byelorussian,Macedonian, Russian,
#Serbian]'), 'cp1251'],
[_('windows-1251 [Bulgarian, Byelorussian,Macedonian, Russian, Serb'
'ian]'), 'cp1251'],
#[_('cp1252 [Western Europe]'), 'cp1252'],
[_('windows-1252 [Western Europe]'), 'cp1252'],
#[_('cp1253 [Greek]'), 'cp1253'],
[_('windows-1253 [Greek]'), 'cp1253'],
#[_('cp1254 [Turkish]'), 'cp1254'],
[_('windows-1254 [Turkish]'), 'cp1254'],
#[_('cp1255 [Hebrew]'), 'cp1255'],
[_('windows-1255 [Hebrew]'), 'cp1255'],
#[_('cp1256 [Arabic]'), 'cp1256'],
[_('windows-1256 [Arabic]'), 'cp1256'],
#[_('cp1257 [Baltic languages]'), 'cp1257'],
[_('windows-1257 [Baltic languages]'), 'cp1257'],
#[_('cp1258 [Vietnamese]'), 'cp1258'],
[_('windows-1258 [Vietnamese]'), 'cp1258'],
[_('euc_jp [Japanese]'), 'euc_jp'],
#[_('eucjp [Japanese]'), 'euc_jp'],
#[_('ujis [Japanese]'), 'euc_jp'],
#[_('u-jis [Japanese]'), 'euc_jp'],
[_('euc_jis_2004 [Japanese]'), 'euc_jis_2004'],
#[_('jisx0213 [Japanese]'), 'euc_jis_2004'],
#[_('eucjis2004 [Japanese]'), 'euc_jis_2004'],
[_('euc_jisx0213 [Japanese]'), 'euc_jisx0213'],
#[_('eucjisx0213 [Japanese]'), 'euc_jisx0213'],
[_('euc_kr [Korean]'), 'euc_kr'],
#[_('euckr [Korean]'), 'euc_kr'],
#[_('korean [Korean]'), 'euc_kr'],
#[_('ksc5601 [Korean]'), 'euc_kr'],
#[_('ks_c-5601 [Korean]'), 'euc_kr'],
#[_('ks_c-5601-1987 [Korean]'), 'euc_kr'],
#[_('ksx1001 [Korean]'), 'euc_kr'],
#[_('ks_x-1001 [Korean]'), 'euc_kr'],
[_('gb2312 [Simplified Chinese]'), 'gb2312'],
#[_('chinese [Simplified Chinese]'), 'gb2312'],
#[_('csiso58gb231280 [Simplified Chinese]'), 'gb2312'],
#[_('euc-cn [Simplified Chinese]'), 'gb2312'],
#[_('euccn [Simplified Chinese]'), 'gb2312'],
#[_('eucgb2312-cn [Simplified Chinese]'), 'gb2312'],
#[_('gb2312-1980 [Simplified Chinese]'), 'gb2312'],
#[_('gb2312-80 [Simplified Chinese]'), 'gb2312'],
#[_('iso-ir-58 [Simplified Chinese]'), 'gb2312'],
#[_('gbk [Unified Chinese]'), 'gbk'],
#[_('936 [Unified Chinese]'), 'gbk'],
#[_('cp936 [Unified Chinese]'), 'gbk'],
[_('ms936 [Unified Chinese]'), 'gbk'],
[_('gb18030 [Unified Chinese]'), 'gb18030'],
#[_('gb18030-2000 [Unified Chinese]'), 'gb18030'],
[_('hz [Simplified Chinese]'), 'hz'],
#[_('hzgb [Simplified Chinese]'), 'hz'],
#[_('hz-gb [Simplified Chinese]'), 'hz'],
#[_('hz-gb-2312 [Simplified Chinese]'), 'hz'],
#[_('iso2022_jp [Japanese]'), 'iso2022_jp'],
#[_('csiso2022jp [Japanese]'), 'iso2022_jp'],
#[_('iso2022jp [Japanese]'), 'iso2022_jp'],
#[_('iso-2022-jp [Japanese]'), 'iso2022_jp'],
#[_('iso2022_jp_1 [Japanese]'), 'iso2022_jp_1'],
#[_('iso2022jp-1 [Japanese]'), 'iso2022_jp_1'],
[_('iso-2022-jp-1 [Japanese]'), 'iso2022_jp_1'],
#[_('iso2022_jp_2 [Japanese, Korean, Simplified Chinese, Wes
#tern Europe, Greek]'), 'iso2022_jp_2'],
#[_('iso2022jp-2 [Japanese, Korean, Simplified Chinese, West
#ern Europe, Greek]'), 'iso2022_jp_2'],
[_('iso-2022-jp-2 [Japanese, Korean, Simplified Chinese, Western Eu'
'rope, Greek]'), 'iso2022_jp_2'],
#[_('iso2022_jp_2004 [Japanese]'), 'iso2022_jp_2004'],
#[_('iso2022jp-2004 [Japanese]'), 'iso2022_jp_2004'],
[_('iso-2022-jp-2004 [Japanese]'), 'iso2022_jp_2004'],
#[_('iso2022_jp_3 [Japanese]'), 'iso2022_jp_3'],
#[_('iso2022jp-3 [Japanese]'), 'iso2022_jp_3'],
[_('iso-2022-jp-3 [Japanese]'), 'iso2022_jp_3'],
#[_('iso2022_jp_ext [Japanese]'), 'iso2022_jp_ext'],
#[_('iso2022jp-ext [Japanese]'), 'iso2022_jp_ext'],
[_('iso-2022-jp-ext [Japanese]'), 'iso2022_jp_ext'],
#[_('iso2022_kr [Korean]'), 'iso2022_kr'],
#[_('csiso2022kr [Korean]'), 'iso2022_kr'],
#[_('iso2022kr [Korean]'), 'iso2022_kr'],
[_('iso-2022-kr [Korean]'), 'iso2022_kr'],
#[_('latin_1 [West Europe]'), 'latin_1'],
#[_('iso-8859-1 [West Europe]'), 'latin_1'],
#[_('iso8859-1 [West Europe]'), 'latin_1'],
#[_('8859 [West Europe]'), 'latin_1'],
#[_('cp819 [West Europe]'), 'latin_1'],
#[_('latin [West Europe]'), 'latin_1'],
[_('latin1 [West Europe]'), 'latin_1'],
#[_('L1 [West Europe]'), 'latin_1'],
#[_('iso8859_2 [Central and Eastern Europe]'), 'iso8859_2'],
#[_('iso-8859-2 [Central and Eastern Europe]'),
#'iso8859_2'],
[_('latin2 [Central and Eastern Europe]'), 'iso8859_2'],
#[_('L2 [Central and Eastern Europe]'), 'iso8859_2'],
#[_('iso8859_3 [Esperanto, Maltese]'), 'iso8859_3'],
#[_('iso-8859-3 [Esperanto, Maltese]'), 'iso8859_3'],
[_('latin3 [Esperanto, Maltese]'), 'iso8859_3'],
#[_('L3 [Esperanto, Maltese]'), 'iso8859_3'],
#[_('iso8859_4 [Baltic languages]'), 'iso8859_4'],
#[_('iso-8859-4 [Baltic languages]'), 'iso8859_4'],
[_('latin4 [Baltic languages]'), 'iso8859_4'],
#[_('L4 [Baltic languages]'), 'iso8859_4'],
#[_('iso8859_5 [Bulgarian, Byelorussian, Macedonian,
#Russian, Serbian]'), 'iso8859_5'],
#[_('iso-8859-5 [Bulgarian, Byelorussian, Macedonian,
#Russian, Serbian]'), 'iso8859_5'],
[_('cyrillic [Bulgarian, Byelorussian, Macedonian, Russian, Serbian'
']'), 'iso8859_5'],
#[_('iso8859_6 [Arabic]'), 'iso8859_6'],
#[_('iso-8859-6 [Arabic]'), 'iso8859_6'],
[_('arabic [Arabic]'), 'iso8859_6'],
#[_('iso8859_7 [Greek]'), 'iso8859_7'],
#[_('iso-8859-7 [Greek]'), 'iso8859_7'],
[_('greek [Greek]'), 'iso8859_7'],
#[_('greek8 [Greek]'), 'iso8859_7'],
#[_('iso8859_8 [Hebrew]'), 'iso8859_8'],
#[_('iso-8859-8 [Hebrew]'), 'iso8859_8'],
[_('hebrew [Hebrew]'), 'iso8859_8'],
#[_('iso8859_9 [Turkish]'), 'iso8859_9'],
#[_('iso-8859-9 [Turkish]'), 'iso8859_9'],
[_('latin5 [Turkish]'), 'iso8859_9'],
#[_('L5 [Turkish]'), 'iso8859_9'],
#[_('iso8859_10 [Nordic languages]'), 'iso8859_10'],
#[_('iso-8859-10 [Nordic languages]'), 'iso8859_10'],
[_('latin6 [Nordic languages]'), 'iso8859_10'],
#[_('L6 [Nordic languages]'), 'iso8859_10'],
#[_('iso8859_13 [Baltic languages]'), 'iso8859_13'],
#[_('iso-8859-13 [Baltic languages]'), 'iso8859_13'],
[_('latin7 [Baltic languages]'), 'iso8859_13'],
#[_('L7 [Baltic languages]'), 'iso8859_13'],
#[_('iso8859_14 [Celtic languages]'), 'iso8859_14'],
#[_('iso-8859-14 [Celtic languages]'), 'iso8859_14'],
[_('latin8 [Celtic languages]'), 'iso8859_14'],
#[_('L8 [Celtic languages]'), 'iso8859_14'],
#[_('iso8859_15 [Western Europe]'), 'iso8859_15'],
#[_('iso-8859-15 [Western Europe]'), 'iso8859_15'],
[_('latin9 [Western Europe]'), 'iso8859_15'],
#[_('L9 [Western Europe]'), 'iso8859_15'],
#[_('iso8859_16 [South-Eastern Europe]'), 'iso8859_16'],
#[_('iso-8859-16 [South-Eastern Europe]'), 'iso8859_16'],
[_('latin10 [South-Eastern Europe]'), 'iso8859_16'],
#[_('L10 [South-Eastern Europe]'), 'iso8859_16'],
#[_('johab [Korean]'), 'johab'],
#[_('cp1361 [Korean]'), 'johab'],
[_('ms1361 [Korean]'), 'johab'],
[_('koi8_r [Russian]'), 'koi8_r'],
[_('koi8_u [Ukrainian]'), 'koi8_u'],
[_('mac_cyrillic [Bulgarian, Byelorussian, Macedonian, Russian, Ser'
'bian]'), 'mac_cyrillic'],
#[_('maccyrillic [Bulgarian, Byelorussian, Macedonian, Russi
#an, Serbian]'), 'mac_cyrillic'],
[_('mac_greek [Greek]'), 'mac_greek'],
#[_('macgreek [Greek]'), 'mac_greek'],
[_('mac_iceland [Icelandic]'), 'mac_iceland'],
#[_('maciceland [Icelandic]'), 'mac_iceland'],
[_('mac_latin2 [Central and Eastern Europe]'), 'mac_latin2'],
#[_('maclatin2 [Central and Eastern Europe]'), 'mac_latin2'],
#[_('maccentraleurope [Central and Eastern Europe]'), 'mac_l
#atin2'],
[_('mac_roman [Western Europe]'), 'mac_roman'],
#[_('macroman [Western Europe]'), 'mac_roman'],
[_('mac_turkish [Turkish]'), 'mac_turkish'],
#[_('macturkish [Turkish]'), 'mac_turkish'],
#[_('ptcp154 [Kazakh]'), 'ptcp154'],
#[_('csptcp154 [Kazakh]'), 'ptcp154'],
#[_('pt154 [Kazakh]'), 'ptcp154'],
#[_('cp154 [Kazakh]'), 'ptcp154'],
[_('cyrillic-asian [Kazakh]'), 'ptcp154'],
[_('shift_jis [Japanese]'), 'shift_jis'],
#[_('csshiftjis [Japanese]'), 'shift_jis'],
#[_('shiftjis [Japanese]'), 'shift_jis'],
#[_('sjis [Japanese]'), 'shift_jis'],
#[_('s_jis [Japanese]'), 'shift_jis'],
[_('shift_jis_2004 [Japanese]'), 'shift_jis_2004'],
#[_('shiftjis2004 [Japanese]'), 'shift_jis_2004'],
#[_('sjis_2004 [Japanese]'), 'shift_jis_2004'],
#[_('sjis2004 [Japanese]'), 'shift_jis_2004'],
[_('shift_jisx0213 [Japanese]'), 'shift_jisx0213'],
#[_('shiftjisx0213 [Japanese]'), 'shift_jisx0213'],
#[_('sjisx0213 [Japanese]'), 'shift_jisx0213'],
#[_('s_jisx0213 [Japanese]'), 'shift_jisx0213'],
[_('utf_32 [all languages]'), 'utf_32'],
#[_('U32 [all languages]'), 'utf_32'],
#[_('utf32 [all languages]'), 'utf_32'],
#[_('utf_32_be [all languages]'), 'utf_32_be'],
[_('UTF-32BE [all languages]'), 'utf_32_be'],
#[_('utf_32_le [all languages]'), 'utf_32_le'],
[_('UTF-32LE [all languages]'), 'utf_32_le'],
[_('utf_16 [all languages]'), 'utf_16'],
#[_('U16 [all languages]'), 'utf_16'],
#[_('utf16 [all languages]'), 'utf_16'],
#[_('utf_16_be [all languages (BMP only)]'), 'utf_16_be'],
[_('UTF-16BE [all languages (BMP only)]'), 'utf_16_be'],
#[_('utf_16_le [all languages (BMP only)]'), 'utf_16_le'],
[_('UTF-16LE [all languages (BMP only)]'), 'utf_16_le'],
[_('utf_7 [all languages]'), 'utf_7'],
#[_('U7 [all languages]'), 'utf_7'],
#[_('unicode-1-1-utf-7 [all languages]'), 'utf_7'],
[_('utf_8 [all languages]'), 'utf_8'],
#[_('U8 [all languages]'), 'utf_8'],
#[_('UTF [all languages]'), 'utf_8'],
#[_('utf8 [all languages]'), 'utf_8'],
[_('utf_8_sig [all languages]'), 'utf_8_sig']
]
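# Example usage (a minimal sketch; the subtitle path is hypothetical):
#
#   enc = TextFile.detect_encoding('movie.srt')
#   if enc is not None:
#       with codecs.open('movie.srt', 'r', encoding=enc) as f:
#           text = f.read()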
|
anisyonk/pilot | refs/heads/master | saga/utils/pty_shell.py | 4 |
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import re
import os
import sys
import errno
import saga.utils.misc as sumisc
import radical.utils.logger as rul
import saga.utils.pty_shell_factory as supsf
import saga.utils.pty_process as supp
import saga.url as surl
import saga.exceptions as se
import saga.session as ss
import pty_exceptions as ptye
# ------------------------------------------------------------------------------
#
_PTY_TIMEOUT = 2.0
# ------------------------------------------------------------------------------
#
# iomode flags
#
IGNORE = 0 # discard stdout / stderr
MERGED = 1 # merge stdout and stderr
SEPARATE = 2 # fetch stdout and stderr individually (one more hop)
STDOUT = 3 # fetch stdout only, discard stderr
STDERR = 4 # fetch stderr only, discard stdout
# --------------------------------------------------------------------
#
class PTYShell (object) :
"""
This class wraps a shell process and runs it as a :class:`PTYProcess`. The
user of this class can start that shell, and run arbitrary commands on it.
The shell to be run is expected to be POSIX compliant (bash, dash, sh, ksh
etc.) -- in particular, we expect the following features:
``$?``,
``$!``,
``$#``,
``$*``,
``$@``,
``$$``,
``$PPID``,
``>&``,
``>>``,
``>``,
``<``,
``|``,
``||``,
``()``,
``&``,
``&&``,
``wait``,
``kill``,
``nohup``,
``shift``,
``export``,
``PS1``, and
``PS2``.
Note that ``PTYShell`` will change the shell prompts (``PS1`` and ``PS2``),
    to simplify output parsing.  ``PS2`` will be empty, ``PS1`` will be set to
``PROMPT-$?->`` -- that way, the prompt will report the exit value of the
last command, saving an extra roundtrip. Users of this class should be
careful when setting other prompts -- see :func:`set_prompt` for more
details.
Usage Example::
# start the shell, find its prompt.
self.shell = saga.utils.pty_shell.PTYShell ("ssh://user@remote.host.net/", contexts, self._logger)
# run a simple shell command, merge stderr with stdout. $$ is the pid
# of the shell instance.
ret, out, _ = self.shell.run_sync (" mkdir -p /tmp/data.$$/" )
# check if mkdir reported success
if ret != 0 :
raise saga.NoSuccess ("failed to prepare base dir (%s)(%s)" % (ret, out))
# stage some data from a local string variable into a file on the remote system
self.shell.stage_to_remote (src = pbs_job_script,
tgt = "/tmp/data.$$/job_1.pbs")
# check size of staged script (this is actually done on PTYShell level
# already, with no extra hop):
ret, out, _ = self.shell.run_sync (" stat -c '%s' /tmp/data.$$/job_1.pbs" )
if ret != 0 :
raise saga.NoSuccess ("failed to check size (%s)(%s)" % (ret, out))
assert (len(pbs_job_script) == int(out))
**Data Staging and Data Management:**
The PTYShell class does not only support command execution, but also basic
data management: for SSH based shells, it will create a tunneled scp/sftp
connection for file staging. Other data management operations (mkdir, size,
list, ...) are executed either as shell commands, or on the scp/sftp channel
(if possible on the data channel, to keep the shell pty free for concurrent
command execution). Ssh tunneling is implemented via ssh.v2 'ControlMaster'
capabilities (see `ssh_config(5)`).
For local shells, PTYShell will create an additional shell pty for data
management operations.
**Asynchronous Notifications:**
A third pty process will be created for asynchronous notifications. For
that purpose, the shell started on the first channel will create a named
pipe, at::
$HOME/.saga/adaptors/shell/async.$$
``$$`` here represents the pid of the shell process. It will also set the
environment variable ``SAGA_ASYNC_PIPE`` to point to that named pipe -- any
application running on the remote host can write event messages to that
pipe, which will be available on the local end (see below). `PTYShell`
leaves it unspecified what format those messages have, but messages are
expected to be separated by newlines.
An adaptor using `PTYShell` can subscribe for messages via::
self.pty_shell.subscribe (callback)
where callback is a Python callable. PTYShell will listen on the event
channel *in a separate thread* and invoke that callback on any received
message, passing the message text (sans newline) to the callback.
An example usage: the command channel may run the following command line::
( sh -c 'sleep 100 && echo "job $$ done" > $SAGA_ASYNC_PIPE" \
|| echo "job $$ fail" > $SAGA_ASYNC_PIPE" ) &
which will return immediately, and send a notification message at job
completion.
Note that writes to named pipes are not atomic. From POSIX:
``A write is atomic if the whole amount written in one operation is not
interleaved with data from any other process. This is useful when there are
multiple writers sending data to a single reader. Applications need to know
how large a write request can be expected to be performed atomically. This
maximum is called {PIPE_BUF}. This volume of IEEE Std 1003.1-2001 does not
say whether write requests for more than {PIPE_BUF} bytes are atomic, but
    requires that writes of {PIPE_BUF} or fewer bytes shall be atomic.``
Thus the user is responsible for ensuring that either messages are smaller
than *PIPE_BUF* bytes on the remote system (usually at least 1024, on Linux
usually 4096), or to lock the pipe on larger writes.
**Automated Restart, Timeouts:**
For timeout and restart semantics, please see the documentation to the
underlying :class:`saga.utils.pty_process.PTYProcess` class.
"""
# TODO:
    #   - on client shell activities, also mark the master as active, to
# avoid timeout garbage collection.
# - use ssh mechanisms for master timeout (and persist), as custom
    #     mechanisms will interfere with gc_timeout.
# unique ID per connection, for debugging
_pty_id = 0
# ----------------------------------------------------------------
#
def __init__ (self, url, session=None, logger=None, init=None, opts={}, posix=True) :
# print 'new pty shell to %s' % url
if logger : self.logger = logger
else : self.logger = rul.getLogger ('saga', 'PTYShell')
if session : self.session = session
else : self.session = ss.Session (default=True)
self.logger.debug ("PTYShell init %s" % self)
self.url = url # describes the shell to run
self.init = init # call after reconnect
self.opts = opts # options...
self.posix = posix # /bin/sh compatible?
self.latency = 0.0 # set by factory
self.cp_slave = None # file copy channel
self.prompt = "[\$#%>\]]\s*$"
self.prompt_re = re.compile ("^(.*?)%s\s*$" % self.prompt, re.DOTALL)
self.initialized = False
self.pty_id = PTYShell._pty_id
PTYShell._pty_id += 1
# get prompt pattern from config
self.cfg = self.session.get_config('saga.utils.pty')
if 'prompt_pattern' in self.cfg :
self.prompt = self.cfg['prompt_pattern'].get_value ()
self.prompt_re = re.compile ("^(.*?)%s" % self.prompt, re.DOTALL)
else :
self.prompt = "[\$#%>\]]\s*$"
self.prompt_re = re.compile ("^(.*?)%s" % self.prompt, re.DOTALL)
self.logger.info ("PTY prompt pattern: %s" % self.prompt)
# we need a local dir for file staging caches. At this point we use
# $HOME, but should make this configurable (FIXME)
self.base = os.environ['HOME'] + '/.saga/adaptors/shell/'
try:
os.makedirs (self.base)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir (self.base):
pass
else:
raise se.NoSuccess ("could not create staging dir: %s" % e)
self.factory = supsf.PTYShellFactory ()
self.pty_info = self.factory.initialize (self.url, self.session,
self.prompt, self.logger,
posix=self.posix)
self.pty_shell = self.factory.run_shell (self.pty_info)
self._trace ('init : %s' % self.pty_shell.command)
self.initialize ()
# ----------------------------------------------------------------
#
def _trace (self, msg) :
# print " === %5d : %s : %s" % (self._pty_id, self.pty_shell, msg)
pass
# ----------------------------------------------------------------
#
def __del__ (self) :
self.logger.debug ("PTYShell del %s" % self)
self.finalize (kill_pty=True)
# ----------------------------------------------------------------
#
def initialize (self) :
""" initialize the shell connection. """
with self.pty_shell.rlock :
if self.initialized :
self.logger.warn ("initialization race")
return
if self.posix :
# run a POSIX compatible shell, usually /bin/sh, in interactive mode
# also, turn off tty echo
command_shell = "exec /bin/sh -i"
# use custom shell if so requested
if 'shell' in self.opts and self.opts['shell'] :
command_shell = "exec %s" % self.opts['shell']
self.logger.info ("custom command shell: %s" % command_shell)
self.logger.debug ("running command shell: %s" % command_shell)
self.pty_shell.write (" stty -echo ; unset HISTFILE ; %s\n" % command_shell)
# make sure this worked, and that we find the prompt. We use
# a versatile prompt pattern to account for the custom shell case.
_, out = self.find ([self.prompt])
try :
# set and register new prompt
self.run_async ( " unset PROMPT_COMMAND ; "
+ " unset HISTFILE ; "
+ "PS1='PROMPT-$?->'; "
+ "PS2=''; "
+ "export PS1 PS2 2>&1 >/dev/null\n")
self.set_prompt (new_prompt="PROMPT-(\d+)->$")
self.logger.debug ("got new shell prompt")
except Exception as e :
raise se.NoSuccess ("Shell startup on target host failed: %s" % e)
try :
# got a command shell, finally!
# for local shells, we now change to the current working
# directory. Remote shells will remain in the default pwd
# (usually $HOME).
if sumisc.host_is_local (surl.Url(self.url).host) :
pwd = os.getcwd ()
self.run_sync (' cd %s' % pwd)
except Exception as e :
# We will ignore any errors.
                self.logger.warning ("local cd failed: %s" % e)
self.initialized = True
self.finalized = False
# ----------------------------------------------------------------
#
def finalize (self, kill_pty = False) :
try :
if kill_pty and self.pty_shell :
with self.pty_shell.rlock :
if not self.finalized :
self.pty_shell.finalize ()
self.finalized = True
except Exception as e :
pass
# ----------------------------------------------------------------
#
def alive (self, recover=False) :
"""
        The shell is assumed to be alive if the shell process lives.
Attempt to restart shell if recover==True
"""
with self.pty_shell.rlock :
try :
return self.pty_shell.alive (recover)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def find_prompt (self, timeout=_PTY_TIMEOUT) :
"""
If run_async was called, a command is running on the shell. find_prompt
can be used to collect its output up to the point where the shell prompt
re-appears (i.e. when the command finishes).
Note that this method blocks until the command finishes. Future
versions of this call may add a timeout parameter.
"""
with self.pty_shell.rlock :
try :
match = None
fret = None
while fret == None :
fret, match = self.pty_shell.find ([self.prompt], timeout)
# self.logger.debug ("find prompt '%s' in '%s'" % (self.prompt, match))
ret, txt = self._eval_prompt (match)
return (ret, txt)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def find (self, patterns, timeout=-1) :
"""
Note that this method blocks until pattern is found in the shell I/O.
"""
with self.pty_shell.rlock :
try :
return self.pty_shell.find (patterns, timeout=timeout)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def set_prompt (self, new_prompt) :
"""
:type new_prompt: string
:param new_prompt: a regular expression matching the shell prompt
The new_prompt regex is expected to be a regular expression with one set
of catching brackets, which MUST return the previous command's exit
status. This method will send a newline to the client, and expects to
find the prompt with the exit value '0'.
As a side effect, this method will discard all previous data on the pty,
thus effectively flushing the pty output.
        By encoding the exit value in the command prompt, we save one roundtrip.
The prompt on Posix compliant shells can be set, for example, via::
PS1='PROMPT-$?->'; export PS1
        The newline in the example above allows us to nicely anchor the regular
expression, which would look like::
PROMPT-(\d+)->$
The regex is compiled with 're.DOTALL', so the dot character matches
all characters, including line breaks. Be careful not to match more
than the exact prompt -- otherwise, a prompt search will swallow stdout
data. For example, the following regex::
PROMPT-(.+)->$
would capture arbitrary strings, and would thus match *all* of::
PROMPT-0->ls
data/ info
PROMPT-0->
and thus swallow the ls output...
        Note that the string match *before* the prompt regex is non-greedy -- if
the output contains multiple occurrences of the prompt, only the match
        up to the first occurrence is returned.
"""
def escape (txt) :
pat = re.compile(r'\x1b[^m]*m')
return pat.sub ('', txt)
with self.pty_shell.rlock :
old_prompt = self.prompt
self.prompt = new_prompt
self.prompt_re = re.compile ("^(.*?)%s\s*$" % self.prompt, re.DOTALL)
retries = 0
triggers = 0
while True :
try :
# make sure we have a non-zero waiting delay (default to
# 1 second)
delay = 10 * self.latency
if not delay :
delay = 1.0
                    # FIXME: how do we know that _PTY_TIMEOUT suffices?  In particular if
# we actually need to flush...
fret, match = self.pty_shell.find ([self.prompt], delay)
if fret == None :
retries += 1
if retries > 10 :
self.prompt = old_prompt
raise se.BadParameter ("Cannot use new prompt, parsing failed (10 retries)")
self.pty_shell.write ("\n")
self.logger.debug ("sent prompt trigger again (%d)" % retries)
triggers += 1
continue
# found a match -- lets see if this is working now...
ret, _ = self._eval_prompt (match)
if ret != 0 :
self.prompt = old_prompt
raise se.BadParameter ("could not parse exit value (%s)" \
% match)
# prompt looks valid...
break
except Exception as e :
self.prompt = old_prompt
raise ptye.translate_exception (e, "Could not set shell prompt")
# got a valid prompt -- but we have to sync the output again in
# those cases where we had to use triggers to actually get the
# prompt
if triggers > 0 :
self.run_async (' printf "SYNCHRONIZE_PROMPT\n"')
                # FIXME: better timeout value?
fret, match = self.pty_shell.find (["SYNCHRONIZE_PROMPT"], timeout=10.0)
if fret == None :
                    # did not find prompt after blocking?  BAD!  Restart the shell
self.finalize (kill_pty=True)
raise se.NoSuccess ("Could not synchronize prompt detection")
self.find_prompt ()
# ----------------------------------------------------------------
#
def _eval_prompt (self, data, new_prompt=None) :
"""
This method will match the given data against the current prompt regex,
and expects to find an integer as match -- which is then returned, along
with all leading data, in a tuple
"""
with self.pty_shell.rlock :
try :
prompt = self.prompt
prompt_re = self.prompt_re
if new_prompt :
prompt = new_prompt
prompt_re = re.compile ("^(.*)%s\s*$" % prompt, re.DOTALL)
result = None
if not data :
raise se.NoSuccess ("cannot not parse prompt (%s), invalid data (%s)" \
% (prompt, data))
result = prompt_re.match (data)
if not result :
self.logger.debug ("could not parse prompt (%s) (%s)" % (prompt, data))
raise se.NoSuccess ("could not parse prompt (%s) (%s)" % (prompt, data))
txt = result.group (1)
ret = 0
if len (result.groups ()) != 2 :
if new_prompt :
self.logger.warn ("prompt does not capture exit value (%s)" % prompt)
# raise se.NoSuccess ("prompt does not capture exit value (%s)" % prompt)
else :
try :
ret = int(result.group (2))
except ValueError :
# apparently, this is not an integer. Print a warning, and
# assume success -- the calling entity needs to evaluate the
# remainder...
ret = 0
self.logger.warn ("prompt not suitable for error checks (%s)" % prompt)
txt += "\n%s" % result.group (2)
# if that worked, we can permanently set new_prompt
if new_prompt :
self.set_prompt (new_prompt)
return (ret, txt)
except Exception as e :
raise ptye.translate_exception (e, "Could not eval prompt")
# ----------------------------------------------------------------
#
def run_sync (self, command, iomode=None, new_prompt=None) :
"""
Run a shell command, and report exit code, stdout and stderr (all three
will be returned in a tuple). The call will block until the command
finishes (more exactly, until we find the prompt again on the shell's
I/O stream), and cannot be interrupted.
:type command: string
:param command: shell command to run.
:type iomode: enum
:param iomode: Defines how stdout and stderr are captured.
:type new_prompt: string
:param new_prompt: regular expression matching the prompt after
command succeeded.
        We expect the ``command`` not to do stdio redirection, as we want
to capture that separately. We *do* allow pipes and stdin/stdout
redirection. Note that SEPARATE mode will break if the job is run in
the background
The following iomode values are valid:
* *IGNORE:* both stdout and stderr are discarded, `None` will be
returned for each.
* *MERGED:* both streams will be merged and returned as stdout;
stderr will be `None`. This is the default.
* *SEPARATE:* stdout and stderr will be captured separately, and
returned individually. Note that this will require
at least one more network hop!
* *STDOUT:* only stdout is captured, stderr will be `None`.
* *STDERR:* only stderr is captured, stdout will be `None`.
* *None:* do not perform any redirection -- this is effectively
the same as `MERGED`
If any of the requested output streams does not return any data, an
empty string is returned.
If the command to be run changes the prompt to be expected for the
shell, the ``new_prompt`` parameter MUST contain a regex to match the
new prompt. The same conventions as for set_prompt() hold -- i.e. we
expect the prompt regex to capture the exit status of the process.
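
        Example (illustrative sketch -- the command and path are arbitrary)::

            ret, out, err = self.shell.run_sync (" ls /tmp/data.$$/", iomode=SEPARATE)
            if  ret != 0 :
                raise saga.NoSuccess ("ls failed (%s)(%s)" % (ret, err))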
"""
with self.pty_shell.rlock :
self._trace ("run sync : %s" % command)
            # we expect the shell to be in 'ground state' when running a synchronous
# command -- thus we can check if the shell is alive before doing so,
# and restart if needed
if not self.pty_shell.alive (recover=True) :
raise se.IncorrectState ("Can't run command -- shell died:\n%s" \
% self.pty_shell.autopsy ())
try :
command = command.strip ()
if command.endswith ('&') :
raise se.BadParameter ("run_sync can only run foreground jobs ('%s')" \
% command)
redir = ""
_err = "/tmp/saga-python.ssh-job.stderr.$$"
if iomode == IGNORE :
redir = " 1>>/dev/null 2>>/dev/null"
if iomode == MERGED :
redir = " 2>&1"
if iomode == SEPARATE :
redir = " 2>%s" % _err
if iomode == STDOUT :
redir = " 2>/dev/null"
if iomode == STDERR :
redir = " 2>&1 1>/dev/null"
if iomode == None :
redir = ""
self.logger.debug ('run_sync: %s%s' % (command, redir))
self.pty_shell.write ( "%s%s\n" % (command, redir))
# If given, switch to new prompt pattern right now...
prompt = self.prompt
if new_prompt :
prompt = new_prompt
# command has been started - now find prompt again.
fret, match = self.pty_shell.find ([prompt], timeout=-1.0) # blocks
if fret == None :
                    # did not find prompt after blocking?  BAD!  Restart the shell
self.finalize (kill_pty=True)
raise se.IncorrectState ("run_sync failed, no prompt (%s)" % command)
ret, txt = self._eval_prompt (match, new_prompt)
stdout = None
stderr = None
if iomode == None :
iomode = STDOUT
if iomode == IGNORE :
pass
if iomode == MERGED :
stdout = txt
if iomode == STDOUT :
stdout = txt
if iomode == SEPARATE or \
iomode == STDERR :
stdout = txt
self.pty_shell.write (" cat %s\n" % _err)
fret, match = self.pty_shell.find ([self.prompt], timeout=-1.0) # blocks
if fret == None :
                        # did not find prompt after blocking?  BAD!  Restart the shell
self.finalize (kill_pty=True)
raise se.IncorrectState ("run_sync failed, no prompt (%s)" \
% command)
_ret, _stderr = self._eval_prompt (match)
if _ret :
raise se.IncorrectState ("run_sync failed, no stderr (%s: %s)" \
% (_ret, _stderr))
stderr = _stderr
if iomode == STDERR :
# got stderr in branch above
stdout = None
return (ret, stdout, stderr)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def run_async (self, command) :
"""
Run a shell command, but don't wait for prompt -- just return. It is up
to caller to eventually search for the prompt again (see
:func:`find_prompt`. Meanwhile, the caller can interact with the called
command, via the I/O channels.
:type command: string
:param command: shell command to run.
For async execution, we don't care if the command is doing i/o redirection or not.
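
        Example (illustrative sketch -- the command is arbitrary)::

            self.shell.run_async (" sleep 10 && echo done")
            # ... interact with the running command if needed ...
            ret, out = self.shell.find_prompt ()  # blocks until the command finished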
"""
with self.pty_shell.rlock :
# self._trace ("run async : %s" % command)
            # we expect the shell to be in 'ground state' when running an asynchronous
# command -- thus we can check if the shell is alive before doing so,
# and restart if needed
if not self.pty_shell.alive (recover=True) :
raise se.IncorrectState ("Cannot run command:\n%s" \
% self.pty_shell.autopsy ())
try :
command = command.strip ()
self.send (" %s\n" % command)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def send (self, data) :
"""
send data to the shell. No newline is appended!
"""
with self.pty_shell.rlock :
if not self.pty_shell.alive (recover=False) :
raise se.IncorrectState ("Cannot send data:\n%s" \
% self.pty_shell.autopsy ())
try :
self.pty_shell.write ("%s" % data)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def write_to_remote (self, src, tgt) :
"""
:type src: string
:param src: data to be staged into the target file
:type tgt: string
        :param tgt: path to target file to stage to
The tgt path is not an URL, but expected to be a path
relative to the shell's URL.
The content of the given string is pasted into a file (specified by tgt)
on the remote system. If that file exists, it is overwritten.
A NoSuccess exception is raised if writing the file was not possible
(missing permissions, incorrect path, etc.).
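
        Example (illustrative sketch -- content and target path are arbitrary)::

            self.shell.write_to_remote ("some line of text", "/tmp/data.$$/note.txt")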
"""
try :
# self._trace ("write : %s -> %s" % (src, tgt))
# FIXME: make this relative to the shell's pwd? Needs pwd in
# prompt, and updating pwd state on every find_prompt.
# first, write data into a tmp file
fname = self.base + "/staging.%s" % id(self)
fhandle = open (fname, 'wb')
fhandle.write (src)
fhandle.flush ()
fhandle.close ()
ret = self.stage_to_remote (fname, tgt)
os.remove (fname)
return ret
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def read_from_remote (self, src) :
"""
:type src: string
        :param src: path to source file to stage from
The src path is not an URL, but expected to be a path
relative to the shell's URL.
"""
try :
# self._trace ("read : %s" % src)
# FIXME: make this relative to the shell's pwd? Needs pwd in
# prompt, and updating pwd state on every find_prompt.
# first, write data into a tmp file
fname = self.base + "/staging.%s" % id(self)
_ = self.stage_from_remote (src, fname)
os.system ('sync') # WTF? Why do I need this?
fhandle = open (fname, 'r')
out = fhandle.read ()
fhandle.close ()
os.remove (fname)
return out
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def stage_to_remote (self, src, tgt, cp_flags="") :
"""
:type src: string
        :param src: path of local source file to stage from.
                    The src path is not an URL, but expected to be a path
relative to the current working directory.
:type tgt: string
:param tgt: path to target file to stage to.
The tgt path is not an URL, but expected to be a path
relative to the shell's URL.
"""
self._trace ("stage to : %s -> %s" % (src, tgt))
# FIXME: make this relative to the shell's pwd? Needs pwd in
# prompt, and updating pwd state on every find_prompt.
try :
return self.run_copy_to (src, tgt, cp_flags)
except Exception as e :
raise ptye.translate_exception (e)
# ----------------------------------------------------------------
#
def stage_from_remote (self, src, tgt, cp_flags="") :
"""
:type src: string
        :param src: path to source file to stage from.
                    The src path is not an URL, but expected to be a path
                    relative to the shell's URL.
        :type tgt: string
        :param tgt: path of local target file to stage to.
                    The tgt path is not an URL, but expected to be a path
relative to the current working directory.
"""
self._trace ("stage from: %s -> %s" % (src, tgt))
# FIXME: make this relative to the shell's pwd? Needs pwd in
# prompt, and updating pwd state on every find_prompt.
try :
return self.run_copy_from (src, tgt, cp_flags)
except Exception as e :
raise ptye.translate_exception (e)
# --------------------------------------------------------------------------
#
def run_copy_to (self, src, tgt, cp_flags="") :
"""
This initiates a slave copy connection. Src is interpreted as local
path, tgt as path on the remote host.
Now, this is ugly when over sftp: sftp supports recursive copy, and
wildcards, all right -- but for recursive copies, it wants the target
dir to exist -- so, we have to check if the local src is a dir, and if
so, we first create the target before the copy. Worse, for wildcards we
        have to do a local expansion, and then do the same for each entry...
"""
self._trace ("copy to : %s -> %s" % (src, tgt))
with self.pty_shell.rlock :
info = self.pty_info
repl = dict ({'src' : src,
'tgt' : tgt,
'cp_flags' : cp_flags}.items () + info.items ())
# at this point, we do have a valid, living master
s_cmd = info['scripts'][info['copy_type']]['copy_to'] % repl
s_in = info['scripts'][info['copy_type']]['copy_to_in'] % repl
if not s_in :
# this code path does not use an interactive shell for copy --
# so the above s_cmd is all we want to run, really. We get
# do not use the chached cp_slave in this case, but just run the
# command. We do not have a list of transferred files though,
# yet -- that should be parsed from the proc output.
cp_proc = supp.PTYProcess (s_cmd)
out = cp_proc.wait ()
if cp_proc.exit_code :
raise ptye.translate_exception (se.NoSuccess ("file copy failed: %s" % out))
return list()
# this code path uses an interactive shell to transfer files, of
# some form, such as sftp. Get the shell cp_slave from cache, and
# run the actual copy command.
if not self.cp_slave :
self._trace ("get cp slave")
self.cp_slave = self.factory.get_cp_slave (s_cmd, info)
prep = ""
if 'sftp' in s_cmd :
# prepare target dirs for recursive copy, if needed
import glob
src_list = glob.glob (src)
for s in src_list :
if os.path.isdir (s) :
prep += "mkdir %s/%s\n" % (tgt, os.path.basename (s))
_ = self.cp_slave.write ("%s%s\n" % (prep, s_in))
_, out = self.cp_slave.find (['[\$\>\]]\s*$'], -1)
_, out = self.cp_slave.find (['[\$\>\]]\s*$'], 1.0)
# FIXME: we don't really get exit codes from copy
# if self.cp_slave.exit_code != 0 :
# raise se.NoSuccess._log (info['logger'], "file copy failed: %s" % str(out))
if 'Invalid flag' in out :
raise se.NoSuccess._log (info['logger'], "sftp version not supported (%s)" % str(out))
if 'No such file or directory' in out :
raise se.DoesNotExist._log (info['logger'], "file copy failed: %s" % str(out))
if 'is not a directory' in out :
raise se.BadParameter._log (info['logger'], "File copy failed: %s" % str(out))
if 'sftp' in s_cmd :
if 'not found' in out :
raise se.BadParameter._log (info['logger'], "file copy failed: %s" % out)
# we interpret the first word on the line as name of src file -- we
# will return a list of those
lines = out.split ('\n')
files = []
for line in lines :
elems = line.split (' ', 2)
if elems :
f = elems[0]
# remove quotes
if f :
if f[ 0] in ["'", '"', '`'] : f = f[1: ]
if f[-1] in ["'", '"', '`'] : f = f[ :-1]
# ignore empty lines
if f :
files.append (f)
info['logger'].debug ("copy done: %s" % files)
return files
# --------------------------------------------------------------------------
#
def run_copy_from (self, src, tgt, cp_flags="") :
"""
This initiates a slave copy connection. Src is interpreted as path on
the remote host, tgt as local path.
We have to do the same mkdir trick as for the run_copy_to, but here we
need to expand wildcards on the *remote* side :/
"""
self._trace ("copy from: %s -> %s" % (src, tgt))
with self.pty_shell.rlock :
info = self.pty_info
repl = dict ({'src' : src,
'tgt' : tgt,
'cp_flags' : cp_flags}.items ()+ info.items ())
# at this point, we do have a valid, living master
s_cmd = info['scripts'][info['copy_type']]['copy_from'] % repl
s_in = info['scripts'][info['copy_type']]['copy_from_in'] % repl
if not s_in :
# this code path does not use an interactive shell for copy --
                # so the above s_cmd is all we want to run, really.  We do
                # not use the cached cp_slave in this case, but just run the
# command. We do not have a list of transferred files though,
# yet -- that should be parsed from the proc output.
cp_proc = supp.PTYProcess (s_cmd)
                out = cp_proc.wait ()
if cp_proc.exit_code :
raise ptye.translate_exception (se.NoSuccess ("file copy failed: %s" % out))
return list()
if not self.cp_slave :
self._trace ("get cp slave")
self.cp_slave = self.factory.get_cp_slave (s_cmd, info)
prep = ""
if 'sftp' in s_cmd :
# prepare target dirs for recursive copy, if needed
self.cp_slave.write (" ls %s\n" % src)
_, out = self.cp_slave.find (["^sftp> "], -1)
                src_list = out[1].split ('\n')
for s in src_list :
if os.path.isdir (s) :
prep += "lmkdir %s/%s\n" % (tgt, os.path.basename (s))
_ = self.cp_slave.write ("%s%s\n" % (prep, s_in))
_, out = self.cp_slave.find (['[\$\>\]] *$'], -1)
# FIXME: we don't really get exit codes from copy
# if self.cp_slave.exit_code != 0 :
# raise se.NoSuccess._log (info['logger'], "file copy failed: %s" % out)
if 'Invalid flag' in out :
raise se.NoSuccess._log (info['logger'], "sftp version not supported (%s)" % out)
if 'No such file or directory' in out :
raise se.DoesNotExist._log (info['logger'], "file copy failed: %s" % out)
if 'is not a directory' in out :
raise se.BadParameter._log (info['logger'], "file copy failed: %s" % out)
if 'sftp' in s_cmd :
if 'not found' in out :
raise se.BadParameter._log (info['logger'], "file copy failed: %s" % out)
# we run copy with -v, so get a list of files which have been copied
# -- we parse that list and return it. we interpret the *second*
# word on the line as name of src file.
lines = out.split ('\n')
files = []
for line in lines :
elems = line.split (' ', 3)
if elems and len(elems) > 1 and elems[0] == 'Fetching' :
f = elems[1]
# remove quotes
if f :
if f[ 0] in ["'", '"', '`'] : f = f[1: ]
if f[-1] in ["'", '"', '`'] : f = f[ :-1]
# ignore empty lines
if f :
files.append (f)
info['logger'].debug ("copy done: %s" % files)
return files
|
bbc/kamaelia | refs/heads/master | Code/Python/Kamaelia/Kamaelia/Apps/SA/Chassis.py | 3 | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This file contains some utility classes which are used by both the client and
server components of the port tester application.
'''
import time
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess, shutdown
from Kamaelia.IPC import serverShutdown
from Kamaelia.Apps.SA.Time import SingleTick
# FIXME: Needs example of usage.
class TTL(Axon.Component.component):
'''
This "Time To Live" component is designed to wrap another existing component.
The TTL starts an embedded SingleTick component which waits for "delay"
    seconds and then the TTL progressively becomes more aggressive in its attempts
to shutdown the wrapped component. Ideally this component should not be
needed, but it is handy for components that do not have their own timeout
functionality.
TTL(comp, delay)
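
    Example (illustrative sketch -- the wrapped component is hypothetical)::

        comp = SomeSlowComponent()   # any Axon component without its own timeout
        TTL( comp, 10 ).run()        # comp is forcibly shut down ~10 seconds after start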
'''
Inboxes = {'_trigger':'Receives True message to cause embedded component to shutdown'}
    Outboxes= {'_sigkill':'Dynamically links to an embedded component control',
'_disarm':'Stop timebomb early'}
def __init__(self, comp, delay):
# One of the rare cases where we do not call the parent class' init()
# right off the bat. Instead we first replicate the wrapped component's
# inboxes and outboxes. Private "_name" boxes are not replicated.
self.child = comp
for inbox in (item for item in self.child.Inboxes if not item.startswith('_')):
try:
self.Inboxes[inbox] = self.child.Inboxes.get(inbox, "")
except AttributeError: # not a dict
self.Inboxes[inbox] = ""
for outbox in (item for item in self.child.Outboxes if not item.startswith('_')):
try:
self.Outboxes[outbox] = self.child.Outboxes.get(outbox, "")
except AttributeError: # not a dict
self.Outboxes[outbox] = ""
super(TTL, self).__init__()
self.timebomb = SingleTick(delay=delay, check_interval=1)
# We can now create the mailbox linkages now that the parent class'
# init() has been called.
self.link((self.timebomb, 'outbox'), (self, '_trigger'))
self.link((self, '_disarm'), (self.timebomb, 'control'))
try:
self.link((self, '_sigkill'), (self.child, 'control'))
self.nochildcontrol = False
except KeyError:
self.nochildcontrol = True
for inbox in (item for item in self.child.Inboxes if not item.startswith('_')):
self.link((self, inbox), (self.child, inbox), passthrough=1)
for outbox in (item for item in self.child.Outboxes if not item.startswith('_')):
self.link((self.child, outbox), (self, outbox), passthrough=2)
self.addChildren(self.child)
# FIXME: Really a fixme for Axon, but it strikes me (MPS) that what a
# FIXME: huge chunk of this code is crying out for really is a way of
# FIXME: killing components. Until that happens, this is pretty good,
# FIXME: but we can go a stage further here and add in sabotaging the
# FIXME: components methods as well to force it to crash if all else
# FIXME: fails (!) (akin to using ctypes to force a stack trace in
# FIXME: python(!))
def main(self):
self.timebomb.activate()
self.child.activate()
yield 1
while not (self.child._isStopped() or (self.dataReady('_trigger') and self.recv('_trigger') is True)):
self.pause()
yield 1
if not self.timebomb._isStopped():
self.send(producerFinished(), '_disarm')
shutdown_messages = [ producerFinished(), shutdownMicroprocess(), serverShutdown(), shutdown() ]
for msg in shutdown_messages:
if not self.child._isStopped():
self.send( msg, "_sigkill")
yield 1
yield 1
else:
break
self.removeChild(self.child)
yield 1
if not self.child._isStopped():
self.child.stop()
yield 1
if 'signal' in self.Outboxes:
self.send(shutdownMicroprocess(), 'signal')
yield 1
if __name__=="__main__":
class WellBehaved1(Axon.Component.component):
def main(self):
t = time.time()
while not self.dataReady("control"):
if time.time() - t>0.3:
self.send("hello", "outbox")
print (self)
t = time.time()
yield 1
self.send(self.recv("control"), "signal")
TTL( WellBehaved1(), 1 ).run()
class WellBehaved2(Axon.Component.component):
Inboxes = {
"inbox" : "Foo Bar",
"control" : "Foo Bar",
}
Outboxes = {
"outbox" : "Foo Bar",
"signal" : "Foo Bar",
}
def main(self):
t = time.time()
while not self.dataReady("control"):
if time.time() - t>0.3:
self.send("hello", "outbox")
print (self)
t = time.time()
yield 1
self.send(self.recv("control"), "signal")
TTL( WellBehaved2(), 1 ).run()
class WellBehaved3(Axon.Component.component):
Inboxes = [ "inbox", "control" ]
Outboxes = [ "outbox", "signal" ]
def main(self):
t = time.time()
while not self.dataReady("control"):
if time.time() - t>0.3:
self.send("hello", "outbox")
print (self)
t = time.time()
yield 1
self.send(self.recv("control"), "signal")
TTL( WellBehaved3(), 1 ).run()
class WellBehaved4(Axon.Component.component):
Inboxes = [ "inbox", "control" ]
Outboxes = {
"outbox" : "Foo Bar",
"signal" : "Foo Bar",
}
def main(self):
t = time.time()
while not self.dataReady("control"):
if time.time() - t>0.3:
self.send("hello", "outbox")
print (self)
t = time.time()
yield 1
self.send(self.recv("control"), "signal")
TTL( WellBehaved4(), 1 ).run()
class BadlyBehaved1(Axon.Component.component):
Inboxes = [ ]
Outboxes = [ ]
def main(self):
t = time.time()
while 1:
if time.time() - t>0.3:
print (self)
t = time.time()
yield 1
TTL( BadlyBehaved1(), 1 ).run()
|
nhmc/xastropy | refs/heads/master | xastropy/igm/abs_sys/abs_survey.py | 2 | """
#;+
#; NAME:
#; abssys_utils
#; Version 1.0
#;
#; PURPOSE:
#; Module for Absorption Systems
#; 23-Oct-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import imp, json, copy
from abc import ABCMeta, abstractmethod
from astropy.io import ascii
from astropy import units as u
from astropy.table import QTable, Table, Column
from linetools.spectra import io as lsio
from xastropy.xutils import xdebug as xdb
from xastropy.xutils import arrays as xarray
#
xa_path = imp.find_module('xastropy')[1]
###################### ######################
###################### ######################
# Class for Absorption Line Survey
class AbslineSurvey(object):
"""A survey of absorption line systems. Each system may be a
collection of Absline_System's
Attributes:
nsys: An integer representing the number of absorption systems
abs_type: Type of Absorption system (DLA, LLS)
ref: Reference to the Survey
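
    Example (illustrative sketch -- the file list and tree are hypothetical; in
    practice one typically instantiates a subclass for a specific survey type):

        lls_survey = AbslineSurvey('LLS', flist='Lists/lls.lst', tree='/data/LLS/')
        print(lls_survey.nsys)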
"""
__metaclass__ = ABCMeta
# Init
def __init__(self, abs_type, flist=None, summ_fits=None, tree='', ref=''):
# Expecting a list of files describing the absorption systems
""" Initiator
Parameters
----------
abs_type : str
Type of Abs Line System, e.g. MgII, DLA, LLS
flist : str, optional
ASCII file giving a list of systems (usually .dat files)
summ_fits : str, optional
FITS file for generating the Survey
mask : Boolean array
Mask for the systems
ref : string
Reference(s) for the survey
"""
self.flist = flist
self.tree = tree
self.abs_type = abs_type
self.ref = ref
self._abs_sys = []
self.summ_fits = summ_fits
self.mask = None
# Load
if flist is not None:
self.from_flist()
elif summ_fits is not None:
self.from_sfits()
else:
self.nsys = 0
# Mask
if self.mask is None:
if self.nsys > 0:
self.mask = np.array([True]*self.nsys)
def from_sfits(self):
'''Generate the Survey from a summary FITS file
'''
# Read
systems = QTable.read(self.summ_fits)
self.nsys = len(systems)
# Dict
kdict = dict(NHI=['NHI','logNHI'],
sigNHI=['sig(logNHI)'],
name=['Name'],
vlim=['vlim'],
zabs=['Z_LLS'],
zem=['Z_QSO'],
RA=['RA'],
Dec=['DEC','Dec'])
# Parse the Table
inputs = {}
for key in kdict.keys():
vals, tag = lsio.get_table_column(kdict[key],[systems],idx=0)
if vals is not None:
inputs[key] = vals
# Generate
for kk,row in enumerate(systems):
# Generate keywords
kwargs = {}
for key in inputs.keys():
kwargs[key] = inputs[key][kk]
# Instantiate
self._abs_sys.append(set_absclass(self.abs_type)(**kwargs))
def from_flist(self):
'''Generate the Survey from a file list.
Typically .dat files of JXP format
'''
# Load up (if possible)
data = ascii.read(self.tree+self.flist, data_start=0,
guess=False,format='no_header')
self.dat_files = list(data['col1'])
self.nsys = len(self.dat_files)
print('Read {:d} files from {:s} in the tree {:s}'.format(
self.nsys, self.flist, self.tree))
# Generate AbsSys list
for dat_file in self.dat_files:
self._abs_sys.append(set_absclass(self.abs_type)(dat_file=dat_file,tree=self.tree))
'''
if self.abs_type == 'LLS':
self._abs_sys.append(set(dat_file=dat_file,tree=tree))
elif self.abs_type == 'DLA':
self._abs_sys.append(DLA_System(dat_file=dat_file,tree=tree))
else: # Generic
self._abs_sys.append(GenericAbsSystem(abs_type,dat_file=tree+dat_file))
'''
# Get abs_sys (with mask)
def abs_sys(self):
lst = self._abs_sys
return xarray.lst_to_array(lst,mask=self.mask)
# Get attributes
def __getattr__(self, k):
try:
lst = [getattr(abs_sys,k) for abs_sys in self._abs_sys]
except ValueError:
raise ValueError
return xarray.lst_to_array(lst,mask=self.mask)
# Get ions
def fill_ions(self,jfile=None): # This may be overloaded!
'''
Loop on systems to fill in ions
Parameters:
-----------
jfile: str, optional
JSON file containing the information
'''
if jfile is not None:
# Load
with open(jfile) as data_file:
ions_dict = json.load(data_file)
# Loop on systems
for abs_sys in self._abs_sys:
abs_sys.get_ions(idict=ions_dict[abs_sys.name])
else:
for abs_sys in self._abs_sys:
# Line list
if (abs_sys.linelist is None) & (self.linelist is not None):
abs_sys.linelist = self.linelist
#
abs_sys.get_ions()
# Get ions
def ions(self,iZion, skip_null=False):
'''
Generate a Table of column densities and related quantities for a given ion.
Restricted to those systems where flg_clm > 0
Parameters
----------
iZion : tuple
Z, ion e.g. (6,4) for CIV
skip_null : boolean (False)
Skip systems without an entry, else pad with zeros
Returns
----------
Table of values for the Survey
'''
from astropy.table import Table
keys = [u'name',] + self.abs_sys()[0]._ionclms._data.keys()
t = copy.deepcopy(self.abs_sys()[0]._ionclms._data[0:1])
t.add_column(Column(['dum'],name='name',dtype='<U32'))
#key_dtype= ('<U32',) + self.abs_sys()[0]._ionclms.data.key_dtype
#key_dtype= ('<U32',) + self.abs_sys()[0]._ionclms.data.dtype
#t = Table(names=keys, dtype=key_dtype)
t = t[keys]
# Loop on systems (Masked)
for abs_sys in self.abs_sys():
## Mask?
#if not self.mask[self.abs_sys.index(abs_sys)]:
# continue
# Grab
try:
idict = abs_sys._ionclms[iZion]
except KeyError:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row( row )
continue
# Cut on flg_clm
if idict['flg_clm'] > 0:
row = [abs_sys.name] + [idict[key] for key in keys[1:]]
t.add_row( row ) # This could be slow
else:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row( row )
# Return
return t[1:]
'''
# Loop on systems (Masked)
for abs_sys in self.abs_sys():
## Mask?
#if not self.mask[self.abs_sys.index(abs_sys)]:
# continue
# Grab
try:
itab = abs_sys._ionclms[iZion]
except KeyError:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row( row )
continue
# Cut on flg_clm
if itab['flg_clm'] > 0:
row = [abs_sys.name] + [idict[key] for key in keys[1:]]
t.add_row( row ) # This could be slow
else:
if skip_null is False:
row = [abs_sys.name] + [0 for key in keys[1:]]
t.add_row( row )
# Return
return t[1:] # Slice dummy row
'''
# Mask
def upd_mask(self, msk, increment=False):
'''
Update the Mask for the abs_sys
Parameters
----------
msk : array (usually Boolean)
Mask of systems
increment : Boolean (False)
Increment the mask (i.e. keep False as False)
'''
if len(msk) == len(self._abs_sys): # Boolean mask
if increment is False:
self.mask = msk
else:
self.mask = (self.mask == True) & (msk == True)
else:
raise ValueError('abs_survey: Needs developing!')
# Printing
def __repr__(self):
if self.flist is not None:
return '[AbslineSurvey: {:s} {:s}, nsys={:d}, type={:s}, ref={:s}]'.format(
self.tree, self.flist, self.nsys, self.abs_type, self.ref)
else:
return '[AbslineSurvey: nsys={:d}, type={:s}, ref={:s}]'.format(
self.nsys, self.abs_type, self.ref)
# Class for Generic Absorption Line System
class GenericAbsSurvey(AbslineSurvey):
"""A simple absorption line survey
"""
def __init__(self, **kwargs):
AbslineSurvey.__init__(self,'Generic',**kwargs)
# Set AbsClass
def set_absclass(abstype):
'''Translate abstype into Class
Parameters:
----------
abstype: str
AbsSystem type, e.g. 'LLS', 'DLA'
Returns:
--------
Class name
'''
from xastropy.igm.abs_sys.lls_utils import LLSSystem
from xastropy.igm.abs_sys.dla_utils import DLASystem
from xastropy.igm.abs_sys.abssys_utils import GenericAbsSystem
cdict = dict(LLS=LLSSystem,DLA=DLASystem)
try:
return cdict[abstype]
except KeyError:
return GenericAbsSystem
###################### ###################### ######################
###################### ###################### ######################
###################### ###################### ######################
# Testing
###################### ###################### ######################
if __name__ == '__main__':
# Test the Survey
tmp = AbslineSurvey('LLS', flist='Lists/lls_metals.lst',
tree='/Users/xavier/LLS/')
print(tmp)
print('z NHI')
xdb.xpcol(tmp.zabs, tmp.NHI)
#xdb.set_trace()
print('abssys_utils: All done testing..')
|
jaruba/chromium.src | refs/heads/nw12 | tools/perf/metrics/speedindex.py | 9 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from metrics import Metric
from telemetry.image_processing import image_util
from telemetry.image_processing import rgba_color
from telemetry.value import scalar
class SpeedIndexMetric(Metric):
"""The speed index metric is one way of measuring page load speed.
It is meant to approximate user perception of page load speed, and it
is based on the amount of time that it takes to paint to the visual
portion of the screen. It includes paint events that occur after the
onload event, and it doesn't include time loading things off-screen.
This speed index metric is based on WebPageTest.org (WPT).
For more info see: http://goo.gl/e7AH5l
"""
def __init__(self):
super(SpeedIndexMetric, self).__init__()
self._impl = None
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs('--disable-infobars')
def Start(self, _, tab):
"""Start recording events.
This method should be called in the WillNavigateToPage method of
a PageTest, so that all the events can be captured. If it's called
in DidNavigateToPage, that will be too late.
"""
self._impl = (VideoSpeedIndexImpl() if tab.video_capture_supported else
PaintRectSpeedIndexImpl())
self._impl.Start(tab)
def Stop(self, _, tab):
"""Stop timeline recording."""
assert self._impl, 'Must call Start() before Stop()'
assert self.IsFinished(tab), 'Must wait for IsFinished() before Stop()'
self._impl.Stop(tab)
# Optional argument chart_name is not in base class Metric.
# pylint: disable=W0221
def AddResults(self, tab, results, chart_name=None):
"""Calculate the speed index and add it to the results."""
index = self._impl.CalculateSpeedIndex(tab)
# Release the tab so that it can be disconnected.
self._impl = None
results.AddValue(scalar.ScalarValue(
results.current_page, '%s_speed_index' % chart_name, 'ms', index,
description='Speed Index. This metric focuses on the time at which '
'the visible parts of the page are displayed, i.e. when the '
'first view is "almost" fully composed. If the page consists '
'only of static resources, load time can be measured '
'accurately and the Speed Index will be smaller than the '
'load time. If, on the other hand, the page is composed of '
'many XHR requests with a small main resource and '
'JavaScript, the Speed Index captures perceived performance '
'better than load time, because load time only measures '
'when the static resources have finished loading. For more '
'detail, see http://goo.gl/Rw3d5d. There are currently two '
'implementations: one for Android and one for Desktop. The '
'Android version uses video capture; the Desktop one uses '
'paint events and has extra overhead to catch paint '
'events.'))
def IsFinished(self, tab):
"""Decide whether the timeline recording should be stopped.
When the timeline recording is stopped determines which paint events
are used in the speed index metric calculation. In general, the recording
should continue if there has just been some data received, because
this suggests that painting may continue.
A page may repeatedly request resources in an infinite loop; a timeout
should be placed in any measurement that uses this metric, e.g.:
def IsDone():
return self._speedindex.IsFinished(tab)
util.WaitFor(IsDone, 60)
Returns:
True if 2 seconds have passed since last resource received, false
otherwise.
"""
return tab.HasReachedQuiescence()
class SpeedIndexImpl(object):
def Start(self, tab):
raise NotImplementedError()
def Stop(self, tab):
raise NotImplementedError()
def GetTimeCompletenessList(self, tab):
"""Returns a list of time to visual completeness tuples.
In the WPT PHP implementation, this is also called 'visual progress'.
"""
raise NotImplementedError()
def CalculateSpeedIndex(self, tab):
"""Calculate the speed index.
The speed index number conceptually represents the number of milliseconds
that the page was "visually incomplete". If the page were 0% complete for
1000 ms, then the score would be 1000; if it were 0% complete for 100 ms
then 90% complete (ie 10% incomplete) for 900 ms, then the score would be
1.0*100 + 0.1*900 = 190.
Returns:
A single number, milliseconds of visual incompleteness.
"""
time_completeness_list = self.GetTimeCompletenessList(tab)
prev_completeness = 0.0
speed_index = 0.0
prev_time = time_completeness_list[0][0]
for time, completeness in time_completeness_list:
# Add the incremental value for the interval just before this event.
elapsed_time = time - prev_time
incompleteness = (1.0 - prev_completeness)
speed_index += elapsed_time * incompleteness
# Update variables for next iteration.
prev_completeness = completeness
prev_time = time
return int(speed_index)
class VideoSpeedIndexImpl(SpeedIndexImpl):
def __init__(self, image_util_module=image_util):
# Allow image_util to be passed in so we can fake it out for testing.
super(VideoSpeedIndexImpl, self).__init__()
self._time_completeness_list = None
self._image_util_module = image_util_module
def Start(self, tab):
assert tab.video_capture_supported
# Blank out the current page so it doesn't count towards the new page's
# completeness.
tab.Highlight(rgba_color.WHITE)
# TODO(tonyg): Bitrate is arbitrary here. Experiment with screen capture
# overhead vs. speed index accuracy and set the bitrate appropriately.
tab.StartVideoCapture(min_bitrate_mbps=4)
def Stop(self, tab):
# Ignore white because Chrome may blank out the page during load and we want
# that to count as 0% complete. Relying on this fact, we also blank out the
# previous page to white. The tolerance of 8 experimentally does well with
# video capture at 4mbps. We should keep this as low as possible with
# supported video compression settings.
video_capture = tab.StopVideoCapture()
histograms = [(time, self._image_util_module.GetColorHistogram(
image, ignore_color=rgba_color.WHITE, tolerance=8))
for time, image in video_capture.GetVideoFrameIter()]
start_histogram = histograms[0][1]
final_histogram = histograms[-1][1]
total_distance = start_histogram.Distance(final_histogram)
def FrameProgress(histogram):
if total_distance == 0:
if histogram.Distance(final_histogram) == 0:
return 1.0
else:
return 0.0
return 1 - histogram.Distance(final_histogram) / total_distance
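# Note (added for clarity): a frame's progress is how close its histogram
# is to the final frame's histogram; e.g. a frame whose distance to the
# final histogram is 25% of total_distance scores 0.75. The special case
# above only matters when the first and last frames are identical.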
self._time_completeness_list = [(time, FrameProgress(hist))
for time, hist in histograms]
def GetTimeCompletenessList(self, tab):
assert self._time_completeness_list, 'Must call Stop() first.'
return self._time_completeness_list
class PaintRectSpeedIndexImpl(SpeedIndexImpl):
def __init__(self):
super(PaintRectSpeedIndexImpl, self).__init__()
def Start(self, tab):
tab.StartTimelineRecording()
def Stop(self, tab):
tab.StopTimelineRecording()
def GetTimeCompletenessList(self, tab):
events = tab.timeline_model.GetAllEvents()
viewport = self._GetViewportSize(tab)
paint_events = self._IncludedPaintEvents(events)
time_area_dict = self._TimeAreaDict(paint_events, viewport)
total_area = sum(time_area_dict.values())
assert total_area > 0.0, 'Total paint event area must be greater than 0.'
completeness = 0.0
time_completeness_list = []
# TODO(tonyg): This sets the start time to the start of the first paint
# event. That can't be correct. The start time should be navigationStart.
# Since the previous screen is not cleared at navigationStart, we should
# probably assume the completeness is 0 until the first paint and add the
# time of navigationStart as the start. We need to confirm what WPT does.
time_completeness_list.append(
(tab.timeline_model.GetAllEvents()[0].start, completeness))
for time, area in sorted(time_area_dict.items()):
completeness += float(area) / total_area
# Visual progress is rounded to the nearest percentage point as in WPT.
time_completeness_list.append((time, round(completeness, 2)))
return time_completeness_list
def _GetViewportSize(self, tab):
"""Returns dimensions of the viewport."""
return tab.EvaluateJavaScript('[ window.innerWidth, window.innerHeight ]')
def _IncludedPaintEvents(self, events):
"""Get all events that are counted in the calculation of the speed index.
There's one category of paint event that's filtered out: paint events
that occur before the first 'ResourceReceiveResponse' and 'Layout' events.
Previously in the WPT speed index, paint events that contain children paint
events were also filtered out.
"""
def FirstLayoutTime(events):
"""Get the start time of the first layout after a resource received."""
has_received_response = False
for event in events:
if event.name == 'ResourceReceiveResponse':
has_received_response = True
elif has_received_response and event.name == 'Layout':
return event.start
assert False, 'There were no layout events after resource receive events.'
first_layout_time = FirstLayoutTime(events)
paint_events = [e for e in events
if e.start >= first_layout_time and e.name == 'Paint']
return paint_events
def _TimeAreaDict(self, paint_events, viewport):
"""Make a dict from time to adjusted area value for events at that time.
The adjusted area value of each paint event is determined by how many paint
events cover the same rectangle, and whether it's a full-window paint event.
"Adjusted area" can also be thought of as "points" of visual completeness --
each rectangle has a certain number of points and these points are
distributed amongst the paint events that paint that rectangle.
Args:
paint_events: A list of paint events
viewport: A tuple (width, height) of the window.
Returns:
A dictionary of times of each paint event (in milliseconds) to the
adjusted area that the paint event is worth.
"""
width, height = viewport
fullscreen_area = width * height
def ClippedArea(rectangle):
"""Returns rectangle area clipped to viewport size."""
_, x0, y0, x1, y1 = rectangle
clipped_width = max(0, min(width, x1) - max(0, x0))
clipped_height = max(0, min(height, y1) - max(0, y0))
return clipped_width * clipped_height
grouped = self._GroupEventByRectangle(paint_events)
event_area_dict = collections.defaultdict(int)
for rectangle, events in grouped.items():
# The area points for each rectangle are divided up among the paint
# events in that rectangle.
area = ClippedArea(rectangle)
update_count = len(events)
adjusted_area = float(area) / update_count
# Paint events for the largest-area rectangle are counted as 50%.
if area == fullscreen_area:
adjusted_area /= 2
for event in events:
# The end time for an event is used for that event's time.
event_time = event.end
event_area_dict[event_time] += adjusted_area
return event_area_dict
def _GetRectangle(self, paint_event):
"""Get the specific rectangle on the screen for a paint event.
Each paint event belongs to a frame (as in html <frame> or <iframe>).
This, together with location and dimensions, comprises a rectangle.
In the WPT source, this 'rectangle' is also called a 'region'.
"""
def GetBox(quad):
"""Gets top-left and bottom-right coordinates from paint event.
In the timeline data from devtools, paint rectangle dimensions are
represented x-y coordinates of four corners, clockwise from the top-left.
See: function WebInspector.TimelinePresentationModel.quadFromRectData
in file src/out/Debug/obj/gen/devtools/TimelinePanel.js.
"""
x0, y0, _, _, x1, y1, _, _ = quad
return (x0, y0, x1, y1)
assert paint_event.name == 'Paint'
frame = paint_event.args['frameId']
return (frame,) + GetBox(paint_event.args['data']['clip'])
def _GroupEventByRectangle(self, paint_events):
"""Group all paint events according to the rectangle that they update."""
result = collections.defaultdict(list)
for event in paint_events:
assert event.name == 'Paint'
result[self._GetRectangle(event)].append(event)
return result
|
ActionLuzifer/python-oauth2 | refs/heads/master | setup.py | 7 | #!/usr/bin/env python
from setuptools import setup, find_packages
import os, re
PKG='oauth2'
VERSIONFILE = os.path.join('oauth2', '_version.py')
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
MVSRE = r"^manual_verstr *= *['\"]([^'\"]*)['\"]"
mo = re.search(MVSRE, verstrline, re.M)
if mo:
mverstr = mo.group(1)
else:
print("unable to find version in %s" % (VERSIONFILE,))
raise RuntimeError("if %s exists, it must be well-formed" % (VERSIONFILE,))
AVSRE = r"^auto_build_num *= *['\"]([^'\"]*)['\"]"
mo = re.search(AVSRE, verstrline, re.M)
if mo:
averstr = mo.group(1)
else:
averstr = ''
verstr = '.'.join([mverstr, averstr])
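# Example (added for clarity, values are illustrative): manual_verstr "1.5"
# and auto_build_num "211" give verstr "1.5.211"; if the build number is
# empty, the join leaves a trailing dot ("1.5.").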
setup(name=PKG,
version=verstr,
description="library for OAuth version 1.0",
author="Joe Stump",
author_email="joe@simplegeo.com",
url="http://github.com/simplegeo/python-oauth2",
packages = find_packages(),
install_requires = ['httplib2'],
license = "MIT License",
keywords="oauth",
zip_safe = True,
test_suite="tests",
tests_require=['coverage', 'mock'])
|
ZTH1970/alcide | refs/heads/wip-Django1.7 | alcide/models.py | 1 | # -*- coding: utf-8 -*-
from django.db import models
from django.db.models import fields
from django import forms
from localflavor.fr.forms import FRPhoneNumberField, FRZipCodeField
from django.utils.text import capfirst
class BaseModelMixin(object):
def __repr__(self):
return '<%s %s %r>' % (self.__class__.__name__, self.id, unicode(self))
class PhoneNumberField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 20
super(PhoneNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
default = { 'form_class': FRPhoneNumberField }
default.update(kwargs)
return super(PhoneNumberField, self).formfield(**default)
class ZipCodeField(models.CharField):
def __init__(self, **kwargs):
kwargs['max_length'] = 5
super(ZipCodeField, self).__init__(**kwargs)
def formfield(self, **kwargs):
default = { 'form_class': FRZipCodeField }
default.update(kwargs)
return super(ZipCodeField, self).formfield(**default)
class WeekRankField(models.PositiveIntegerField):
'''Map a list of integers to its encoding as a binary number'''
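# Illustrative note (not part of the original module): a value such as
# [0, 2, 4] is stored as the bitmask (1 << 0) | (1 << 2) | (1 << 4) == 21,
# and to_python(21) recovers the tuple (0, 2, 4); bits correspond to the
# five week ranks 0-4.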
__metaclass__ = models.SubfieldBase
def __init__(self, **kwargs):
kwargs['blank'] = True
kwargs['null'] = True
super(WeekRankField, self).__init__(**kwargs)
def to_python(self, value):
if isinstance(value, list):
if value:
try:
value = map(int, value)
except ValueError:
raise forms.ValidationError('value must be a sequence of values coercible to integers')
if any((i < 0 or i > 4 for i in value)):
raise forms.ValidationError('value must be a list of integers between 0 and 4')
return map(int, set(value))
else:
return None
value = super(WeekRankField, self).to_python(value)
if value is None:
return None
try:
value = int(value)
except ValueError:
raise forms.ValidationError('value must be convertible to an integer')
if value < 0 or value >= 64:
raise forms.ValidationError('value must be between 0 and 63')
return tuple((i for i in range(0, 5) if (1 << i) & value))
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
[self.validate(v, model_instance) for v in value]
self.run_validators(value)
return value
def get_prep_lookup(self, lookup_type, value):
if lookup_type in ('exact', 'in'):
s = set(((1 << v) | i for v in value for i in range(0, 64)))
return s
elif lookup_type == 'range':
value = sorted(value)
return set(((1 << v) | i for v in range(value[0], value[1]) for i in range(0, 64)))
else:
return fields.Field.get_prep_lookup(self, lookup_type, value)
def get_prep_value(self, value):
if value:
x = sum((1 << int(i) for i in value))
return x
else:
return None
def formfield(self, **kwargs):
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name),
'help_text': self.help_text, 'choices': self.get_choices(include_blank=False)}
if self.has_default():
defaults['initial'] = self.get_default()
defaults.update(kwargs)
return forms.MultipleChoiceField(**defaults)
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^alcide\.models\..*Field"])
|
jean/sentry | refs/heads/master | src/sentry/south_migrations/0047_migrate_project_slugs.py | 5 | # encoding: utf-8
import datetime
import six
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from django.template.defaultfilters import slugify
from sentry.db.models import update
for project in orm['sentry.Project'].objects.all():
if project.slug:
continue
base_slug = slugify(project.name)
slug = base_slug
n = 0
while orm['sentry.Project'].objects.filter(slug=slug).exists():
n += 1
slug = base_slug + '-' + six.text_type(n)
update(project, slug=slug)
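# Example of the de-duplication above (added for clarity): a project named
# "My Project" receives the slug "my-project"; if that slug is already
# taken, the loop falls back to "my-project-1", "my-project-2", and so on.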
def backwards(self, orm):
pass
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'views': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.View']",
'symmetrical': 'False',
'blank': 'True'
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingprojectmember': {
'Meta': {
'unique_together': "(('project', 'email'),)",
'object_name': 'PendingProjectMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Project']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'max_length': '50',
'unique': 'True',
'null': 'True',
'db_index': 'True'
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectcountbyminute': {
'Meta': {
'unique_together': "(('project', 'date'),)",
'object_name': 'ProjectCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.projectdomain': {
'Meta': {
'unique_together': "(('project', 'domain'),)",
'object_name': 'ProjectDomain'
},
'domain': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'domain_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.projectmember': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'ProjectMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_project_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {
'unique_together': "(('project', 'group'),)",
'object_name': 'SearchDocument'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_changed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
})
},
'sentry.searchtoken': {
'Meta': {
'unique_together': "(('document', 'field', 'token'),)",
'object_name': 'SearchToken'
},
'document': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'token_set'",
'to': "orm['sentry.SearchDocument']"
}
),
'field':
('django.db.models.fields.CharField', [], {
'default': "'text'",
'max_length': '64'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'token': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50',
'db_index': 'True'
}
)
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'team':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.view': {
'Meta': {
'object_name': 'View'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '100'
}),
'verbose_name':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
}),
'verbose_name_plural':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
})
}
}
complete_apps = ['sentry']
|
sudheesh001/oh-mainline | refs/heads/master | vendor/packages/docutils/test/DocutilsTestSupport.py | 18 | # $Id: DocutilsTestSupport.py 7539 2012-11-26 13:50:06Z milde $
# Authors: David Goodger <goodger@python.org>;
# Garth Kidd <garth@deadlybloodyserious.com>
# Copyright: This module has been placed in the public domain.
"""
Exports the following:
:Modules:
- `statemachine` is 'docutils.statemachine'
- `nodes` is 'docutils.nodes'
- `urischemes` is 'docutils.utils.urischemes'
- `utils` is 'docutils.utils'
- `transforms` is 'docutils.transforms'
- `states` is 'docutils.parsers.rst.states'
- `tableparser` is 'docutils.parsers.rst.tableparser'
:Classes:
- `StandardTestCase`
- `CustomTestCase`
- `CustomTestSuite`
- `TransformTestCase`
- `TransformTestSuite`
- `ParserTestCase`
- `ParserTestSuite`
- `ParserTransformTestCase`
- `PEPParserTestCase`
- `PEPParserTestSuite`
- `GridTableParserTestCase`
- `GridTableParserTestSuite`
- `SimpleTableParserTestCase`
- `SimpleTableParserTestSuite`
- `WriterPublishTestCase`
- `LatexWriterPublishTestCase`
- `PseudoXMLWriterPublishTestCase`
- `HtmlWriterPublishTestCase`
- `PublishTestSuite`
- `HtmlFragmentTestSuite`
- `DevNull` (output sink)
"""
__docformat__ = 'reStructuredText'
import sys
import os
import unittest
import re
import inspect
import traceback
from pprint import pformat
testroot = os.path.abspath(os.path.dirname(__file__) or os.curdir)
os.chdir(testroot)
if sys.version_info >= (3,0):
sys.path.insert(0, os.path.normpath(os.path.join(testroot,
'..', 'build', 'lib')))
sys.path.append(os.path.normpath(os.path.join(testroot, '..',
'build', 'lib', 'extras')))
else:
sys.path.insert(0, os.path.normpath(os.path.join(testroot, '..')))
sys.path.append(os.path.normpath(os.path.join(testroot, '..', 'extras')))
sys.path.insert(0, testroot)
try:
import difflib
import package_unittest
import docutils
import docutils.core
from docutils import frontend, nodes, statemachine, utils
from docutils.utils import urischemes
from docutils.transforms import universal
from docutils.parsers import rst
from docutils.parsers.rst import states, tableparser, roles, languages
from docutils.readers import standalone, pep
from docutils.statemachine import StringList, string2lines
from docutils._compat import bytes
except ImportError:
# The importing module (usually __init__.py in one of the
# subdirectories) may catch ImportErrors in order to detect the
# absence of DocutilsTestSupport in sys.path. Thus, ImportErrors
# resulting from problems with importing Docutils modules must be
# caught here.
traceback.print_exc()
sys.exit(1)
try:
import mypdb as pdb
except:
import pdb
# Hack to make repr(StringList) look like repr(list):
StringList.__repr__ = StringList.__str__
class DevNull:
"""Output sink."""
def write(self, string):
pass
def close(self):
pass
class StandardTestCase(unittest.TestCase):
"""
Helper class, providing the same interface as unittest.TestCase,
but with useful setUp and comparison methods.
"""
def setUp(self):
os.chdir(testroot)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
if not first == second:
raise self.failureException, (
msg or '%s != %s' % _format_str(first, second))
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if first == second:
raise self.failureException, (
msg or '%s == %s' % _format_str(first, second))
# assertIn and assertNotIn: new in Python 2.7:
def assertIn(self, a, b, msg=None):
if a not in b:
raise self.failureException, (
msg or '%s not in %s' % _format_str(a, b))
def assertNotIn(self, a, b, msg=None):
if a in b:
raise self.failureException, (
msg or '%s in %s' % _format_str(a, b))
# aliases for assertion methods, deprecated since Python 2.7
failUnlessEqual = assertEquals = assertEqual
assertNotEquals = failIfEqual = assertNotEqual
class CustomTestCase(StandardTestCase):
"""
Helper class, providing extended functionality over unittest.TestCase.
The methods assertEqual and assertNotEqual have been overwritten
to provide better support for multi-line strings. Furthermore,
see the compare_output method and the parameter list of __init__.
"""
compare = difflib.Differ().compare
"""Comparison method shared by all subclasses."""
def __init__(self, method_name, input, expected, id,
run_in_debugger=True, suite_settings=None):
"""
Initialise the CustomTestCase.
Arguments:
method_name -- name of test method to run.
input -- input to the parser.
expected -- expected output from the parser.
id -- unique test identifier, used by the test framework.
run_in_debugger -- if true, run this test under the pdb debugger.
suite_settings -- settings overrides for this test suite.
"""
self.id = id
self.input = input
self.expected = expected
self.run_in_debugger = run_in_debugger
self.suite_settings = suite_settings.copy() if suite_settings else {}
# Ring your mother.
unittest.TestCase.__init__(self, method_name)
def __str__(self):
"""
Return string conversion. Overridden to give test id, in addition to
method name.
"""
return '%s; %s' % (self.id, unittest.TestCase.__str__(self))
def __repr__(self):
return "<%s %s>" % (self.id, unittest.TestCase.__repr__(self))
def clear_roles(self):
# Language-specific roles and roles added by the
# "default-role" and "role" directives are currently stored
# globally in the roles._roles dictionary. This workaround
# empties that dictionary.
roles._roles = {}
def setUp(self):
StandardTestCase.setUp(self)
self.clear_roles()
def compare_output(self, input, output, expected):
"""`input`, `output`, and `expected` should all be strings."""
if isinstance(input, unicode):
input = input.encode('raw_unicode_escape')
if sys.version_info > (3,):
# API difference: Python 3's node.__str__ doesn't escape
#assert expected is None or isinstance(expected, unicode)
if isinstance(expected, bytes):
expected = expected.decode('utf-8')
if isinstance(output, bytes):
output = output.decode('utf-8')
else:
if isinstance(expected, unicode):
expected = expected.encode('raw_unicode_escape')
if isinstance(output, unicode):
output = output.encode('raw_unicode_escape')
# Normalize line endings:
if expected:
expected = '\n'.join(expected.splitlines())
if output:
output = '\n'.join(output.splitlines())
try:
self.assertEqual(output, expected)
except AssertionError, error:
print >>sys.stderr, '\n%s\ninput:' % (self,)
print >>sys.stderr, input
try:
comparison = ''.join(self.compare(expected.splitlines(1),
output.splitlines(1)))
print >>sys.stderr, '-: expected\n+: output'
print >>sys.stderr, comparison
except AttributeError: # expected or output not a string
# alternative output for non-strings:
print >>sys.stderr, 'expected: %r' % expected
print >>sys.stderr, 'output: %r' % output
raise error
class CustomTestSuite(unittest.TestSuite):
"""
A collection of CustomTestCases.
Provides test suite ID generation and a method for adding test cases.
"""
id = ''
"""Identifier for the TestSuite. Prepended to the
TestCase identifiers to make identification easier."""
next_test_case_id = 0
"""The next identifier to use for non-identified test cases."""
def __init__(self, tests=(), id=None, suite_settings=None):
"""
Initialize the CustomTestSuite.
Arguments:
id -- identifier for the suite, prepended to test cases.
suite_settings -- settings overrides for this test suite.
"""
unittest.TestSuite.__init__(self, tests)
self.suite_settings = suite_settings or {}
if id is None:
mypath = os.path.abspath(
sys.modules[CustomTestSuite.__module__].__file__)
outerframes = inspect.getouterframes(inspect.currentframe())
for outerframe in outerframes[1:]:
if outerframe[3] != '__init__':
callerpath = outerframe[1]
if callerpath is None:
# It happens sometimes. Why is a mystery.
callerpath = os.getcwd()
callerpath = os.path.abspath(callerpath)
break
mydir, myname = os.path.split(mypath)
if not mydir:
mydir = os.curdir
if callerpath.startswith(mydir):
self.id = callerpath[len(mydir) + 1:] # caller's module
else:
self.id = callerpath
else:
self.id = id
def addTestCase(self, test_case_class, method_name, input, expected,
id=None, run_in_debugger=False, **kwargs):
"""
Create a CustomTestCase in the CustomTestSuite.
Also return it, just in case.
Arguments:
test_case_class -- the CustomTestCase to add
method_name -- a string; CustomTestCase.method_name is the test
input -- input to the parser.
expected -- expected output from the parser.
id -- unique test identifier, used by the test framework.
run_in_debugger -- if true, run this test under the pdb debugger.
"""
if id is None: # generate id if required
id = self.next_test_case_id
self.next_test_case_id += 1
# test identifier will become suiteid.testid
tcid = '%s: %s' % (self.id, id)
# suite_settings may be passed as a parameter;
# if not, set from attribute:
kwargs.setdefault('suite_settings', self.suite_settings)
# generate and add test case
tc = test_case_class(method_name, input, expected, tcid,
run_in_debugger=run_in_debugger, **kwargs)
self.addTest(tc)
return tc
def generate_no_tests(self, *args, **kwargs):
pass
class TransformTestCase(CustomTestCase):
"""
Output checker for the transform.
Should probably be called TransformOutputChecker, but I can deal with
that later when/if someone comes up with a category of transform test
cases that have nothing to do with the input and output of the transform.
"""
option_parser = frontend.OptionParser(components=(rst.Parser,))
settings = option_parser.get_default_values()
settings.report_level = 1
settings.halt_level = 5
settings.debug = package_unittest.debug
settings.warning_stream = DevNull()
unknown_reference_resolvers = ()
def __init__(self, *args, **kwargs):
self.transforms = kwargs['transforms']
"""List of transforms to perform for this test case."""
self.parser = kwargs['parser']
"""Input parser for this test case."""
del kwargs['transforms'], kwargs['parser'] # only wanted here
CustomTestCase.__init__(self, *args, **kwargs)
def supports(self, format):
return 1
def test_transforms(self):
if self.run_in_debugger:
pdb.set_trace()
settings = self.settings.copy()
settings.__dict__.update(self.suite_settings)
document = utils.new_document('test data', settings)
self.parser.parse(self.input, document)
# Don't do a ``populate_from_components()`` because that would
# enable the Transformer's default transforms.
document.transformer.add_transforms(self.transforms)
document.transformer.add_transform(universal.TestMessages)
document.transformer.components['writer'] = self
document.transformer.apply_transforms()
output = document.pformat()
self.compare_output(self.input, output, self.expected)
def test_transforms_verbosely(self):
if self.run_in_debugger:
pdb.set_trace()
print '\n', self.id
print '-' * 70
print self.input
settings = self.settings.copy()
settings.__dict__.update(self.suite_settings)
document = utils.new_document('test data', settings)
self.parser.parse(self.input, document)
print '-' * 70
print document.pformat()
for transformClass in self.transforms:
transformClass(document).apply()
output = document.pformat()
print '-' * 70
print output
self.compare_output(self.input, output, self.expected)
class TransformTestSuite(CustomTestSuite):
"""
A collection of TransformTestCases.
A TransformTestSuite instance manufactures TransformTestCases,
keeps track of them, and provides a shared test fixture (a-la
setUp and tearDown).
"""
def __init__(self, parser, suite_settings=None):
self.parser = parser
"""Parser shared by all test cases."""
CustomTestSuite.__init__(self, suite_settings=suite_settings)
def generateTests(self, dict, dictname='totest',
testmethod='test_transforms'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type's name) maps to a tuple, whose
first item is a list of transform classes and whose second
item is a list of tests. Each test is a list: input, expected
output, optional modifier. The optional third entry, a
behavior modifier, can be 0 (temporarily disable this test) or
1 (run this test under the pdb debugger). Tests should be
self-documenting and not require external comments.
"""
for name, (transforms, cases) in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case)==3:
# TODO: (maybe) change the 3rd argument to a dict, so it
# can handle more cases by keyword ('disable', 'debug',
# 'settings'), here and in other generateTests methods.
# But there's also the method that
# HtmlPublishPartsTestSuite uses <DJG>
if case[2]:
run_in_debugger = True
else:
continue
self.addTestCase(
TransformTestCase, testmethod,
transforms=transforms, parser=self.parser,
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class ParserTestCase(CustomTestCase):
"""
Output checker for the parser.
Should probably be called ParserOutputChecker, but I can deal with
that later when/if someone comes up with a category of parser test
cases that have nothing to do with the input and output of the parser.
"""
parser = rst.Parser()
"""Parser shared by all ParserTestCases."""
option_parser = frontend.OptionParser(components=(rst.Parser,))
settings = option_parser.get_default_values()
settings.report_level = 5
settings.halt_level = 5
settings.debug = package_unittest.debug
def test_parser(self):
if self.run_in_debugger:
pdb.set_trace()
settings = self.settings.copy()
settings.__dict__.update(self.suite_settings)
document = utils.new_document('test data', settings)
self.parser.parse(self.input, document)
output = document.pformat()
self.compare_output(self.input, output, self.expected)
class ParserTestSuite(CustomTestSuite):
"""
A collection of ParserTestCases.
A ParserTestSuite instance manufactures ParserTestCases,
keeps track of them, and provides a shared test fixture (a-la
setUp and tearDown).
"""
test_case_class = ParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: input, expected output, optional modifier. The
optional third entry, a behavior modifier, can be 0 (temporarily
disable this test) or 1 (run this test under the pdb debugger). Tests
should be self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case)==3:
if case[2]:
run_in_debugger = True
else:
continue
self.addTestCase(
self.test_case_class, 'test_parser',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class PEPParserTestCase(ParserTestCase):
"""PEP-specific parser test case."""
parser = rst.Parser(rfc2822=True, inliner=rst.states.Inliner())
"""Parser shared by all PEPParserTestCases."""
option_parser = frontend.OptionParser(components=(rst.Parser, pep.Reader))
settings = option_parser.get_default_values()
settings.report_level = 5
settings.halt_level = 5
settings.debug = package_unittest.debug
class PEPParserTestSuite(ParserTestSuite):
"""A collection of PEPParserTestCases."""
test_case_class = PEPParserTestCase
class GridTableParserTestCase(CustomTestCase):
parser = tableparser.GridTableParser()
def test_parse_table(self):
self.parser.setup(StringList(string2lines(self.input), 'test data'))
try:
self.parser.find_head_body_sep()
self.parser.parse_table()
output = self.parser.cells
except Exception, details:
output = '%s: %s' % (details.__class__.__name__, details)
self.compare_output(self.input, pformat(output) + '\n',
pformat(self.expected) + '\n')
def test_parse(self):
try:
output = self.parser.parse(StringList(string2lines(self.input),
'test data'))
except Exception, details:
output = '%s: %s' % (details.__class__.__name__, details)
self.compare_output(self.input, pformat(output) + '\n',
pformat(self.expected) + '\n')
class GridTableParserTestSuite(CustomTestSuite):
"""
A collection of GridTableParserTestCases.
A GridTableParserTestSuite instance manufactures GridTableParserTestCases,
keeps track of them, and provides a shared test fixture (a-la setUp and
tearDown).
"""
test_case_class = GridTableParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: an input table, expected output from parse_table(),
expected output from parse(), optional modifier. The optional fourth
entry, a behavior modifier, can be 0 (temporarily disable this test)
or 1 (run this test under the pdb debugger). Tests should be
self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case) == 4:
if case[-1]:
run_in_debugger = True
else:
continue
self.addTestCase(self.test_case_class, 'test_parse_table',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
self.addTestCase(self.test_case_class, 'test_parse',
input=case[0], expected=case[2],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class SimpleTableParserTestCase(GridTableParserTestCase):
parser = tableparser.SimpleTableParser()
class SimpleTableParserTestSuite(CustomTestSuite):
"""
A collection of SimpleTableParserTestCases.
"""
test_case_class = SimpleTableParserTestCase
def generateTests(self, dict, dictname='totest'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type name) maps to a list of tests. Each
test is a list: an input table, expected output from parse(), optional
modifier. The optional third entry, a behavior modifier, can be 0
(temporarily disable this test) or 1 (run this test under the pdb
debugger). Tests should be self-documenting and not require external
comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case) == 3:
if case[-1]:
run_in_debugger = True
else:
continue
self.addTestCase(self.test_case_class, 'test_parse',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class PythonModuleParserTestCase(CustomTestCase):
def test_parser(self):
if self.run_in_debugger:
pdb.set_trace()
try:
import compiler
except ImportError:
# skip on Python 3
return
from docutils.readers.python import moduleparser
module = moduleparser.parse_module(self.input, 'test data').pformat()
output = str(module)
self.compare_output(self.input, output, self.expected)
def test_token_parser_rhs(self):
if self.run_in_debugger:
pdb.set_trace()
try:
import compiler
except ImportError:
# skip on Python 3
return
from docutils.readers.python import moduleparser
tr = moduleparser.TokenParser(self.input)
output = tr.rhs(1)
self.compare_output(self.input, output, self.expected)
class PythonModuleParserTestSuite(CustomTestSuite):
"""
    A collection of PythonModuleParserTestCases.
"""
def generateTests(self, dict, dictname='totest',
testmethod='test_parser'):
"""
Stock the suite with test cases generated from a test data dictionary.
Each dictionary key (test type's name) maps to a list of tests. Each
test is a list: input, expected output, optional modifier. The
optional third entry, a behavior modifier, can be 0 (temporarily
disable this test) or 1 (run this test under the pdb debugger). Tests
should be self-documenting and not require external comments.
"""
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case)==3:
if case[2]:
run_in_debugger = True
else:
continue
self.addTestCase(
PythonModuleParserTestCase, testmethod,
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger)
class WriterPublishTestCase(CustomTestCase, docutils.SettingsSpec):
"""
Test case for publish.
"""
settings_default_overrides = {'_disable_config': True,
'strict_visitor': True}
writer_name = '' # set in subclasses or constructor
def __init__(self, *args, **kwargs):
if 'writer_name' in kwargs:
self.writer_name = kwargs['writer_name']
del kwargs['writer_name']
CustomTestCase.__init__(self, *args, **kwargs)
def test_publish(self):
if self.run_in_debugger:
pdb.set_trace()
output = docutils.core.publish_string(
source=self.input,
reader_name='standalone',
parser_name='restructuredtext',
writer_name=self.writer_name,
settings_spec=self,
settings_overrides=self.suite_settings)
self.compare_output(self.input, output, self.expected)
class PublishTestSuite(CustomTestSuite):
def __init__(self, writer_name, suite_settings=None):
"""
`writer_name` is the name of the writer to use.
"""
CustomTestSuite.__init__(self, suite_settings=suite_settings)
self.test_class = WriterPublishTestCase
self.writer_name = writer_name
def generateTests(self, dict, dictname='totest'):
for name, cases in dict.items():
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case)==3:
if case[2]:
run_in_debugger = True
else:
continue
self.addTestCase(
self.test_class, 'test_publish',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger,
# Passed to constructor of self.test_class:
writer_name=self.writer_name)
class HtmlPublishPartsTestSuite(CustomTestSuite):
def generateTests(self, dict, dictname='totest'):
for name, (settings_overrides, cases) in dict.items():
settings = self.suite_settings.copy()
settings.update(settings_overrides)
for casenum in range(len(cases)):
case = cases[casenum]
run_in_debugger = False
if len(case)==3:
if case[2]:
run_in_debugger = True
else:
continue
self.addTestCase(
HtmlWriterPublishPartsTestCase, 'test_publish',
input=case[0], expected=case[1],
id='%s[%r][%s]' % (dictname, name, casenum),
run_in_debugger=run_in_debugger,
suite_settings=settings)
class HtmlWriterPublishPartsTestCase(WriterPublishTestCase):
"""
Test case for HTML writer via the publish_parts interface.
"""
writer_name = 'html'
settings_default_overrides = \
WriterPublishTestCase.settings_default_overrides.copy()
settings_default_overrides['stylesheet'] = ''
def test_publish(self):
if self.run_in_debugger:
pdb.set_trace()
parts = docutils.core.publish_parts(
source=self.input,
reader_name='standalone',
parser_name='restructuredtext',
writer_name=self.writer_name,
settings_spec=self,
settings_overrides=self.suite_settings)
output = self.format_output(parts)
# interpolate standard variables:
expected = self.expected % {'version': docutils.__version__}
self.compare_output(self.input, output, expected)
standard_content_type_template = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
standard_generator_template = (
'<meta name="generator"'
' content="Docutils %s: http://docutils.sourceforge.net/" />\n')
standard_html_meta_value = (
standard_content_type_template
+ standard_generator_template % docutils.__version__)
standard_meta_value = standard_html_meta_value % 'utf-8'
standard_html_prolog = """\
<?xml version="1.0" encoding="%s" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
"""
def format_output(self, parts):
"""Minimize & standardize the output."""
# remove redundant parts & uninteresting parts:
del parts['whole']
assert parts['body'] == parts['fragment']
del parts['body']
del parts['body_pre_docinfo']
del parts['body_prefix']
del parts['body_suffix']
del parts['head']
del parts['head_prefix']
del parts['encoding']
del parts['version']
# remove standard portions:
parts['meta'] = parts['meta'].replace(self.standard_meta_value, '')
parts['html_head'] = parts['html_head'].replace(
self.standard_html_meta_value, '...')
parts['html_prolog'] = parts['html_prolog'].replace(
self.standard_html_prolog, '')
# remove empty values:
for key in parts.keys():
if not parts[key]:
del parts[key]
# standard output format:
keys = parts.keys()
keys.sort()
output = []
for key in keys:
output.append("%r: '''%s'''"
% (key, parts[key]))
if output[-1].endswith("\n'''"):
output[-1] = output[-1][:-4] + "\\n'''"
return '{' + ',\n '.join(output) + '}\n'
def exception_data(func, *args, **kwds):
"""
Execute `func(*args, **kwds)` and return the resulting exception, the
exception arguments, and the formatted exception string.
"""
try:
func(*args, **kwds)
except Exception, detail:
return (detail, detail.args,
'%s: %s' % (detail.__class__.__name__, detail))
def _format_str(*args):
r"""
Return a tuple containing representations of all args.
Same as map(repr, args) except that it returns multi-line
representations for strings containing newlines, e.g.::
'''\
foo \n\
bar
baz'''
instead of::
'foo \nbar\n\nbaz'
This is a helper function for CustomTestCase.
"""
return_tuple = []
for i in args:
r = repr(i)
if ( (isinstance(i, bytes) or isinstance(i, unicode))
and '\n' in i):
stripped = ''
if isinstance(i, unicode) and r.startswith('u'):
stripped = r[0]
r = r[1:]
elif isinstance(i, bytes) and r.startswith('b'):
stripped = r[0]
r = r[1:]
# quote_char = "'" or '"'
quote_char = r[0]
assert quote_char in ("'", '"'), quote_char
assert r[0] == r[-1]
r = r[1:-1]
r = (stripped + 3 * quote_char + '\\\n' +
re.sub(r'(?<!\\)((\\\\)*)\\n', r'\1\n', r) +
3 * quote_char)
r = re.sub(r' \n', r' \\n\\\n', r)
return_tuple.append(r)
return tuple(return_tuple)
|
Thor77/youtube-dl | refs/heads/master | youtube_dl/extractor/dctp.py | 17 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
class DctpTvIE(InfoExtractor):
_VALID_URL = r'https?://www.dctp.tv/(#/)?filme/(?P<id>.+?)/$'
_TEST = {
'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
'info_dict': {
'id': '1324',
'display_id': 'videoinstallation-fuer-eine-kaufhausfassade',
'ext': 'flv',
'title': 'Videoinstallation für eine Kaufhausfassade'
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
base_url = 'http://dctp-ivms2-restapi.s3.amazonaws.com/'
version_json = self._download_json(
base_url + 'version.json',
video_id, note='Determining file version')
version = version_json['version_name']
info_json = self._download_json(
'{0}{1}/restapi/slugs/{2}.json'.format(base_url, version, video_id),
video_id, note='Fetching object ID')
object_id = compat_str(info_json['object_id'])
meta_json = self._download_json(
'{0}{1}/restapi/media/{2}.json'.format(base_url, version, object_id),
video_id, note='Downloading metadata')
uuid = meta_json['uuid']
title = meta_json['title']
wide = meta_json['is_wide']
if wide:
ratio = '16x9'
else:
ratio = '4x3'
play_path = 'mp4:{0}_dctp_0500_{1}.m4v'.format(uuid, ratio)
servers_json = self._download_json(
'http://www.dctp.tv/streaming_servers/',
video_id, note='Downloading server list')
url = servers_json[0]['endpoint']
return {
'id': object_id,
'title': title,
'format': 'rtmp',
'url': url,
'play_path': play_path,
'rtmp_real_time': True,
'ext': 'flv',
'display_id': video_id
}
|
googleapis/googleapis-gen | refs/heads/master | google/cloud/gsuiteaddons/v1/google-cloud-workspace-add-ons-v1-py/tests/unit/gapic/__init__.py | 951 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
Peddle/hue | refs/heads/master | desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml3/reader.py | 272 | # This module contains abstractions for the input stream. You don't have to
# looks further, there are no pretty code.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(length=1) - return the next `length` characters
# reader.forward(length=1) - move the current position forward by `length` characters.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, bytes):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `bytes` object,
# - a `str` object,
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, str):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+'\0'
elif isinstance(stream, bytes):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in '\n\x85\u2028\u2029' \
or (ch == '\r' and self.buffer[self.pointer] != '\n'):
self.line += 1
self.column = 0
elif ch != '\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
if self.stream is not None:
position = self.stream_pointer-len(self.raw_buffer)+exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
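# Illustrative usage sketch (not part of the original module): Reader accepts
# str, bytes, or a file-like object and exposes peek()/prefix()/forward()/
# get_mark() as described in the header comment.  The sample data below is
# hypothetical.
def _reader_usage_sketch():
    data = "key: value\nother: 1\n"
    reader = Reader(data)
    first = reader.peek()        # look at the current character ('k')
    ahead = reader.prefix(4)     # look ahead without consuming ('key:')
    reader.forward(4)            # consume four characters
    mark = reader.get_mark()     # line/column bookkeeping for error messages
    return first, ahead, mark.line, mark.column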
|
beacloudgenius/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/draft_and_published.py | 116 | """
This module provides an abstraction for Module Stores that support Draft and Published branches.
"""
import threading
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from . import ModuleStoreEnum
# Things w/ these categories should never be marked as version=DRAFT
DIRECT_ONLY_CATEGORIES = ['course', 'chapter', 'sequential', 'about', 'static_tab', 'course_info']
class BranchSettingMixin(object):
"""
A mixin to manage a module store's branch setting.
The order of override is (from higher precedence to lower):
1. thread-specific setting temporarily set using the branch_setting contextmanager
2. the return value of the branch_setting_func passed into this mixin's init method
3. the default branch setting being ModuleStoreEnum.Branch.published_only
"""
def __init__(self, *args, **kwargs):
"""
:param branch_setting_func: a function that returns the default branch setting for this object.
If not specified, ModuleStoreEnum.Branch.published_only is used as the default setting.
"""
self.default_branch_setting_func = kwargs.pop(
'branch_setting_func',
lambda: ModuleStoreEnum.Branch.published_only
)
super(BranchSettingMixin, self).__init__(*args, **kwargs)
# cache the branch setting on a local thread to support a multi-threaded environment
self.thread_cache = threading.local()
@contextmanager
def branch_setting(self, branch_setting, course_id=None): # pylint: disable=unused-argument
"""
A context manager for temporarily setting a store's branch value on the current thread.
"""
previous_thread_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
try:
self.thread_cache.branch_setting = branch_setting
yield
finally:
self.thread_cache.branch_setting = previous_thread_branch_setting
def get_branch_setting(self, course_id=None): # pylint: disable=unused-argument
"""
Returns the current branch_setting on the store.
Returns the thread-local setting, if set.
Otherwise, returns the default value of the setting function set during the store's initialization.
"""
# first check the thread-local cache
thread_local_branch_setting = getattr(self.thread_cache, 'branch_setting', None)
if thread_local_branch_setting:
return thread_local_branch_setting
else:
# return the default value
return self.default_branch_setting_func()
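# Illustrative sketch (not part of the original module): how a store that mixes
# in BranchSettingMixin might be driven.  `store` and `course_key` here are
# hypothetical; the precedence is the one documented above -- the thread-local
# value set by branch_setting() wins over the default setting function.
def _branch_setting_usage_sketch(store, course_key):
    # Outside the context manager the store falls back to its default
    # (ModuleStoreEnum.Branch.published_only unless branch_setting_func says otherwise).
    default_branch = store.get_branch_setting(course_key)
    # Temporarily prefer drafts for the duration of the block, on this thread only.
    with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
        draft_branch = store.get_branch_setting(course_key)
    return default_branch, draft_branch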
class ModuleStoreDraftAndPublished(BranchSettingMixin):
"""
A mixin for a read-write database backend that supports two branches, Draft and Published, with
options to prefer Draft and fallback to Published.
"""
__metaclass__ = ABCMeta
@abstractmethod
def delete_item(self, location, user_id, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def get_parent_location(self, location, revision=None, **kwargs):
raise NotImplementedError
@abstractmethod
def has_changes(self, xblock):
raise NotImplementedError
@abstractmethod
def publish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def unpublish(self, location, user_id):
raise NotImplementedError
@abstractmethod
def revert_to_published(self, location, user_id):
raise NotImplementedError
@abstractmethod
def has_published_version(self, xblock):
raise NotImplementedError
@abstractmethod
def convert_to_draft(self, location, user_id):
raise NotImplementedError
@abstractmethod
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
Import the given xblock into the current branch setting: import completely overwrites any
existing block of the same id.
In ModuleStoreDraftAndPublished, importing a published block ensures that access from the draft
will get a block (either the one imported or a preexisting one). See xml_importer
"""
raise NotImplementedError
class UnsupportedRevisionError(ValueError):
"""
This error is raised if a method is called with an unsupported revision parameter.
"""
def __init__(self, allowed_revisions=None):
if not allowed_revisions:
allowed_revisions = [
None,
ModuleStoreEnum.RevisionOption.published_only,
ModuleStoreEnum.RevisionOption.draft_only
]
super(UnsupportedRevisionError, self).__init__('revision not one of {}'.format(allowed_revisions))
|
uchida/selenium | refs/heads/master | py/selenium/webdriver/remote/errorhandler.py | 5 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import ElementNotSelectableException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import InvalidCookieDomainException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import InvalidSelectorException
from selenium.common.exceptions import ImeNotAvailableException
from selenium.common.exceptions import ImeActivationFailedException
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import NoSuchWindowException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import UnableToSetCookieException
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import ErrorInResponseException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import MoveTargetOutOfBoundsException
try:
basestring
except NameError: # Python 3.x
basestring = str
class ErrorCode(object):
"""
Error codes defined in the WebDriver wire protocol.
"""
# Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h
SUCCESS = 0
NO_SUCH_ELEMENT = [7, 'no such element']
NO_SUCH_FRAME = [8, 'no such frame']
UNKNOWN_COMMAND = [9, 'unknown command']
STALE_ELEMENT_REFERENCE = [10, 'stale element reference']
ELEMENT_NOT_VISIBLE = [11, 'element not visible']
INVALID_ELEMENT_STATE = [12, 'invalid element state']
UNKNOWN_ERROR = [13, 'unknown error']
ELEMENT_IS_NOT_SELECTABLE = [15, 'element not selectable']
JAVASCRIPT_ERROR = [17, 'javascript error']
XPATH_LOOKUP_ERROR = [19, 'invalid selector']
TIMEOUT = [21, 'timeout']
NO_SUCH_WINDOW = [23, 'no such window']
INVALID_COOKIE_DOMAIN = [24, 'invalid cookie domain']
UNABLE_TO_SET_COOKIE = [25, 'unable to set cookie']
UNEXPECTED_ALERT_OPEN = [26, 'unexpected alert open']
NO_ALERT_OPEN = [27, 'no such alert']
SCRIPT_TIMEOUT = [28, 'script timeout']
INVALID_ELEMENT_COORDINATES = [29, 'invalid element coordinates']
IME_NOT_AVAILABLE = [30, 'ime not available']
IME_ENGINE_ACTIVATION_FAILED = [31, 'ime engine activation failed']
INVALID_SELECTOR = [32, 'invalid selector']
MOVE_TARGET_OUT_OF_BOUNDS = [34, 'move target out of bounds']
INVALID_XPATH_SELECTOR = [51, 'invalid selector']
INVALID_XPATH_SELECTOR_RETURN_TYPER = [52, 'invalid selector']
METHOD_NOT_ALLOWED = [405, 'unsupported operation']
class ErrorHandler(object):
"""
Handles errors returned by the WebDriver server.
"""
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
        :Raises: The exception class matching the response status, if the
           response contains an error message.
"""
status = response.get('status', None)
if status is None or status == ErrorCode.SUCCESS:
return
value = None
message = response.get("message", "")
screen = response.get("screen", "")
stacktrace = None
if isinstance(status, int):
value_json = response.get('value', None)
if value_json and isinstance(value_json, basestring):
import json
try:
value = json.loads(value_json)
status = value.get('error', None)
if status is None:
status = value["status"]
message = value["value"]
if not isinstance(message, basestring):
value = message
try:
message = message['message']
except TypeError:
message = None
else:
message = value.get('message', None)
except ValueError:
pass
exception_class = ErrorInResponseException
if status in ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status in ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status in ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status in ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status in ErrorCode.INVALID_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectorException
elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = WebDriverException
elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = WebDriverException
elif status in ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
exception_class = UnexpectedAlertPresentException
elif status in ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status in ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
exception_class = ImeActivationFailedException
elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
exception_class = MoveTargetOutOfBoundsException
else:
exception_class = WebDriverException
if value == '' or value is None:
value = response['value']
if isinstance(value, basestring):
if exception_class == ErrorInResponseException:
raise exception_class(response, value)
raise exception_class(value)
if message == "" and 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
stacktrace = []
try:
for frame in value['stackTrace']:
line = self._value_or_default(frame, 'lineNumber', '')
file = self._value_or_default(frame, 'fileName', '<anonymous>')
if line:
file = "%s:%s" % (file, line)
meth = self._value_or_default(frame, 'methodName', '<anonymous>')
if 'className' in frame:
meth = "%s.%s" % (frame['className'], meth)
msg = " at %s (%s)"
msg = msg % (meth, file)
stacktrace.append(msg)
except TypeError:
pass
if exception_class == ErrorInResponseException:
raise exception_class(response, message)
elif exception_class == UnexpectedAlertPresentException and 'alert' in value:
raise exception_class(message, screen, stacktrace, value['alert'].get('text'))
raise exception_class(message, screen, stacktrace)
def _value_or_default(self, obj, key, default):
return obj[key] if key in obj else default
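# Illustrative sketch (not part of the original module): ErrorHandler maps the
# numeric `status` in a wire-protocol response onto the exception classes
# imported above.  The response dictionaries below are hypothetical example data.
def _error_handler_usage_sketch():
    handler = ErrorHandler()
    # A successful response (status 0) passes through silently.
    handler.check_response({'status': ErrorCode.SUCCESS, 'value': None})
    # A failing response raises the matching exception class.
    try:
        handler.check_response({
            'status': ErrorCode.NO_SUCH_ELEMENT[0],
            'value': {'message': 'Unable to locate element: #missing'},
        })
    except NoSuchElementException as exc:
        return exc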
|
inspyration/django-gluon | refs/heads/master | gluon/saas/apps.py | 1 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SaasConfig(AppConfig):
name = "saas"
verbose_name = _("Gluon - 02 - SAAS management")
|
arbrandes/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/xml_importer.py | 3 | """
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import json
import logging
import mimetypes
import os
import re
from abc import abstractmethod
import xblock
from django.utils.translation import ugettext as _
from lxml import etree
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.locator import LibraryLocator
from path import Path as path
from xblock.core import XBlockMixin
from xblock.fields import Reference, ReferenceList, ReferenceValueDict, Scope
from xblock.runtime import DictKeyValueStore, KvsFieldData
from common.djangoapps.util.monitoring import monitor_import_failure
from xmodule.assetstore import AssetMetadata
from xmodule.contentstore.content import StaticContent
from xmodule.errortracker import make_error_tracker
from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
from xmodule.modulestore.xml import ImportSystem, LibraryXMLModuleStore, XMLModuleStore
from xmodule.tabs import CourseTabList
from xmodule.util.misc import escape_invalid_characters
from xmodule.x_module import XModuleDescriptor, XModuleMixin
from .inheritance import own_metadata
from .store_utilities import rewrite_nonportable_content_links
log = logging.getLogger(__name__)
DEFAULT_STATIC_CONTENT_SUBDIR = 'static'
class CourseImportException(Exception):
"""
Base exception class for course import workflows.
"""
def __init__(self):
super().__init__(self.description) # pylint: disable=no-member
class ErrorReadingFileException(CourseImportException):
"""
Raised when error occurs while trying to read a file.
"""
MESSAGE_TEMPLATE = _('Error while reading {}. Check file for XML errors.')
def __init__(self, filename, **kwargs):
self.description = self.MESSAGE_TEMPLATE.format(filename)
super().__init__(**kwargs)
class ModuleFailedToImport(CourseImportException):
"""
Raised when a module is failed to import.
"""
MESSAGE_TEMPLATE = _('Failed to import module: {} at location: {}')
def __init__(self, display_name, location, **kwargs):
self.description = self.MESSAGE_TEMPLATE.format(display_name, location)
super().__init__(**kwargs)
class LocationMixin(XBlockMixin):
"""
Adds a `location` property to an :class:`XBlock` so it is more compatible
with old-style :class:`XModule` API. This is a simplified version of
:class:`XModuleMixin`.
"""
@property
def location(self):
""" Get the UsageKey of this block. """
return self.scope_ids.usage_id
@location.setter
def location(self, value):
""" Set the UsageKey of this block. """
assert isinstance(value, UsageKey)
self.scope_ids = self.scope_ids._replace(
def_id=value,
usage_id=value,
)
class StaticContentImporter: # lint-amnesty, pylint: disable=missing-class-docstring
def __init__(self, static_content_store, course_data_path, target_id):
self.static_content_store = static_content_store
self.target_id = target_id
self.course_data_path = course_data_path
try:
with open(course_data_path / 'policies/assets.json') as f:
self.policy = json.load(f)
except (OSError, ValueError) as err: # lint-amnesty, pylint: disable=unused-variable
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
self.policy = {}
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
self.mimetypes_list = list(mimetypes.types_map.values())
def import_static_content_directory(self, content_subdir=DEFAULT_STATIC_CONTENT_SUBDIR, verbose=False): # lint-amnesty, pylint: disable=missing-function-docstring
remap_dict = {}
static_dir = self.course_data_path / content_subdir
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
file_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', file_path)
continue
if verbose:
log.debug('importing static content %s...', file_path)
imported_file_attrs = self.import_static_file(file_path, base_dir=static_dir)
if imported_file_attrs:
# store the remapping information which will be needed
                    # to substitute in the module data
remap_dict[imported_file_attrs[0]] = imported_file_attrs[1]
return remap_dict
def import_static_file(self, full_file_path, base_dir): # lint-amnesty, pylint: disable=missing-function-docstring
filename = os.path.basename(full_file_path)
try:
with open(full_file_path, 'rb') as f:
data = f.read()
except OSError:
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
if filename.startswith('._'):
return None
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
file_subpath = full_file_path.replace(base_dir, '')
if file_subpath.startswith('/'):
file_subpath = file_subpath[1:]
asset_key = StaticContent.compute_location(self.target_id, file_subpath)
policy_ele = self.policy.get(asset_key.path, {})
# During export display name is used to create files, strip away slashes from name
displayname = escape_invalid_characters(
name=policy_ele.get('displayname', filename),
invalid_char_list=['/', '\\']
)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in self.mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=file_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = self.static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
self.static_content_store.save(content)
except Exception as err: # lint-amnesty, pylint: disable=broad-except
msg = f'Error importing {file_subpath}, error={err}'
log.exception(f'Course import {self.target_id}: {msg}')
monitor_import_failure(self.target_id, 'Updating', exception=err)
return file_subpath, asset_key
class ImportManager:
"""
Import xml-based courselikes from data_dir into modulestore.
Returns:
list of new courselike objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courselikes.
data_dir: the root directory from which to find the xml courselikes.
source_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all dirs
target_id: is the Locator that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
        one courselike. If more than one courselike is loaded from data_dir/source_dirs and you
        supply this id, an AssertionError will be raised.
static_content_store: the static asset store
do_import_static: if True, then import the courselike's static files into static_content_store
This can be employed for courselikes which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
do_import_python_lib: if True, import a courselike's python lib file into static_content_store
if it exists. This can be useful if the static content import needs to be skipped
(e.g.: for performance reasons), but the python lib still needs to be imported. If static
content is imported, then the python lib file will be imported regardless of this value.
create_if_not_present: If True, then a new courselike is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the courselike does not exist.
static_content_subdir: The subdirectory that contains static content.
python_lib_filename: The filename of the courselike's python library. Course authors can optionally
create this file to implement custom logic in their course.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
store_class = XMLModuleStore
def __init__(
self, store, user_id, data_dir, source_dirs=None,
default_class='xmodule.hidden_module.HiddenDescriptor',
load_error_modules=True, static_content_store=None,
target_id=None, verbose=False,
do_import_static=True, do_import_python_lib=True,
create_if_not_present=False, raise_on_failure=False,
static_content_subdir=DEFAULT_STATIC_CONTENT_SUBDIR,
python_lib_filename='python_lib.zip',
):
self.store = store
self.user_id = user_id
self.data_dir = data_dir
self.source_dirs = source_dirs
self.load_error_modules = load_error_modules
self.static_content_store = static_content_store
self.target_id = target_id
self.verbose = verbose
self.static_content_subdir = static_content_subdir
self.python_lib_filename = python_lib_filename
self.do_import_static = do_import_static
self.do_import_python_lib = do_import_python_lib
self.create_if_not_present = create_if_not_present
self.raise_on_failure = raise_on_failure
self.xml_module_store = self.store_class(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
target_course_id=target_id,
)
self.logger, self.errors = make_error_tracker()
def preflight(self):
"""
Perform any pre-import sanity checks.
"""
# If we're going to remap the ID, then we can only do that with
# a single target
if self.target_id:
assert len(self.xml_module_store.modules) == 1, 'Store unable to load course correctly.'
def import_static(self, data_path, dest_id):
"""
Import all static items into the content store.
"""
if self.static_content_store is None:
log.warning(
f'Course import {self.target_id}: Static content store is None. Skipping static content import.'
)
return
static_content_importer = StaticContentImporter(
self.static_content_store,
course_data_path=data_path,
target_id=dest_id
)
if self.do_import_static:
if self.verbose:
log.info(f'Course import {self.target_id}: Importing static content and python library')
# first pass to find everything in the static content directory
static_content_importer.import_static_content_directory(
content_subdir=self.static_content_subdir, verbose=self.verbose
)
elif self.do_import_python_lib and self.python_lib_filename:
if self.verbose:
log.info(
f'Course import {self.target_id}: Skipping static content import, still importing python library'
)
python_lib_dir_path = data_path / self.static_content_subdir
python_lib_full_path = python_lib_dir_path / self.python_lib_filename
if os.path.isfile(python_lib_full_path):
static_content_importer.import_static_file(
python_lib_full_path, base_dir=python_lib_dir_path
)
else:
if self.verbose:
log.info(f'Course import {self.target_id}: Skipping import of static content and python library')
# No matter what do_import_static is, import "static_import" directory.
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(data_path / simport):
if self.verbose:
log.info(f'Course import {self.target_id}: Importing {simport} directory')
static_content_importer.import_static_content_directory(
content_subdir=simport, verbose=self.verbose
)
def import_asset_metadata(self, data_dir, course_id):
"""
Read in assets XML file, parse it, and add all asset metadata to the modulestore.
"""
asset_dir = path(data_dir) / AssetMetadata.EXPORTED_ASSET_DIR
assets_filename = AssetMetadata.EXPORTED_ASSET_FILENAME
asset_xml_file = asset_dir / assets_filename
def make_asset_id(course_id, asset_xml):
"""
Construct an asset ID out of a complete asset XML section.
"""
asset_type = None
asset_name = None
for child in asset_xml.iterchildren():
if child.tag == AssetMetadata.ASSET_TYPE_ATTR:
asset_type = child.text
elif child.tag == AssetMetadata.ASSET_BASENAME_ATTR:
asset_name = child.text
return course_id.make_asset_key(asset_type, asset_name)
all_assets = []
try:
xml_data = etree.parse(asset_xml_file).getroot()
assert xml_data.tag == AssetMetadata.ALL_ASSETS_XML_TAG
for asset in xml_data.iterchildren():
if asset.tag == AssetMetadata.ASSET_XML_TAG:
# Construct the asset key.
asset_key = make_asset_id(course_id, asset)
asset_md = AssetMetadata(asset_key)
asset_md.from_xml(asset)
all_assets.append(asset_md)
except OSError:
# file does not exist.
logging.info(f'Course import {course_id}: No {assets_filename} file present.')
return
except Exception as exc: # pylint: disable=W0703
if self.raise_on_failure: # lint-amnesty, pylint: disable=no-else-raise
monitor_import_failure(course_id, 'Updating', exception=exc)
logging.exception(f'Course import {course_id}: Error while parsing {assets_filename}.')
raise ErrorReadingFileException(assets_filename) # pylint: disable=raise-missing-from
else:
return
# Now add all asset metadata to the modulestore.
if len(all_assets) > 0:
self.store.save_asset_metadata_list(all_assets, all_assets[0].edited_by, import_only=True)
def import_courselike(self, runtime, courselike_key, dest_id, source_courselike):
"""
Import the base module/block
"""
if self.verbose:
log.debug("Scanning %s for courselike module...", courselike_key)
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
course_data_path = path(self.data_dir) / source_courselike.data_dir
log.debug('======> IMPORTING courselike %s', courselike_key)
if not self.do_import_static:
# for old-style xblock where this was actually linked to kvs
source_courselike.static_asset_path = source_courselike.data_dir
source_courselike.save()
log.debug('course static_asset_path=%s', source_courselike.static_asset_path)
log.debug('course data_dir=%s', source_courselike.data_dir)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
course = _update_and_import_module(
source_courselike, self.store, self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=runtime,
)
self.static_updater(course, source_courselike, courselike_key, dest_id, runtime)
self.store.update_item(course, self.user_id)
return course, course_data_path
@abstractmethod
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Updates any special static items, such as PDF coursebooks.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@abstractmethod
def get_dest_id(self, courselike_key):
"""
Given a courselike_key, get the version of the key that will actually be used in the modulestore
for import.
"""
raise NotImplementedError
@abstractmethod
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, a runtime, and an intended destination key, get the descriptor for the courselike
we'll be importing into.
"""
raise NotImplementedError
@abstractmethod
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
To be overloaded with a method that installs the child items into self.store.
"""
raise NotImplementedError
@abstractmethod
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
To be overloaded with a method that installs the draft items into self.store.
"""
raise NotImplementedError
def recursive_build(self, source_courselike, courselike, courselike_key, dest_id):
"""
Recursively imports all child blocks from the temporary modulestore into the
target modulestore.
"""
all_locs = set(self.xml_module_store.modules[courselike_key].keys())
all_locs.remove(source_courselike.location)
def depth_first(subtree):
"""
Import top down just so import code can make assumptions about parents always being available
"""
if subtree.has_children:
for child in subtree.get_children():
try:
all_locs.remove(child.location)
except KeyError:
# tolerate same child occurring under 2 parents such as in
# ContentStoreTest.test_image_import
pass
if self.verbose:
log.debug('importing module location %s', child.location)
try:
_update_and_import_module(
child,
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
except Exception:
log.exception(
f'Course import {dest_id}: failed to import module location {child.location}'
)
raise ModuleFailedToImport(child.display_name, child.location) # pylint: disable=raise-missing-from
depth_first(child)
depth_first(source_courselike)
for leftover in all_locs:
if self.verbose:
log.debug('importing module location %s', leftover)
try:
_update_and_import_module(
self.xml_module_store.get_item(leftover),
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
except Exception:
log.exception(
f'Course import {dest_id}: failed to import module location {leftover}'
)
# pylint: disable=raise-missing-from
raise ModuleFailedToImport(leftover.display_name, leftover.location)
def run_imports(self):
"""
Iterate over the given directories and yield courses.
"""
self.preflight()
for courselike_key in self.xml_module_store.modules.keys():
try:
dest_id, runtime = self.get_dest_id(courselike_key)
except DuplicateCourseError:
continue
# This bulk operation wraps all the operations to populate the published branch.
with self.store.bulk_operations(dest_id):
# Retrieve the course itself.
source_courselike, courselike, data_path = self.get_courselike(courselike_key, runtime, dest_id)
# Import all static pieces.
self.import_static(data_path, dest_id)
# Import asset metadata stored in XML.
self.import_asset_metadata(data_path, dest_id)
# Import all children
self.import_children(source_courselike, courselike, courselike_key, dest_id)
# This bulk operation wraps all the operations to populate the draft branch with any items
# from the /drafts subdirectory.
# Drafts must be imported in a separate bulk operation from published items to import properly,
# due to the recursive_build() above creating a draft item for each course block
# and then publishing it.
with self.store.bulk_operations(dest_id):
# Import all draft items into the courselike.
courselike = self.import_drafts(courselike, courselike_key, data_path, dest_id)
yield courselike
class CourseImportManager(ImportManager):
"""
Import manager for Courses.
"""
store_class = XMLModuleStore
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, runtime, and target key, get the version of the course
from the temporary modulestore.
"""
source_course = self.xml_module_store.get_course(courselike_key)
# STEP 1: find and import course module
course, course_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_course,
)
return source_course, course, course_data_path
def get_dest_id(self, courselike_key):
"""
Get the course key that will be used for the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
# Note that dest_course_id will be in the format for the default modulestore.
dest_id = self.store.make_course_key(courselike_key.org, courselike_key.course, courselike_key.run)
existing_id = self.store.has_course(dest_id, ignore_case=True)
# store.has_course will return the course_key in the format for the modulestore in which it was found.
# This may be different from dest_course_id, so correct to the format found.
if existing_id:
dest_id = existing_id
runtime = None
# Creates a new course if it doesn't already exist
if self.create_if_not_present and not existing_id:
try:
new_course = self.store.create_course(
dest_id.org, dest_id.course, dest_id.run, self.user_id
)
runtime = new_course.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of course with id, %s, "
"since it collides with an existing one", dest_id
)
raise
return dest_id, runtime
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Update special static assets, such as PDF textbooks and wiki resources.
"""
for entry in course.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
asset_key = StaticContent.get_location_from_path(chapter['url'])
chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if courselike_key != course.id:
original_unique_wiki_slug = '{}.{}.{}'.format(
courselike_key.org,
courselike_key.course,
courselike_key.run
)
if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == courselike_key.course:
course.wiki_slug = '{}.{}.{}'.format(
course.id.org,
course.id.course,
course.id.run,
)
# cdodge: more hacks (what else). Seems like we have a
# problem when importing a course (like 6.002) which
# does not have any tabs defined in the policy file.
# The import goes fine and then displays fine in LMS,
# but if someone tries to add a new tab in the CMS, then
# the LMS barfs because it expects that -- if there are
# *any* tabs -- then there at least needs to be
# some predefined ones
if course.tabs is None or len(course.tabs) == 0:
CourseTabList.initialize_default(course)
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
# The branch setting of published_only forces an overwrite of all draft modules
# during the course import.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, dest_id):
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
# Import any draft items
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
_import_course_draft(
self.xml_module_store,
self.store,
self.user_id,
data_path,
courselike_key,
dest_id,
courselike.runtime
)
# Importing the drafts potentially triggered a new structure version.
# If so, the HEAD version_guid of the passed-in courselike will be out-of-date.
# Fetch the course to return the most recent course version.
return self.store.get_course(courselike.id.replace(branch=None, version_guid=None))
class LibraryImportManager(ImportManager):
"""
Import manager for Libraries
"""
store_class = LibraryXMLModuleStore
def get_dest_id(self, courselike_key):
"""
Get the LibraryLocator that will be used in the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
dest_id = LibraryLocator(self.target_id.org, self.target_id.library)
existing_lib = self.store.get_library(dest_id, ignore_case=True)
runtime = None
if existing_lib:
dest_id = existing_lib.location.library_key
runtime = existing_lib.runtime
if self.create_if_not_present and not existing_lib:
try:
library = self.store.create_library(
org=self.target_id.org,
library=self.target_id.library,
user_id=self.user_id,
fields={"display_name": ""},
)
runtime = library.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of Library with id %s, "
"since it collides with an existing one", dest_id
)
raise
return dest_id, runtime
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Get the descriptor of the library from the XML import modulestore.
"""
source_library = self.xml_module_store.get_library(courselike_key)
library, library_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_library,
)
return source_library, library, library_data_path
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Libraries have no special static items to import.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
return courselike
def import_course_from_xml(*args, **kwargs):
"""
Thin wrapper for the Course Import Manager. See ImportManager for details.
"""
manager = CourseImportManager(*args, **kwargs)
return list(manager.run_imports())
def import_library_from_xml(*args, **kwargs):
"""
Thin wrapper for the Library Import Manager. See ImportManager for details.
"""
manager = LibraryImportManager(*args, **kwargs)
return list(manager.run_imports())
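# Illustrative sketch (not part of the original module): a minimal call into the
# thin wrappers above, under the assumption that `modulestore` and `contentstore`
# are already-configured store instances and that '/data/courses/my-course' is a
# hypothetical on-disk course export directory.
def _import_course_usage_sketch(modulestore, contentstore, user_id):
    return import_course_from_xml(
        modulestore,
        user_id,
        '/data/courses',                 # data_dir: root holding exported courses
        source_dirs=['my-course'],       # subdirectory containing course.xml
        static_content_store=contentstore,
        create_if_not_present=True,      # create the target course if missing
        verbose=False,
    )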
def _update_and_import_module(
module, store, user_id,
source_course_id, dest_course_id,
do_import_static=True, runtime=None):
"""
Update all the module reference fields to the destination course id,
then import the module into the destination course.
"""
logging.debug('processing import of module %s...', str(module.location))
def _update_module_references(module, source_course_id, dest_course_id):
"""
Move the module to a new course.
"""
def _convert_ref_fields_to_new_namespace(reference):
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
assert isinstance(reference, UsageKey)
if source_course_id == reference.course_key:
return reference.map_into_course(dest_course_id)
else:
return reference
fields = {}
for field_name, field in module.fields.items():
if field.scope != Scope.parent and field.is_set_on(module):
if isinstance(field, Reference):
value = field.read_from(module)
if value is None:
fields[field_name] = None
else:
fields[field_name] = _convert_ref_fields_to_new_namespace(field.read_from(module))
elif isinstance(field, ReferenceList):
references = field.read_from(module)
fields[field_name] = [_convert_ref_fields_to_new_namespace(reference) for reference in references]
elif isinstance(field, ReferenceValueDict):
reference_dict = field.read_from(module)
fields[field_name] = {
key: _convert_ref_fields_to_new_namespace(reference)
for key, reference
in reference_dict.items()
}
elif field_name == 'xml_attributes':
value = field.read_from(module)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_url' in value:
del value['parent_url']
if 'parent_sequential_url' in value:
del value['parent_sequential_url']
if 'index_in_children_list' in value:
del value['index_in_children_list']
fields[field_name] = value
else:
fields[field_name] = field.read_from(module)
return fields
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_id,
dest_course_id,
module.data
)
fields = _update_module_references(module, source_course_id, dest_course_id)
asides = module.get_asides() if isinstance(module, XModuleMixin) else None
if module.location.block_type == 'library_content':
with store.branch_setting(branch_setting=ModuleStoreEnum.Branch.published_only):
lib_content_block_already_published = store.has_item(module.location)
block = store.import_xblock(
user_id, dest_course_id, module.location.block_type,
module.location.block_id, fields, runtime, asides=asides
)
# TODO: Move this code once the following condition is met.
# Get to the point where XML import is happening inside the
# modulestore that is eventually going to store the data.
# Ticket: https://openedx.atlassian.net/browse/PLAT-1046
# Special case handling for library content blocks. The fact that this is
# in Modulestore code is _bad_ and breaks abstraction barriers, but is too
# much work to factor out at this point.
if block.location.block_type == 'library_content':
# If library exists, update source_library_version and children
# according to this existing library and library content block.
if store.get_library(block.source_library_key):
# If the library content block is already in the course, then don't
# refresh the children when we re-import it. This lets us address
# TNL-7507 (Randomized Content Block Settings Lost in Course Import)
# while still avoiding AA-310, where the IDs of the children for an
# existing library_content block might be altered, losing student
# user state.
#
# Note that while this method is run on import, it's also run when
# adding the library content from Studio for the first time.
#
# TLDR: When importing, we only copy the default values from content
# in a library the first time that library_content block is created.
# Future imports ignore what's in the library so as not to disrupt
# course state. You _can_ still update to the library via the Studio
# UI for updating to the latest version of a library for this block.
if lib_content_block_already_published:
return block
# Update library content block's children on draft branch
with store.branch_setting(branch_setting=ModuleStoreEnum.Branch.draft_preferred):
LibraryToolsService(store, user_id).update_children(
block,
version=block.source_library_version,
)
# Publish it if importing the course for branch setting published_only.
if store.get_branch_setting() == ModuleStoreEnum.Branch.published_only:
store.publish(block.location, user_id)
return block
def _import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
source_course_id,
target_id,
mongo_runtime
):
"""
This method will import all the content inside of the 'drafts' folder, if content exists.
NOTE: This is not a full course import! In our current application, only verticals
(and blocks beneath) can be in draft. Therefore, different call points into the import
process_xml are used as the XMLModuleStore() constructor cannot simply be called
(as is done for importing public content).
"""
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
# Remove absolute path, leaving relative <course_name>/drafts.
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=source_course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
target_course_id=target_id,
)
def _import_module(module):
# IMPORTANT: Be sure to update the module location in the NEW namespace
module_location = module.location.map_into_course(target_id)
# Update the module's location to DRAFT revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module_location.replace(revision=MongoRevisionKey.draft))
parent_url = get_parent_url(module)
index = index_in_children_list(module)
# make sure our parent has us in its list of children
# this is to make sure private only modules show up
# in the list of children since they would have been
# filtered out from the non-draft store export.
if parent_url is not None and index is not None:
            # parent_url is recorded in the source-course namespace, so resolve it
            # against source_course_id explicitly (rather than relying on the
            # enclosing loop variable) before mapping into the target course below.
            course_key = source_course_id
parent_location = UsageKey.from_string(parent_url).map_into_course(course_key)
# IMPORTANT: Be sure to update the parent in the NEW namespace
parent_location = parent_location.map_into_course(target_id)
parent = store.get_item(parent_location, depth=0)
non_draft_location = module.location.map_into_course(target_id)
if not any(child.block_id == module.location.block_id for child in parent.children):
parent.children.insert(index, non_draft_location)
store.update_item(parent, user_id)
_update_and_import_module(
module, store, user_id,
source_course_id,
target_id,
runtime=mongo_runtime,
)
for child in module.get_children():
_import_module(child)
# Now walk the /drafts directory.
# Each file in the directory will be a draft copy of the vertical.
# First it is necessary to order the draft items by their desired index in the child list,
# since the order in which os.walk() returns the files is not guaranteed.
drafts = []
for rootdir, __, filenames in os.walk(draft_dir):
for filename in filenames:
if filename.startswith('._'):
# Skip any OSX quarantine files, prefixed with a '._'.
continue
module_path = os.path.join(rootdir, filename)
with open(module_path) as f:
try:
xml = f.read()
# The process_xml() call below recursively processes all descendants. If
# we call this on all verticals in a course with verticals nested below
# the unit level, we try to import the same content twice, causing naming conflicts.
# Therefore only process verticals at the unit level, assuming that any other
# verticals must be descendants.
if 'index_in_children_list' in xml:
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
filename, __ = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=filename)
index = index_in_children_list(descriptor)
parent_url = get_parent_url(descriptor, xml)
draft_url = str(descriptor.location)
draft = draft_node_constructor(
module=descriptor, url=draft_url, parent_url=parent_url, index=index
)
drafts.append(draft)
except Exception: # pylint: disable=broad-except
logging.exception('Error while parsing course drafts xml.')
# Sort drafts by `index_in_children_list` attribute.
drafts.sort(key=lambda x: x.index)
for draft in get_draft_subtree_roots(drafts):
try:
_import_module(draft.module)
except Exception: # pylint: disable=broad-except
logging.exception(f'Course import {source_course_id}: while importing draft descriptor {draft.module}')
def allowed_metadata_by_category(category):
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
def check_module_metadata_editability(module):
"""
    Assert that there is no metadata within a particular module that
    we can't support editing. However, we always allow 'display_name'
    and 'xml_attributes'.
"""
allowed = allowed_metadata_by_category(module.location.block_type)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=str(module.location), keys=illegal_keys
)
)
return err_cnt
def get_parent_url(module, xml=None):
"""
Get the parent_url, if any, from module using xml as an alternative source. If it finds it in
xml but not on module, it modifies module so that the next call to this w/o the xml will get the parent url
"""
if hasattr(module, 'xml_attributes'):
return module.xml_attributes.get(
# handle deprecated old attr
'parent_url', module.xml_attributes.get('parent_sequential_url')
)
if xml is not None:
create_xml_attributes(module, xml)
        return get_parent_url(module)  # don't pass xml again (avoids infinite recursion); retry the lookup above
return None
def index_in_children_list(module, xml=None):
"""
Get the index_in_children_list, if any, from module using xml
as an alternative source. If it finds it in xml but not on module,
it modifies module so that the next call to this w/o the xml
will get the field.
"""
if hasattr(module, 'xml_attributes'):
val = module.xml_attributes.get('index_in_children_list')
if val is not None:
return int(val)
return None
if xml is not None:
create_xml_attributes(module, xml)
        return index_in_children_list(module)  # don't pass xml again (avoids infinite recursion); retry the lookup above
return None
def create_xml_attributes(module, xml):
"""
    Make up for modules which don't define xml_attributes by creating them here
    and populating them from the XML attributes.
"""
xml_attrs = {}
for attr, val in xml.attrib.items():
if attr not in module.fields:
# translate obsolete attr
if attr == 'parent_sequential_url':
attr = 'parent_url'
xml_attrs[attr] = val
# now cache it on module where it's expected
module.xml_attributes = xml_attrs
def validate_no_non_editable_metadata(module_store, course_id, category): # lint-amnesty, pylint: disable=missing-function-docstring
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.block_type == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy( # lint-amnesty, pylint: disable=missing-function-docstring
module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].values():
if module.location.block_type == parent_category:
parents.append(module)
for parent in parents:
for child_loc in parent.children:
if child_loc.block_type != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.block_type
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None): # lint-amnesty, pylint: disable=missing-function-docstring, redefined-outer-name
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
def validate_data_source_paths(data_dir, course_dir): # lint-amnesty, pylint: disable=missing-function-docstring
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].values():
if module.location.block_type == 'course':
if not module._field_data.has(module, 'rerandomize'): # lint-amnesty, pylint: disable=protected-access
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'): # lint-amnesty, pylint: disable=protected-access
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint( # lint-amnesty, pylint: disable=missing-function-docstring
data_dir, source_dirs,
default_class='xmodule.hidden_module.HiddenDescriptor',
load_error_modules=True,
xblock_mixins=(LocationMixin, XModuleMixin)):
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=xblock_mixins
)
# check all data source path information
for course_dir in source_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._course_errors.values(): # pylint: disable=protected-access
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.values():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
if not module_store.has_item(course_id.make_usage_key('about', 'video')):
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
print("\n")
print("------------------------------------------")
print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
err=err_cnt,
warn=warn_cnt
))
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
print(
"This course can be imported, but some errors may occur "
"during the run of the course. It is recommend that you fix "
"your courseware before importing"
)
else:
print("This course can be imported successfully.")
return err_cnt
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
list(module.get_explicitly_set_fields_by_scope(Scope.content).keys()) +
list(module.get_explicitly_set_fields_by_scope(Scope.settings).keys()) +
list(module.get_explicitly_set_fields_by_scope(Scope.children).keys())
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to call
    # XBlock's `force_save_fields` method.
if len(rekey_fields) > 0:
module.force_save_fields(rekey_fields)
|
yassersouri/pykov | refs/heads/master | pykov/learning.py | 1 | import numpy
from utils import add_logs, forward_path, backward_path, params_to_vector
import markov, evaluation
def baum_welch(observations, N, M, max_iters=1000, conv_crit=0.00001, INITIALIZATION_MODE="random"):
"""
N is the number of hidden states, M is the number of different possible observations
"""
T = observations.shape[0]
MAX_ITERS = max_iters
CONV_CRIT = conv_crit
# initialize variables
pi, A, B = initialize_variables(N, M, mode=INITIALIZATION_MODE)
likelihoods = []
# go to log-space
pi = numpy.log(pi)
A = numpy.log(A)
B = numpy.log(B)
print 'initialization: Done'
converge = False
iter_num = 0
while not converge:
likelihoods.append(calculate_likelihood(observations, pi, A, B))
iter_num += 1
# iterate
new_pi, new_A, new_B = EM_iterate(observations, N, M, T, pi, A, B)
print 'EM Iteration: %d' % iter_num
# check convergence
converge = did_converge(pi, A, B, new_pi, new_A, new_B, CONV_CRIT)
if iter_num > MAX_ITERS:
converge = True
#update values
pi, A, B = new_pi, new_A, new_B
# return variables
return pi, A, B, likelihoods
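# Example usage (illustrative values; any integer-coded observation sequence works):
#
#   obs = numpy.array([0, 1, 1, 2, 0, 1])
#   pi, A, B, likelihoods = baum_welch(obs, N=2, M=3, max_iters=50)
#   # the returned parameters are log-probabilities; exponentiate to inspect them
#   print numpy.exp(A)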
def EM_iterate(observations, N, M, T, pi, A, B):
""" Expectation """
alphas = forward_path(observations, pi, A, B, T, N)
betas = backward_path(observations, pi, A, B, T, N)
gammas = calculate_gammas(alphas, betas, T, N)
    ksies = calculate_ksies(observations, alphas, betas, A, B, T, N)
""" Maximization """
new_pi = gammas[0, :]
new_A = numpy.zeros((N, N))
for i in range(N):
norm_factor = add_logs(gammas[:-1, i])
for j in range(N):
            new_A[i, j] = add_logs(ksies[:-1, i, j]) - norm_factor
new_B = numpy.zeros((N, M))
for i in range(N):
norm_factor = add_logs(gammas[:, i])
for k in range(M):
new_B[i, k] = add_logs(gammas[:, i][observations == k]) - norm_factor
return new_pi, new_A, new_B
def did_converge(pi, A, B, new_pi, new_A, new_B, CONV_CRIT):
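    # Convergence check: flatten the old and new parameters, map them back from
    # log-space to probabilities, and compare the Euclidean distance to CONV_CRIT.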
old = params_to_vector(pi, A, B)
old = numpy.exp(old)
new = params_to_vector(new_pi, new_A, new_B)
new = numpy.exp(new)
diff = numpy.linalg.norm(old - new)
print diff
if diff > CONV_CRIT:
return False
return True
def initialize_variables(N, M, mode="random"):
"""
    Initializes the model parameters pi, A and B.
    It can operate in two modes: "random" or "equal".
"""
A = numpy.zeros((N, N))
B = numpy.zeros((N, M))
pi = numpy.zeros(N)
if mode == "random":
pi = numpy.random.random(N)
pi = pi / pi.sum()
for i in range(N):
tempA = numpy.random.random(N)
A[i, :] = tempA / tempA.sum()
tempB = numpy.random.random(M)
B[i, :] = tempB / tempB.sum()
elif mode == "equal":
pi[:] = 1. / N
A[:] = 1. / N
B[:] = 1. / M
else:
raise Exception("invalid mode: %s" % mode)
return pi, A, B
def calculate_gammas(alphas, betas, T, N):
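    # gamma_t(i) = P(state i at time t | observations); in log-space this is
    # log alpha_t(i) + log beta_t(i), normalised over all states at each t.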
gammas = numpy.zeros((T, N))
for t in range(T):
for i in range(N):
gammas[t, i] = alphas[t, i] + betas[t, i]
sum_all = add_logs(gammas[t, :])
gammas[t, :] = gammas[t, :] - sum_all
return gammas
def calculate_ksies(observations, alphas, betas, A, B, T, N):
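    # ksi_t(i, j) = P(state i at t and state j at t+1 | observations); in log-space:
    # alpha_t(i) + A[i, j] + B[j, o_{t+1}] + beta_{t+1}(j) - log P(O).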
ksies = numpy.zeros((T, N, N))
norms = numpy.zeros(T)
for t in range(T):
temps = numpy.zeros(N)
for k in range(N):
temps[k] = alphas[t, k] + betas[t, k]
norms[t] = add_logs(temps)
for t in range(T-1):
for i in range(N):
for j in range(N):
ksies[t, i, j] = alphas[t, i] + A[i, j] + betas[t+1, j] + B[j, observations[t+1]] - norms[t]
return ksies
def calculate_likelihood(observations, pi, A, B):
model = markov.HMM(numpy.exp(pi), numpy.exp(A), numpy.exp(B))
return evaluation.evaluate(observations, model, log=True) |
gdelpierre/ansible-modules-core | refs/heads/devel | network/cumulus/cl_bridge.py | 8 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cl_bridge
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bridge port on Cumulus Linux
description:
    - Configures a bridge interface on Cumulus Linux. To configure a bond port
use the cl_bond module. To configure any other type of interface use the
cl_interface module. Follow the guidelines for bridging found in the
Cumulus User Guide at U(http://docs.cumulusnetworks.com)
options:
name:
description:
- Name of the interface.
required: true
alias_name:
description:
- Description of the port.
ipv4:
description:
- List of IPv4 addresses to configure on the interface.
In the form I(X.X.X.X/YY).
ipv6:
description:
- List of IPv6 addresses to configure on the interface.
In the form I(X:X:X::X/YYY).
addr_method:
description:
- Configures the port to use DHCP.
To enable this feature use the option I(dhcp).
choices: ['dhcp']
mtu:
description:
- Set MTU. Configure Jumbo Frame by setting MTU to I(9000).
virtual_ip:
description:
- Define IPv4 virtual IP used by the Cumulus Linux VRR feature.
virtual_mac:
description:
- Define Ethernet mac associated with Cumulus Linux VRR feature.
vids:
description:
- In vlan-aware mode, lists VLANs defined under the interface.
pvid:
description:
- In vlan-aware mode, defines vlan that is the untagged vlan.
stp:
description:
            - Enables Spanning Tree Protocol. As of Cumulus Linux 2.5, the default
              bridging mode supports only per-vlan RSTP or 802.1d. For the
              vlan-aware mode, only common instance STP is supported.
default: 'yes'
choices: ['yes', 'no']
ports:
description:
- List of bridge members.
required: True
vlan_aware:
description:
- Enables vlan-aware mode.
choices: ['yes', 'no']
mstpctl_treeprio:
description:
- Set spanning tree root priority. Must be a multiple of 4096.
location:
description:
- Interface directory location.
default:
- '/etc/network/interfaces.d'
requirements: [ Alternate Debian network interface manager
ifupdown2 @ github.com/CumulusNetworks/ifupdown2 ]
notes:
    - As this module writes the interface configuration into the directory given
      by the ``location`` option, ensure that ``/etc/network/interfaces`` has a
      'source /etc/network/interfaces.d/\*' line (or whatever path is mentioned
      in the ``location`` attribute).
    - For the config to be activated, i.e. installed in the kernel,
      "service networking reload" needs to be executed. See EXAMPLES section.
'''
EXAMPLES = '''
# Options ['virtual_mac', 'virtual_ip'] are required together
# configure a bridge vlan aware bridge.
cl_bridge: name=br0 ports='swp1-12' vlan_aware='yes'
notify: reload networking
# configure bridge interface to define a default set of vlans
cl_bridge: name=bridge ports='swp1-12' vlan_aware='yes' vids='1-100'
notify: reload networking
# define cl_bridge once in tasks file
# then write interface config in variables file
# with just the options you want.
cl_bridge:
name: "{{ item.key }}"
ports: "{{ item.value.ports }}"
vlan_aware: "{{ item.value.vlan_aware|default(omit) }}"
ipv4: "{{ item.value.ipv4|default(omit) }}"
ipv6: "{{ item.value.ipv6|default(omit) }}"
alias_name: "{{ item.value.alias_name|default(omit) }}"
addr_method: "{{ item.value.addr_method|default(omit) }}"
mtu: "{{ item.value.mtu|default(omit) }}"
vids: "{{ item.value.vids|default(omit) }}"
virtual_ip: "{{ item.value.virtual_ip|default(omit) }}"
virtual_mac: "{{ item.value.virtual_mac|default(omit) }}"
mstpctl_treeprio: "{{ item.value.mstpctl_treeprio|default(omit) }}"
with_dict: cl_bridges
notify: reload networking
# In vars file
# ============
cl_bridge:
br0:
alias_name: 'vlan aware bridge'
ports: ['swp1', 'swp3']
vlan_aware: true
vids: ['1-100']
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
# handy helper for calling system calls.
# calls AnsibleModule.run_command and prints a more appropriate message
# exec_path - path to file to execute, with all its arguments.
# E.g "/sbin/ip -o link show"
# failure_msg - what message to print on failure
def run_cmd(module, exec_path):
(_rc, out, _err) = module.run_command(exec_path)
if _rc > 0:
if re.search('cannot find interface', _err):
return '[{}]'
failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
module.fail_json(msg=failure_msg)
else:
return out
def current_iface_config(module):
# due to a bug in ifquery, have to check for presence of interface file
# and not rely solely on ifquery. when bug is fixed, this check can be
# removed
_ifacename = module.params.get('name')
_int_dir = module.params.get('location')
module.custom_current_config = {}
if os.path.exists(_int_dir + '/' + _ifacename):
_cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
module.custom_current_config = module.from_json(
run_cmd(module, _cmd))[0]
def build_address(module):
# if addr_method == 'dhcp', dont add IP address
if module.params.get('addr_method') == 'dhcp':
return
_ipv4 = module.params.get('ipv4')
_ipv6 = module.params.get('ipv6')
_addresslist = []
if _ipv4 and len(_ipv4) > 0:
_addresslist += _ipv4
if _ipv6 and len(_ipv6) > 0:
_addresslist += _ipv6
if len(_addresslist) > 0:
module.custom_desired_config['config']['address'] = ' '.join(
_addresslist)
def build_vids(module):
_vids = module.params.get('vids')
if _vids and len(_vids) > 0:
module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
def build_pvid(module):
_pvid = module.params.get('pvid')
if _pvid:
module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
def conv_bool_to_str(_value):
if isinstance(_value, bool):
if _value is True:
return 'yes'
else:
return 'no'
return _value
def build_generic_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
if _value:
module.custom_desired_config['config'][
re.sub('_', '-', _attr)] = str(_value)
def build_alias_name(module):
alias_name = module.params.get('alias_name')
if alias_name:
module.custom_desired_config['config']['alias'] = alias_name
def build_addr_method(module):
_addr_method = module.params.get('addr_method')
if _addr_method:
module.custom_desired_config['addr_family'] = 'inet'
module.custom_desired_config['addr_method'] = _addr_method
def build_vrr(module):
_virtual_ip = module.params.get('virtual_ip')
_virtual_mac = module.params.get('virtual_mac')
vrr_config = []
if _virtual_ip:
vrr_config.append(_virtual_mac)
vrr_config.append(_virtual_ip)
module.custom_desired_config.get('config')['address-virtual'] = \
' '.join(vrr_config)
def add_glob_to_array(_bridgemems):
"""
    goes through each bridge member and, if the entry contains a dash
    (a port range), prefixes it with 'glob'
"""
result = []
if isinstance(_bridgemems, list):
for _entry in _bridgemems:
if re.search('-', _entry):
_entry = 'glob ' + _entry
result.append(_entry)
return ' '.join(result)
return _bridgemems
def build_bridge_attr(module, _attr):
_value = module.params.get(_attr)
_value = conv_bool_to_str(_value)
_value = add_glob_to_array(_value)
if _value:
module.custom_desired_config['config'][
'bridge-' + re.sub('_', '-', _attr)] = str(_value)
def build_desired_iface_config(module):
"""
    take the defined module parameters and build an ifupdown2-compatible config dict
"""
module.custom_desired_config = {
'addr_family': None,
'auto': True,
'config': {},
'name': module.params.get('name')
}
for _attr in ['vlan_aware', 'pvid', 'ports', 'stp']:
build_bridge_attr(module, _attr)
build_addr_method(module)
build_address(module)
build_vids(module)
build_alias_name(module)
build_vrr(module)
for _attr in ['mtu', 'mstpctl_treeprio']:
build_generic_attr(module, _attr)
def config_dict_changed(module):
"""
return true if 'config' dict in hash is different
between desired and current config
"""
current_config = module.custom_current_config.get('config')
desired_config = module.custom_desired_config.get('config')
return current_config != desired_config
def config_changed(module):
"""
returns true if config has changed
"""
if config_dict_changed(module):
return True
# check if addr_method is changed
return module.custom_desired_config.get('addr_method') != \
module.custom_current_config.get('addr_method')
def replace_config(module):
temp = tempfile.NamedTemporaryFile()
desired_config = module.custom_desired_config
# by default it will be something like /etc/network/interfaces.d/swp1
final_location = module.params.get('location') + '/' + \
module.params.get('name')
final_text = ''
_fh = open(final_location, 'w')
# make sure to put hash in array or else ifquery will fail
# write to temp file
try:
temp.write(module.jsonify([desired_config]))
# need to seek to 0 so that data is written to tempfile.
temp.seek(0)
_cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
final_text = run_cmd(module, _cmd)
finally:
temp.close()
try:
_fh.write(final_text)
finally:
_fh.close()
def main():
module = AnsibleModule(
argument_spec=dict(
ports=dict(required=True, type='list'),
name=dict(required=True, type='str'),
ipv4=dict(type='list'),
ipv6=dict(type='list'),
alias_name=dict(type='str'),
addr_method=dict(type='str',
choices=['', 'dhcp']),
mtu=dict(type='str'),
virtual_ip=dict(type='str'),
virtual_mac=dict(type='str'),
vids=dict(type='list'),
pvid=dict(type='str'),
mstpctl_treeprio=dict(type='str'),
vlan_aware=dict(type='bool', choices=BOOLEANS),
stp=dict(type='bool', default='yes', choices=BOOLEANS),
location=dict(type='str',
default='/etc/network/interfaces.d')
),
required_together=[
['virtual_ip', 'virtual_mac']
]
)
    # if using the jinja default filter, this resolves to
    # a list containing an empty string ['']. The following
    # checks all lists and removes it, so that functions expecting
    # an empty list get this result. May upstream this fix into
    # the AnsibleModule code to have it check for this.
for k, _param in module.params.iteritems():
if isinstance(_param, list):
module.params[k] = [x for x in _param if x]
_location = module.params.get('location')
if not os.path.exists(_location):
_msg = "%s does not exist." % (_location)
module.fail_json(msg=_msg)
return # for testing purposes only
ifacename = module.params.get('name')
_changed = False
_msg = "interface %s config not changed" % (ifacename)
current_iface_config(module)
build_desired_iface_config(module)
if config_changed(module):
replace_config(module)
_msg = "interface %s config updated" % (ifacename)
_changed = True
module.exit_json(changed=_changed, msg=_msg)
# import module snippets
from ansible.module_utils.basic import *
import tempfile
import os
import re
if __name__ == '__main__':
main()
|