repo_name (stringlengths 5-88) | path (stringlengths 4-199) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 855-832k) | license (stringclasses, 15 values) | hash (int64, -9,223,128,179,723,874,000 to 9,223,237,214B) | line_mean (float64, 3.5-99) | line_max (int64, 15-999) | alpha_frac (float64, 0.25-0.87) | autogenerated (bool, 1 class) | ratio (float64, 1.5-7.55) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) | score (float64, 0-0.2)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
citrix-openstack-build/python-ironicclient | ironicclient/common/base.py | 1 | 4238 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import copy
import six
# Python 2.4 compat
try:
all
except NameError:
def all(iterable):
return True not in (not x for x in iterable)
def getid(obj):
"""Abstracts the common pattern of allowing both an object or an
object's ID (UUID) as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
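# Illustrative sketch of getid() in use; the resource object and UUID below are
# hypothetical examples, not part of this module:
#   node = Resource(manager, {'id': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'})
#   getid(node)                                     # -> the object's id attribute
#   getid('1be26c0b-03f2-4d2e-ae87-c02d7f33c123')   # -> the string, returned unchanged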
class Manager(object):
"""Managers interact with a particular type of API and provide CRUD
operations for them.
"""
resource_class = None
def __init__(self, api):
self.api = api
def _create(self, url, body):
resp, body = self.api.json_request('POST', url, body=body)
if body:
return self.resource_class(self, body)
def _list(self, url, response_key=None, obj_class=None, body=None):
resp, body = self.api.json_request('GET', url)
if obj_class is None:
obj_class = self.resource_class
if response_key:
try:
data = body[response_key]
except KeyError:
return []
else:
data = body
if not isinstance(data, list):
data = [data]
return [obj_class(self, res, loaded=True) for res in data if res]
def _update(self, url, body, response_key=None):
resp, body = self.api.json_request('PATCH', url, body=body)
# PATCH requests may not return a body
if body:
return self.resource_class(self, body)
def _delete(self, url):
self.api.raw_request('DELETE', url)
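# Minimal sketch of a concrete manager built on the helpers above; the resource
# name, URL and response key are illustrative assumptions, not part of this module:
#   class NodeManager(Manager):
#       resource_class = Resource
#
#       def list(self):
#           return self._list('/v1/nodes', response_key='nodes')
#
#       def get(self, node_id):
#           return self._list('/v1/nodes/%s' % node_id)[0]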
class Resource(object):
"""A resource represents a particular instance of an object (tenant, user,
etc). This is pretty much just a bag for attributes.
:param manager: Manager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
def __init__(self, manager, info, loaded=False):
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def _add_details(self, info):
for (k, v) in six.iteritems(info):
setattr(self, k, v)
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
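# Sketch of the lazy-loading behaviour described in the Resource docstring
# (attribute names are illustrative):
#   r = Resource(manager, {'id': 'abc'})   # only 'id' is populated, _loaded is False
#   r.name                                 # missing -> get() fetches via manager.get('abc'),
#                                          #   merges the returned _info, then resolves 'name'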
| apache-2.0 | -435,973,744,146,223,200 | 28.636364 | 78 | 0.591081 | false | 3.990584 | false | false | false | 0 |
chuck-aka-ben/TripodPi | testServo.py | 1 | 6093 | import os
import time
from Servo import Servo
from csvFile import csvFile
servo_max = 100
servo_min = -100
servo = Servo(0, servo_max, servo_min)
servo.maxABSVelocity = 20
servo.maxABSacceleration = 5
csvHeader = 'time,position,velocity'
def generateImage(csv, image, title):
os.system('python graph.py {0} {1} --title="{2}"'.format(csv.filename, image, title))
""" ****************************************
Standard Test
***************************************** """
standardPV = csvFile('data/standardPV.csv')
standardPV.open()
standardPV.write(csvHeader)
startTime = time.time()
servo.setPosition(-25)
servo.updateTarget(75)
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
standardPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
standardPV.close()
generateImage(standardPV, 'graphs/standardPV', 'Standard Positive Velocity')
standardNV = csvFile('data/standardPN.csv')
standardNV.open()
standardNV.write(csvHeader)
startTime = time.time()
servo.setPosition(75)
servo.updateTarget(-25)
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
standardNV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
standardNV.close()
generateImage(standardNV, 'graphs/standardNV', 'Standard Negative Velocity')
""" ****************************************
Update Target Test
***************************************** """
updateTargetPV = csvFile('data/updateTarget.csv')
updateTargetPV.open()
updateTargetPV.write(csvHeader)
startTime = time.time()
servo.setPosition(0)
servo.updateTarget(100)
updated = False
while servo.position != servo.target:
currentTime = time.time() - startTime
if not updated and currentTime > 2:
servo.updateTarget(-100)
updated = True
servo.updatePosition(time.time())
updateTargetPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
updateTargetPV.close()
generateImage(updateTargetPV, 'graphs/updateTarget', 'Update Test')
""" ****************************************
initial Velocity Test
***************************************** """
initialVelocityPV = csvFile('data/initialVelocityPV.csv')
initialVelocityPV.open()
initialVelocityPV.write(csvHeader)
startTime = time.time()
servo.velocity = 15
servo.setPosition(-25)
servo.updateTarget(75)
while servo.position != servo.target:
currentTime = time.time() - startTime
servo.updatePosition(time.time())
initialVelocityPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
initialVelocityPV.close()
generateImage(initialVelocityPV, 'graphs/initialVelocityPV', 'Initial Velocity Positive Velocity')
initialVelocityNV = csvFile('data/initialVelocityNV.csv')
initialVelocityNV.open()
initialVelocityNV.write(csvHeader)
startTime = time.time()
servo.velocity = -15
servo.setPosition(75)
servo.updateTarget(-25)
while servo.position != servo.target:
currentTime = time.time() - startTime
servo.updatePosition(time.time())
initialVelocityNV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
initialVelocityNV.close()
generateImage(initialVelocityNV, 'graphs/initialVelocityNV', 'Initial Velocity Negative Velocity')
""" ****************************************
Overshoot Test
***************************************** """
overshootPV = csvFile('data/overshootPV.csv')
overshootPV.open()
servo.maxABSVelocity = 2000
servo.maxABSacceleration = 5
servo.setPosition(-25)
servo.updateTarget(75)
startTime = time.time()
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
overshootPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
overshootPV.close()
generateImage(overshootPV, 'graphs/overshootPV', 'Overshoot Positive Position')
overshootinitPPV = csvFile('data/overshootinitPPV.csv')
overshootinitPPV.open()
servo.maxABSVelocity = 2000.0
servo.maxABSacceleration = 5
servo.velocity = -10.0
servo.setPosition(-25)
servo.updateTarget(75)
startTime = time.time()
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
overshootinitPPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
overshootinitPPV.close()
generateImage(overshootinitPPV, 'graphs/overshootinitPPV', 'Overshoot with Opposite Initial Velocity')
overshootinitNPV = csvFile('data/overshootinitNPV.csv')
overshootinitNPV.open()
servo.maxABSVelocity = 2000.0
servo.maxABSacceleration = 5
servo.velocity = 10.0
servo.setPosition(-25)
servo.updateTarget(75)
startTime = time.time()
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
overshootinitNPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
overshootinitNPV.close()
generateImage(overshootinitNPV, 'graphs/overshootinitNPV', 'Overshoot with Same Initial Velocity')
overshootinitLNPV = csvFile('data/overshootinitLNPV.csv')
overshootinitLNPV.open()
servo.maxABSVelocity = 2000.0
servo.maxABSacceleration = 5
servo.velocity = 35.0
servo.setPosition(-25)
servo.updateTarget(75)
startTime = time.time()
while servo.position != servo.target:
servo.updatePosition(time.time())
currentTime = time.time() - startTime
overshootinitLNPV.write('{0:.3f}, {1:.3f}, {2:.3f}\n'.format(currentTime, servo.position, servo.velocity))
time.sleep(0.005)
overshootinitLNPV.close()
generateImage(overshootinitLNPV, 'graphs/overshootinitLNPV', 'Overshoot with Same Large Initial Velocity')
| mit | -7,537,419,694,346,813,000 | 25.491304 | 110 | 0.690629 | false | 3.150465 | false | false | false | 0.002626 |
CiCiUi/django-db-logger | django_db_logger/admin.py | 1 | 1263 | from __future__ import unicode_literals
import logging
from django.contrib import admin
from django.utils.html import format_html
from django_db_logger.config import DJANGO_DB_LOGGER_ADMIN_LIST_PER_PAGE
from .models import StatusLog
class StatusLogAdmin(admin.ModelAdmin):
list_display = ('colored_msg', 'traceback', 'create_datetime_format')
list_display_links = ('colored_msg', )
list_filter = ('level', )
list_per_page = DJANGO_DB_LOGGER_ADMIN_LIST_PER_PAGE
def colored_msg(self, instance):
if instance.level in [logging.NOTSET, logging.INFO]:
color = 'green'
elif instance.level in [logging.WARNING, logging.DEBUG]:
color = 'orange'
else:
color = 'red'
return format_html('<span style="color: {color};">{msg}</span>', color=color, msg=instance.msg)
colored_msg.short_description = 'Message'
def traceback(self, instance):
return format_html('<pre><code>{content}</code></pre>', content=instance.trace if instance.trace else '')
def create_datetime_format(self, instance):
return instance.create_datetime.strftime('%Y-%m-%d %X')
create_datetime_format.short_description = 'Created at'
admin.site.register(StatusLog, StatusLogAdmin) | mit | -8,222,163,766,581,472,000 | 35.114286 | 113 | 0.68171 | false | 3.725664 | false | false | false | 0.002375 |
NERC-CEH/jules-jasmin | majic/joj/model/dataset_type.py | 1 | 1230 | """
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from sqlalchemy import Column, Integer, String
from joj.model.meta import Base
class DatasetType(Base):
"""Used to distinguish between the different types of map dataset we're dealing with"""
__tablename__ = 'dataset_types'
id = Column(Integer, primary_key=True)
type = Column(String(30))
def __repr__(self):
"""String representation of the dataset type"""
return "<DatasetType(type=%s)>" % self.type
| gpl-2.0 | -2,896,177,032,572,925,000 | 35.176471 | 91 | 0.701626 | false | 4.059406 | false | false | false | 0.000813 |
sriki18/scipy | benchmarks/benchmarks/sparse.py | 20 | 9563 | """
Simple benchmarks for the sparse module
"""
from __future__ import division, print_function, absolute_import
import warnings
import time
import timeit
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random, zeros
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.randint(0, n, size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]], dtype=dtype)
return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)
offsets = array([0, -N, N, -1, 1])
diags = empty((5, N**2), dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3, N-1::N] = 0 # first lower diagonal
diags[4, N::N] = 0 # first upper diagonal
return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
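# Worked example (not part of the benchmark suite): poisson2d(2) builds the
# operator for a 2x2 grid, i.e. the 4x4 matrix
#   [[ 4, -1, -1,  0],
#    [-1,  4,  0, -1],
#    [-1,  0,  4, -1],
#    [ 0, -1, -1,  4]]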
class Arithmetic(Benchmark):
param_names = ['format', 'XY', 'op']
params = [
['csr'],
['AA', 'AB', 'BA', 'BB'],
['__add__', '__sub__', 'multiply', '__mul__']
]
def setup(self, format, XY, op):
matrices = dict(A=poisson2d(250, format=format),
B=poisson2d(250, format=format)**2)
x = matrices[XY[0]]
self.y = matrices[XY[1]]
self.fn = getattr(x, op)
self.fn(self.y) # warmup
def time_arithmetic(self, format, XY, op):
self.fn(self.y)
class Sort(Benchmark):
params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
param_names = ['matrix']
def setup(self, matrix):
n = 10000
if matrix.startswith('Rand'):
k = int(matrix[4:])
self.A = random_sparse(n, n, k)
self.A.has_sorted_indices = False
self.A.indices[:2] = 2, 1
else:
raise NotImplementedError()
def time_sort(self, matrix):
"""sort CSR column indices"""
self.A.sort_indices()
class Matvec(Benchmark):
params = [
['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
]
param_names = ['matrix', 'format']
def setup(self, matrix, format):
if matrix == 'Identity':
if format in ('lil', 'dok'):
raise NotImplementedError()
self.A = sparse.eye(10000, 10000, format=format)
elif matrix == 'Poisson5pt':
self.A = poisson2d(300, format=format)
elif matrix == 'Block2x2':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (2, 2)
self.A = sparse.kron(poisson2d(150),
ones(b)).tobsr(blocksize=b).asformat(format)
elif matrix == 'Block3x3':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (3, 3)
self.A = sparse.kron(poisson2d(100),
ones(b)).tobsr(blocksize=b).asformat(format)
else:
raise NotImplementedError()
self.x = ones(self.A.shape[1], dtype=float)
def time_matvec(self, matrix, format):
self.A * self.x
class Matvecs(Benchmark):
params = ['dia', 'coo', 'csr', 'csc', 'bsr']
param_names = ["format"]
def setup(self, format):
self.A = poisson2d(300, format=format)
self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
def time_matvecs(self, format):
self.A * self.x
class Matmul(Benchmark):
def setup(self):
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
random.seed(0)
matrix1 = lil_matrix(zeros((H1, W1)))
matrix2 = lil_matrix(zeros((H2, W2)))
for i in range(C1):
matrix1[random.randint(H1), random.randint(W1)] = random.rand()
for i in range(C2):
matrix2[random.randint(H2), random.randint(W2)] = random.rand()
self.matrix1 = matrix1.tocsr()
self.matrix2 = matrix2.tocsr()
def time_large(self):
for i in range(100):
self.matrix1 * self.matrix2
class Construction(Benchmark):
params = [
['Empty', 'Identity', 'Poisson5pt'],
['lil', 'dok']
]
param_names = ['matrix', 'format']
def setup(self, name, format):
if name == 'Empty':
self.A = coo_matrix((10000, 10000))
elif name == 'Identity':
self.A = sparse.eye(10000, format='coo')
else:
self.A = poisson2d(100, format='coo')
formats = {'lil': lil_matrix, 'dok': dok_matrix}
self.cls = formats[format]
def time_construction(self, name, format):
T = self.cls(self.A.shape)
for i, j, v in zip(self.A.row, self.A.col, self.A.data):
T[i, j] = v
class Conversion(Benchmark):
params = [
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
]
param_names = ['from_format', 'to_format']
def setup(self, fromfmt, tofmt):
base = poisson2d(100, format=fromfmt)
try:
self.fn = getattr(base, 'to' + tofmt)
except:
def fn():
raise RuntimeError()
self.fn = fn
def time_conversion(self, fromfmt, tofmt):
self.fn()
class Getset(Benchmark):
params = [
[1, 10, 100, 1000, 10000],
['different', 'same'],
['csr', 'csc', 'lil', 'dok']
]
param_names = ['N', 'sparsity pattern', 'format']
unit = "seconds"
def setup(self, N, sparsity_pattern, format):
if format == 'dok' and N > 500:
raise NotImplementedError()
self.A = rand(1000, 1000, density=1e-5)
A = self.A
N = int(N)
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = numpy.random.randint(0, A.shape[0], size=n)
jp = numpy.random.randint(0, A.shape[1], size=n)
i = numpy.r_[i, ip]
j = numpy.r_[j, jp]
v = numpy.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
base = A.asformat(format)
self.m = base.copy()
self.i = i
self.j = j
self.v = v
def _timeit(self, kernel, recopy):
min_time = 1e99
if not recopy:
kernel(self.m, self.i, self.j, self.v)
number = 1
start = time.time()
while time.time() - start < 0.1:
if recopy:
m = self.m.copy()
else:
m = self.m
while True:
duration = timeit.timeit(
lambda: kernel(m, self.i, self.j, self.v), number=number)
if duration > 1e-5:
break
else:
number *= 10
min_time = min(min_time, duration/number)
return min_time
def track_fancy_setitem(self, N, sparsity_pattern, format):
def kernel(A, i, j, v):
A[i, j] = v
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
return self._timeit(kernel, sparsity_pattern == 'different')
def time_fancy_getitem(self, N, sparsity_pattern, format):
self.m[self.i, self.j]
class NullSlice(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 100000
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_3_rows(self, density, format):
self.X[[0, 100, 105], :]
def time_10000_rows(self, density, format):
self.X[np.arange(10000), :]
def time_3_cols(self, density, format):
self.X[:, [0, 100, 105]]
def time_100_cols(self, density, format):
self.X[:, np.arange(100)]
class Diagonal(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_diagonal(self, density, format):
self.X.diagonal()
class Sum(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_sum(self, density, format):
self.X.sum()
def time_sum_axis0(self, density, format):
self.X.sum(axis=0)
def time_sum_axis1(self, density, format):
self.X.sum(axis=1)
| bsd-3-clause | 3,417,685,548,190,418,000 | 27.20944 | 77 | 0.531005 | false | 3.3637 | false | false | false | 0.000105 |
emonty/ansible | lib/ansible/playbook/play.py | 19 | 13265 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, CollectionSearch):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True, priority=-1)
# Facts
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='list', default=(lambda: C.DEFAULT_GATHER_SUBSET), listof=string_types, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=C.DEFAULT_GATHER_TIMEOUT, always_post_validate=True)
_fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=list, priority=99)
_vars_prompt = FieldAttribute(isa='list', default=list, always_post_validate=False)
# Role Attributes
_roles = FieldAttribute(isa='list', default=list, priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=list)
_pre_tasks = FieldAttribute(isa='list', default=list)
_post_tasks = FieldAttribute(isa='list', default=list)
_tasks = FieldAttribute(isa='list', default=list)
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=list, always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self.name
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
if ('name' not in data or data['name'] is None) and 'hosts' in data:
if data['hosts'] is None or all(host is None for host in data['hosts']):
raise AnsibleParserError("Hosts list cannot be empty - please check your playbook")
if isinstance(data['hosts'], list):
data['name'] = ','.join(data['hosts'])
else:
data['name'] = data['hosts']
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. "
"The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
except AssertionError as e:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
self.roles[:0] = roles
return self.roles
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
for key in prompt_data:
if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
D2 are merged first, followed by D1, and lastly by the tasks from
the parent role R last. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
# Don't insert tasks from ``import/include_role``, preventing
# duplicate execution at the wrong time
if r.from_include:
continue
block_list.extend(r.compile(play=self))
return block_list
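    # Illustrative ordering for the docstring example above (role R depends on D1,
    # which depends on D2): the compiled list is [D2 tasks] + [D1 tasks] + [R tasks].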
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
if r.from_include:
continue
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
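    # Net effect of the assembly above: pre_tasks, flush_handlers, role tasks,
    # tasks, flush_handlers, post_tasks, flush_handlers.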
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
elif not isinstance(self.vars_files, list):
return [self.vars_files]
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
return new_me
| gpl-3.0 | -3,397,066,269,610,508,300 | 37.673469 | 138 | 0.614399 | false | 4.203105 | false | false | false | 0.002337 |
kenshay/ImageScript | ProgramData/SystemFiles/opencv/sources/samples/python2/hist.py | 9 | 3575 | #!/usr/bin/env python
''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution
Benefit : Learn how to draw histogram of images
Get familiar with cv2.calcHist, cv2.equalizeHist, cv2.normalize and some drawing functions
Level : Beginner or Intermediate
Functions : 1) hist_curve : returns histogram of an image drawn as curves
2) hist_lines : returns histogram of an image drawn as bins ( only for grayscale images )
Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''
import cv2
import numpy as np
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
h = np.zeros((300,256,3))
if len(im.shape) == 2:
color = [(255,255,255)]
elif im.shape[2] == 3:
color = [ (255,0,0),(0,255,0),(0,0,255) ]
for ch, col in enumerate(color):
hist_item = cv2.calcHist([im],[ch],None,[256],[0,256])
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
pts = np.int32(np.column_stack((bins,hist)))
cv2.polylines(h,[pts],False,col)
y=np.flipud(h)
return y
def hist_lines(im):
h = np.zeros((300,256,3))
if len(im.shape)!=2:
print "hist_lines applicable only for grayscale images"
#print "so converting image to grayscale for representation"
im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
hist_item = cv2.calcHist([im],[0],None,[256],[0,256])
cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)
hist=np.int32(np.around(hist_item))
for x,y in enumerate(hist):
cv2.line(h,(x,0),(x,y),(255,255,255))
y = np.flipud(h)
return y
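# Minimal sketch of using the two helpers directly, outside the interactive loop
# below (the image path is an assumption; any readable image works):
#   img = cv2.imread('../data/lena.jpg')
#   if img is not None:
#       cv2.imshow('curve histogram', hist_curve(img))
#       cv2.imshow('bin histogram', hist_lines(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)))
#       cv2.waitKey(0)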
if __name__ == '__main__':
import sys
if len(sys.argv)>1:
fname = sys.argv[1]
else :
fname = '../data/lena.jpg'
print "usage : python hist.py <image_file>"
im = cv2.imread(fname)
if im is None:
print 'Failed to load image file:', fname
sys.exit(1)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
print ''' Histogram plotting \n
Keymap :\n
a - show histogram for color image in curve mode \n
b - show histogram in bin mode \n
c - show equalized histogram (always in bin mode) \n
d - show histogram for color image in curve mode \n
e - show histogram for a normalized image in curve mode \n
Esc - exit \n
'''
cv2.imshow('image',im)
while True:
k = cv2.waitKey(0)&0xFF
if k == ord('a'):
curve = hist_curve(im)
cv2.imshow('histogram',curve)
cv2.imshow('image',im)
print 'a'
elif k == ord('b'):
print 'b'
lines = hist_lines(im)
cv2.imshow('histogram',lines)
cv2.imshow('image',gray)
elif k == ord('c'):
print 'c'
equ = cv2.equalizeHist(gray)
lines = hist_lines(equ)
cv2.imshow('histogram',lines)
cv2.imshow('image',equ)
elif k == ord('d'):
print 'd'
curve = hist_curve(gray)
cv2.imshow('histogram',curve)
cv2.imshow('image',gray)
elif k == ord('e'):
print 'e'
norm = cv2.normalize(gray,alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX)
lines = hist_lines(norm)
cv2.imshow('histogram',lines)
cv2.imshow('image',norm)
elif k == 27:
print 'ESC'
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
| gpl-3.0 | 646,714,477,431,335,800 | 29.818966 | 127 | 0.579021 | false | 3.27681 | false | false | false | 0.023497 |
tadek-project/tadek-common | setup.py | 1 | 6075 | #!/usr/bin/env python
################################################################################
## ##
## This file is a part of TADEK. ##
## ##
## TADEK - Test Automation in a Distributed Environment ##
## (http://tadek.comarch.com) ##
## ##
## Copyright (C) 2011,2012 Comarch S.A. ##
## All rights reserved. ##
## ##
## TADEK is free software for non-commercial purposes. For commercial ones ##
## we offer a commercial license. Please check http://tadek.comarch.com for ##
## details or write to tadek-licenses@comarch.com ##
## ##
## You can redistribute it and/or modify it under the terms of the ##
## GNU General Public License as published by the Free Software Foundation, ##
## either version 3 of the License, or (at your option) any later version. ##
## ##
## TADEK is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with TADEK bundled with this file in the file LICENSE. ##
## If not, see http://www.gnu.org/licenses/. ##
## ##
## Please notice that Contributor Agreement applies to any contribution ##
## you make to TADEK. The Agreement must be completed, signed and sent ##
## to Comarch before any contribution is made. You should have received ##
## a copy of Contribution Agreement along with TADEK bundled with this file ##
## in the file CONTRIBUTION_AGREEMENT.pdf or see http://tadek.comarch.com ##
## or write to tadek-licenses@comarch.com ##
## ##
################################################################################
import os
from glob import glob
from subprocess import check_call
from distutils.command.build import build as _build
from distutils.command.clean import clean as _clean
from distutils.core import Distribution as _Distribution
from distutils.core import setup
from distutils.dir_util import remove_tree
from distutils import log
from tadek.core.config import CONF_DIR, DATA_DIR, DOC_DIR, VERSION
BUILD_HTML_DIR = os.path.join("build", "html")
HTML_DOC_DIR = os.path.join(DOC_DIR, "api", "html")
PACKAGES = []
DATA_FILES = []
class Distribution(_Distribution):
_Distribution.global_options.extend([
("skip-doc", None,
"don't build and install API documentation"),
("only-doc", None,
"build and install only API documentation"),
])
def __init__(self, attrs=None):
self.skip_doc = 0
self.only_doc = 0
_Distribution.__init__(self, attrs)
class build(_build):
def run(self):
if not self.distribution.only_doc:
DATA_FILES.extend([
(os.path.join(CONF_DIR, "common"),
glob(os.path.join("data", "config", "common", '*'))),
(os.path.join(DATA_DIR, "locale"), []),
])
PACKAGES.extend([
"tadek",
"tadek.connection",
"tadek.connection.protocol",
"tadek.core",
"tadek.engine",
"tadek.engine.channels",
"tadek.models",
"tadek.teststeps",
"tadek.testcases",
"tadek.testsuites"
])
_build.run(self)
if not self.distribution.skip_doc:
if not os.path.exists(BUILD_HTML_DIR):
os.makedirs(BUILD_HTML_DIR)
EPYDOC = ["epydoc",
"--html", "--docformat=reStructuredText",
"--inheritance=grouped",
"--no-private",
"--show-imports",
"--redundant-details",
"--graph=umlclasstree",
"--css=" + os.path.join("doc", "tadek.css"),
"--output=" + BUILD_HTML_DIR,
"--no-sourcecode",
"tadek"]
check_call(EPYDOC)
DATA_FILES.append((HTML_DOC_DIR,
glob(os.path.join("build", "html", '*'))))
class clean(_clean):
def run(self):
if self.all:
if os.path.exists(BUILD_HTML_DIR):
remove_tree(BUILD_HTML_DIR, dry_run=self.dry_run)
else:
log.warn("'%s' does not exist -- can't clean it",
BUILD_HTML_DIR)
_clean.run(self)
setup(
name="tadek-common",
version=VERSION,
description="TADEK is a distributed environment for test automation",
long_description=''.join(['\n', open("README").read()]),
author="Comarch TADEK Team",
author_email="tadek@comarch.com",
license="http://tadek.comarch.com/licensing",
url="http://tadek.comarch.com/",
distclass=Distribution,
cmdclass={"build": build, "clean": clean},
packages=PACKAGES,
data_files=DATA_FILES,
)
| gpl-3.0 | -6,610,551,545,670,898,000 | 44.676692 | 80 | 0.465185 | false | 4.602273 | false | false | false | 0.013663 |
sergei-maertens/django | tests/migrations/test_autodetector.py | 5 | 113881 | # -*- coding: utf-8 -*-
import functools
import re
from django.apps import apps
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser
from django.core.validators import RegexValidator, validate_slug
from django.db import connection, models
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ModelState, ProjectState
from django.test import TestCase, mock, override_settings
from django.test.utils import isolate_lru_cache
from .models import FoodManager, FoodQuerySet
class DeconstructibleObject(object):
"""
A custom deconstructible object.
"""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def deconstruct(self):
return (
self.__module__ + '.' + self.__class__.__name__,
self.args,
self.kwargs
)
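    # For instance, DeconstructibleObject(1, key='x').deconstruct() returns
    # ('<module path>.DeconstructibleObject', (1,), {'key': 'x'}); the exact module
    # path depends on how this test module is imported.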
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
])
author_name_null = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, null=True)),
])
author_name_longer = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=400)),
])
author_name_renamed = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("names", models.CharField(max_length=200)),
])
author_name_default = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default='Ada Lovelace')),
])
author_dates_of_birth_auto_now = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now=True)),
("date_time_of_birth", models.DateTimeField(auto_now=True)),
("time_of_birth", models.TimeField(auto_now=True)),
])
author_dates_of_birth_auto_now_add = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("date_of_birth", models.DateField(auto_now_add=True)),
("date_time_of_birth", models.DateTimeField(auto_now_add=True)),
("time_of_birth", models.TimeField(auto_now_add=True)),
])
author_name_deconstructible_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
])
author_name_deconstructible_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject())),
])
author_name_deconstructible_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructible_4 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=models.IntegerField())),
])
author_name_deconstructible_list_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 123])),
])
author_name_deconstructible_list_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 123])),
])
author_name_deconstructible_list_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=[DeconstructibleObject(), 999])),
])
author_name_deconstructible_tuple_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 123))),
])
author_name_deconstructible_tuple_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 123))),
])
author_name_deconstructible_tuple_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=(DeconstructibleObject(), 999))),
])
author_name_deconstructible_dict_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 123
})),
])
author_name_deconstructible_dict_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 123
})),
])
author_name_deconstructible_dict_3 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default={
'item': DeconstructibleObject(), 'otheritem': 999
})),
])
author_name_nested_deconstructible_1 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_2 = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_changed_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2-changed'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_extra_arg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
None,
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
))),
])
author_name_nested_deconstructible_changed_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c-changed')),
))),
])
author_name_nested_deconstructible_extra_kwarg = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200, default=DeconstructibleObject(
DeconstructibleObject(1),
(DeconstructibleObject('t1'), DeconstructibleObject('t2'),),
a=DeconstructibleObject('A'),
b=DeconstructibleObject(B=DeconstructibleObject('c')),
c=None,
))),
])
author_custom_pk = ModelState("testapp", "Author", [("pk_field", models.IntegerField(primary_key=True))])
author_with_biography_non_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField()),
("biography", models.TextField()),
])
author_with_biography_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(blank=True)),
("biography", models.TextField(blank=True)),
])
author_with_book = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_book_order_wrt = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
], options={"order_with_respect_to": "book"})
author_renamed_with_book = ModelState("testapp", "Writer", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
author_with_publisher_string = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher_name", models.CharField(max_length=200)),
])
author_with_publisher = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
author_with_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("auth.User", models.CASCADE)),
])
author_with_custom_user = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
("user", models.ForeignKey("thirdapp.CustomUser", models.CASCADE)),
])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author",))
author_proxy_options = ModelState("testapp", "AuthorProxy", [], {
"proxy": True,
"verbose_name": "Super Author",
}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_third = ModelState("thirdapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_third_notproxy = ModelState("thirdapp", "AuthorProxy", [], {}, ("testapp.author", ))
author_proxy_proxy = ModelState("testapp", "AAuthorProxyProxy", [], {"proxy": True}, ("testapp.authorproxy", ))
author_unmanaged = ModelState("testapp", "AuthorUnmanaged", [], {"managed": False}, ("testapp.author", ))
author_unmanaged_managed = ModelState("testapp", "AuthorUnmanaged", [], {}, ("testapp.author", ))
author_unmanaged_default_pk = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_unmanaged_custom_pk = ModelState("testapp", "Author", [
("pk_field", models.IntegerField(primary_key=True)),
])
author_with_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher")),
])
author_with_m2m_blank = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", blank=True)),
])
author_with_m2m_through = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Contract")),
])
author_with_renamed_m2m_through = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.ManyToManyField("testapp.Publisher", through="testapp.Deal")),
])
author_with_former_m2m = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("publishers", models.CharField(max_length=100)),
])
author_with_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
author_with_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_with_new_db_table_options = ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_two"})
author_renamed_with_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_one"})
author_renamed_with_new_db_table_options = ModelState("testapp", "NewAuthor", [
("id", models.AutoField(primary_key=True)),
], {"db_table": "author_three"})
contract = ModelState("testapp", "Contract", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
contract_renamed = ModelState("testapp", "Deal", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("publisher", models.ForeignKey("testapp.Publisher", models.CASCADE)),
])
publisher = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
])
publisher_with_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_aardvark_author = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Aardvark", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
publisher_with_book = ModelState("testapp", "Publisher", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Book", models.CASCADE)),
("name", models.CharField(max_length=100)),
])
other_pony = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
])
other_pony_food = ModelState("otherapp", "Pony", [
("id", models.AutoField(primary_key=True)),
], managers=[
('food_qs', FoodQuerySet.as_manager()),
('food_mgr', FoodManager('a', 'b')),
('food_mgr_kwargs', FoodManager('x', 'y', 3, 4)),
])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_proxy_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("thirdapp.AuthorProxy", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_proxy_proxy_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.AAuthorProxyProxy", models.CASCADE)),
])
book_migrations_fk = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.UnmigratedModel", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_no_author = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=200)),
])
book_with_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_field_and_author_renamed = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("writer", models.ForeignKey("testapp.Writer", models.CASCADE)),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author")),
("title", models.CharField(max_length=200)),
])
book_with_multiple_authors_through_attribution = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("authors", models.ManyToManyField("testapp.Author", through="otherapp.Attribution")),
("title", models.CharField(max_length=200)),
])
book_indexes = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"indexes": [models.Index(fields=["author", "title"], name="book_title_author_idx")],
})
book_unordered_indexes = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"indexes": [models.Index(fields=["title", "author"], name="book_author_title_idx")],
})
book_foo_together = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("author", "title")},
"unique_together": {("author", "title")},
})
book_foo_together_2 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "author")},
"unique_together": {("title", "author")},
})
book_foo_together_3 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield")},
"unique_together": {("title", "newfield")},
})
book_foo_together_4 = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("newfield2", models.IntegerField()),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "newfield2")},
"unique_together": {("title", "newfield2")},
})
attribution = ModelState("otherapp", "Attribution", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("testapp.Author", models.CASCADE)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
edition = ModelState("thirdapp", "Edition", [
("id", models.AutoField(primary_key=True)),
("book", models.ForeignKey("otherapp.Book", models.CASCADE)),
])
custom_user = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
], bases=(AbstractBaseUser, ))
custom_user_no_inherit = ModelState("thirdapp", "CustomUser", [
("id", models.AutoField(primary_key=True)),
("username", models.CharField(max_length=255)),
])
aardvark = ModelState("thirdapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_testapp = ModelState("testapp", "Aardvark", [("id", models.AutoField(primary_key=True))])
aardvark_based_on_author = ModelState("testapp", "Aardvark", [], bases=("testapp.Author", ))
aardvark_pk_fk_author = ModelState("testapp", "Aardvark", [
("id", models.OneToOneField("testapp.Author", models.CASCADE, primary_key=True)),
])
knight = ModelState("eggs", "Knight", [("id", models.AutoField(primary_key=True))])
rabbit = ModelState("eggs", "Rabbit", [
("id", models.AutoField(primary_key=True)),
("knight", models.ForeignKey("eggs.Knight", models.CASCADE)),
("parent", models.ForeignKey("eggs.Rabbit", models.CASCADE)),
], {
"unique_together": {("parent", "knight")},
"indexes": [models.Index(fields=["parent", "knight"], name='rabbit_circular_fk_index')],
})
def repr_changes(self, changes, include_dependencies=False):
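        """Return a string summarizing the detected changes, for use in failure messages."""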
output = ""
for app_label, migrations in sorted(changes.items()):
output += " %s:\n" % app_label
for migration in migrations:
output += " %s\n" % migration.name
for operation in migration.operations:
output += " %s\n" % operation
if include_dependencies:
output += " Dependencies:\n"
if migration.dependencies:
for dep in migration.dependencies:
output += " %s\n" % (dep,)
else:
output += " None\n"
return output
def assertNumberMigrations(self, changes, app_label, number):
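        """Assert that exactly the given number of migrations were detected for app_label."""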
if len(changes.get(app_label, [])) != number:
self.fail("Incorrect number of migrations (%s) for %s (expected %s)\n%s" % (
len(changes.get(app_label, [])),
app_label,
number,
self.repr_changes(changes),
))
def assertMigrationDependencies(self, changes, app_label, position, dependencies):
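        """Assert that the migration at the given position for app_label has exactly the given dependencies."""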
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
if set(migration.dependencies) != set(dependencies):
self.fail("Migration dependencies mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
dependencies,
self.repr_changes(changes, include_dependencies=True),
))
def assertOperationTypes(self, changes, app_label, position, types):
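        """Assert that the migration at the given position contains exactly the given operation types, in order."""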
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
real_types = [operation.__class__.__name__ for operation in migration.operations]
if types != real_types:
self.fail("Operation type mismatch for %s.%s (expected %s):\n%s" % (
app_label,
migration.name,
types,
self.repr_changes(changes),
))
def assertOperationAttributes(self, changes, app_label, position, operation_position, **attrs):
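        """Assert that the operation at operation_position has the given attribute values."""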
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
        if len(migration.operations) < operation_position + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_position]
for attr, value in attrs.items():
if getattr(operation, attr, None) != value:
self.fail("Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(operation, attr, None),
self.repr_changes(changes),
))
def assertOperationFieldAttributes(self, changes, app_label, position, operation_position, **attrs):
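        """Assert that the field of the operation at operation_position has the given attribute values."""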
if not changes.get(app_label):
self.fail("No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)))
if len(changes[app_label]) < position + 1:
self.fail("No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)))
migration = changes[app_label][position]
        if len(migration.operations) < operation_position + 1:
self.fail("No operation at index %s for %s.%s\n%s" % (
operation_position,
app_label,
migration.name,
self.repr_changes(changes),
))
operation = migration.operations[operation_position]
if not hasattr(operation, 'field'):
self.fail("No field attribute for %s.%s op #%s." % (
app_label,
migration.name,
operation_position,
))
field = operation.field
for attr, value in attrs.items():
if getattr(field, attr, None) != value:
self.fail("Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, got %r):\n%s" % (
app_label,
migration.name,
operation_position,
attr,
value,
getattr(field, attr, None),
self.repr_changes(changes),
))
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model(model_state.clone())
return project_state
def get_changes(self, before_states, after_states, questioner=None):
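        """Detect changes between two lists of model states and return them keyed by app label."""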
return MigrationAutodetector(
self.make_project_state(before_states),
self.make_project_state(after_states),
questioner,
)._detect_changes()
def test_arrange_for_graph(self):
"""Tests auto-naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector.arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"""
Tests that trim does not remove dependencies but does remove unwanted
apps.
"""
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector.arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, {"testapp"})
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_custom_migration_name(self):
"""Tests custom naming of migrations for graph matching."""
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency("testapp.0002_foobar", ("testapp", "0002_foobar"), ("testapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
migration_name = 'custom_name'
changes = autodetector.arrange_for_graph(changes, graph, migration_name)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_%s" % migration_name)
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_%s" % migration_name)
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_new_model(self):
"""Tests autodetection of new models."""
changes = self.get_changes([], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="Pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
def test_old_model(self):
"""Tests deletion of old models."""
changes = self.get_changes([self.author_empty], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
def test_add_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_empty], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_date_fields_with_auto_now_not_asking_for_default(self, mocked_ask_method):
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now=True)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_date_fields_with_auto_now_add_not_asking_for_null_addition(self, mocked_ask_method):
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now_add])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_auto_now_add_addition')
def test_add_date_fields_with_auto_now_add_asking_for_default(self, mocked_ask_method):
changes = self.get_changes([self.author_empty], [self.author_dates_of_birth_auto_now_add])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField", "AddField"])
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 1, auto_now_add=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 2, auto_now_add=True)
self.assertEqual(mocked_ask_method.call_count, 3)
def test_remove_field(self):
"""Tests autodetection of removed fields."""
changes = self.get_changes([self.author_name], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name")
def test_alter_field(self):
"""Tests autodetection of new fields."""
changes = self.get_changes([self.author_name], [self.author_name_longer])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
def test_supports_functools_partial(self):
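        """functools.partial values are compared by func, args, and keywords rather than by identity."""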
def _content_file_name(instance, filename, key, **kwargs):
return '{}/{}'.format(instance, filename)
def content_file_name(key, **kwargs):
return functools.partial(_content_file_name, key, **kwargs)
# An unchanged partial reference.
before = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file'))),
])]
after = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file'))),
])]
changes = self.get_changes(before, after)
self.assertNumberMigrations(changes, 'testapp', 0)
# A changed partial reference.
args_changed = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('other-file'))),
])]
changes = self.get_changes(before, args_changed)
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['AlterField'])
# Can't use assertOperationFieldAttributes because we need the
# deconstructed version, i.e., the exploded func/args/keywords rather
# than the partial: we don't care if it's not the same instance of the
# partial, only if it's the same source function, args, and keywords.
value = changes['testapp'][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ('other-file',), {}),
(value.func, value.args, value.keywords)
)
kwargs_changed = [ModelState("testapp", "Author", [
("id", models.AutoField(primary_key=True)),
("file", models.FileField(max_length=200, upload_to=content_file_name('file', spam='eggs'))),
])]
changes = self.get_changes(before, kwargs_changed)
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['AlterField'])
value = changes['testapp'][0].operations[0].field.upload_to
self.assertEqual(
(_content_file_name, ('file',), {'spam': 'eggs'}),
(value.func, value.args, value.keywords)
)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
                side_effect=AssertionError("Should not have prompted for not null alteration"))
def test_alter_field_to_not_null_with_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default='Ada Lovelace')
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value=models.NOT_PROVIDED)
def test_alter_field_to_not_null_without_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=True)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default=models.NOT_PROVIDED)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_alteration',
return_value='Some Name')
def test_alter_field_to_not_null_oneoff_default(self, mocked_ask_method):
"""
#23609 - Tests autodetection of nullable to non-nullable alterations.
"""
changes = self.get_changes([self.author_name_null], [self.author_name])
self.assertEqual(mocked_ask_method.call_count, 1)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="name", preserve_default=False)
self.assertOperationFieldAttributes(changes, "testapp", 0, 0, default="Some Name")
def test_rename_field(self):
"""Tests autodetection of renamed fields."""
changes = self.get_changes(
[self.author_name], [self.author_name_renamed], MigrationQuestioner({"ask_rename": True})
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="name", new_name="names")
def test_rename_model(self):
"""Tests autodetection of renamed models."""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_author_renamed],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Now that RenameModel handles related fields too, there should be
# no AlterField for the related field.
self.assertNumberMigrations(changes, 'otherapp', 0)
def test_rename_m2m_through_model(self):
"""
Tests autodetection of renamed models that are used in M2M relations as
through models.
"""
changes = self.get_changes(
[self.author_with_m2m_through, self.publisher, self.contract],
[self.author_with_renamed_m2m_through, self.publisher, self.contract_renamed],
MigrationQuestioner({'ask_rename_model': True})
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['RenameModel'])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name='Contract', new_name='Deal')
def test_rename_model_with_renamed_rel_field(self):
"""
Tests autodetection of renamed models while simultaneously renaming one
of the fields that relate to the renamed model.
"""
changes = self.get_changes(
[self.author_with_book, self.book],
[self.author_renamed_with_book, self.book_with_field_and_author_renamed],
MigrationQuestioner({"ask_rename": True, "ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, old_name="Author", new_name="Writer")
# Right number/type of migrations for related field rename?
# Alter is already taken care of.
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["RenameField"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, old_name="author", new_name="writer")
def test_rename_model_with_fks_in_different_position(self):
"""
#24537 - Tests that the order of fields in a model does not influence
the RenameModel detection.
"""
before = [
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "EntityB", [
("id", models.AutoField(primary_key=True)),
("some_label", models.CharField(max_length=255)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
]),
]
after = [
ModelState("testapp", "EntityA", [
("id", models.AutoField(primary_key=True)),
]),
ModelState("testapp", "RenamedEntityB", [
("id", models.AutoField(primary_key=True)),
("entity_a", models.ForeignKey("testapp.EntityA", models.CASCADE)),
("some_label", models.CharField(max_length=255)),
]),
]
changes = self.get_changes(before, after, MigrationQuestioner({"ask_rename_model": True}))
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="EntityB", new_name="RenamedEntityB")
def test_fk_dependency(self):
"""Tests that having a ForeignKey automatically adds a dependency."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (author),
# thirdapp (edition) depends on otherapp (book)
changes = self.get_changes([], [self.author_name, self.book, self.edition])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("testapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="Edition")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("otherapp", "auto_1")])
def test_proxy_fk_dependency(self):
"""Tests that FK dependencies still work on proxy models."""
# Note that testapp (author) has no dependencies,
# otherapp (book) depends on testapp (authorproxy)
changes = self.get_changes([], [self.author_empty, self.author_proxy_third, self.book_proxy_fk])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("thirdapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="AuthorProxy")
self.assertMigrationDependencies(changes, 'thirdapp', 0, [("testapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app
        does not have a dependency on itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
changes = self.get_changes([], [self.author_with_book, self.book, self.publisher_with_book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "auto_1")])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 2)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'otherapp', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'otherapp', 0, [])
self.assertMigrationDependencies(changes, 'otherapp', 1, [("otherapp", "auto_1"), ("testapp", "auto_1")])
# both split migrations should be `initial`
self.assertTrue(changes['otherapp'][0].initial)
self.assertTrue(changes['otherapp'][1].initial)
def test_same_app_circular_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app does
        not have a dependency on itself.
"""
changes = self.get_changes([], [self.author_with_publisher, self.publisher_with_author])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "AddField"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="Publisher")
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher")
self.assertMigrationDependencies(changes, 'testapp', 0, [])
def test_same_app_circular_fk_dependency_with_unique_together_and_indexes(self):
"""
        #22275 - Tests that a migration with a circular FK dependency does not
        try to create the unique_together constraint and indexes before all the
        required fields have been created.
"""
changes = self.get_changes([], [self.knight, self.rabbit])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'eggs', 1)
self.assertOperationTypes(
changes, 'eggs', 0, ["CreateModel", "CreateModel", "AddIndex", "AlterUniqueTogether"]
)
self.assertNotIn("unique_together", changes['eggs'][0].operations[0].options)
self.assertNotIn("unique_together", changes['eggs'][0].operations[1].options)
self.assertMigrationDependencies(changes, 'eggs', 0, [])
def test_alter_db_table_add(self):
"""Tests detection for adding db_table in model's options."""
changes = self.get_changes([self.author_empty], [self.author_with_db_table_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_one")
def test_alter_db_table_change(self):
"""Tests detection for changing db_table in model's options'."""
changes = self.get_changes([self.author_with_db_table_options], [self.author_with_new_db_table_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table="author_two")
def test_alter_db_table_remove(self):
"""Tests detection for removing db_table in model's options."""
changes = self.get_changes([self.author_with_db_table_options], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", table=None)
def test_alter_db_table_no_changes(self):
"""
Tests that alter_db_table doesn't generate a migration if no changes
have been made.
"""
changes = self.get_changes([self.author_with_db_table_options], [self.author_with_db_table_options])
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_keep_db_table_with_model_change(self):
"""
        Tests that when a model is renamed but its db_table stays the same, the
        autodetector does not create more than one operation.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
def test_alter_db_table_with_model_change(self):
"""
        Tests that when both the model name and db_table change, the
        autodetector creates two operations.
"""
changes = self.get_changes(
[self.author_with_db_table_options],
[self.author_renamed_with_new_db_table_options],
MigrationQuestioner({"ask_rename_model": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RenameModel", "AlterModelTable"])
self.assertOperationAttributes(changes, "testapp", 0, 0, old_name="Author", new_name="NewAuthor")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="newauthor", table="author_three")
def test_identical_regex_doesnt_alter(self):
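        """A RegexValidator that deconstructs to the same value as validate_slug triggers no change."""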
from_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[
RegexValidator(
re.compile('^[-a-zA-Z0-9_]+\\Z'),
"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.",
'invalid'
)
]))]
)
to_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))]
)
changes = self.get_changes([from_state], [to_state])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 0)
def test_different_regex_does_alter(self):
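        """A RegexValidator whose pattern and flags (32 == re.UNICODE) differ from validate_slug triggers an AlterField."""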
from_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[
RegexValidator(
re.compile('^[a-z]+\\Z', 32),
"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.",
'invalid'
)
]))]
)
to_state = ModelState(
"testapp", "model", [("id", models.AutoField(primary_key=True, validators=[validate_slug]))]
)
changes = self.get_changes([from_state], [to_state])
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterField"])
def test_empty_foo_together(self):
"""
#23452 - Empty unique/index_together shouldn't generate a migration.
"""
# Explicitly testing for not specified, since this is the case after
# a CreateModel operation w/o any definition on the original model
model_state_not_specified = ModelState("a", "model", [("id", models.AutoField(primary_key=True))])
# Explicitly testing for None, since this was the issue in #23452 after
        # an AlterFooTogether operation with e.g. () as value
model_state_none = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": None,
"unique_together": None,
})
# Explicitly testing for the empty set, since we now always have sets.
# During removal (('col1', 'col2'),) --> () this becomes set([])
model_state_empty = ModelState("a", "model", [
("id", models.AutoField(primary_key=True))
], {
"index_together": set(),
"unique_together": set(),
})
def test(from_state, to_state, msg):
changes = self.get_changes([from_state], [to_state])
if len(changes) > 0:
ops = ', '.join(o.__class__.__name__ for o in changes['a'][0].operations)
self.fail('Created operation(s) %s from %s' % (ops, msg))
tests = (
(model_state_not_specified, model_state_not_specified, '"not specified" to "not specified"'),
(model_state_not_specified, model_state_none, '"not specified" to "None"'),
(model_state_not_specified, model_state_empty, '"not specified" to "empty"'),
(model_state_none, model_state_not_specified, '"None" to "not specified"'),
(model_state_none, model_state_none, '"None" to "None"'),
(model_state_none, model_state_empty, '"None" to "empty"'),
(model_state_empty, model_state_not_specified, '"empty" to "not specified"'),
(model_state_empty, model_state_none, '"empty" to "None"'),
(model_state_empty, model_state_empty, '"empty" to "empty"'),
)
for t in tests:
test(*t)
def test_create_model_with_indexes(self):
"""Test creation of new model with indexes already defined."""
author = ModelState('otherapp', 'Author', [
('id', models.AutoField(primary_key=True)),
('name', models.CharField(max_length=200)),
], {'indexes': [models.Index(fields=['name'], name='create_model_with_indexes_idx')]})
changes = self.get_changes([], [author])
added_index = models.Index(fields=['name'], name='create_model_with_indexes_idx')
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 2)
# Right actions order?
self.assertOperationTypes(changes, 'otherapp', 0, ['CreateModel', 'AddIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name='Author')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, model_name='author', index=added_index)
def test_add_indexes(self):
"""Test change detection of new indexes."""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_indexes])
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AddIndex'])
added_index = models.Index(fields=['author', 'title'], name='book_title_author_idx')
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', index=added_index)
def test_remove_indexes(self):
"""Test change detection of removed indexes."""
changes = self.get_changes([self.author_empty, self.book_indexes], [self.author_empty, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['RemoveIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', name='book_title_author_idx')
def test_order_fields_indexes(self):
"""Test change detection of reordering of fields in indexes."""
changes = self.get_changes(
[self.author_empty, self.book_indexes], [self.author_empty, self.book_unordered_indexes]
)
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['RemoveIndex', 'AddIndex'])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, model_name='book', name='book_title_author_idx')
added_index = models.Index(fields=['title', 'author'], name='book_author_title_idx')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, model_name='book', index=added_index)
def test_add_foo_together(self):
"""Tests index/unique_together detection."""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_foo_together])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("author", "title")})
def test_remove_foo_together(self):
"""Tests index/unique_together detection."""
changes = self.get_changes([self.author_empty, self.book_foo_together], [self.author_empty, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
def test_foo_together_remove_fk(self):
"""Tests unique_together and field removal detection & ordering"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_with_no_author]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"AlterUniqueTogether", "AlterIndexTogether", "RemoveField"
])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together=set())
self.assertOperationAttributes(changes, "otherapp", 0, 2, model_name="book", name="author")
def test_foo_together_no_changes(self):
"""
Tests that index/unique_together doesn't generate a migration if no
changes have been made.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_foo_together]
)
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_foo_together_ordering(self):
"""
Tests that index/unique_together also triggers on ordering changes.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together], [self.author_empty, self.book_foo_together_2]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, name="book", unique_together={("title", "author")})
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", index_together={("title", "author")})
def test_add_field_and_foo_together(self):
"""
        Tests that added fields are created before they are used in
        index/unique_together.
"""
changes = self.get_changes([self.author_empty, self.book], [self.author_empty, self.book_foo_together_3])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["AddField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("title", "newfield")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield")})
def test_create_model_and_unique_together(self):
author = ModelState("otherapp", "Author", [
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=200)),
])
book_with_author = ModelState("otherapp", "Book", [
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("otherapp.Author", models.CASCADE)),
("title", models.CharField(max_length=200)),
], {
"index_together": {("title", "author")},
"unique_together": {("title", "author")},
})
changes = self.get_changes([self.book_with_no_author], [author, book_with_author])
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 4)
# Right actions order?
self.assertOperationTypes(
changes, 'otherapp', 0,
['CreateModel', 'AddField', 'AlterUniqueTogether', 'AlterIndexTogether']
)
def test_remove_field_and_foo_together(self):
"""
Tests that removed fields will be removed after updating
index/unique_together.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together_3], [self.author_empty, self.book_foo_together]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 0, model_name="book", name="newfield")
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={("author", "title")})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("author", "title")})
def test_rename_field_and_foo_together(self):
"""
        Tests that fields are renamed before updating
        index/unique_together.
"""
changes = self.get_changes(
[self.author_empty, self.book_foo_together_3],
[self.author_empty, self.book_foo_together_4],
MigrationQuestioner({"ask_rename": True}),
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RenameField", "AlterUniqueTogether", "AlterIndexTogether"])
self.assertOperationAttributes(changes, "otherapp", 0, 1, name="book", unique_together={
("title", "newfield2")
})
self.assertOperationAttributes(changes, "otherapp", 0, 2, name="book", index_together={("title", "newfield2")})
def test_proxy(self):
"""Tests that the autodetector correctly deals with proxy models."""
# First, we test adding a proxy model
changes = self.get_changes([self.author_empty], [self.author_empty, self.author_proxy])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel"])
self.assertOperationAttributes(
changes, "testapp", 0, 0, name="AuthorProxy", options={"proxy": True, "indexes": []}
)
# Now, we test turning a proxy model into a non-proxy model
# It should delete the proxy then make the real one
changes = self.get_changes(
[self.author_empty, self.author_proxy], [self.author_empty, self.author_proxy_notproxy]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["DeleteModel", "CreateModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="AuthorProxy")
self.assertOperationAttributes(changes, "testapp", 0, 1, name="AuthorProxy", options={"indexes": []})
def test_proxy_custom_pk(self):
"""
        #23415 - The autodetector must correctly resolve FKs to proxy models,
        including when the concrete model has a custom primary key.
"""
# First, we test the default pk field name
changes = self.get_changes([], [self.author_empty, self.author_proxy_third, self.book_proxy_fk])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
changes = self.get_changes([], [self.author_custom_pk, self.author_proxy_third, self.book_proxy_fk])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
def test_proxy_to_mti_with_fk_to_proxy(self):
# First, test the pk table and field name.
changes = self.get_changes(
[],
[self.author_empty, self.author_proxy_third, self.book_proxy_fk],
)
self.assertEqual(
changes['otherapp'][0].operations[0].fields[2][1].remote_field.model._meta.db_table,
'testapp_author',
)
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Change AuthorProxy to use MTI.
changes = self.get_changes(
[self.author_empty, self.author_proxy_third, self.book_proxy_fk],
[self.author_empty, self.author_proxy_third_notproxy, self.book_proxy_fk],
)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ['DeleteModel', 'CreateModel'])
# Right number/type of migrations for the Book model with a FK to
# AuthorProxy?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AlterField'])
# otherapp should depend on thirdapp.
self.assertMigrationDependencies(changes, 'otherapp', 0, [('thirdapp', 'auto_1')])
# Now, test the pk table and field name.
self.assertEqual(
changes['otherapp'][0].operations[0].field.remote_field.model._meta.db_table,
'thirdapp_authorproxy',
)
self.assertEqual(changes['otherapp'][0].operations[0].field.remote_field.field_name, 'author_ptr')
def test_proxy_to_mti_with_fk_to_proxy_proxy(self):
# First, test the pk table and field name.
changes = self.get_changes(
[],
[self.author_empty, self.author_proxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
)
self.assertEqual(
changes['otherapp'][0].operations[0].fields[1][1].remote_field.model._meta.db_table,
'testapp_author',
)
self.assertEqual(changes['otherapp'][0].operations[0].fields[1][1].remote_field.field_name, 'id')
# Change AuthorProxy to use MTI. FK still points to AAuthorProxyProxy,
# a proxy of AuthorProxy.
changes = self.get_changes(
[self.author_empty, self.author_proxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
[self.author_empty, self.author_proxy_notproxy, self.author_proxy_proxy, self.book_proxy_proxy_fk],
)
# Right number/type of migrations for the AuthorProxy model?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ['DeleteModel', 'CreateModel'])
# Right number/type of migrations for the Book model with a FK to
# AAuthorProxyProxy?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ['AlterField'])
# otherapp should depend on testapp.
self.assertMigrationDependencies(changes, 'otherapp', 0, [('testapp', 'auto_1')])
# Now, test the pk table and field name.
self.assertEqual(
changes['otherapp'][0].operations[0].field.remote_field.model._meta.db_table,
'testapp_authorproxy',
)
self.assertEqual(changes['otherapp'][0].operations[0].field.remote_field.field_name, 'author_ptr')
def test_unmanaged_create(self):
"""Tests that the autodetector correctly deals with managed models."""
# First, we test adding an unmanaged model
changes = self.get_changes([self.author_empty], [self.author_empty, self.author_unmanaged])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(
changes, 'testapp', 0, 0, name="AuthorUnmanaged", options={"managed": False, "indexes": []}
)
def test_unmanaged_to_managed(self):
# Now, we test turning an unmanaged model into a managed model
changes = self.get_changes(
[self.author_empty, self.author_unmanaged], [self.author_empty, self.author_unmanaged_managed]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="authorunmanaged", options={})
def test_managed_to_unmanaged(self):
# Now, we turn managed to unmanaged.
changes = self.get_changes(
[self.author_empty, self.author_unmanaged_managed], [self.author_empty, self.author_unmanaged]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorunmanaged", options={"managed": False})
def test_unmanaged_custom_pk(self):
"""
        #23415 - The autodetector must correctly resolve FKs to unmanaged models,
        including when the unmanaged model has a custom primary key.
"""
# First, we test the default pk field name
changes = self.get_changes([], [self.author_unmanaged_default_pk, self.book])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'id')
# Now, we test the custom pk field name
changes = self.get_changes([], [self.author_unmanaged_custom_pk, self.book])
# The field name the FK on the book model points to
self.assertEqual(changes['otherapp'][0].operations[0].fields[2][1].remote_field.field_name, 'pk_field')
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable(self):
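        """An FK to a swappable model makes the migration depend on the swappable setting, not on a concrete app."""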
with isolate_lru_cache(apps.get_swappable_settings_name):
changes = self.get_changes([self.custom_user], [self.custom_user, self.author_with_custom_user])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertMigrationDependencies(changes, 'testapp', 0, [("__setting__", "AUTH_USER_MODEL")])
def test_swappable_changed(self):
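        """Changing AUTH_USER_MODEL results in an AlterField on the FK that points at it."""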
with isolate_lru_cache(apps.get_swappable_settings_name):
before = self.make_project_state([self.custom_user, self.author_with_user])
with override_settings(AUTH_USER_MODEL="thirdapp.CustomUser"):
after = self.make_project_state([self.custom_user, self.author_with_custom_user])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name='user')
fk_field = changes['testapp'][0].operations[0].field
to_model = '%s.%s' % (
fk_field.remote_field.model._meta.app_label,
fk_field.remote_field.model._meta.object_name,
)
self.assertEqual(to_model, 'thirdapp.CustomUser')
def test_add_field_with_default(self):
"""#22030 - Adding a field with a default should work."""
changes = self.get_changes([self.author_empty], [self.author_name_default])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="name")
def test_custom_deconstructible(self):
"""
Two instances which deconstruct to the same value aren't considered a
change.
"""
changes = self.get_changes([self.author_name_deconstructible_1], [self.author_name_deconstructible_2])
# Right number of migrations?
self.assertEqual(len(changes), 0)
def test_deconstruct_field_kwarg(self):
"""Field instances are handled correctly by nested deconstruction."""
changes = self.get_changes([self.author_name_deconstructible_3], [self.author_name_deconstructible_4])
self.assertEqual(changes, {})
def test_deconstructible_list(self):
"""Nested deconstruction descends into lists."""
# When lists contain items that deconstruct to identical values, those lists
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed lists should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_list_1], [self.author_name_deconstructible_list_3]
)
self.assertEqual(len(changes), 1)
def test_deconstructible_tuple(self):
"""Nested deconstruction descends into tuples."""
# When tuples contain items that deconstruct to identical values, those tuples
# should be considered equal for the purpose of detecting state changes
# (even if the original items are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed tuples should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_tuple_1], [self.author_name_deconstructible_tuple_3]
)
self.assertEqual(len(changes), 1)
def test_deconstructible_dict(self):
"""Nested deconstruction descends into dict values."""
# When dicts contain items whose values deconstruct to identical values,
# those dicts should be considered equal for the purpose of detecting
# state changes (even if the original values are unequal).
changes = self.get_changes(
[self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_2]
)
self.assertEqual(changes, {})
# Legitimate differences within the deconstructed dicts should be reported
# as a change
changes = self.get_changes(
[self.author_name_deconstructible_dict_1], [self.author_name_deconstructible_dict_3]
)
self.assertEqual(len(changes), 1)
def test_nested_deconstructible_objects(self):
"""
Nested deconstruction is applied recursively to the args/kwargs of
deconstructed objects.
"""
# If the items within a deconstructed object's args/kwargs have the same
# deconstructed values - whether or not the items themselves are different
# instances - then the object as a whole is regarded as unchanged.
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_2]
)
self.assertEqual(changes, {})
# Differences that exist solely within the args list of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_arg]
)
self.assertEqual(len(changes), 1)
# Additional args should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_arg]
)
self.assertEqual(len(changes), 1)
# Differences that exist solely within the kwargs dict of a deconstructed object
# should be reported as changes
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_changed_kwarg]
)
self.assertEqual(len(changes), 1)
# Additional kwargs should also be reported as a change
changes = self.get_changes(
[self.author_name_nested_deconstructible_1], [self.author_name_nested_deconstructible_extra_kwarg]
)
self.assertEqual(len(changes), 1)
def test_deconstruct_type(self):
"""
        #22951 -- Uninstantiated classes with a deconstruct() method are
        correctly returned by deep_deconstruct during serialization.
"""
author = ModelState(
"testapp",
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(
max_length=200,
# IntegerField intentionally not instantiated.
default=models.IntegerField,
))
],
)
changes = self.get_changes([], [author])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel"])
def test_replace_string_with_foreignkey(self):
"""
#22300 - Adding an FK in the same "spot" as a deleted CharField should
work.
"""
changes = self.get_changes([self.author_with_publisher_string], [self.author_with_publisher, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publisher_name")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publisher")
def test_foreign_key_removed_before_target_model(self):
"""
Removing an FK and the model it targets in the same change must remove
the FK field before the model to maintain consistency.
"""
changes = self.get_changes(
[self.author_with_publisher, self.publisher], [self.author_name]
) # removes both the model and FK
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Publisher")
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_many_to_many(self, mocked_ask_method):
"""#22435 - Adding a ManyToManyField should not prompt for a default."""
changes = self.get_changes([self.author_empty, self.publisher], [self.author_with_m2m, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_alter_many_to_many(self):
changes = self.get_changes(
[self.author_with_m2m, self.publisher], [self.author_with_m2m_blank, self.publisher]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers")
def test_create_with_through_model(self):
"""
Adding a m2m with a through model and the models that use it should be
ordered correctly.
"""
changes = self.get_changes([], [self.author_with_m2m_through, self.publisher, self.contract])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"CreateModel", "CreateModel", "CreateModel", "AddField", "AddField"
])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Contract")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Publisher")
self.assertOperationAttributes(changes, 'testapp', 0, 3, model_name='contract', name='publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 4, model_name='author', name='publishers')
def test_many_to_many_removed_before_through_model(self):
"""
Removing a ManyToManyField and the "through" model in the same change
must remove the field before the model to maintain consistency.
"""
changes = self.get_changes(
[self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution],
[self.book_with_no_author, self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, ["RemoveField", "RemoveField", "RemoveField", "DeleteModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
def test_many_to_many_removed_before_through_model_2(self):
"""
Removing a model that contains a ManyToManyField and the "through" model
in the same change must remove the field before the model to maintain
consistency.
"""
changes = self.get_changes(
[self.book_with_multiple_authors_through_attribution, self.author_name, self.attribution],
[self.author_name],
)
# Remove both the through model and ManyToMany
# Right number/type of migrations?
self.assertNumberMigrations(changes, "otherapp", 1)
self.assertOperationTypes(changes, "otherapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="author", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 1, name="book", model_name='attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 2, name="authors", model_name='book')
self.assertOperationAttributes(changes, 'otherapp', 0, 3, name='Attribution')
self.assertOperationAttributes(changes, 'otherapp', 0, 4, name='Book')
def test_m2m_w_through_multistep_remove(self):
"""
        A model with a m2m field that specifies a "through" model cannot be
        deleted together with that through model before the m2m field itself
        is removed, or the schema would pass through an inconsistent state.
        The autodetector should emit the RemoveField operations before the
        DeleteModel operations within a single migration.
"""
changes = self.get_changes([self.author_with_m2m_through, self.publisher, self.contract], [self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, [
"RemoveField", "RemoveField", "RemoveField", "DeleteModel", "DeleteModel"
])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="publisher", model_name='contract')
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 4, name="Contract")
def test_concrete_field_changed_to_many_to_many(self):
"""
#23938 - Tests that changing a concrete field into a ManyToManyField
first removes the concrete field and then adds the m2m field.
"""
changes = self.get_changes([self.author_with_former_m2m], [self.author_with_m2m, self.publisher])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["CreateModel", "RemoveField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name='Publisher')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="publishers", model_name='author')
def test_many_to_many_changed_to_concrete_field(self):
"""
#23938 - Tests that changing a ManyToManyField into a concrete field
first removes the m2m field and then adds the concrete field.
"""
changes = self.get_changes([self.author_with_m2m, self.publisher], [self.author_with_former_m2m])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "AddField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="publishers", model_name='author')
self.assertOperationAttributes(changes, 'testapp', 0, 2, name='Publisher')
self.assertOperationFieldAttributes(changes, 'testapp', 0, 1, max_length=100)
def test_non_circular_foreignkey_dependency_removal(self):
"""
If two models with a ForeignKey from one to the other are removed at the
same time, the autodetector should remove them in the correct order.
"""
changes = self.get_changes([self.author_with_publisher, self.publisher_with_author], [])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["RemoveField", "RemoveField", "DeleteModel", "DeleteModel"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="publisher", model_name='author')
self.assertOperationAttributes(changes, "testapp", 0, 1, name="author", model_name='publisher')
self.assertOperationAttributes(changes, "testapp", 0, 2, name="Author")
self.assertOperationAttributes(changes, "testapp", 0, 3, name="Publisher")
def test_alter_model_options(self):
"""Changing a model's options should make a change."""
changes = self.get_changes([self.author_empty], [self.author_with_options])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, options={
"permissions": [('can_hire', 'Can hire')],
"verbose_name": "Authi",
})
# Changing them back to empty should also make a change
changes = self.get_changes([self.author_with_options], [self.author_empty])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="author", options={})
def test_alter_model_options_proxy(self):
"""Changing a proxy model's options should also make a change."""
changes = self.get_changes(
[self.author_proxy, self.author_empty], [self.author_proxy_options, self.author_empty]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, "testapp", 1)
self.assertOperationTypes(changes, "testapp", 0, ["AlterModelOptions"])
self.assertOperationAttributes(changes, "testapp", 0, 0, name="authorproxy", options={
"verbose_name": "Super Author"
})
def test_set_alter_order_with_respect_to(self):
"""Tests that setting order_with_respect_to adds a field."""
changes = self.get_changes([self.book, self.author_with_book], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to="book")
def test_add_alter_order_with_respect_to(self):
"""
        Tests that setting order_with_respect_to when adding the FK in the same
        change emits the AddField before the AlterOrderWithRespectTo operation.
"""
changes = self.get_changes([self.author_name], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, model_name="author", name="book")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
def test_remove_alter_order_with_respect_to(self):
"""
        Tests that removing order_with_respect_to when removing the FK in the
        same change emits the AlterOrderWithRespectTo (clearing it) before the
        RemoveField operation.
"""
changes = self.get_changes([self.book, self.author_with_book_order_wrt], [self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AlterOrderWithRespectTo", "RemoveField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="author", order_with_respect_to=None)
self.assertOperationAttributes(changes, 'testapp', 0, 1, model_name="author", name="book")
def test_add_model_order_with_respect_to(self):
"""
        Tests that setting order_with_respect_to when adding the whole model
        emits the CreateModel before the AlterOrderWithRespectTo operation.
"""
changes = self.get_changes([], [self.book, self.author_with_book_order_wrt])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterOrderWithRespectTo"])
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author", order_with_respect_to="book")
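        # The implicit _order column is managed by AlterOrderWithRespectTo, so
        # it must not also be listed among the CreateModel fields.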
self.assertNotIn("_order", [name for name, field in changes['testapp'][0].operations[0].fields])
def test_alter_model_managers(self):
"""
Tests that changing the model managers adds a new operation.
"""
changes = self.get_changes([self.other_pony], [self.other_pony_food])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["AlterModelManagers"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="pony")
self.assertEqual([name for name, mgr in changes['otherapp'][0].operations[0].managers],
['food_qs', 'food_mgr', 'food_mgr_kwargs'])
self.assertEqual(changes['otherapp'][0].operations[0].managers[1][1].args, ('a', 'b', 1, 2))
self.assertEqual(changes['otherapp'][0].operations[0].managers[2][1].args, ('x', 'y', 3, 4))
def test_swappable_first_inheritance(self):
"""Tests that swappable models get their CreateModel first."""
changes = self.get_changes([], [self.custom_user, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
@override_settings(AUTH_USER_MODEL="thirdapp.CustomUser")
def test_swappable_first_setting(self):
"""Tests that swappable models get their CreateModel first."""
with isolate_lru_cache(apps.get_swappable_settings_name):
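            # apps.get_swappable_settings_name() is lru_cache()d; isolating the
            # cache ensures the AUTH_USER_MODEL override above is picked up.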
changes = self.get_changes([], [self.custom_user_no_inherit, self.aardvark])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'thirdapp', 1)
self.assertOperationTypes(changes, 'thirdapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'thirdapp', 0, 0, name="CustomUser")
self.assertOperationAttributes(changes, 'thirdapp', 0, 1, name="Aardvark")
def test_bases_first(self):
"""Tests that bases of other models come first."""
changes = self.get_changes([], [self.aardvark_based_on_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_multiple_bases(self):
"""#23956 - Tests that inheriting models doesn't move *_ptr fields into AddField operations."""
A = ModelState("app", "A", [("a_id", models.AutoField(primary_key=True))])
B = ModelState("app", "B", [("b_id", models.AutoField(primary_key=True))])
C = ModelState("app", "C", [], bases=("app.A", "app.B"))
D = ModelState("app", "D", [], bases=("app.A", "app.B"))
E = ModelState("app", "E", [], bases=("app.A", "app.B"))
changes = self.get_changes([], [A, B, C, D, E])
# Right number/type of migrations?
self.assertNumberMigrations(changes, "app", 1)
self.assertOperationTypes(changes, "app", 0, [
"CreateModel", "CreateModel", "CreateModel", "CreateModel", "CreateModel"
])
self.assertOperationAttributes(changes, "app", 0, 0, name="A")
self.assertOperationAttributes(changes, "app", 0, 1, name="B")
self.assertOperationAttributes(changes, "app", 0, 2, name="C")
self.assertOperationAttributes(changes, "app", 0, 3, name="D")
self.assertOperationAttributes(changes, "app", 0, 4, name="E")
def test_proxy_bases_first(self):
"""Tests that bases of proxies come first."""
changes = self.get_changes([], [self.author_empty, self.author_proxy, self.author_proxy_proxy])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="AuthorProxy")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="AAuthorProxyProxy")
def test_pk_fk_included(self):
"""
Tests that a relation used as the primary key is kept as part of
CreateModel.
"""
changes = self.get_changes([], [self.aardvark_pk_fk_author, self.author_name])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "CreateModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Author")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="Aardvark")
def test_first_dependency(self):
"""
Tests that a dependency to an app with no migrations uses __first__.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "__first__")])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_last_dependency(self):
"""
Tests that a dependency to an app with existing migrations uses the
last migration of that app.
"""
# Load graph
loader = MigrationLoader(connection)
before = self.make_project_state([])
after = self.make_project_state([self.book_migrations_fk])
after.real_apps = ["migrations"]
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes(graph=loader.graph)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'otherapp', 1)
self.assertOperationTypes(changes, 'otherapp', 0, ["CreateModel"])
self.assertOperationAttributes(changes, 'otherapp', 0, 0, name="Book")
self.assertMigrationDependencies(changes, 'otherapp', 0, [("migrations", "0002_second")])
def test_alter_fk_before_model_deletion(self):
"""
Tests that ForeignKeys are altered _before_ the model they used to
        refer to is deleted.
"""
changes = self.get_changes(
[self.author_name, self.publisher_with_author],
[self.aardvark_testapp, self.publisher_with_aardvark_author]
)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["CreateModel", "AlterField", "DeleteModel"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="Aardvark")
self.assertOperationAttributes(changes, 'testapp', 0, 1, name="author")
self.assertOperationAttributes(changes, 'testapp', 0, 2, name="Author")
def test_fk_dependency_other_app(self):
"""
#23100 - Tests that ForeignKeys correctly depend on other apps' models.
"""
changes = self.get_changes([self.author_name, self.book], [self.author_with_book, self.book])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0, name="book")
self.assertMigrationDependencies(changes, 'testapp', 0, [("otherapp", "__first__")])
def test_circular_dependency_mixed_addcreate(self):
"""
#23315 - Tests that the dependency resolver knows to put all CreateModel
before AddField and not become unsolvable.
"""
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("country", models.ForeignKey("b.DeliveryCountry", models.CASCADE)),
])
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
])
apackage = ModelState("b", "APackage", [
("id", models.AutoField(primary_key=True)),
("person", models.ForeignKey("a.Person", models.CASCADE)),
])
country = ModelState("b", "DeliveryCountry", [
("id", models.AutoField(primary_key=True)),
])
changes = self.get_changes([], [address, person, apackage, country])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertNumberMigrations(changes, 'b', 1)
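        # The Address -> DeliveryCountry FK cannot be part of app a's initial
        # CreateModel without creating a cycle with app b, so it is split out
        # into a second migration for app a as an AddField.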
self.assertOperationTypes(changes, 'a', 0, ["CreateModel", "CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertOperationTypes(changes, 'b', 0, ["CreateModel", "CreateModel"])
@override_settings(AUTH_USER_MODEL="a.Tenant")
def test_circular_dependency_swappable(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
tenant = ModelState("a", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("b.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
address = ModelState("b", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('a', 'auto_1'), ('b', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
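        # The dependency is recorded against the __setting__ pseudo-app so it
        # follows whatever model AUTH_USER_MODEL resolves to at migrate time.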
self.assertMigrationDependencies(changes, 'b', 0, [('__setting__', 'AUTH_USER_MODEL')])
@override_settings(AUTH_USER_MODEL="b.Tenant")
def test_circular_dependency_swappable2(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models but with the swappable not being the first migrated
model.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
address = ModelState("a", "Address", [
("id", models.AutoField(primary_key=True)),
("tenant", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE)),
])
tenant = ModelState("b", "Tenant", [
("id", models.AutoField(primary_key=True)),
("primary_address", models.ForeignKey("a.Address", models.CASCADE))],
bases=(AbstractBaseUser, )
)
changes = self.get_changes([], [address, tenant])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 2)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertOperationTypes(changes, 'a', 1, ["AddField"])
self.assertMigrationDependencies(changes, 'a', 0, [])
self.assertMigrationDependencies(changes, 'a', 1, [('__setting__', 'AUTH_USER_MODEL'), ('a', 'auto_1')])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'b', 1)
self.assertOperationTypes(changes, 'b', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'b', 0, [('a', 'auto_1')])
@override_settings(AUTH_USER_MODEL="a.Person")
def test_circular_dependency_swappable_self(self):
"""
#23322 - Tests that the dependency resolver knows to explicitly resolve
swappable models.
"""
with isolate_lru_cache(apps.get_swappable_settings_name):
person = ModelState("a", "Person", [
("id", models.AutoField(primary_key=True)),
("parent1", models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, related_name='children'))
])
changes = self.get_changes([], [person])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'a', 1)
self.assertOperationTypes(changes, 'a', 0, ["CreateModel"])
self.assertMigrationDependencies(changes, 'a', 0, [])
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition',
side_effect=AssertionError("Should not have prompted for not null addition"))
def test_add_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and blank `CharField` or `TextField`
without default should not prompt for a default.
"""
changes = self.get_changes([self.author_empty], [self.author_with_biography_blank])
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
@mock.patch('django.db.migrations.questioner.MigrationQuestioner.ask_not_null_addition')
def test_add_non_blank_textfield_and_charfield(self, mocked_ask_method):
"""
#23405 - Adding a NOT NULL and non-blank `CharField` or `TextField`
without default should prompt for a default.
"""
changes = self.get_changes([self.author_empty], [self.author_with_biography_non_blank])
self.assertEqual(mocked_ask_method.call_count, 2)
# Right number/type of migrations?
self.assertNumberMigrations(changes, 'testapp', 1)
self.assertOperationTypes(changes, 'testapp', 0, ["AddField", "AddField"])
self.assertOperationAttributes(changes, 'testapp', 0, 0)
| bsd-3-clause | -3,440,156,367,147,122,700 | 52.215421 | 119 | 0.635523 | false | 3.94516 | true | false | false | 0.003741 |
bartoldeman/easybuild-framework | easybuild/tools/job/pbs_python.py | 1 | 18752 | ##
# Copyright 2012-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Interface module to TORQUE (PBS).
:author: Stijn De Weirdt (Ghent University)
:author: Toon Willems (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from distutils.version import LooseVersion
import os
import re
import tempfile
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.config import build_option
from easybuild.tools.job.backend import JobBackend
from easybuild.tools.utilities import only_if_module_is_available
_log = fancylogger.getLogger('pbs_python', fname=False)
# the 'extend' parameter should be 'NULL' in some functions because this is required by the python api
NULL = 'NULL'
# list of known hold types
KNOWN_HOLD_TYPES = []
try:
import pbs
from PBSQuery import PBSQuery
KNOWN_HOLD_TYPES = [pbs.USER_HOLD, pbs.OTHER_HOLD, pbs.SYSTEM_HOLD]
except ImportError as err:
_log.debug("Failed to import pbs/PBSQuery from pbs_python."
" Silently ignoring, this is a real issue only when pbs_python is used as backend for --job")
class PbsPython(JobBackend):
"""
Manage PBS server communication and create `PbsJob` objects.
"""
# pbs_python 4.1.0 introduces the pbs.version variable we rely on
REQ_VERSION = '4.1.0'
# _check_version is called by __init__, so guard it (too) with the decorator
@only_if_module_is_available('pbs', pkgname='pbs_python')
def _check_version(self):
"""Check whether pbs_python version complies with required version."""
version_regex = re.compile('pbs_python version (?P<version>.*)')
res = version_regex.search(pbs.version)
if res:
version = res.group('version')
if LooseVersion(version) < LooseVersion(self.REQ_VERSION):
raise EasyBuildError("Found pbs_python version %s, but version %s or more recent is required",
version, self.REQ_VERSION)
else:
raise EasyBuildError("Failed to parse pbs_python version string '%s' using pattern %s",
pbs.version, version_regex.pattern)
def __init__(self, *args, **kwargs):
"""Constructor."""
pbs_server = kwargs.pop('pbs_server', None)
super(PbsPython, self).__init__(*args, **kwargs)
self.pbs_server = pbs_server or build_option('job_target_resource') or pbs.pbs_default()
self.conn = None
self._ppn = None
def init(self):
"""
Initialise the job backend.
Connect to the PBS server & reset list of submitted jobs.
"""
self.connect_to_server()
self._submitted = []
def connect_to_server(self):
"""Connect to PBS server, set and return connection."""
if not self.conn:
self.conn = pbs.pbs_connect(self.pbs_server)
return self.conn
def queue(self, job, dependencies=frozenset()):
"""
Add a job to the queue.
:param dependencies: jobs on which this job depends.
"""
if dependencies:
job.add_dependencies(dependencies)
job._submit()
self._submitted.append(job)
def complete(self):
"""
Complete a bulk job submission.
Release all user holds on submitted jobs, and disconnect from server.
"""
for job in self._submitted:
if job.has_holds():
self.log.info("releasing user hold on job %s" % job.jobid)
job.release_hold()
self.disconnect_from_server()
# print list of submitted jobs
submitted_jobs = '; '.join(["%s (%s): %s" % (job.name, job.module, job.jobid) for job in self._submitted])
print_msg("List of submitted jobs (%d): %s" % (len(self._submitted), submitted_jobs), log=self.log)
# determine leaf nodes in dependency graph, and report them
all_deps = set()
for job in self._submitted:
all_deps = all_deps.union(job.deps)
leaf_nodes = []
for job in self._submitted:
if job.jobid not in all_deps:
leaf_nodes.append(str(job.jobid).split('.')[0])
self.log.info("Job ids of leaf nodes in dep. graph: %s" % ','.join(leaf_nodes))
def disconnect_from_server(self):
"""Disconnect current connection."""
pbs.pbs_disconnect(self.conn)
self.conn = None
def _get_ppn(self):
"""Guess PBS' `ppn` value for a full node."""
# cache this value as it's not likely going to change over the
# `eb` script runtime ...
if not self._ppn:
pq = PBSQuery()
node_vals = pq.getnodes().values() # only the values, not the names
interesting_nodes = ('free', 'job-exclusive',)
res = {}
for np in [int(x['np'][0]) for x in node_vals if x['state'][0] in interesting_nodes]:
res.setdefault(np, 0)
res[np] += 1
if not res:
raise EasyBuildError("Could not guess the ppn value of a full node because " +
"there are no free or job-exclusive nodes.")
# return most frequent
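            # e.g. res == {8: 2, 16: 40}: comparing (count, np) tuples picks the
            # np seen on the most nodes (ties break towards the larger np)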
freq_count, freq_np = max([(j, i) for i, j in res.items()])
self.log.debug("Found most frequent np %s (%s times) in interesting nodes %s" % (freq_np, freq_count, interesting_nodes))
self._ppn = freq_np
return self._ppn
ppn = property(_get_ppn)
def make_job(self, script, name, env_vars=None, hours=None, cores=None):
"""Create and return a `PbsJob` object with the given parameters."""
return PbsJob(self, script, name, env_vars=env_vars, hours=hours, cores=cores, conn=self.conn, ppn=self.ppn)
class PbsJob(object):
"""Interaction with TORQUE"""
def __init__(self, server, script, name, env_vars=None,
hours=None, cores=None, conn=None, ppn=None):
"""
create a new Job to be submitted to PBS
env_vars is a dictionary with key-value pairs of environment variables that should be passed on to the job
hours and cores should be integer values.
        hours can range from 1 up to the configured maximum walltime; the valid range for cores depends on the cluster the job runs on.
"""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self._server = server
self.script = script
if env_vars:
self.env_vars = env_vars.copy()
else:
self.env_vars = {}
self.name = name
try:
self.pbsconn = self._server.connect_to_server()
        except Exception as err:
raise EasyBuildError("Failed to connect to the default pbs server: %s", err)
# setup the resources requested
# validate requested resources!
max_walltime = build_option('job_max_walltime')
if hours is None:
hours = max_walltime
if hours > max_walltime:
            self.log.warn("Specified %s hours, which exceeds the maximum walltime; resetting to %s hours" % (hours, max_walltime))
hours = max_walltime
if ppn is None:
max_cores = server.ppn
else:
max_cores = ppn
if cores is None:
cores = max_cores
if cores > max_cores:
            self.log.warn("Number of requested cores (%s) was greater than available (%s); resetting to the maximum" % (cores, max_cores))
cores = max_cores
# only allow cores and hours for now.
self.resources = {
'walltime': '%s:00:00' % hours,
'nodes': '1:ppn=%s' % cores,
}
# don't specify any queue name to submit to, use the default
self.queue = None
# job id of this job
self.jobid = None
# list of dependencies for this job
self.deps = []
# list of holds that are placed on this job
self.holds = []
def __str__(self):
"""Return the job ID as a string."""
return (str(self.jobid) if self.jobid is not None
else repr(self))
def add_dependencies(self, jobs):
"""
Add dependencies to this job.
Argument `jobs` is a sequence of `PbsJob` objects.
"""
self.deps.extend(jobs)
def _submit(self):
"""Submit the jobscript txt, set self.jobid"""
txt = self.script
self.log.debug("Going to submit script %s" % txt)
# Build default pbs_attributes list
pbs_attributes = pbs.new_attropl(3)
pbs_attributes[0].name = pbs.ATTR_N # Job_Name
pbs_attributes[0].value = self.name
output_dir = build_option('job_output_dir')
pbs_attributes[1].name = pbs.ATTR_o
pbs_attributes[1].value = os.path.join(output_dir, '%s.o$PBS_JOBID' % self.name)
pbs_attributes[2].name = pbs.ATTR_e
pbs_attributes[2].value = os.path.join(output_dir, '%s.e$PBS_JOBID' % self.name)
# set resource requirements
resource_attributes = pbs.new_attropl(len(self.resources))
idx = 0
for k, v in self.resources.items():
resource_attributes[idx].name = pbs.ATTR_l # Resource_List
resource_attributes[idx].resource = k
resource_attributes[idx].value = v
idx += 1
pbs_attributes.extend(resource_attributes)
# add job dependencies to attributes
if self.deps:
deps_attributes = pbs.new_attropl(1)
deps_attributes[0].name = pbs.ATTR_depend
deps_attributes[0].value = ",".join(["afterany:%s" % dep.jobid for dep in self.deps])
pbs_attributes.extend(deps_attributes)
self.log.debug("Job deps attributes: %s" % deps_attributes[0].value)
# submit job with (user) hold
hold_attributes = pbs.new_attropl(1)
hold_attributes[0].name = pbs.ATTR_h
hold_attributes[0].value = pbs.USER_HOLD
pbs_attributes.extend(hold_attributes)
self.holds.append(pbs.USER_HOLD)
self.log.debug("Job hold attributes: %s" % hold_attributes[0].value)
# add a bunch of variables (added by qsub)
# also set PBS_O_WORKDIR to os.getcwd()
os.environ.setdefault('WORKDIR', os.getcwd())
defvars = ['MAIL', 'HOME', 'PATH', 'SHELL', 'WORKDIR']
pbsvars = ["PBS_O_%s=%s" % (x, os.environ.get(x, 'NOTFOUND_%s' % x)) for x in defvars]
# extend PBS variables with specified variables
pbsvars.extend(["%s=%s" % (name, value) for (name, value) in self.env_vars.items()])
variable_attributes = pbs.new_attropl(1)
variable_attributes[0].name = pbs.ATTR_v # Variable_List
variable_attributes[0].value = ",".join(pbsvars)
pbs_attributes.extend(variable_attributes)
self.log.debug("Job variable attributes: %s" % variable_attributes[0].value)
# mail settings
mail_attributes = pbs.new_attropl(1)
mail_attributes[0].name = pbs.ATTR_m # Mail_Points
mail_attributes[0].value = 'n' # disable all mail
pbs_attributes.extend(mail_attributes)
self.log.debug("Job mail attributes: %s" % mail_attributes[0].value)
fh, scriptfn = tempfile.mkstemp()
f = os.fdopen(fh, 'w')
self.log.debug("Writing temporary job script to %s" % scriptfn)
f.write(txt)
f.close()
self.log.debug("Going to submit to queue %s" % self.queue)
# job submission sometimes fails without producing an error, e.g. when one of the dependency jobs has already finished
# when that occurs, None will be returned by pbs_submit as job id
jobid = pbs.pbs_submit(self.pbsconn, pbs_attributes, scriptfn, self.queue, NULL)
is_error, errormsg = pbs.error()
if is_error or jobid is None:
raise EasyBuildError("Failed to submit job script %s (job id: %s, error %s)", scriptfn, jobid, errormsg)
else:
            self.log.debug("Successful job submission returned jobid %s" % jobid)
self.jobid = jobid
os.remove(scriptfn)
def set_hold(self, hold_type=None):
"""Set hold on job of specified type."""
# we can't set this default for hold_type in function signature,
# because we need to be able to load this module even when the pbs module is not available
if hold_type is None:
hold_type = pbs.USER_HOLD
# only set hold if it wasn't set before
if hold_type not in self.holds:
if hold_type not in KNOWN_HOLD_TYPES:
raise EasyBuildError("set_hold: unknown hold type: %s (supported: %s)", hold_type, KNOWN_HOLD_TYPES)
# set hold, check for errors, and keep track of this hold
ec = pbs.pbs_holdjob(self.pbsconn, self.jobid, hold_type, NULL)
is_error, errormsg = pbs.error()
if is_error or ec:
raise EasyBuildError("Failed to set hold of type %s on job %s (is_error: %s, exit code: %s, msg: %s)",
hold_type, self.jobid, is_error, ec, errormsg)
else:
self.holds.append(hold_type)
else:
self.log.warning("Hold type %s was already set for %s" % (hold_type, self.jobid))
def release_hold(self, hold_type=None):
"""Release hold on job of specified type."""
# we can't set this default for hold_type in function signature,
# because we need to be able to load this module even when the pbs module is not available
if hold_type is None:
hold_type = pbs.USER_HOLD
# only release hold if it was set
if hold_type in self.holds:
if hold_type not in KNOWN_HOLD_TYPES:
raise EasyBuildError("release_hold: unknown hold type: %s (supported: %s)", hold_type, KNOWN_HOLD_TYPES)
# release hold, check for errors, remove from list of holds
ec = pbs.pbs_rlsjob(self.pbsconn, self.jobid, hold_type, NULL)
self.log.debug("Released hold of type %s for job %s" % (hold_type, self.jobid))
is_error, errormsg = pbs.error()
if is_error or ec:
raise EasyBuildError("Failed to release hold type %s on job %s (is_error: %s, exit code: %s, msg: %s)",
hold_type, self.jobid, is_error, ec, errormsg)
else:
self.holds.remove(hold_type)
else:
self.log.warning("No hold type %s was set for %s, so skipping hold release" % (hold_type, self.jobid))
def has_holds(self):
"""Return whether this job has holds or not."""
return bool(self.holds)
def state(self):
"""
Return the state of the job
        State can be 'not submitted', 'running', 'queued' or 'finished'.
"""
state = self.info(types=['job_state', 'exec_host'])
if state is None:
if self.jobid is None:
return 'not submitted'
else:
return 'finished'
jid = state['id']
jstate = state.get('job_state', None)
def get_uniq_hosts(txt, num=None):
"""
- txt: format: host1/cpuid+host2/cpuid
- num: number of nodes to return (default: all)
"""
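            # e.g. 'node1/0+node1/1+node2/0' -> ['node1', 'node2'];
            # num=1 (as used below) keeps only the first execution host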
res = []
for h_c in txt.split('+'):
h = h_c.split('/')[0]
if h in res:
continue
res.append(h)
return res[:num]
ehosts = get_uniq_hosts(state.get('exec_host', ''), 1)
self.log.debug("Jobid %s jid %s state %s ehosts %s (%s)" % (self.jobid, jid, jstate, ehosts, state))
if jstate == 'Q':
return 'queued'
else:
return 'running'
def info(self, types=None):
"""
Return jobinfo
"""
if not self.jobid:
self.log.debug("no jobid, job is not submitted yet?")
return None
# convert single type into list
if type(types) is str:
types = [types]
self.log.debug("Return info types %s" % types)
# create attribute list to query pbs with
if types is None:
jobattr = NULL
else:
jobattr = pbs.new_attrl(len(types))
for idx, attr in enumerate(types):
jobattr[idx].name = attr
jobs = pbs.pbs_statjob(self.pbsconn, self.jobid, jobattr, NULL)
if len(jobs) == 0:
# no job found, return None info
res = None
self.log.debug("No job found. Wrong id %s or job finished? Returning %s" % (self.jobid, res))
return res
elif len(jobs) == 1:
self.log.debug("Request for jobid %s returned one result %s" % (self.jobid, jobs))
else:
            raise EasyBuildError("Request for jobid %s returned more than one result %s", self.jobid, jobs)
# only expect to have a list with one element
j = jobs[0]
        # convert attribs into usable dict
job_details = dict([(attrib.name, attrib.value) for attrib in j.attribs])
# manually set 'id' attribute
job_details['id'] = j.name
self.log.debug("Found jobinfo %s" % job_details)
return job_details
def remove(self):
"""Remove the job with id jobid"""
result = pbs.pbs_deljob(self.pbsconn, self.jobid, '') # use empty string, not NULL
if result:
raise EasyBuildError("Failed to delete job %s: error %s", self.jobid, result)
else:
            self.log.debug("Successfully deleted job %s" % self.jobid)
| gpl-2.0 | 8,836,855,745,217,800,000 | 38.148225 | 133 | 0.59183 | false | 3.733227 | false | false | false | 0.00272 |
leafjungle/luigi | luigi/process.py | 24 | 3196 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains some helper functions to run luigid in daemon mode
"""
from __future__ import print_function
import datetime
import logging
import logging.handlers
import os
rootlogger = logging.getLogger()
server_logger = logging.getLogger("luigi.server")
def check_pid(pidfile):
if pidfile and os.path.exists(pidfile):
try:
pid = int(open(pidfile).read().strip())
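            # signal 0 sends nothing; it only checks that the process exists
            # (and that we are allowed to signal it), raising OSError otherwise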
os.kill(pid, 0)
return pid
except BaseException:
return 0
return 0
def write_pid(pidfile):
server_logger.info("Writing pid file")
piddir = os.path.dirname(pidfile)
if piddir != '' and not os.path.exists(piddir):
os.makedirs(piddir)
with open(pidfile, 'w') as fobj:
fobj.write(str(os.getpid()))
def get_log_format():
return "%(asctime)s %(name)s[%(process)s] %(levelname)s: %(message)s"
def get_spool_handler(filename):
handler = logging.handlers.TimedRotatingFileHandler(
filename=filename,
when='d',
encoding='utf8',
backupCount=7 # keep one week of historical logs
)
formatter = logging.Formatter(get_log_format())
handler.setFormatter(formatter)
return handler
def _server_already_running(pidfile):
existing_pid = check_pid(pidfile)
if pidfile and existing_pid:
return True
return False
def daemonize(cmd, pidfile=None, logdir=None, api_port=8082, address=None, unix_socket=None):
import daemon
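    # 'daemon' refers to the python-daemon package; importing it inside the
    # function keeps the rest of this module usable when it is not installed.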
logdir = logdir or "/var/log/luigi"
if not os.path.exists(logdir):
os.makedirs(logdir)
log_path = os.path.join(logdir, "luigi-server.log")
# redirect stdout/stderr
today = datetime.date.today()
stdout_path = os.path.join(
logdir,
"luigi-server-{0:%Y-%m-%d}.out".format(today)
)
stderr_path = os.path.join(
logdir,
"luigi-server-{0:%Y-%m-%d}.err".format(today)
)
stdout_proxy = open(stdout_path, 'a+')
stderr_proxy = open(stderr_path, 'a+')
ctx = daemon.DaemonContext(
stdout=stdout_proxy,
stderr=stderr_proxy,
working_directory='.'
)
with ctx:
loghandler = get_spool_handler(log_path)
rootlogger.addHandler(loghandler)
if pidfile:
server_logger.info("Checking pid file")
existing_pid = check_pid(pidfile)
if pidfile and existing_pid:
server_logger.info("Server already running (pid=%s)", existing_pid)
return
write_pid(pidfile)
cmd(api_port=api_port, address=address, unix_socket=unix_socket)
| apache-2.0 | 6,161,972,483,165,032,000 | 26.791304 | 93 | 0.647685 | false | 3.764429 | false | false | false | 0.000626 |
marckuz/django | tests/template_tests/test_callables.py | 347 | 4265 | from __future__ import unicode_literals
from unittest import TestCase
from django.template import Context, Engine
class CallableVariablesTests(TestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine()
super(CallableVariablesTests, cls).setUpClass()
def test_callable(self):
class Doodad(object):
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# We can't access ``my_doodad.value`` in the template, because
# ``my_doodad.__call__`` will be invoked first, yielding a dictionary
# without a key ``value``.
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '')
# We can confirm that the doodad has been called
self.assertEqual(my_doodad.num_calls, 1)
# But we can access keys on the dict that's returned
# by ``__call__``, instead.
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '42')
self.assertEqual(my_doodad.num_calls, 2)
def test_alters_data(self):
class Doodad(object):
alters_data = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# Since ``my_doodad.alters_data`` is True, the template system will not
# try to call our doodad but will use string_if_invalid
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
def test_do_not_call(self):
class Doodad(object):
do_not_call_in_templates = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
# Since ``my_doodad.do_not_call_in_templates`` is True, the template
# system will not try to call our doodad. We can access its attributes
# as normal, and we don't have access to the dict that it returns when
# called.
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '42')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
def test_do_not_call_and_alters_data(self):
# If we combine ``alters_data`` and ``do_not_call_in_templates``, the
# ``alters_data`` attribute will not make any difference in the
# template system's behavior.
class Doodad(object):
do_not_call_in_templates = True
alters_data = True
def __init__(self, value):
self.num_calls = 0
self.value = value
def __call__(self):
self.num_calls += 1
return {"the_value": self.value}
my_doodad = Doodad(42)
c = Context({"my_doodad": my_doodad})
t = self.engine.from_string('{{ my_doodad.value }}')
self.assertEqual(t.render(c), '42')
t = self.engine.from_string('{{ my_doodad.the_value }}')
self.assertEqual(t.render(c), '')
# Double-check that the object was really never called during the
# template rendering.
self.assertEqual(my_doodad.num_calls, 0)
| bsd-3-clause | -7,829,160,117,257,347,000 | 32.582677 | 79 | 0.56061 | false | 3.770999 | true | false | false | 0 |
boundarydevices/android_external_chromium_org | build/android/pylib/forwarder.py | 8 | 13644 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0212
import fcntl
import logging
import os
import psutil
from pylib import cmd_helper
from pylib import constants
from pylib import valgrind_tools
# TODO(jbudorick) Remove once telemetry gets switched over.
import pylib.android_commands
import pylib.device.device_utils
def _GetProcessStartTime(pid):
return psutil.Process(pid).create_time
class _FileLock(object):
"""With statement-aware implementation of a file lock.
File locks are needed for cross-process synchronization when the
multiprocessing Python module is used.
"""
def __init__(self, path):
self._fd = -1
self._path = path
def __enter__(self):
self._fd = os.open(self._path, os.O_RDONLY | os.O_CREAT)
if self._fd < 0:
raise Exception('Could not open file %s for reading' % self._path)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def __exit__(self, _exception_type, _exception_value, traceback):
fcntl.flock(self._fd, fcntl.LOCK_UN)
os.close(self._fd)
class Forwarder(object):
"""Thread-safe class to manage port forwards from the device to the host."""
_DEVICE_FORWARDER_FOLDER = (constants.TEST_EXECUTABLE_DIR +
'/forwarder/')
_DEVICE_FORWARDER_PATH = (constants.TEST_EXECUTABLE_DIR +
'/forwarder/device_forwarder')
_LOCK_PATH = '/tmp/chrome.forwarder.lock'
_MULTIPROCESSING_ENV_VAR = 'CHROME_FORWARDER_USE_MULTIPROCESSING'
# Defined in host_forwarder_main.cc
_HOST_FORWARDER_LOG = '/tmp/host_forwarder_log'
_instance = None
@staticmethod
def UseMultiprocessing():
"""Tells the forwarder that multiprocessing is used."""
os.environ[Forwarder._MULTIPROCESSING_ENV_VAR] = '1'
@staticmethod
def Map(port_pairs, device, tool=None):
"""Runs the forwarder.
Args:
port_pairs: A list of tuples (device_port, host_port) to forward. Note
that you can specify 0 as a device_port, in which case a
port will by dynamically assigned on the device. You can
get the number of the assigned port using the
DevicePortForHostPort method.
device: A DeviceUtils instance.
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
Raises:
Exception on failure to forward the port.
"""
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, pylib.android_commands.AndroidCommands):
device = pylib.device.device_utils.DeviceUtils(device)
if not tool:
tool = valgrind_tools.CreateTool(None, device)
with _FileLock(Forwarder._LOCK_PATH):
instance = Forwarder._GetInstanceLocked(tool)
instance._InitDeviceLocked(device, tool)
device_serial = device.old_interface.Adb().GetSerialNumber()
redirection_commands = [
          ['--serial-id=' + device_serial, '--map', str(device_port),
           str(host_port)] for device_port, host_port in port_pairs]
logging.info('Forwarding using commands: %s', redirection_commands)
for redirection_command in redirection_commands:
try:
(exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
[instance._host_forwarder_path] + redirection_command)
except OSError as e:
if e.errno == 2:
raise Exception('Unable to start host forwarder. Make sure you have'
' built host_forwarder.')
          else:
            raise
if exit_code != 0:
raise Exception('%s exited with %d:\n%s' % (
instance._host_forwarder_path, exit_code, '\n'.join(output)))
tokens = output.split(':')
if len(tokens) != 2:
raise Exception(('Unexpected host forwarder output "%s", ' +
'expected "device_port:host_port"') % output)
device_port = int(tokens[0])
host_port = int(tokens[1])
serial_with_port = (device_serial, device_port)
instance._device_to_host_port_map[serial_with_port] = host_port
instance._host_to_device_port_map[host_port] = serial_with_port
logging.info('Forwarding device port: %d to host port: %d.',
device_port, host_port)
@staticmethod
def UnmapDevicePort(device_port, device):
"""Unmaps a previously forwarded device port.
Args:
device: A DeviceUtils instance.
device_port: A previously forwarded port (through Map()).
"""
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, pylib.android_commands.AndroidCommands):
device = pylib.device.device_utils.DeviceUtils(device)
with _FileLock(Forwarder._LOCK_PATH):
Forwarder._UnmapDevicePortLocked(device_port, device)
@staticmethod
def UnmapAllDevicePorts(device):
"""Unmaps all the previously forwarded ports for the provided device.
Args:
device: A DeviceUtils instance.
"""
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, pylib.android_commands.AndroidCommands):
device = pylib.device.device_utils.DeviceUtils(device)
with _FileLock(Forwarder._LOCK_PATH):
if not Forwarder._instance:
return
adb_serial = device.old_interface.Adb().GetSerialNumber()
if adb_serial not in Forwarder._instance._initialized_devices:
return
port_map = Forwarder._GetInstanceLocked(
None)._device_to_host_port_map
for (device_serial, device_port) in port_map.keys():
if adb_serial == device_serial:
Forwarder._UnmapDevicePortLocked(device_port, device)
# There are no more ports mapped, kill the device_forwarder.
tool = valgrind_tools.CreateTool(None, device)
Forwarder._KillDeviceLocked(device, tool)
Forwarder._instance._initialized_devices.remove(adb_serial)
@staticmethod
def DevicePortForHostPort(host_port):
"""Returns the device port that corresponds to a given host port."""
with _FileLock(Forwarder._LOCK_PATH):
(_device_serial, device_port) = Forwarder._GetInstanceLocked(
None)._host_to_device_port_map.get(host_port)
return device_port
@staticmethod
def RemoveHostLog():
if os.path.exists(Forwarder._HOST_FORWARDER_LOG):
os.unlink(Forwarder._HOST_FORWARDER_LOG)
@staticmethod
def GetHostLog():
if not os.path.exists(Forwarder._HOST_FORWARDER_LOG):
return ''
    with open(Forwarder._HOST_FORWARDER_LOG, 'r') as f:
return f.read()
@staticmethod
def _GetInstanceLocked(tool):
"""Returns the singleton instance.
Note that the global lock must be acquired before calling this method.
Args:
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
if not Forwarder._instance:
Forwarder._instance = Forwarder(tool)
return Forwarder._instance
def __init__(self, tool):
"""Constructs a new instance of Forwarder.
Note that Forwarder is a singleton therefore this constructor should be
called only once.
Args:
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
assert not Forwarder._instance
self._tool = tool
self._initialized_devices = set()
self._device_to_host_port_map = dict()
self._host_to_device_port_map = dict()
self._host_forwarder_path = os.path.join(
constants.GetOutDirectory(), 'host_forwarder')
assert os.path.exists(self._host_forwarder_path), 'Please build forwarder2'
self._device_forwarder_path_on_host = os.path.join(
constants.GetOutDirectory(), 'forwarder_dist')
self._InitHostLocked()
@staticmethod
def _UnmapDevicePortLocked(device_port, device):
"""Internal method used by UnmapDevicePort().
Note that the global lock must be acquired before calling this method.
"""
instance = Forwarder._GetInstanceLocked(None)
serial = device.old_interface.Adb().GetSerialNumber()
serial_with_port = (serial, device_port)
if not serial_with_port in instance._device_to_host_port_map:
logging.error('Trying to unmap non-forwarded port %d' % device_port)
return
redirection_command = ['--serial-id=' + serial, '--unmap', str(device_port)]
(exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
[instance._host_forwarder_path] + redirection_command)
if exit_code != 0:
logging.error('%s exited with %d:\n%s' % (
instance._host_forwarder_path, exit_code, '\n'.join(output)))
host_port = instance._device_to_host_port_map[serial_with_port]
del instance._device_to_host_port_map[serial_with_port]
del instance._host_to_device_port_map[host_port]
@staticmethod
def _GetPidForLock():
"""Returns the PID used for host_forwarder initialization.
In case multi-process sharding is used, the PID of the "sharder" is used.
The "sharder" is the initial process that forks that is the parent process.
By default, multi-processing is not used. In that case the PID of the
current process is returned.
"""
use_multiprocessing = Forwarder._MULTIPROCESSING_ENV_VAR in os.environ
return os.getppid() if use_multiprocessing else os.getpid()
def _InitHostLocked(self):
"""Initializes the host forwarder daemon.
Note that the global lock must be acquired before calling this method. This
method kills any existing host_forwarder process that could be stale.
"""
# See if the host_forwarder daemon was already initialized by a concurrent
# process or thread (in case multi-process sharding is not used).
pid_for_lock = Forwarder._GetPidForLock()
fd = os.open(Forwarder._LOCK_PATH, os.O_RDWR | os.O_CREAT)
with os.fdopen(fd, 'r+') as pid_file:
pid_with_start_time = pid_file.readline()
if pid_with_start_time:
(pid, process_start_time) = pid_with_start_time.split(':')
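        # Comparing the start time as well as the PID guards against PID reuse:
        # a recycled PID with a different start time is treated as stale.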
if pid == str(pid_for_lock):
if process_start_time == str(_GetProcessStartTime(pid_for_lock)):
return
self._KillHostLocked()
pid_file.seek(0)
pid_file.write(
'%s:%s' % (pid_for_lock, str(_GetProcessStartTime(pid_for_lock))))
def _InitDeviceLocked(self, device, tool):
"""Initializes the device_forwarder daemon for a specific device (once).
Note that the global lock must be acquired before calling this method. This
method kills any existing device_forwarder daemon on the device that could
be stale, pushes the latest version of the daemon (to the device) and starts
it.
Args:
device: A DeviceUtils instance.
tool: Tool class to use to get wrapper, if necessary, for executing the
forwarder (see valgrind_tools.py).
"""
device_serial = device.old_interface.Adb().GetSerialNumber()
if device_serial in self._initialized_devices:
return
Forwarder._KillDeviceLocked(device, tool)
device.old_interface.PushIfNeeded(
self._device_forwarder_path_on_host,
Forwarder._DEVICE_FORWARDER_FOLDER)
cmd = '%s %s' % (tool.GetUtilWrapper(), Forwarder._DEVICE_FORWARDER_PATH)
(exit_code, output) = device.old_interface.GetAndroidToolStatusAndOutput(
cmd, lib_path=Forwarder._DEVICE_FORWARDER_FOLDER)
if exit_code != 0:
raise Exception(
'Failed to start device forwarder:\n%s' % '\n'.join(output))
self._initialized_devices.add(device_serial)
def _KillHostLocked(self):
"""Kills the forwarder process running on the host.
Note that the global lock must be acquired before calling this method.
"""
logging.info('Killing host_forwarder.')
(exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
[self._host_forwarder_path, '--kill-server'])
if exit_code != 0:
(exit_code, output) = cmd_helper.GetCmdStatusAndOutput(
['pkill', '-9', 'host_forwarder'])
if exit_code != 0:
raise Exception('%s exited with %d:\n%s' % (
self._host_forwarder_path, exit_code, '\n'.join(output)))
@staticmethod
def _KillDeviceLocked(device, tool):
"""Kills the forwarder process running on the device.
Note that the global lock must be acquired before calling this method.
Args:
device: Instance of DeviceUtils for talking to the device.
tool: Wrapper tool (e.g. valgrind) that can be used to execute the device
forwarder (see valgrind_tools.py).
"""
logging.info('Killing device_forwarder.')
if not device.old_interface.FileExistsOnDevice(
Forwarder._DEVICE_FORWARDER_PATH):
return
cmd = '%s %s --kill-server' % (tool.GetUtilWrapper(),
Forwarder._DEVICE_FORWARDER_PATH)
device.old_interface.GetAndroidToolStatusAndOutput(
cmd, lib_path=Forwarder._DEVICE_FORWARDER_FOLDER)
# TODO(pliard): Remove the following call to KillAllBlocking() when we are
# sure that the old version of device_forwarder (not supporting
# 'kill-server') is not running on the bots anymore.
timeout_sec = 5
processes_killed = device.old_interface.KillAllBlocking(
'device_forwarder', timeout_sec)
if not processes_killed:
pids = device.old_interface.ExtractPid('device_forwarder')
if pids:
raise Exception('Timed out while killing device_forwarder')
| bsd-3-clause | -3,962,818,538,970,347,000 | 38.206897 | 80 | 0.671211 | false | 3.84338 | false | false | false | 0.006963 |
devunt/ika | ika/services/ozinger/commands/app_disable.py | 1 | 2114 | from ika.service import Command, Permission
from ika.models import Application, Channel
from ika.enums import Flags
class DisableApp(Command):
name = '앱비활성화'
aliases = (
'앱떼기',
'disableapp',
'detachapp',
)
syntax = '<#채널명> <앱 이름>'
regex = r'(?P<cname>#\S+) (?P<appname>\S+)'
permission = Permission.LOGIN_REQUIRED
description = (
'오징어 IRC 네트워크에 등록되어 있는 채널에 앱을 비활성화합니다.',
' ',
'이 명령을 사용할 시 오징어 IRC 네트워크에 등록되어 있는 채널에 특정 앱을 비활성화할 수 있습니다.',
'채널에 앱을 비활성화할 시 해당 앱의 개발자가 더 이상 해당 앱을 이용해 본 채널의 메시지를 읽거나 쓸 수 없습니다.',
' ',
'본 명령을 이용하기 위해서는 해당 채널에 운영자 (+Q) 이상의 권한이 필요합니다.',
)
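    # Rough English translation of the Korean help text above: deactivates an
    # app on a channel registered on the Ozinger IRC network, so the app's
    # developer can no longer read or write the channel's messages through it;
    # requires channel owner (+Q) or higher privileges.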
async def execute(self, user, cname, appname):
channel = Channel.get(cname)
if channel is None:
if user.is_operator:
self.err(user, f'해당 채널 \x02{cname}\x02 은 오징어 IRC 네트워크에 등록되어 있지 않습니다.')
else:
self.err(user, '해당 명령을 실행할 권한이 없습니다.')
if Flags.OWNER not in channel.get_flags_by_user(user):
if not user.is_operator:
self.err(user, '해당 명령을 실행할 권한이 없습니다.')
app = Application.objects.filter(name__iexact=appname).first()
if app is None or not channel.apps.filter(pk=app.pk).exists():
self.err(user, f'해당 채널 \x02{channel.name}\x02에 \x02{appname}\x02 앱이 활성화되어있지 않습니다.')
channel.apps.remove(app)
self.msg(user, f'해당 채널 \x02{channel.name}\x02에 \x02{app.name}\x02 앱이 비활성화되었습니다.')
self.writesvsuserline(f'NOTICE {channel.name} : {user.nick} 님이 채널에서 {app.name} 앱을 제거했습니다.')
| agpl-3.0 | 4,539,484,564,452,875,300 | 34.555556 | 99 | 0.58625 | false | 1.574803 | false | false | false | 0.0025 |
xsynergy510x/android_external_chromium_org | tools/cr/cr/commands/sync.py | 112 | 1871 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the sync command."""
import os.path
import cr
class SyncCommand(cr.Command):
"""The implementation of the sync command.
This command is a very thin shim over the gclient sync, and should remain so.
The only significant thing it adds is that the environment is set up so that
the run-hooks will do their work in the selected output directory.
"""
# The configuration loaded to support this command.
DEFAULT = cr.Config.From(
GCLIENT_BINARY=os.path.join('{DEPOT_TOOLS}', 'gclient'),
)
# A placeholder for the detected gclient environment
DETECTED = cr.Config('DETECTED')
def __init__(self):
super(SyncCommand, self).__init__()
self.help = 'Sync the source tree'
self.description = 'Run gclient sync with the right environment.'
def AddArguments(self, subparsers):
parser = super(SyncCommand, self).AddArguments(subparsers)
self.ConsumeArgs(parser, 'gclient')
# TODO(iancottrell): clean no-hooks support would be nice.
return parser
def Run(self):
self.Sync(cr.context.remains)
@staticmethod
def Sync(args):
cr.PrepareCommand.UpdateContext()
# TODO(iancottrell): we should probably run the python directly,
# rather than the shell wrapper
# TODO(iancottrell): try to help out when the local state is not a good
# one to do a sync in
cr.Host.Execute('{GCLIENT_BINARY}', 'sync', *args)
@classmethod
def ClassInit(cls):
    # Attempt to detect gclient and its parent repository.
gclient_binaries = cr.Host.SearchPath('gclient')
if gclient_binaries:
cls.DETECTED.Set(GCLIENT_BINARY=gclient_binaries[0])
cls.DETECTED.Set(DEPOT_TOOLS=os.path.dirname(gclient_binaries[0]))
| bsd-3-clause | 8,508,602,389,302,104,000 | 31.824561 | 79 | 0.711919 | false | 3.742 | false | false | false | 0.007483 |
sharkykh/SickRage | lib/stevedore/extension.py | 21 | 11403 | """ExtensionManager
"""
import pkg_resources
import logging
from .exception import NoMatches
LOG = logging.getLogger(__name__)
class Extension(object):
"""Book-keeping object for tracking extensions.
The arguments passed to the constructor are saved as attributes of
the instance using the same names, and can be accessed by the
callables passed to :meth:`map` or when iterating over an
:class:`ExtensionManager` directly.
:param name: The entry point name.
:type name: str
:param entry_point: The EntryPoint instance returned by
:mod:`pkg_resources`.
:type entry_point: EntryPoint
:param plugin: The value returned by entry_point.load()
:param obj: The object returned by ``plugin(*args, **kwds)`` if the
manager invoked the extension on load.
"""
def __init__(self, name, entry_point, plugin, obj):
self.name = name
self.entry_point = entry_point
self.plugin = plugin
self.obj = obj
@property
def entry_point_target(self):
"""The module and attribute referenced by this extension's entry_point.
:return: A string representation of the target of the entry point in
'dotted.module:object' format.
"""
return '%s:%s' % (self.entry_point.module_name,
self.entry_point.attrs[0])
class ExtensionManager(object):
"""Base class for all of the other managers.
:param namespace: The namespace for the entry points.
:type namespace: str
:param invoke_on_load: Boolean controlling whether to invoke the
object returned by the entry point after the driver is loaded.
:type invoke_on_load: bool
:param invoke_args: Positional arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_args: tuple
:param invoke_kwds: Named arguments to pass when invoking
the object returned by the entry point. Only used if invoke_on_load
is True.
:type invoke_kwds: dict
:param propagate_map_exceptions: Boolean controlling whether exceptions
are propagated up through the map call or whether they are logged and
then ignored
:type propagate_map_exceptions: bool
:param on_load_failure_callback: Callback function that will be called when
        an entrypoint cannot be loaded. The arguments that will be provided
when this is called (when an entrypoint fails to load) are
(manager, entrypoint, exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
"""
def __init__(self, namespace,
invoke_on_load=False,
invoke_args=(),
invoke_kwds={},
propagate_map_exceptions=False,
on_load_failure_callback=None,
verify_requirements=False):
self._init_attributes(
namespace,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback)
extensions = self._load_plugins(invoke_on_load,
invoke_args,
invoke_kwds,
verify_requirements)
self._init_plugins(extensions)
@classmethod
def make_test_instance(cls, extensions, namespace='TESTING',
propagate_map_exceptions=False,
on_load_failure_callback=None,
verify_requirements=False):
"""Construct a test ExtensionManager
Test instances are passed a list of extensions to work from rather
than loading them from entry points.
:param extensions: Pre-configured Extension instances to use
:type extensions: list of :class:`~stevedore.extension.Extension`
:param namespace: The namespace for the manager; used only for
identification since the extensions are passed in.
:type namespace: str
:param propagate_map_exceptions: When calling map, controls whether
exceptions are propagated up through the map call or whether they
are logged and then ignored
:type propagate_map_exceptions: bool
:param on_load_failure_callback: Callback function that will
            be called when an entrypoint cannot be loaded. The
arguments that will be provided when this is called (when
an entrypoint fails to load) are (manager, entrypoint,
exception)
:type on_load_failure_callback: function
:param verify_requirements: Use setuptools to enforce the
dependencies of the plugin(s) being loaded. Defaults to False.
:type verify_requirements: bool
:return: The manager instance, initialized for testing
"""
o = cls.__new__(cls)
o._init_attributes(namespace,
propagate_map_exceptions=propagate_map_exceptions,
on_load_failure_callback=on_load_failure_callback)
o._init_plugins(extensions)
return o
def _init_attributes(self, namespace, propagate_map_exceptions=False,
on_load_failure_callback=None):
self.namespace = namespace
self.propagate_map_exceptions = propagate_map_exceptions
self._on_load_failure_callback = on_load_failure_callback
def _init_plugins(self, extensions):
self.extensions = extensions
self._extensions_by_name = None
ENTRY_POINT_CACHE = {}
def _find_entry_points(self, namespace):
if namespace not in self.ENTRY_POINT_CACHE:
eps = list(pkg_resources.iter_entry_points(namespace))
self.ENTRY_POINT_CACHE[namespace] = eps
return self.ENTRY_POINT_CACHE[namespace]
def _load_plugins(self, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements):
extensions = []
for ep in self._find_entry_points(self.namespace):
LOG.debug('found extension %r', ep)
try:
ext = self._load_one_plugin(ep,
invoke_on_load,
invoke_args,
invoke_kwds,
verify_requirements,
)
if ext:
extensions.append(ext)
except (KeyboardInterrupt, AssertionError):
raise
except Exception as err:
if self._on_load_failure_callback is not None:
self._on_load_failure_callback(self, ep, err)
else:
LOG.error('Could not load %r: %s', ep.name, err)
LOG.exception(err)
return extensions
def _load_one_plugin(self, ep, invoke_on_load, invoke_args, invoke_kwds,
verify_requirements):
# NOTE(dhellmann): Using require=False is deprecated in
# setuptools 11.3.
if hasattr(ep, 'resolve') and hasattr(ep, 'require'):
if verify_requirements:
ep.require()
plugin = ep.resolve()
else:
plugin = ep.load(require=verify_requirements)
if invoke_on_load:
obj = plugin(*invoke_args, **invoke_kwds)
else:
obj = None
return Extension(ep.name, ep, plugin, obj)
def names(self):
"Returns the names of the discovered extensions"
# We want to return the names of the extensions in the order
# they would be used by map(), since some subclasses change
# that order.
return [e.name for e in self.extensions]
def map(self, func, *args, **kwds):
"""Iterate over the extensions invoking func() for each.
The signature for func() should be::
def func(ext, *args, **kwds):
pass
The first argument to func(), 'ext', is the
:class:`~stevedore.extension.Extension` instance.
Exceptions raised from within func() are propagated up and
processing stopped if self.propagate_map_exceptions is True,
otherwise they are logged and ignored.
:param func: Callable to invoke for each extension.
:param args: Variable arguments to pass to func()
:param kwds: Keyword arguments to pass to func()
:returns: List of values returned from func()
"""
if not self.extensions:
# FIXME: Use a more specific exception class here.
raise NoMatches('No %s extensions found' % self.namespace)
response = []
for e in self.extensions:
self._invoke_one_plugin(response.append, func, e, args, kwds)
return response
@staticmethod
def _call_extension_method(extension, method_name, *args, **kwds):
return getattr(extension.obj, method_name)(*args, **kwds)
def map_method(self, method_name, *args, **kwds):
"""Iterate over the extensions invoking a method by name.
This is equivalent of using :meth:`map` with func set to
`lambda x: x.obj.method_name()`
while being more convenient.
Exceptions raised from within the called method are propagated up
and processing stopped if self.propagate_map_exceptions is True,
otherwise they are logged and ignored.
.. versionadded:: 0.12
:param method_name: The extension method name
to call for each extension.
:param args: Variable arguments to pass to method
:param kwds: Keyword arguments to pass to method
:returns: List of values returned from methods
"""
return self.map(self._call_extension_method,
method_name, *args, **kwds)
def _invoke_one_plugin(self, response_callback, func, e, args, kwds):
try:
response_callback(func(e, *args, **kwds))
except Exception as err:
if self.propagate_map_exceptions:
raise
else:
LOG.error('error calling %r: %s', e.name, err)
LOG.exception(err)
def __iter__(self):
"""Produce iterator for the manager.
Iterating over an ExtensionManager produces the :class:`Extension`
instances in the order they would be invoked.
"""
return iter(self.extensions)
def __getitem__(self, name):
"""Return the named extension.
Accessing an ExtensionManager as a dictionary (``em['name']``)
produces the :class:`Extension` instance with the
specified name.
"""
if self._extensions_by_name is None:
d = {}
for e in self.extensions:
d[e.name] = e
self._extensions_by_name = d
return self._extensions_by_name[name]
def __contains__(self, name):
"""Return true if name is in list of enabled extensions.
"""
return any(extension.name == name for extension in self.extensions)
| gpl-3.0 | -7,815,812,277,166,083,000 | 38.456747 | 79 | 0.600719 | false | 4.710037 | false | false | false | 0 |
jonathonwalz/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_list.py | 36 | 4523 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_list
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: List Ansible Tower jobs.
description:
- List Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
status:
description:
- Only list jobs with this status.
default: null
choices: ['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']
page:
description:
- Page number of the results to fetch.
default: null
all_pages:
description:
- Fetch all the pages and return a single result.
default: False
query:
description:
- Query used to further filter the list of jobs. {"foo":"bar"} will be passed at ?foo=bar
default: null
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: List running jobs for the testing.yml playbook
tower_job_list:
status: running
query: {"playbook": "testing.yml"}
register: testing_jobs
tower_config_file: "~/tower_cli.cfg"
'''
RETURN = '''
count:
    description: Total count of objects returned
returned: success
type: int
sample: 51
next:
description: next page available for the listing
returned: success
type: int
sample: 3
previous:
description: previous page available for the listing
returned: success
type: int
sample: 1
results:
description: a list of job objects represented as dictionaries
returned: success
type: list
sample: [{"allow_simultaneous": false, "artifacts": {}, "ask_credential_on_launch": false,
"ask_inventory_on_launch": false, "ask_job_type_on_launch": false, "failed": false,
"finished": "2017-02-22T15:09:05.633942Z", "force_handlers": false, "forks": 0, "id": 2,
"inventory": 1, "job_explanation": "", "job_tags": "", "job_template": 5, "job_type": "run"}, ...]
'''
from ansible.module_utils.basic import AnsibleModule
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import (
tower_auth_config,
tower_check_mode,
tower_argument_spec,
)
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
status=dict(choices=['pending', 'waiting', 'running', 'error', 'failed', 'canceled', 'successful']),
page=dict(type='int'),
all_pages=dict(type='bool', default=False),
query=dict(type='dict'),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
json_output = {}
query = module.params.get('query')
status = module.params.get('status')
page = module.params.get('page')
all_pages = module.params.get('all_pages')
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
try:
job = tower_cli.get_resource('job')
params = {'status': status, 'page': page, 'all_pages': all_pages}
if query:
params['query'] = query.items()
json_output = job.list(**params)
except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to list jobs: {0}'.format(excinfo), changed=False)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,390,392,128,143,225,000 | 29.355705 | 112 | 0.640725 | false | 3.797649 | false | false | false | 0.00199 |
ndp-systemes/odoo-addons | stock_account_improved/stock_account_improved.py | 1 | 1355 | # -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp import models, api
class StockAccountImprovedStockMove(models.Model):
_inherit = 'stock.move'
@api.model
def default_get(self, fields_list):
result = super(StockAccountImprovedStockMove, self).default_get(fields_list)
picking_id = result.get('default_picking_id') or self.env.context.get('default_picking_id')
if picking_id:
picking = self.env['stock.picking'].browse(picking_id)
if picking.invoice_state:
result['invoice_state'] = picking.invoice_state
return result
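# A minimal sketch of the intended behaviour (hypothetical record names, using
# the Odoo 8 new-style API):
#
#   defaults = env['stock.move'].with_context(
#       default_picking_id=picking.id).default_get(['invoice_state'])
#   # -> {'invoice_state': picking.invoice_state} when the picking has one set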
| agpl-3.0 | 2,104,406,411,903,891,000 | 38.823529 | 99 | 0.691285 | false | 3.846591 | false | false | false | 0.001477 |
dirkjankrijnders/aubio | python/aubio/task/onset.py | 13 | 6828 | from aubio.task.task import task
from aubio.aubioclass import *
class taskonset(task):
def __init__(self,input,output=None,params=None):
""" open the input file and initialize arguments
parameters should be set *before* calling this method.
"""
task.__init__(self,input,params=params)
self.opick = onsetpick(self.params.bufsize,
self.params.hopsize,
self.myvec,
self.params.threshold,
mode=self.params.onsetmode,
dcthreshold=self.params.dcthreshold,
derivate=self.params.derivate)
self.olist = []
self.ofunc = []
self.maxofunc = 0
self.last = 0
if self.params.localmin:
self.ovalist = [0., 0., 0., 0., 0.]
def __call__(self):
task.__call__(self)
isonset,val = self.opick.do(self.myvec)
if (aubio_silence_detection(self.myvec(),self.params.silence)):
isonset=0
if self.params.storefunc:
self.ofunc.append(val)
if self.params.localmin:
if val > 0: self.ovalist.append(val)
else: self.ovalist.append(0)
self.ovalist.pop(0)
if (isonset > 0.):
if self.params.localmin:
# find local minima before peak
i=len(self.ovalist)-1
while self.ovalist[i-1] < self.ovalist[i] and i > 0:
i -= 1
now = (self.frameread+1-i)
else:
now = self.frameread
# take back delay
if self.params.delay != 0.: now -= self.params.delay
if now < 0 :
now = 0
if self.params.mintol:
# prune doubled
if (now - self.last) > self.params.mintol:
self.last = now
return now, val
else:
return now, val
def fprint(self,foo):
print self.params.step*foo[0]
def eval(self,inputdata,ftru,mode='roc',vmode=''):
from aubio.txtfile import read_datafile
from aubio.onsetcompare import onset_roc, onset_diffs, onset_rocloc
ltru = read_datafile(ftru,depth=0)
lres = []
for i in range(len(inputdata)): lres.append(inputdata[i][0]*self.params.step)
if vmode=='verbose':
print "Running with mode %s" % self.params.onsetmode,
print " and threshold %f" % self.params.threshold,
print " on file", self.input
#print ltru; print lres
if mode == 'local':
l = onset_diffs(ltru,lres,self.params.tol)
mean = 0
for i in l: mean += i
if len(l): mean = "%.3f" % (mean/len(l))
else: mean = "?0"
return l, mean
elif mode == 'roc':
self.orig, self.missed, self.merged, \
self.expc, self.bad, self.doubled = \
onset_roc(ltru,lres,self.params.tol)
elif mode == 'rocloc':
self.v = {}
self.v['orig'], self.v['missed'], self.v['Tm'], \
self.v['expc'], self.v['bad'], self.v['Td'], \
self.v['l'], self.v['labs'] = \
onset_rocloc(ltru,lres,self.params.tol)
def plot(self,onsets,ofunc,wplot,oplots,nplot=False):
import Gnuplot, Gnuplot.funcutils
import aubio.txtfile
import os.path
from numpy import arange, array, ones
from aubio.onsetcompare import onset_roc
x1,y1,y1p = [],[],[]
oplot = []
if self.params.onsetmode in ('mkl','kl'): ofunc[0:10] = [0] * 10
self.lenofunc = len(ofunc)
self.maxofunc = max(ofunc)
# onset detection function
downtime = arange(len(ofunc))*self.params.step
oplot.append(Gnuplot.Data(downtime,ofunc,with_='lines',title=self.params.onsetmode))
# detected onsets
if not nplot:
for i in onsets:
x1.append(i[0]*self.params.step)
y1.append(self.maxofunc)
y1p.append(-self.maxofunc)
#x1 = array(onsets)*self.params.step
#y1 = self.maxofunc*ones(len(onsets))
if x1:
oplot.append(Gnuplot.Data(x1,y1,with_='impulses'))
wplot.append(Gnuplot.Data(x1,y1p,with_='impulses'))
oplots.append((oplot,self.params.onsetmode,self.maxofunc))
# check if ground truth datafile exists
datafile = self.input.replace('.wav','.txt')
if datafile == self.input: datafile = ""
if not os.path.isfile(datafile):
self.title = "" #"(no ground truth)"
else:
t_onsets = aubio.txtfile.read_datafile(datafile)
			# ndarray.resize() operates in place and returns None, so build the
			# flattened copy explicitly instead
			x2 = array(t_onsets).flatten()[:len(t_onsets)]
y2 = self.maxofunc*ones(len(t_onsets))
wplot.append(Gnuplot.Data(x2,y2,with_='impulses'))
tol = 0.050
orig, missed, merged, expc, bad, doubled = \
onset_roc(x2,x1,tol)
self.title = "GD %2.3f%% FP %2.3f%%" % \
((100*float(orig-missed-merged)/(orig)),
(100*float(bad+doubled)/(orig)))
def plotplot(self,wplot,oplots,outplot=None,extension=None,xsize=1.,ysize=1.,spectro=False):
from aubio.gnuplot import gnuplot_create, audio_to_array, make_audio_plot, audio_to_spec
import re
# prepare the plot
g = gnuplot_create(outplot=outplot, extension=extension)
g('set title \'%s\'' % (re.sub('.*/','',self.input)))
if spectro:
g('set size %f,%f' % (xsize,1.3*ysize) )
else:
g('set size %f,%f' % (xsize,ysize) )
g('set multiplot')
# hack to align left axis
g('set lmargin 3')
g('set rmargin 6')
if spectro:
import Gnuplot
minf = 50
maxf = 500
data,time,freq = audio_to_spec(self.input,minf=minf,maxf=maxf)
g('set size %f,%f' % (1.24*xsize , 0.34*ysize) )
g('set origin %f,%f' % (-0.12,0.65*ysize))
g('set xrange [0.:%f]' % time[-1])
g('set yrange [%f:%f]' % (minf,maxf))
g('set pm3d map')
g('unset colorbox')
g('set lmargin 0')
g('set rmargin 0')
g('set tmargin 0')
g('set palette rgbformulae -25,-24,-32')
g.xlabel('time (s)',offset=(0,1.))
g.ylabel('freq (Hz)')
g('set origin 0,%f' % (1.0*ysize) )
g('set format x "%1.1f"')
#if log:
# g('set yrange [%f:%f]' % (max(10,minf),maxf))
# g('set log y')
g.splot(Gnuplot.GridData(data,time,freq, binary=1, title=''))
else:
# plot waveform and onsets
time,data = audio_to_array(self.input)
wplot = [make_audio_plot(time,data)] + wplot
g('set origin 0,%f' % (0.7*ysize) )
g('set size %f,%f' % (xsize,0.3*ysize))
g('set format y "%1f"')
g('set xrange [0:%f]' % max(time))
g('set yrange [-1:1]')
g('set noytics')
g('set y2tics -1,1')
g.xlabel('time (s)',offset=(0,0.7))
g.ylabel('amplitude')
g.plot(*wplot)
# default settings for next plots
g('unset title')
g('set format x ""')
g('set format y "%3e"')
g('set tmargin 0')
g.xlabel('')
N = len(oplots)
y = 0.7*ysize # the vertical proportion of the plot taken by onset functions
delta = 0.035 # the constant part of y taken by last plot label and data
for i in range(N):
# plot onset detection functions
g('set size %f,%f' % ( xsize, (y-delta)/N))
g('set origin 0,%f' % ((N-i-1)*(y-delta)/N + delta ))
g('set nokey')
g('set xrange [0:%f]' % (self.lenofunc*self.params.step))
g('set yrange [0:%f]' % (1.1*oplots[i][2]))
g('set y2tics ("0" 0, "%d" %d)' % (round(oplots[i][2]),round(oplots[i][2])))
g.ylabel(oplots[i][1])
if i == N-1:
g('set size %f,%f' % ( xsize, (y-delta)/N + delta ) )
g('set origin 0,0')
g.xlabel('time (s)', offset=(0,0.7))
g('set format x')
g.plot(*oplots[i][0])
g('unset multiplot')
| gpl-3.0 | 6,215,116,083,548,806,000 | 30.036364 | 93 | 0.624048 | false | 2.490153 | false | false | false | 0.049209 |
usc-isi/essex-baremetal-support | nova/api/openstack/compute/contrib/networks.py | 2 | 5028 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import extensions
from nova import exception
from nova import flags
from nova import log as logging
import nova.network.api
from nova.rpc import common
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'networks')
def network_dict(network):
if network:
fields = ('bridge', 'vpn_public_port', 'dhcp_start',
'bridge_interface', 'updated_at', 'id', 'cidr_v6',
'deleted_at', 'gateway', 'label', 'project_id',
'vpn_private_address', 'deleted', 'vlan', 'broadcast',
'netmask', 'injected', 'cidr', 'vpn_public_address',
'multi_host', 'dns1', 'host', 'gateway_v6', 'netmask_v6',
'created_at')
result = dict((field, network[field]) for field in fields)
if 'uuid' in network:
result['id'] = network['uuid']
return result
else:
return {}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = network_api or nova.network.api.API()
def action(self, req, id, body):
_actions = {
'disassociate': self._disassociate,
}
for action, data in body.iteritems():
try:
return _actions[action](req, id, body)
except KeyError:
msg = _("Network does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
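    # An example request handled above (illustrative): POST to
    # /os-networks/{network_id}/action with the body {"disassociate": null}.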
def _disassociate(self, request, network_id, body):
context = request.environ['nova.context']
authorize(context)
LOG.debug(_("Disassociating network with id %s") % network_id)
try:
self.network_api.disassociate(context, network_id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
except common.RemoteError as ex:
if ex.exc_type in ["NetworkNotFound", "NetworkNotFoundForUUID"]:
raise exc.HTTPNotFound(_("Network not found"))
else:
raise
return exc.HTTPAccepted()
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = self.network_api.get_all(context)
result = [network_dict(net_ref) for net_ref in networks]
return {'networks': result}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
except common.RemoteError as ex:
if ex.exc_type in ["NetworkNotFound", "NetworkNotFoundForUUID"]:
raise exc.HTTPNotFound(_("Network not found"))
else:
raise
return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
LOG.info(_("Deleting network with id %s") % id)
try:
self.network_api.delete(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
except common.RemoteError as ex:
if ex.exc_type in ["NetworkNotFound", "NetworkNotFoundForUUID"]:
raise exc.HTTPNotFound(_("Network not found"))
else:
raise
return exc.HTTPAccepted()
def create(self, req, id, body=None):
raise exc.HTTPNotImplemented()
class Networks(extensions.ExtensionDescriptor):
"""Admin-only Network Management Extension"""
name = "Networks"
alias = "os-networks"
namespace = "http://docs.openstack.org/compute/ext/networks/api/v1.1"
updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
member_actions = {'action': 'POST'}
res = extensions.ResourceExtension('os-networks',
NetworkController(),
member_actions=member_actions)
return [res]
| apache-2.0 | -7,931,237,392,861,441,000 | 34.914286 | 78 | 0.605807 | false | 4.304795 | false | false | false | 0.000199 |
pasqualguerrero/django-oscar | src/oscar/test/factories/catalogue.py | 47 | 3004 | # coding=utf-8
import factory
from oscar.core.loading import get_model
__all__ = [
'ProductClassFactory', 'ProductFactory',
'CategoryFactory', 'ProductCategoryFactory',
'ProductAttributeFactory', 'AttributeOptionGroupFactory',
'OptionFactory', 'AttributeOptionFactory',
'ProductAttributeValueFactory', 'ProductReviewFactory',
]
class ProductClassFactory(factory.DjangoModelFactory):
name = "Books"
requires_shipping = True
track_stock = True
class Meta:
model = get_model('catalogue', 'ProductClass')
class ProductFactory(factory.DjangoModelFactory):
class Meta:
model = get_model('catalogue', 'Product')
structure = Meta.model.STANDALONE
upc = factory.Sequence(lambda n: '978080213020%d' % n)
title = "A confederacy of dunces"
product_class = factory.SubFactory(ProductClassFactory)
stockrecords = factory.RelatedFactory(
'oscar.test.factories.StockRecordFactory', 'product')
categories = factory.RelatedFactory(
'oscar.test.factories.ProductCategoryFactory', 'product')
class CategoryFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Category %d' % n)
# Very naive handling of treebeard node fields. Works though!
depth = 1
path = factory.Sequence(lambda n: '%04d' % n)
class Meta:
model = get_model('catalogue', 'Category')
class ProductCategoryFactory(factory.DjangoModelFactory):
category = factory.SubFactory(CategoryFactory)
class Meta:
model = get_model('catalogue', 'ProductCategory')
class ProductAttributeFactory(factory.DjangoModelFactory):
code = name = 'weight'
product_class = factory.SubFactory(ProductClassFactory)
type = "float"
class Meta:
model = get_model('catalogue', 'ProductAttribute')
class OptionFactory(factory.DjangoModelFactory):
class Meta:
model = get_model('catalogue', 'Option')
name = 'example option'
code = 'example'
type = Meta.model.OPTIONAL
class AttributeOptionFactory(factory.DjangoModelFactory):
    # Ideally we'd get_or_create an AttributeOptionGroup here, but I'm not
    # aware of how to avoid creating a unique option group for each call of
    # the factory
option = factory.Sequence(lambda n: 'Option %d' % n)
class Meta:
model = get_model('catalogue', 'AttributeOption')
class AttributeOptionGroupFactory(factory.DjangoModelFactory):
name = u'Grüppchen'
class Meta:
model = get_model('catalogue', 'AttributeOptionGroup')
class ProductAttributeValueFactory(factory.DjangoModelFactory):
attribute = factory.SubFactory(ProductAttributeFactory)
product = factory.SubFactory(ProductFactory)
class Meta:
model = get_model('catalogue', 'ProductAttributeValue')
class ProductReviewFactory(factory.DjangoModelFactory):
score = 5
product = factory.SubFactory(ProductFactory, stockrecords=[])
class Meta:
model = get_model('reviews', 'ProductReview')
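# A minimal usage sketch (illustrative only; assumes a configured Django test
# environment with Oscar installed):
#
#   product = ProductFactory(title="Example product")
#   review = ProductReviewFactory(product=product, score=4)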
| bsd-3-clause | -2,586,670,222,495,006,700 | 27.330189 | 75 | 0.710623 | false | 4.074627 | false | false | false | 0 |
arjunbm13/youtube-dl | youtube_dl/extractor/nba.py | 101 | 2338 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
remove_end,
parse_duration,
)
class NBAIE(InfoExtractor):
_VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
_TESTS = [{
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
'info_dict': {
'id': '0021200253-okc-bkn-recap.nba',
'ext': 'mp4',
'title': 'Thunder vs. Nets',
'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
'duration': 181,
},
}, {
'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
'only_matching': True,
}, {
'url': 'http://watch.nba.com/nba/video/channels/playoffs/2015/05/20/0041400301-cle-atl-recap.nba',
'info_dict': {
'id': '0041400301-cle-atl-recap.nba',
'ext': 'mp4',
'title': 'NBA GAME TIME | Video: Hawks vs. Cavaliers Game 1',
'description': 'md5:8094c3498d35a9bd6b1a8c396a071b4d',
'duration': 228,
},
'params': {
'skip_download': True,
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = 'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
shortened_video_id = video_id.rpartition('/')[2]
title = remove_end(
self._og_search_title(webpage, default=shortened_video_id), ' : NBA.com')
description = self._og_search_description(webpage)
duration_str = self._html_search_meta(
'duration', webpage, 'duration', default=None)
if not duration_str:
duration_str = self._html_search_regex(
r'Duration:</b>\s*(\d+:\d+)', webpage, 'duration', fatal=False)
duration = parse_duration(duration_str)
return {
'id': shortened_video_id,
'url': video_url,
'title': title,
'description': description,
'duration': duration,
}
| unlicense | 1,364,132,447,214,559,000 | 36.111111 | 128 | 0.55432 | false | 3.121495 | false | false | false | 0.002994 |
cpollard1001/FreeCAD_sf_master | src/Mod/Arch/ArchAxis.py | 8 | 19447 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,Draft,math,DraftVecUtils,ArchCommands
from FreeCAD import Vector
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from DraftTools import translate
from pivy import coin
else:
def translate(ctxt,txt):
return txt
__title__="FreeCAD Axis System"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
def makeAxis(num=5,size=1000,name="Axes"):
'''makeAxis(num,size): makes an Axis System
based on the given number of axes and interval distances'''
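    # Example (assumes an active FreeCAD document and GUI; values are
    # illustrative):
    #
    #   axes = makeAxis(num=5, size=1000, name="Axes")
    #   axes.ViewObject.BubbleSize = 750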
obj = FreeCAD.ActiveDocument.addObject("App::FeaturePython",name)
obj.Label = translate("Arch",name)
_Axis(obj)
if FreeCAD.GuiUp:
_ViewProviderAxis(obj.ViewObject)
if num:
dist = []
angles = []
for i in range(num):
dist.append(float(size))
angles.append(float(0))
obj.Distances = dist
obj.Angles = angles
FreeCAD.ActiveDocument.recompute()
return obj
class _CommandAxis:
"the Arch Axis command definition"
def GetResources(self):
return {'Pixmap' : 'Arch_Axis',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Arch_Axis","Axis"),
'Accel': "A, X",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Arch_Axis","Creates an axis system.")}
def Activated(self):
FreeCAD.ActiveDocument.openTransaction(translate("Arch","Create Axis"))
FreeCADGui.addModule("Arch")
sel = FreeCADGui.Selection.getSelection()
st = Draft.getObjectsOfType(sel,"Structure")
if st:
FreeCADGui.doCommand("axe = Arch.makeAxis()")
FreeCADGui.doCommand("Arch.makeStructuralSystem(" + ArchCommands.getStringList(st) + ",[axe])")
else:
FreeCADGui.doCommand("Arch.makeAxis()")
FreeCAD.ActiveDocument.commitTransaction()
def IsActive(self):
return not FreeCAD.ActiveDocument is None
class _Axis:
"The Axis object"
def __init__(self,obj):
obj.addProperty("App::PropertyFloatList","Distances","Arch", translate("Arch","The intervals between axes"))
obj.addProperty("App::PropertyFloatList","Angles","Arch", translate("Arch","The angles of each axis"))
obj.addProperty("App::PropertyLength","Length","Arch", translate("Arch","The length of the axes"))
obj.addProperty("App::PropertyPlacement","Placement","Base","")
obj.addProperty("Part::PropertyPartShape","Shape","Base","")
self.Type = "Axis"
obj.Length=3000
obj.Proxy = self
def execute(self,obj):
import Part
geoms = []
dist = 0
if obj.Distances:
if len(obj.Distances) == len(obj.Angles):
for i in range(len(obj.Distances)):
if hasattr(obj.Length,"Value"):
l = obj.Length.Value
else:
l = obj.Length
dist += obj.Distances[i]
ang = math.radians(obj.Angles[i])
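                    # each axis starts at (dist, 0) and leans by 'ang', so its
                    # far end is shifted along X by length*tan(ang)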
p1 = Vector(dist,0,0)
p2 = Vector(dist+(l/math.cos(ang))*math.sin(ang),l,0)
geoms.append(Part.Line(p1,p2).toShape())
if geoms:
sh = Part.Compound(geoms)
sh.Placement = obj.Placement
obj.Shape = sh
def onChanged(self,obj,prop):
if prop in ["Angles","Distances","Placement"]:
self.execute(obj)
def __getstate__(self):
return self.Type
def __setstate__(self,state):
if state:
self.Type = state
class _ViewProviderAxis:
"A View Provider for the Axis object"
def __init__(self,vobj):
vobj.addProperty("App::PropertyLength","BubbleSize","Arch", translate("Arch","The size of the axis bubbles"))
vobj.addProperty("App::PropertyEnumeration","NumberingStyle","Arch", translate("Arch","The numbering style"))
vobj.addProperty("App::PropertyEnumeration","DrawStyle","Base","")
vobj.addProperty("App::PropertyFloat","LineWidth","Base","")
vobj.addProperty("App::PropertyColor","LineColor","Base","")
vobj.NumberingStyle = ["1,2,3","01,02,03","001,002,003","A,B,C","a,b,c","I,II,III","L0,L1,L2"]
vobj.DrawStyle = ["Solid","Dashed","Dotted","Dashdot"]
vobj.Proxy = self
vobj.BubbleSize = 500
vobj.LineWidth = 1
vobj.LineColor = (0.13,0.15,0.37)
vobj.DrawStyle = "Dashdot"
vobj.NumberingStyle = "1,2,3"
def getIcon(self):
import Arch_rc
return ":/icons/Arch_Axis_Tree.svg"
def claimChildren(self):
return []
def attach(self, vobj):
self.bubbles = None
self.bubbletexts = []
sep = coin.SoSeparator()
self.mat = coin.SoMaterial()
self.linestyle = coin.SoDrawStyle()
self.linecoords = coin.SoCoordinate3()
self.lineset = coin.SoType.fromName("SoBrepEdgeSet").createInstance()
self.bubbleset = coin.SoSeparator()
sep.addChild(self.mat)
sep.addChild(self.linestyle)
sep.addChild(self.linecoords)
sep.addChild(self.lineset)
sep.addChild(self.bubbleset)
vobj.addDisplayMode(sep,"Default")
self.onChanged(vobj,"BubbleSize")
def getDisplayModes(self,vobj):
return ["Default"]
def getDefaultDisplayMode(self):
return "Default"
def setDisplayMode(self,mode):
return mode
def updateData(self,obj,prop):
if prop == "Shape":
if obj.Shape:
if obj.Shape.Edges:
verts = []
vset = []
i = 0
for e in obj.Shape.Edges:
for v in e.Vertexes:
verts.append([v.X,v.Y,v.Z])
vset.append(i)
i += 1
vset.append(-1)
self.linecoords.point.setValues(verts)
self.lineset.coordIndex.setValues(0,len(vset),vset)
self.lineset.coordIndex.setNum(len(vset))
self.onChanged(obj.ViewObject,"BubbleSize")
def onChanged(self, vobj, prop):
if prop == "LineColor":
l = vobj.LineColor
self.mat.diffuseColor.setValue([l[0],l[1],l[2]])
elif prop == "DrawStyle":
if vobj.DrawStyle == "Solid":
self.linestyle.linePattern = 0xffff
elif vobj.DrawStyle == "Dashed":
self.linestyle.linePattern = 0xf00f
elif vobj.DrawStyle == "Dotted":
self.linestyle.linePattern = 0x0f0f
else:
self.linestyle.linePattern = 0xff88
elif prop == "LineWidth":
self.linestyle.lineWidth = vobj.LineWidth
elif prop == "BubbleSize":
if hasattr(self,"bubbleset"):
if self.bubbles:
self.bubbleset.removeChild(self.bubbles)
self.bubbles = None
if vobj.Object.Shape:
if vobj.Object.Shape.Edges:
self.bubbles = coin.SoSeparator()
self.bubblestyle = coin.SoDrawStyle()
self.bubblestyle.linePattern = 0xffff
self.bubbles.addChild(self.bubblestyle)
import Part,Draft
self.bubbletexts = []
for i in range(len(vobj.Object.Shape.Edges)):
verts = vobj.Object.Shape.Edges[i].Vertexes
p1 = verts[0].Point
p2 = verts[1].Point
dv = p2.sub(p1)
dv.normalize()
if hasattr(vobj.BubbleSize,"Value"):
rad = vobj.BubbleSize.Value/2
else:
rad = vobj.BubbleSize/2
center = p2.add(dv.scale(rad,rad,rad))
buf = Part.makeCircle(rad,center).writeInventor()
try:
cin = coin.SoInput()
cin.setBuffer(buf)
cob = coin.SoDB.readAll(cin)
except:
import re
# workaround for pivy SoInput.setBuffer() bug
buf = buf.replace("\n","")
pts = re.findall("point \[(.*?)\]",buf)[0]
pts = pts.split(",")
pc = []
for p in pts:
v = p.strip().split()
pc.append([float(v[0]),float(v[1]),float(v[2])])
coords = coin.SoCoordinate3()
coords.point.setValues(0,len(pc),pc)
line = coin.SoLineSet()
line.numVertices.setValue(-1)
else:
coords = cob.getChild(1).getChild(0).getChild(2)
line = cob.getChild(1).getChild(0).getChild(3)
self.bubbles.addChild(coords)
self.bubbles.addChild(line)
st = coin.SoSeparator()
tr = coin.SoTransform()
tr.translation.setValue((center.x,center.y-rad/2,center.z))
fo = coin.SoFont()
fo.name = Draft.getParam("textfont","Arial,Sans")
fo.size = rad*1.5
tx = coin.SoAsciiText()
tx.justification = coin.SoText2.CENTER
self.bubbletexts.append(tx)
st.addChild(tr)
st.addChild(fo)
st.addChild(tx)
self.bubbles.addChild(st)
self.bubbleset.addChild(self.bubbles)
self.onChanged(vobj,"NumberingStyle")
elif prop == "NumberingStyle":
if hasattr(self,"bubbletexts"):
chars = "abcdefghijklmnopqrstuvwxyz"
roman=(('M',1000),('CM',900),('D',500),('CD',400),
('C',100),('XC',90),('L',50),('XL',40),
('X',10),('IX',9),('V',5),('IV',4),('I',1))
num = 0
for t in self.bubbletexts:
if hasattr(vobj,"NumberingStyle"):
if vobj.NumberingStyle == "1,2,3":
t.string = str(num+1)
elif vobj.NumberingStyle == "01,02,03":
t.string = str(num+1).zfill(2)
elif vobj.NumberingStyle == "001,002,003":
t.string = str(num+1).zfill(3)
elif vobj.NumberingStyle == "A,B,C":
result = ""
base = num/26
if base:
result += chars[base].upper()
remainder = num % 26
result += chars[remainder].upper()
t.string = result
elif vobj.NumberingStyle == "a,b,c":
result = ""
base = num/26
if base:
result += chars[base]
remainder = num % 26
result += chars[remainder]
t.string = result
elif vobj.NumberingStyle == "I,II,III":
result = ""
num += 1
for numeral, integer in roman:
while num >= integer:
result += numeral
num -= integer
t.string = result
elif vobj.NumberingStyle == "L0,L1,L2":
t.string = "L"+str(num)
else:
t.string = str(num+1)
num += 1
def setEdit(self,vobj,mode=0):
taskd = _AxisTaskPanel()
taskd.obj = vobj.Object
taskd.update()
FreeCADGui.Control.showDialog(taskd)
return True
def unsetEdit(self,vobj,mode):
FreeCADGui.Control.closeDialog()
return
def doubleClicked(self,vobj):
self.setEdit(vobj)
def __getstate__(self):
return None
def __setstate__(self,state):
return None
class _AxisTaskPanel:
'''The editmode TaskPanel for Axis objects'''
def __init__(self):
# the panel has a tree widget that contains categories
# for the subcomponents, such as additions, subtractions.
# the categories are shown only if they are not empty.
self.updating = False
self.obj = None
self.form = QtGui.QWidget()
self.form.setObjectName("TaskPanel")
self.grid = QtGui.QGridLayout(self.form)
self.grid.setObjectName("grid")
self.title = QtGui.QLabel(self.form)
self.grid.addWidget(self.title, 0, 0, 1, 2)
# tree
self.tree = QtGui.QTreeWidget(self.form)
self.grid.addWidget(self.tree, 1, 0, 1, 2)
self.tree.setColumnCount(3)
self.tree.header().resizeSection(0,50)
self.tree.header().resizeSection(1,80)
self.tree.header().resizeSection(2,60)
# buttons
self.addButton = QtGui.QPushButton(self.form)
self.addButton.setObjectName("addButton")
self.addButton.setIcon(QtGui.QIcon(":/icons/Arch_Add.svg"))
self.grid.addWidget(self.addButton, 3, 0, 1, 1)
self.addButton.setEnabled(True)
self.delButton = QtGui.QPushButton(self.form)
self.delButton.setObjectName("delButton")
self.delButton.setIcon(QtGui.QIcon(":/icons/Arch_Remove.svg"))
self.grid.addWidget(self.delButton, 3, 1, 1, 1)
self.delButton.setEnabled(True)
QtCore.QObject.connect(self.addButton, QtCore.SIGNAL("clicked()"), self.addElement)
QtCore.QObject.connect(self.delButton, QtCore.SIGNAL("clicked()"), self.removeElement)
QtCore.QObject.connect(self.tree, QtCore.SIGNAL("itemChanged(QTreeWidgetItem *, int)"), self.edit)
self.update()
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Close)
def update(self):
'fills the treewidget'
self.updating = True
self.tree.clear()
if self.obj:
for i in range(len(self.obj.Distances)):
item = QtGui.QTreeWidgetItem(self.tree)
item.setText(0,str(i+1))
item.setText(1,str(self.obj.Distances[i]))
item.setText(2,str(self.obj.Angles[i]))
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
item.setTextAlignment(0,QtCore.Qt.AlignLeft)
self.retranslateUi(self.form)
self.updating = False
def addElement(self):
item = QtGui.QTreeWidgetItem(self.tree)
item.setText(0,str(self.tree.topLevelItemCount()))
item.setText(1,"1.0")
item.setText(2,"0.0")
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
self.resetObject()
def removeElement(self):
it = self.tree.currentItem()
if it:
nr = int(it.text(0))-1
self.resetObject(remove=nr)
self.update()
def edit(self,item,column):
if not self.updating:
self.resetObject()
def resetObject(self,remove=None):
"transfers the values from the widget to the object"
d = []
a = []
for i in range(self.tree.topLevelItemCount()):
it = self.tree.findItems(str(i+1),QtCore.Qt.MatchExactly,0)[0]
if (remove == None) or (remove != i):
d.append(float(it.text(1)))
a.append(float(it.text(2)))
self.obj.Distances = d
self.obj.Angles = a
self.obj.touch()
FreeCAD.ActiveDocument.recompute()
def reject(self):
FreeCAD.ActiveDocument.recompute()
FreeCADGui.ActiveDocument.resetEdit()
return True
def retranslateUi(self, TaskPanel):
TaskPanel.setWindowTitle(QtGui.QApplication.translate("Arch", "Axes", None, QtGui.QApplication.UnicodeUTF8))
self.delButton.setText(QtGui.QApplication.translate("Arch", "Remove", None, QtGui.QApplication.UnicodeUTF8))
self.addButton.setText(QtGui.QApplication.translate("Arch", "Add", None, QtGui.QApplication.UnicodeUTF8))
self.title.setText(QtGui.QApplication.translate("Arch", "Distances (mm) and angles (deg) between axes", None, QtGui.QApplication.UnicodeUTF8))
self.tree.setHeaderLabels([QtGui.QApplication.translate("Arch", "Axis", None, QtGui.QApplication.UnicodeUTF8),
QtGui.QApplication.translate("Arch", "Distance", None, QtGui.QApplication.UnicodeUTF8),
QtGui.QApplication.translate("Arch", "Angle", None, QtGui.QApplication.UnicodeUTF8)])
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Arch_Axis',_CommandAxis())
| lgpl-2.1 | 1,497,676,824,235,094,800 | 41.834802 | 150 | 0.504139 | false | 4.289149 | false | false | false | 0.010696 |
rhndg/openedx | common/djangoapps/embargo/migrations/0004_migrate_embargo_config.py | 102 | 8223 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Move the current course embargo configuration to the new models. """
for old_course in orm.EmbargoedCourse.objects.all():
new_course, __ = orm.RestrictedCourse.objects.get_or_create(course_key=old_course.course_id)
# Set the message keys to 'embargo'
new_course.enroll_msg_key = 'embargo'
new_course.access_msg_key = 'embargo'
new_course.save()
for country in self._embargoed_countries_list(orm):
country_model = orm.Country.objects.get(country=country)
orm.CountryAccessRule.objects.get_or_create(
country=country_model,
rule_type='blacklist',
restricted_course=new_course
)
def backwards(self, orm):
"""No backwards migration required since the forward migration is idempotent. """
pass
def _embargoed_countries_list(self, orm):
"""Retrieve the list of embargoed countries from the existing tables. """
# We need to replicate some application logic here, because South
# doesn't give us access to class methods on the Django model objects.
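        # e.g. an embargoed_countries value of "cu, ir, sy" becomes
        # ['CU', 'IR', 'SY'] (the country codes here are purely illustrative)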
try:
current_config = orm.EmbargoedState.objects.order_by('-change_date')[0]
if current_config.enabled and current_config.embargoed_countries:
return [
country.strip().upper() for country
in current_config.embargoed_countries.split(',')
]
except IndexError:
pass
return []
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo']
symmetrical = True
| agpl-3.0 | -392,376,308,329,836,300 | 62.744186 | 182 | 0.558069 | false | 3.880604 | false | false | false | 0.00681 |
rowemoore/odoo | addons/gamification/models/badge.py | 287 | 13760 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from datetime import date
import logging
_logger = logging.getLogger(__name__)
class gamification_badge_user(osv.Model):
"""User having received a badge"""
_name = 'gamification.badge.user'
_description = 'Gamification user badge'
_order = "create_date desc"
_rec_name = "badge_name"
_columns = {
'user_id': fields.many2one('res.users', string="User", required=True, ondelete="cascade"),
'sender_id': fields.many2one('res.users', string="Sender", help="The user who has send the badge"),
'badge_id': fields.many2one('gamification.badge', string='Badge', required=True, ondelete="cascade"),
'challenge_id': fields.many2one('gamification.challenge', string='Challenge originating', help="If this badge was rewarded through a challenge"),
'comment': fields.text('Comment'),
'badge_name': fields.related('badge_id', 'name', type="char", string="Badge Name"),
'create_date': fields.datetime('Created', readonly=True),
'create_uid': fields.many2one('res.users', string='Creator', readonly=True),
}
def _send_badge(self, cr, uid, ids, context=None):
"""Send a notification to a user for receiving a badge
        Does not verify constraints on badge granting.
The users are added to the owner_ids (create badge_user if needed)
The stats counters are incremented
:param ids: list(int) of badge users that will receive the badge
"""
res = True
temp_obj = self.pool.get('email.template')
user_obj = self.pool.get('res.users')
template_id = self.pool['ir.model.data'].get_object(cr, uid, 'gamification', 'email_template_badge_received', context)
for badge_user in self.browse(cr, uid, ids, context=context):
body_html = temp_obj.render_template(cr, uid, template_id.body_html, 'gamification.badge.user', badge_user.id, context=context)
res = user_obj.message_post(
cr, uid, badge_user.user_id.id,
body=body_html,
subtype='gamification.mt_badge_granted',
partner_ids=[badge_user.user_id.partner_id.id],
context=context)
return res
def create(self, cr, uid, vals, context=None):
self.pool.get('gamification.badge').check_granting(cr, uid, badge_id=vals.get('badge_id'), context=context)
return super(gamification_badge_user, self).create(cr, uid, vals, context=context)
class gamification_badge(osv.Model):
"""Badge object that users can send and receive"""
CAN_GRANT = 1
NOBODY_CAN_GRANT = 2
USER_NOT_VIP = 3
BADGE_REQUIRED = 4
TOO_MANY = 5
_name = 'gamification.badge'
_description = 'Gamification badge'
_inherit = ['mail.thread']
def _get_owners_info(self, cr, uid, ids, name, args, context=None):
"""Return:
the list of unique res.users ids having received this badge
            the total number of times this badge was granted
the total number of users this badge was granted to
"""
result = dict((res_id, {'stat_count': 0, 'stat_count_distinct': 0, 'unique_owner_ids': []}) for res_id in ids)
cr.execute("""
SELECT badge_id, count(user_id) as stat_count,
count(distinct(user_id)) as stat_count_distinct,
array_agg(distinct(user_id)) as unique_owner_ids
FROM gamification_badge_user
WHERE badge_id in %s
GROUP BY badge_id
""", (tuple(ids),))
for (badge_id, stat_count, stat_count_distinct, unique_owner_ids) in cr.fetchall():
result[badge_id] = {
'stat_count': stat_count,
'stat_count_distinct': stat_count_distinct,
'unique_owner_ids': unique_owner_ids,
}
return result
def _get_badge_user_stats(self, cr, uid, ids, name, args, context=None):
"""Return stats related to badge users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool.get('gamification.badge.user')
first_month_day = date.today().replace(day=1).strftime(DF)
for bid in ids:
result[bid] = {
'stat_my': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid)], context=context, count=True),
'stat_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True),
'stat_my_monthly_sending': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_uid', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True)
}
return result
def _remaining_sending_calc(self, cr, uid, ids, name, args, context=None):
"""Computes the number of badges remaining the user can send
0 if not allowed or no remaining
integer if limited sending
-1 if infinite (should not be displayed)
"""
result = dict.fromkeys(ids, False)
for badge in self.browse(cr, uid, ids, context=context):
if self._can_grant_badge(cr, uid, badge.id, context) != 1:
# if the user cannot grant this badge at all, result is 0
result[badge.id] = 0
elif not badge.rule_max:
# if there is no limitation, -1 is returned which means 'infinite'
result[badge.id] = -1
else:
result[badge.id] = badge.rule_max_number - badge.stat_my_monthly_sending
return result
_columns = {
'name': fields.char('Badge', required=True, translate=True),
'description': fields.text('Description'),
'image': fields.binary("Image", help="This field holds the image used for the badge, limited to 256x256"),
'rule_auth': fields.selection([
('everyone', 'Everyone'),
('users', 'A selected list of users'),
('having', 'People having some badges'),
('nobody', 'No one, assigned through challenges'),
],
string="Allowance to Grant",
help="Who can grant this badge",
required=True),
'rule_auth_user_ids': fields.many2many('res.users', 'rel_badge_auth_users',
string='Authorized Users',
help="Only these people can give this badge"),
'rule_auth_badge_ids': fields.many2many('gamification.badge',
'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
string='Required Badges',
help="Only the people having these badges can give this badge"),
'rule_max': fields.boolean('Monthly Limited Sending',
help="Check to set a monthly limit per person of sending this badge"),
'rule_max_number': fields.integer('Limitation Number',
help="The maximum number of time this badge can be sent per month per person."),
'stat_my_monthly_sending': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Sending Total',
multi='badge_users',
help="The number of time the current user has sent this badge this month."),
'remaining_sending': fields.function(_remaining_sending_calc, type='integer',
                                             string='Remaining Sending Allowed', help="If a maximum is set"),
'challenge_ids': fields.one2many('gamification.challenge', 'reward_id',
string="Reward of Challenges"),
'goal_definition_ids': fields.many2many('gamification.goal.definition', 'badge_unlocked_definition_rel',
string='Rewarded by',
help="The users that have succeeded theses goals will receive automatically the badge."),
'owner_ids': fields.one2many('gamification.badge.user', 'badge_id',
string='Owners', help='The list of instances of this badge granted to users'),
'active': fields.boolean('Active'),
'unique_owner_ids': fields.function(_get_owners_info,
string='Unique Owners',
help="The list of unique users having received this badge.",
multi='unique_users',
type="many2many", relation="res.users"),
'stat_count': fields.function(_get_owners_info, string='Total',
type="integer",
multi='unique_users',
help="The number of time this badge has been received."),
'stat_count_distinct': fields.function(_get_owners_info,
type="integer",
string='Number of users',
multi='unique_users',
help="The number of time this badge has been received by unique users."),
'stat_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='Monthly total',
multi='badge_users',
help="The number of time this badge has been received this month."),
'stat_my': fields.function(_get_badge_user_stats, string='My Total',
type="integer",
multi='badge_users',
help="The number of time the current user has received this badge."),
'stat_my_this_month': fields.function(_get_badge_user_stats,
type="integer",
string='My Monthly Total',
multi='badge_users',
help="The number of time the current user has received this badge this month."),
}
_defaults = {
'rule_auth': 'everyone',
'active': True,
}
def check_granting(self, cr, uid, badge_id, context=None):
"""Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
if not
Do not check for SUPERUSER_ID
"""
status_code = self._can_grant_badge(cr, uid, badge_id, context=context)
if status_code == self.CAN_GRANT:
return True
elif status_code == self.NOBODY_CAN_GRANT:
raise osv.except_osv(_('Warning!'), _('This badge can not be sent by users.'))
elif status_code == self.USER_NOT_VIP:
raise osv.except_osv(_('Warning!'), _('You are not in the user allowed list.'))
elif status_code == self.BADGE_REQUIRED:
raise osv.except_osv(_('Warning!'), _('You do not have the required badges.'))
elif status_code == self.TOO_MANY:
            raise osv.except_osv(_('Warning!'), _('You have already sent this badge too many times this month.'))
else:
_logger.exception("Unknown badge status code: %d" % int(status_code))
return False
def _can_grant_badge(self, cr, uid, badge_id, context=None):
"""Check if a user can grant a badge to another user
:param uid: the id of the res.users trying to send the badge
:param badge_id: the granted badge id
:return: integer representing the permission.
"""
if uid == SUPERUSER_ID:
return self.CAN_GRANT
badge = self.browse(cr, uid, badge_id, context=context)
if badge.rule_auth == 'nobody':
return self.NOBODY_CAN_GRANT
elif badge.rule_auth == 'users' and uid not in [user.id for user in badge.rule_auth_user_ids]:
return self.USER_NOT_VIP
elif badge.rule_auth == 'having':
all_user_badges = self.pool.get('gamification.badge.user').search(cr, uid, [('user_id', '=', uid)], context=context)
for required_badge in badge.rule_auth_badge_ids:
if required_badge.id not in all_user_badges:
return self.BADGE_REQUIRED
if badge.rule_max and badge.stat_my_monthly_sending >= badge.rule_max_number:
return self.TOO_MANY
# badge.rule_auth == 'everyone' -> no check
return self.CAN_GRANT
def check_progress(self, cr, uid, context=None):
try:
model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'badge_hidden')
except ValueError:
return True
badge_user_obj = self.pool.get('gamification.badge.user')
if not badge_user_obj.search(cr, uid, [('user_id', '=', uid), ('badge_id', '=', res_id)], context=context):
values = {
'user_id': uid,
'badge_id': res_id,
}
badge_user_obj.create(cr, SUPERUSER_ID, values, context=context)
return True
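# Illustrative sketch (not part of the original module): granting a badge by
# hand goes through gamification.badge.user, whose create() calls
# check_granting() on the related badge before the grant is recorded, e.g.:
#     badge_user_obj = self.pool.get('gamification.badge.user')
#     badge_user_obj.create(cr, uid, {'user_id': user_id, 'badge_id': badge_id,
#                                     'comment': 'Well done!'}, context=context)
# 'user_id' and 'badge_id' here are hypothetical variables holding the ids of
# existing res.users and gamification.badge records.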
| agpl-3.0 | -5,540,631,430,296,637,000 | 46.285223 | 194 | 0.599201 | false | 3.964275 | false | false | false | 0.005959 |
omermahgoub/MigTool | general.py | 1 | 4106 | # __author__ = 'OmerMahgoub'
# strmsg = {'EventId': u'e786786', 'PlanName': u'Premimum',
# 'PlanDetails': [{u'item':
# {u'itemid': 1, u'name': u'Storage', u'created': u'2015-07-02T00:00:00.000Z', u'modified': u'2015-07-02T00:00:00.000Z', u'meta_data': u'metadata',
# u'description': u'Storage for this plan"'}, u'id': 1, u'unit': {u'symbol': u'GB', u'id': 1, u'short_name': u'GB', u'name': u'GigaByte'}, u'quantity': u'4.0'},
# {u'item':
# {u'itemid': 1, u'name': u'RAM', u'created': u'2015-07-02T00:00:00.000Z', u'modified': u'2015-07-02T00:00:00.000Z', u'meta_data': u'metadata',
# u'description': u'Memory for this plan'}, u'id': 2, u'unit': {u'symbol': u'GB', u'id': 1, u'short_name': u'GB', u'name': u'GigaByte'}, u'quantity': u'1'},
# {u'item':
# {u'itemid': 1, u'name': u'vCPU', u'created': u'2015-07-02T00:00:00.000Z', u'modified': u'2015-07-02T00:00:00.000Z', u'meta_data': u'metadata',
# u'description': u'Virtual CPU for this plan'}, u'id': 2, u'unit': {u'symbol': u'', u'id': 1, u'short_name': u'', u'name': u''}, u'quantity': u'1.0'}],
# 'CustomerName': u'Mohammad Zubair Pasha'}
#
# # 1.0 vCPU
# # 1.0 GB RAM
# # 20.0 GB Storage
#
# # print strmsg['PlanDetails'][0]['item']['name']
# # print strmsg['PlanDetails'][0]['unit']['short_name']
# # print strmsg['PlanDetails'][0]['quantity']
# #
# # print strmsg['PlanDetails'][1]['item']['name']
# # print strmsg['PlanDetails'][1]['unit']['short_name']
# # print strmsg['PlanDetails'][1]['quantity']
# #
# # print strmsg['PlanDetails'][2]['item']['name']
# # print strmsg['PlanDetails'][2]['unit']['short_name']
# # print strmsg['PlanDetails'][2]['quantity']
#
# for plan_items in strmsg['PlanDetails']:
# plandets = str(plan_items['quantity']) + ' ' + str(plan_items['unit']['symbol']) + ' ' + str(plan_items['item']['name'])
# if str(plan_items['item']['name']) == "Storage":
# storageValue = plan_items['quantity']
# elif str(plan_items['item']['name']) == "vCPU":
# cpuValue = plan_items['quantity']
# elif str(plan_items['item']['name']) == "RAM":
# memoryValue = plan_items['quantity']
#
#
#
# print storageValue
# print cpuValue
# print memoryValue
#
# PlanData = {'Flavor': 'm1.tiny', 'Image': 'TestVM', 'Zone': 'nova-ruh-highend-zone',
# 'instances': '1', 'cores': cpuValue, 'ram': int(memoryValue) * 1024, 'floating_ips': '1'}
#
# message = {'Status': True, 'ItemDetails': PlanData}
#
# print message
#
#
# import daemon
# with daemon.DaemonContext:
#
#
# Quota ={'Status': True, 'QuotaDetails': <QuotaSet cores=4, fixed_ips=-1, floating_ips=100, injected_file_content_bytes=10240, injected_file_path_bytes=255, injected_files=5, instances=1, key_pairs=10, metadata_items=1024, ram=1024, security_group_rules=20, security_groups=10, server_group_members=10, server_groups=10>}
#
# print dir(Quota['QuotaDetails'])
# network = {'Status': True, 'QuotaDetails': {u'quota': {u'subnet': 1, u'network': 1, u'floatingip': 1, u'security_group_rule': 3, u'security_group': 1, u'router': 1, u'port': 3}}}
#
# print network['QuotaDetails']['quota']['subnet']
# print network['quota']['network']
# print network['quota']['floatingip']
# print network['quota']['security_group_rule']
# print network['quota']['security_group']
# print network['quota']['router']
# print network['quota']['port']
strmsg = {
u'service': {
u'metadata': {
},
u'id': 17,
u'name': u'InfrastructureasaService'
},
u'type': u'subscription.user.added',
u'created_at': u'2015-09-16T09: 41: 43.869930Z',
u'data': {
u'admin': False,
u'user': 85,
u'id': 562,
u'subscription': 230
},
u'id': u'23e3b29c-5c57-11e5-a398-fa163e6cad1a',
u'api_version': u'1.0.0'
}
print strmsg['type'] | gpl-3.0 | -993,545,973,489,705,200 | 43.142857 | 322 | 0.557964 | false | 2.759409 | false | false | false | 0.002923 |
varunagrawal/azure-services | varunagrawal/VarunWeb/env/Lib/site-packages/django/contrib/gis/db/backends/postgis/introspection.py | 109 | 4592 | from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.contrib.gis.gdal import OGRGeomType
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
ignored_tables = DatabaseIntrospection.ignored_tables + [
'geography_columns',
'geometry_columns',
'raster_columns',
'spatial_ref_sys',
'raster_overviews',
]
def get_postgis_types(self):
"""
Returns a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
cursor = self.connection.cursor()
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
try:
cursor.execute(oid_sql, ('geometry',))
GEOM_TYPE = cursor.fetchone()[0]
postgis_types = { GEOM_TYPE : 'GeometryField' }
if self.connection.ops.geography:
cursor.execute(oid_sql, ('geography',))
GEOG_TYPE = cursor.fetchone()[0]
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
postgis_types[GEOG_TYPE] = ('GeometryField', {'geography' : True})
finally:
cursor.close()
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
# intialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super(PostGISIntrospection, self).get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type.
"""
cursor = self.connection.cursor()
try:
try:
# First seeing if this geometry column is in the `geometry_columns`
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row: raise GeoIntrospectionError
except GeoIntrospectionError:
if self.connection.ops.geography:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
| gpl-2.0 | -4,871,090,667,621,374,000 | 43.582524 | 95 | 0.580793 | false | 4.661929 | false | false | false | 0.003267 |
sunlightlabs/django-meetup | meetup/models.py | 1 | 3123 | from django.conf import settings
from django.db import models
from meetup.api import MeetupClient
import datetime
STATUSES = [(s, s) for s in ('past','pending','upcoming')]
API_KEY = getattr(settings, 'MEETUP_KEY', None)
class Account(models.Model):
key = models.CharField(max_length=128)
description = models.CharField(max_length=128)
slug = models.SlugField()
container_id = models.CharField(max_length=16, blank=True)
meetup_url = models.URLField(verify_exists=False, blank=True)
sync = models.BooleanField(default=True)
def __unicode__(self):
return self.slug
def past_events(self):
return self.events.filter(status='past')
def upcoming_events(self):
return self.events.exclude(status='past')
class EventManager(models.Manager):
def past(self):
return Event.objects.filter(status='past')
def upcoming(self):
return Event.objects.exclude(status='past')
class Event(models.Model):
objects = EventManager()
account = models.ForeignKey(Account, related_name="events")
# Meetup.com fields
id = models.CharField(max_length=255, primary_key=True)
meetup_url = models.URLField(verify_exists=False)
title = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
start_time = models.DateTimeField(blank=True, null=True)
location = models.CharField(max_length=255, blank=True)
address = models.CharField(max_length=128, blank=True)
city = models.CharField(max_length=64, blank=True)
state = models.CharField(max_length=64, blank=True)
zipcode = models.CharField(max_length=10, blank=True)
latitude = models.CharField(max_length=16, blank=True)
longitude = models.CharField(max_length=16, blank=True)
url = models.URLField(verify_exists=False, max_length=255, blank=True)
rsvp_count = models.IntegerField(default=0)
timestamp = models.DateTimeField()
status = models.CharField(max_length=16, choices=STATUSES)
organizer_id = models.CharField(max_length=32, blank=True)
organizer_name = models.CharField(max_length=128, blank=True)
# user defined fields
# none for now, add tags later
class Meta:
ordering = ('start_time',)
def __unicode__(self):
return self.pk
def save(self, sync=True, **kwargs):
super(Event, self).save(**kwargs)
# if sync:
# api_client = MeetupClient(self.account.key)
# api_client.update_event(self.pk, udf_category=self.category)
def city_state(self):
if self.city:
if self.state:
return "%s, %s" % (self.city, self.state)
else:
return self.city
elif self.state:
return self.state
else:
return ''
def short_description(self, length=64):
if len(self.description) > length:
desc = self.description[:length]
if desc.endswith(' '):
desc = desc[:-1]
return desc + '...'
return self.description | bsd-3-clause | -4,450,050,897,407,959,600 | 33.711111 | 74 | 0.642011 | false | 3.780872 | false | false | false | 0.006084 |
ClearCorp-dev/odoo-clearcorp | TODO-9.0/budget/purchase.py | 2 | 20349 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class purchase_order(osv.osv):
_name = 'purchase.order'
_inherit = 'purchase.order'
STATE_SELECTION = [
('draft', 'Budget Request'),
('budget_approval', 'Waiting Approval'),
('budget_approved', 'Draft Bill'),
('sent', 'RFQ Sent'),
('published', 'Bill published'),
('review', 'Bid review'),
('deserted', 'Deserted'),
('awarded', 'Awarded'),
('ineffectual', 'Ineffectual'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('final_approval', 'Final Approval'),
('done', 'Done'),
        ('void', 'Annulled'),
('cancel', 'Cancelled')]
_columns = {
'reserved_amount' : fields.float('Reserved', digits=(12,3), readonly=True, ),
'budget_move_id': fields.many2one('budget.move', 'Budget move'),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user, the status switch to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status becomes in exception.", select=True),
'partner_id':fields.many2one('res.partner', 'Supplier', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
}
def onchange_plan(self, cr, uid, ids,plan_id, context=None):
return {'domain': {'program_id': [('plan_id','=',plan_id),], }}
def onchange_program(self, cr, uid, ids,program_id, context=None):
return {'domain': {'program_line_id': [('program_id','=',program_id),], }}
def action_invoice_create(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
obj_bud_line = self.pool.get('budget.move.line')
acc_inv_mov = self.pool.get('account.invoice')
obj_inv_line = self.pool.get('account.invoice.line')
res = False
for id in ids:
if context is None:
context = {}
invoice_id = super(purchase_order, self).action_invoice_create(cr, uid, ids, context=context)
acc_inv_mov.write(cr, uid, [invoice_id],{'from_order': True})
for purchase in self.browse(cr, uid, [id],context=context):
move_id = purchase.budget_move_id.id
for po_line in purchase.order_line:
asoc_bud_line_id = obj_bud_line.search(cr, uid, [('po_line_id','=',po_line.id), ])[0]
if po_line.invoice_lines:
inv_line = po_line.invoice_lines[0]
obj_bud_line.write(cr, uid, [asoc_bud_line_id],{'inv_line_id': inv_line.id}, context=context)
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_execute', context=context)
return res
def action_mark_budget_approval(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'budget_approval'})
return True
def action_approve_budget(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
reserved_amount = purchase.amount_total
if reserved_amount != 0.0:
move_id = purchase.budget_move_id.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_reserve', context=context)
self.write(cr, uid, [purchase.id], {'state': 'budget_approved', 'reserved_amount': reserved_amount})
else:
raise osv.except_osv(_('Error!'), _('You cannot approve an order with amount zero '))
return True
def action_publish(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'published'})
return True
def action_review(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'review'})
return True
def action_award(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
if not purchase.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
move_id = purchase.budget_move_id.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_compromise', context=context)
self.write(cr, uid, ids, {'state': 'awarded'})
return True
def action_desert(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
move_id = purchase.budget_move_id.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_cancel', context=context)
self.write(cr, uid, ids, {'state': 'deserted'})
return True
def action_ineffectual(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
move_id = purchase.budget_move_id.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_cancel', context=context)
self.write(cr, uid, ids, {'state': 'ineffectual'})
return True
def action_final_approval(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'final_approval'})
return True
def action_void(self, cr, uid, ids, context=None):
        obj_bud_mov = self.pool.get('budget.move')
        amld_obj = self.pool.get('account.move.line.distribution')
self.write(cr, uid, ids, {'state': 'void'})
for purchase in self.browse(cr, uid, ids, context=context):
bud_move = purchase.budget_move_id
if bud_move:
for bud_line in bud_move.move_lines:
amld_obj.create(cr, uid, {'distribution_percentage':100.0, 'distribution_amount':bud_line.compromised, 'target_budget_move_line_id':bud_line.id, 'account_move_line_type': 'void'})
                obj_bud_mov.signal_workflow(cr, uid, [bud_move.id], 'button_execute', context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
bud_move = purchase.budget_move_id
move_id = bud_move.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_cancel', context=context)
super(purchase_order, self).action_cancel(cr, uid, ids, context=context)
def action_draft(self, cr, uid, ids, context=None):
obj_bud_mov = self.pool.get('budget.move')
for purchase in self.browse(cr, uid, ids, context=context):
bud_move = purchase.budget_move_id
if bud_move:
move_id = purchase.budget_move_id.id
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_draft', context=context)
self.write(cr, uid, ids, {'state': 'draft'})
def create_budget_move(self,cr, uid, vals, context=None):
bud_move_obj = self.pool.get('budget.move')
move_id = bud_move_obj.create(cr, uid, { 'type':'invoice_in' ,}, context=context)
return move_id
def create(self, cr, uid, vals, context=None):
obj_bud_move = self.pool.get('budget.move')
move_id = self.create_budget_move(cr, uid, vals, context=context)
vals['budget_move_id'] = move_id
order_id = super(purchase_order, self).create(cr, uid, vals, context=context)
for order in self.browse(cr,uid,[order_id], context=context):
obj_bud_move.write(cr, uid, [move_id], {'origin': order.name,}, context=context)
return order_id
def write(self, cr, uid, ids, vals, context=None):
bud_move_obj = self.pool.get('budget.move')
result = super(purchase_order, self).write(cr, uid, ids, vals, context=context)
for order in self.browse(cr, uid, ids, context=context):
move_id = order.budget_move_id.id
bud_move_obj.write(cr,uid, [move_id], {'date':order.budget_move_id.date},context=context)
return result
class purchase_order_line(osv.osv):
_name = 'purchase.order.line'
_inherit = 'purchase.order.line'
def _subtotal_discounted_taxed(self, cr, uid, ids, field_name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
if line.discount > 0:
price_unit_discount = line.price_unit - (line.price_unit * (line.discount / 100) )
else:
price_unit_discount = line.price_unit
#-----taxes---------------#
#taxes must be calculated with unit_price - discount
amount_discounted_taxed = self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, price_unit_discount, line.product_qty, line.product_id.id, line.order_id.partner_id)['total_included']
res[line.id]= amount_discounted_taxed
return res
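    # Illustrative sketch (not part of the original module): with a line of
    # price_unit=100.0, discount=10.0 and product_qty=2, the helper above taxes
    # a discounted unit price of 90.0, so 'subtotal_discounted_taxed' holds the
    # total_included returned by compute_all(taxes, 90.0, 2, ...). The figures
    # are hypothetical, chosen only to show the discount-before-tax order of
    # operations.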
def on_change_program_line(self, cr, uid, ids, program_line, context=None):
if program_line:
for line in self.pool.get('budget.program.line').browse(cr, uid,[program_line], context=context):
return {'value': {'line_available':line.available_budget},}
return {'value': {}}
def _check_no_taxes(self, cr, uid, ids, context=None):
for line in self.browse(cr,uid,ids,context=context):
product = line.product_id
if product.supplier_taxes_id:
return False
if product.property_account_expense and product.property_account_expense.tax_ids:
return False
elif product.categ_id.property_account_expense_categ and product.categ_id.property_account_expense_categ.tax_ids:
return False
return True
def _check_available(self, cr, uid, ids, field_name, args, context=None):
bud_line_obj = self.pool.get('budget.move.line')
result ={}
if ids:
for po_line_id in ids:
bud_line_ids = bud_line_obj.search(cr, uid, [('po_line_id','=', po_line_id)], context=context)
for bud_line in bud_line_obj.browse(cr, uid,bud_line_ids, context=context):
result[po_line_id] = bud_line.program_line_id.available_budget
return result
_columns = {
'program_line_id': fields.many2one('budget.program.line', 'Program line', required=True),
'line_available': fields.function(_check_available, type='float', method=True, string='Line available',readonly=True),
'subtotal_discounted_taxed': fields.function(_subtotal_discounted_taxed, digits_compute= dp.get_precision('Account'), string='Subtotal', ),
}
_constraints=[
(_check_no_taxes, 'Error!\n There is a tax defined for this product, its account or the account of the product category. \n The tax must be included in the price of the product.', []),
]
#*************************************************************************************
# Methods used to create budget move lines for the tax amount of each purchase order line
#*************************************************************************************
# def create_bud_tax_line(self, cr, uid, line_id,vals=None, context=None):
# bud_line_obj = self.pool.get('budget.move.line')
# tax_obj = self.pool.get('account.tax')
# order_line = self.browse(cr, uid, [line_id], context=context)[0]
# move_id = order_line.order_id.budget_move_id.id
# program_line_id = vals['program_line_id'] if vals else order_line.program_line_id.id
# for tax in tax_obj.compute_all(cr, uid, order_line.taxes_id, order_line.price_unit, order_line.product_qty, order_line.product_id, order_line.partner_id)['taxes']:
# bud_line_obj.create(cr, uid, {'budget_move_id': move_id,
# 'origin' : 'Taxes of: ' + order_line.name,
# 'program_line_id': program_line_id,
# 'fixed_amount': tax.get('amount', 0.0),
# 'po_line_id': order_line.id,
# }, context=context)
# def write(self, cr, uid, ids, vals, context=None):
# bud_line_obj = self.pool.get('budget.move.line')
# bud_move_obj = self.pool.get('budget.move')
# result = False
# for line in self.browse(cr, uid, ids, context=context):
# search_result = bud_line_obj.search(cr, uid,[('po_line_id','=', line.id)], context=context)
# bud_lines = bud_line_obj.browse(cr, uid, search_result, context=context)
# #deleting tax lines
# for bud_line in bud_lines:
# if bud_line.fixed_amount != line.price_subtotal:
# bud_line_obj.unlink(cr, uid, [bud_line.id], context=context)
# #processing PO lines and re-creating taxes
# for bud_line in bud_lines:
# if bud_line.fixed_amount == line.price_subtotal:
# result = super(purchase_order_line, self).write(cr, uid, [line.id], vals, context=context)
# updated_fields = self.read(cr, uid,[line.id], ['program_line_id', 'price_subtotal'], context=context)[0]
# bud_line_obj.write(cr, uid, [bud_line.id], {'program_line_id': updated_fields['program_line_id'][0], 'fixed_amount':updated_fields['price_subtotal']})
# self.create_bud_tax_line(cr, uid, line.id, context=None)
# return result
def create_budget_move_line(self,cr, uid, vals, line_id, context=None):
purch_order_obj = self.pool.get('purchase.order')
purch_line_obj = self.pool.get('purchase.order.line')
bud_move_obj = self.pool.get('budget.move')
bud_line_obj = self.pool.get('budget.move.line')
po_id = vals['order_id']
order = purch_order_obj.browse(cr, uid, [po_id], context=context)[0]
order_line = purch_line_obj.browse(cr, uid, [line_id], context=context)[0]
move_id = order.budget_move_id.id
new_line_id=bud_line_obj.create(cr, uid, {'budget_move_id': move_id,
'origin' : order_line.name,
'program_line_id': vals['program_line_id'],
'fixed_amount': order_line.price_subtotal,
'po_line_id': line_id,
}, context=context)
bud_move_obj.recalculate_values(cr, uid, [move_id], context=context)
return new_line_id
def check_budget_from_po_line(self, cr, uid, po_line_ids, context=None):
bud_move_obj = self.pool.get('budget.move')
for order_line in self.browse(cr, uid, po_line_ids, context=context):
result = bud_move_obj._check_values(cr, uid, [order_line.order_id.budget_move_id.id], context)
if result[0]:
return True
else:
raise osv.except_osv(_('Error!'), result[1])
return True
def create(self, cr, uid, vals, context=None):
line_id = super(purchase_order_line, self).create(cr, uid, vals, context=context)
bud_line_id = self.create_budget_move_line(cr, uid, vals, line_id, context=context)
self.check_budget_from_po_line(cr, uid, [line_id], context)
return line_id
def write(self, cr, uid, ids, vals, context=None):
bud_line_obj = self.pool.get('budget.move.line')
bud_move_obj = self.pool.get('budget.move')
moves_to_update = []
for line in self.browse(cr, uid, ids, context=context):
search_result = bud_line_obj.search(cr, uid,[('po_line_id','=', line.id)], context=context)
bud_lines = bud_line_obj.browse(cr, uid, search_result, context=context)
for bud_line in bud_lines:
if bud_line.fixed_amount == line.price_subtotal:
if 'price_subtotal' in vals:
                        bud_line_obj.write(cr, uid, [bud_line.id], {'fixed_amount': vals['price_subtotal']})
moves_to_update.append(bud_line.budget_move_id.id)
if 'program_line_id' in vals:
bud_line_obj.write(cr, uid, [bud_line.id], {'program_line_id': vals['program_line_id']})
if bud_line.budget_move_id.id not in moves_to_update:
moves_to_update.append(bud_line.budget_move_id.id)
bud_move_obj.recalculate_values(cr, uid, moves_to_update, context=context)
self.check_budget_from_po_line(cr, uid, ids, context)
return super(purchase_order_line, self).write(cr, uid, ids, vals, context=context)
class purchase_line_invoice(osv.osv_memory):
""" To create invoice for purchase order line"""
_inherit = 'purchase.order.line_invoice'
def makeInvoices(self, cr, uid, ids, context=None):
result = super(purchase_line_invoice, self).makeInvoices(cr, uid, ids, context=context)
record_ids = context.get('active_ids',[])
if record_ids:
obj_bud_mov = self.pool.get('budget.move')
obj_bud_line = self.pool.get('budget.move.line')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_obj = self.pool.get('account.invoice')
purchase_obj = self.pool.get('purchase.order')
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
for po_line in purchase_line_obj.browse(cr, uid, record_ids, context=context):
asoc_bud_line_id = obj_bud_line.search(cr, uid, [('po_line_id','=',po_line.id), ])[0]
if po_line.invoice_lines:
inv_line = po_line.invoice_lines[0]
invoice_line_obj.write(cr, uid, inv_line.id, {'program_line_id': po_line.program_line_id.id}, context=context)
obj_bud_line.write(cr, uid, [asoc_bud_line_id],{'inv_line_id': inv_line.id}, context=context)
move_id = po_line.order_id.budget_move_id.id
invoice_obj.write(cr, uid, [inv_line.invoice_id.id], {'budget_move_id': move_id, 'from_order':True}, context=context)
obj_bud_mov.signal_workflow(cr, uid, [move_id], 'button_execute', context=context)
return result
| agpl-3.0 | -3,191,216,334,419,582,500 | 53.557641 | 545 | 0.582092 | false | 3.55007 | false | false | false | 0.010467 |
hayd/contracts | docs/sphinxtogithub.py | 2 | 10462 | #! /usr/bin/env python
from optparse import OptionParser
import os
import sys
import shutil
class NoDirectoriesError(Exception):
"Error thrown when no directories starting with an underscore are found"
class DirHelper(object):
def __init__(self, is_dir, list_dir, walk, rmtree):
self.is_dir = is_dir
self.list_dir = list_dir
self.walk = walk
self.rmtree = rmtree
class FileSystemHelper(object):
def __init__(self, open_, path_join, move, exists):
self.open_ = open_
self.path_join = path_join
self.move = move
self.exists = exists
class Replacer(object):
"Encapsulates a simple text replace"
def __init__(self, from_, to):
self.from_ = from_
self.to = to
def process(self, text):
print 'from: %r' % self.from_
print 'to: %r' % self.to
return text.replace( self.from_, self.to )
class FileHandler(object):
"Applies a series of replacements the contents of a file inplace"
def __init__(self, name, replacers, opener):
self.name = name
self.replacers = replacers
self.opener = opener
def process(self):
text = self.opener(self.name).read()
for replacer in self.replacers:
text = replacer.process( text )
self.opener(self.name, "w").write(text)
class Remover(object):
def __init__(self, exists, remove):
self.exists = exists
self.remove = remove
def __call__(self, name):
if self.exists(name):
self.remove(name)
class ForceRename(object):
def __init__(self, renamer, remove):
self.renamer = renamer
self.remove = remove
def __call__(self, from_, to):
self.remove(to)
self.renamer(from_, to)
class VerboseRename(object):
def __init__(self, renamer, stream):
self.renamer = renamer
self.stream = stream
def __call__(self, from_, to):
self.stream.write(
"Renaming directory '%s' -> '%s'\n"
% (os.path.basename(from_), os.path.basename(to))
)
self.renamer(from_, to)
class DirectoryHandler(object):
"Encapsulates renaming a directory by removing its first character"
def __init__(self, name, root, renamer):
self.name = name
self.new_name = name[1:]
self.root = root + os.sep
self.renamer = renamer
def path(self):
return os.path.join(self.root, self.name)
def relative_path(self, directory, filename):
path = directory.replace(self.root, "", 1)
return os.path.join(path, filename)
def new_relative_path(self, directory, filename):
path = self.relative_path(directory, filename)
return path.replace(self.name, self.new_name, 1)
def process(self):
from_ = os.path.join(self.root, self.name)
to = os.path.join(self.root, self.new_name)
self.renamer(from_, to)
class HandlerFactory(object):
def create_file_handler(self, name, replacers, opener):
return FileHandler(name, replacers, opener)
def create_dir_handler(self, name, root, renamer):
return DirectoryHandler(name, root, renamer)
class OperationsFactory(object):
def create_force_rename(self, renamer, remover):
return ForceRename(renamer, remover)
def create_verbose_rename(self, renamer, stream):
return VerboseRename(renamer, stream)
def create_replacer(self, from_, to):
return Replacer(from_, to)
def create_remover(self, exists, remove):
return Remover(exists, remove)
class Layout(object):
"""
Applies a set of operations which result in the layout
of a directory changing
"""
def __init__(self, directory_handlers, file_handlers):
self.directory_handlers = directory_handlers
self.file_handlers = file_handlers
def process(self):
for handler in self.file_handlers:
handler.process()
for handler in self.directory_handlers:
handler.process()
class LayoutFactory(object):
"Creates a layout object"
def __init__(self, operations_factory, handler_factory, file_helper, dir_helper, verbose, stream, force):
self.operations_factory = operations_factory
self.handler_factory = handler_factory
self.file_helper = file_helper
self.dir_helper = dir_helper
self.verbose = verbose
self.output_stream = stream
self.force = force
def create_layout(self, path):
path = str(path)
contents = self.dir_helper.list_dir(path)
renamer = self.file_helper.move
if self.force:
remove = self.operations_factory.create_remover(self.file_helper.exists, self.dir_helper.rmtree)
renamer = self.operations_factory.create_force_rename(renamer, remove)
if self.verbose:
renamer = self.operations_factory.create_verbose_rename(renamer, self.output_stream)
# Build list of directories to process
directories = [d for d in contents if self.is_underscore_dir(path, d)]
underscore_directories = [
self.handler_factory.create_dir_handler(d, path, renamer)
for d in directories
]
if not underscore_directories:
raise NoDirectoriesError()
# Build list of files that are in those directories
replacers = []
for handler in underscore_directories:
for directory, dirs, files in self.dir_helper.walk(handler.path()):
for f in files:
replacers.append(
self.operations_factory.create_replacer(
handler.relative_path(directory, f),
handler.new_relative_path(directory, f)
)
)
# Build list of handlers to process all files
filelist = []
for root, dirs, files in self.dir_helper.walk(path):
for f in files:
if f.endswith(".html"):
filelist.append(
self.handler_factory.create_file_handler(
self.file_helper.path_join(root, f),
replacers,
self.file_helper.open_)
)
if f.endswith(".js"):
filelist.append(
self.handler_factory.create_file_handler(
self.file_helper.path_join(root, f),
[self.operations_factory.create_replacer("'_sources/'", "'sources/'")],
self.file_helper.open_
)
)
return Layout(underscore_directories, filelist)
def is_underscore_dir(self, path, directory):
return (self.dir_helper.is_dir(self.file_helper.path_join(path, directory))
and directory.startswith("_"))
def sphinx_extension(app, exception):
"Wrapped up as a Sphinx Extension"
# This code is sadly untestable in its current state
# It would be helped if there was some function for loading extension
# specific data on to the app object and the app object providing
# a file-like object for writing to standard out.
# The former is doable, but not officially supported (as far as I know)
# so I wouldn't know where to stash the data.
if not app.builder.name in ("html", "dirhtml"):
return
if not app.config.sphinx_to_github:
if app.config.sphinx_to_github_verbose:
print "Sphinx-to-github: Disabled, doing nothing."
return
if exception:
if app.config.sphinx_to_github_verbose:
print "Sphinx-to-github: Exception raised in main build, doing nothing."
return
dir_helper = DirHelper(
os.path.isdir,
os.listdir,
os.walk,
shutil.rmtree
)
file_helper = FileSystemHelper(
open,
os.path.join,
shutil.move,
os.path.exists
)
operations_factory = OperationsFactory()
handler_factory = HandlerFactory()
layout_factory = LayoutFactory(
operations_factory,
handler_factory,
file_helper,
dir_helper,
app.config.sphinx_to_github_verbose,
sys.stdout,
force=True
)
layout = layout_factory.create_layout(app.outdir)
layout.process()
def setup(app):
"Setup function for Sphinx Extension"
app.add_config_value("sphinx_to_github", True, '')
app.add_config_value("sphinx_to_github_verbose", True, '')
app.connect("build-finished", sphinx_extension)
def main(args):
usage = "usage: %prog [options] <html directory>"
parser = OptionParser(usage=usage)
parser.add_option("-v","--verbose", action="store_true",
dest="verbose", default=False, help="Provides verbose output")
opts, args = parser.parse_args(args)
try:
path = args[0]
except IndexError:
sys.stderr.write(
"Error - Expecting path to html directory:"
"sphinx-to-github <path>\n"
)
return
dir_helper = DirHelper(
os.path.isdir,
os.listdir,
os.walk,
shutil.rmtree
)
file_helper = FileSystemHelper(
open,
os.path.join,
shutil.move,
os.path.exists
)
operations_factory = OperationsFactory()
handler_factory = HandlerFactory()
layout_factory = LayoutFactory(
operations_factory,
handler_factory,
file_helper,
dir_helper,
opts.verbose,
sys.stdout,
force=False
)
try:
layout = layout_factory.create_layout(path)
except NoDirectoriesError:
sys.stderr.write(
"Error - No top level directories starting with an underscore "
"were found in '%s'\n" % path
)
return
layout.process()
if __name__ == "__main__":
main(sys.argv[1:])
| lgpl-3.0 | 4,139,026,293,592,866,000 | 26.103627 | 109 | 0.572166 | false | 4.181455 | true | false | false | 0.003441 |
tik0/inkscapeGrid | share/extensions/generate_voronoi.py | 6 | 8433 | #!/usr/bin/env python
"""
Copyright (C) 2010 Alvin Penner, penner@vaxxine.com
- Voronoi Diagram algorithm and C code by Steven Fortune, 1987, http://ect.bell-labs.com/who/sjf/
- Python translation to file voronoi.py by Bill Simons, 2005, http://www.oxfish.com/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# standard library
import random
# local library
import inkex
import simplestyle
import voronoi
inkex.localize()
try:
from subprocess import Popen, PIPE
except:
inkex.errormsg(_("Failed to import the subprocess module. Please report this as a bug at: https://bugs.launchpad.net/inkscape."))
inkex.errormsg(_("Python version is: ") + str(inkex.sys.version_info))
exit()
def clip_line(x1, y1, x2, y2, w, h):
if x1 < 0 and x2 < 0:
return [0, 0, 0, 0]
if x1 > w and x2 > w:
return [0, 0, 0, 0]
if x1 < 0:
y1 = (y1*x2 - y2*x1)/(x2 - x1)
x1 = 0
if x2 < 0:
y2 = (y1*x2 - y2*x1)/(x2 - x1)
x2 = 0
if x1 > w:
y1 = y1 + (w - x1)*(y2 - y1)/(x2 - x1)
x1 = w
if x2 > w:
y2 = y1 + (w - x1)*(y2 - y1)/(x2 - x1)
x2 = w
if y1 < 0 and y2 < 0:
return [0, 0, 0, 0]
if y1 > h and y2 > h:
return [0, 0, 0, 0]
if x1 == x2 and y1 == y2:
return [0, 0, 0, 0]
if y1 < 0:
x1 = (x1*y2 - x2*y1)/(y2 - y1)
y1 = 0
if y2 < 0:
x2 = (x1*y2 - x2*y1)/(y2 - y1)
y2 = 0
if y1 > h:
x1 = x1 + (h - y1)*(x2 - x1)/(y2 - y1)
y1 = h
if y2 > h:
x2 = x1 + (h - y1)*(x2 - x1)/(y2 - y1)
y2 = h
return [x1, y1, x2, y2]
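# Hypothetical sanity check (not part of the original extension): clipping the
# horizontal segment (-5, 10) -> (50, 10) against a 40 x 30 canvas trims both
# ends, giving [0, 10, 40, 10]; a segment lying entirely outside the canvas
# collapses to [0, 0, 0, 0], which the caller treats as "nothing to draw".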
class Pattern(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("--size",
action="store", type="int",
dest="size", default=10,
help="Average size of cell (px)")
self.OptionParser.add_option("--border",
action="store", type="int",
dest="border", default=0,
help="Size of Border (px)")
self.OptionParser.add_option("--tab",
action="store", type="string",
dest="tab",
help="The selected UI-tab when OK was pressed")
def effect(self):
if not self.options.ids:
inkex.errormsg(_("Please select an object"))
exit()
q = {'x':0,'y':0,'width':0,'height':0} # query the bounding box of ids[0]
for query in q.keys():
p = Popen('inkscape --query-%s --query-id=%s "%s"' % (query, self.options.ids[0], self.args[-1]), shell=True, stdout=PIPE, stderr=PIPE)
rc = p.wait()
q[query] = float(p.stdout.read())
defs = self.xpathSingle('/svg:svg//svg:defs')
pattern = inkex.etree.SubElement(defs ,inkex.addNS('pattern','svg'))
pattern.set('id', 'Voronoi' + str(random.randint(1, 9999)))
pattern.set('width', str(q['width']))
pattern.set('height', str(q['height']))
pattern.set('patternTransform', 'translate(%s,%s)' % (q['x'], q['y']))
pattern.set('patternUnits', 'userSpaceOnUse')
# generate random pattern of points
c = voronoi.Context()
pts = []
b = float(self.options.border) # width of border
for i in range(int(q['width']*q['height']/self.options.size/self.options.size)):
x = random.random()*q['width']
y = random.random()*q['height']
if b > 0: # duplicate border area
pts.append(voronoi.Site(x, y))
if x < b:
pts.append(voronoi.Site(x + q['width'], y))
if y < b:
pts.append(voronoi.Site(x + q['width'], y + q['height']))
if y > q['height'] - b:
pts.append(voronoi.Site(x + q['width'], y - q['height']))
if x > q['width'] - b:
pts.append(voronoi.Site(x - q['width'], y))
if y < b:
pts.append(voronoi.Site(x - q['width'], y + q['height']))
if y > q['height'] - b:
pts.append(voronoi.Site(x - q['width'], y - q['height']))
if y < b:
pts.append(voronoi.Site(x, y + q['height']))
if y > q['height'] - b:
pts.append(voronoi.Site(x, y - q['height']))
elif x > -b and y > -b and x < q['width'] + b and y < q['height'] + b:
pts.append(voronoi.Site(x, y)) # leave border area blank
# dot = inkex.etree.SubElement(pattern, inkex.addNS('rect','svg'))
# dot.set('x', str(x-1))
# dot.set('y', str(y-1))
# dot.set('width', '2')
# dot.set('height', '2')
if len(pts) < 3:
inkex.errormsg("Please choose a larger object, or smaller cell size")
exit()
# plot Voronoi diagram
sl = voronoi.SiteList(pts)
voronoi.voronoi(sl, c)
path = ""
for edge in c.edges:
if edge[1] >= 0 and edge[2] >= 0: # two vertices
[x1, y1, x2, y2] = clip_line(c.vertices[edge[1]][0], c.vertices[edge[1]][1], c.vertices[edge[2]][0], c.vertices[edge[2]][1], q['width'], q['height'])
elif edge[1] >= 0: # only one vertex
if c.lines[edge[0]][1] == 0: # vertical line
xtemp = c.lines[edge[0]][2]/c.lines[edge[0]][0]
if c.vertices[edge[1]][1] > q['height']/2:
ytemp = q['height']
else:
ytemp = 0
else:
xtemp = q['width']
ytemp = (c.lines[edge[0]][2] - q['width']*c.lines[edge[0]][0])/c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(c.vertices[edge[1]][0], c.vertices[edge[1]][1], xtemp, ytemp, q['width'], q['height'])
elif edge[2] >= 0: # only one vertex
if c.lines[edge[0]][1] == 0: # vertical line
xtemp = c.lines[edge[0]][2]/c.lines[edge[0]][0]
if c.vertices[edge[2]][1] > q['height']/2:
ytemp = q['height']
else:
ytemp = 0
else:
xtemp = 0
ytemp = c.lines[edge[0]][2]/c.lines[edge[0]][1]
[x1, y1, x2, y2] = clip_line(xtemp, ytemp, c.vertices[edge[2]][0], c.vertices[edge[2]][1], q['width'], q['height'])
if x1 or x2 or y1 or y2:
path += 'M %.3f,%.3f %.3f,%.3f ' % (x1, y1, x2, y2)
attribs = {'d': path, 'style': 'stroke:#000000'}
inkex.etree.SubElement(pattern, inkex.addNS('path', 'svg'), attribs)
# link selected object to pattern
obj = self.selected[self.options.ids[0]]
style = {}
if obj.attrib.has_key('style'):
style = simplestyle.parseStyle(obj.attrib['style'])
style['fill'] = 'url(#%s)' % pattern.get('id')
obj.attrib['style'] = simplestyle.formatStyle(style)
if obj.tag == inkex.addNS('g', 'svg'):
for node in obj:
style = {}
if node.attrib.has_key('style'):
style = simplestyle.parseStyle(node.attrib['style'])
style['fill'] = 'url(#%s)' % pattern.get('id')
node.attrib['style'] = simplestyle.formatStyle(style)
if __name__ == '__main__':
e = Pattern()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-2.0 | 7,373,992,817,833,651,000 | 41.376884 | 165 | 0.495553 | false | 3.304467 | false | false | false | 0.005218 |
hobbyjobs/photivo | scons-local-2.2.0/SCons/Variables/PackageVariable.py | 14 | 3646 | """engine.SCons.Variables.PackageVariable
This file defines the option type for SCons implementing 'package
activation'.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Usage example:
Examples:
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existance)
To replace autoconf's --with-xxx=yyy
opts = Variables()
opts.Add(PackageVariable('x11',
'use X11 installed here (yes = search some places',
'yes'))
...
if env['x11'] == True:
dir = ... search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... build with x11 ...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PackageVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__all__ = ['PackageVariable',]
import SCons.Errors
__enable_strings = ('1', 'yes', 'true', 'on', 'enable', 'search')
__disable_strings = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
"""
"""
lval = val.lower()
if lval in __enable_strings: return True
if lval in __disable_strings: return False
#raise ValueError("Invalid value for boolean option: %s" % val)
return val
def _validator(key, val, env, searchfunc):
    # NB: searchfunc is currently undocumented and unsupported
"""
"""
# todo: write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'package list' option, thus they
are returned with the correct converter and validator appended. The
    result is usable for input to opts.Add().
    A 'package list' option may either be 'all', 'none' or a list of
    package names (separated by space).
"""
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k,v,e,searchfunc),
_converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | 3,195,789,878,510,134,000 | 33.396226 | 117 | 0.673066 | false | 3.758763 | false | false | false | 0.003566 |
Mistobaan/tensorflow | tensorflow/contrib/learn/python/learn/utils/export.py | 15 | 13683 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.python.training import training_util
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.local_variables_initializer()
lookup_ops.tables_initializer()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(
init_op=control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
return export.export(export_dir, training_util.get_global_step(),
session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behavior of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or dict of tensors that contains the classes tensor
as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions['classes'])
else:
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
examples, unused_features, predictions):
"""Classification signature from given examples and predicted probabilities.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of predicted probabilities or dict that contains the
probabilities tensor as in {'probabilities', `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions['probabilities'])
else:
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
{'probabilities', `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
predictions_tensor = predictions['probabilities']
else:
predictions_tensor = predictions
# predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
# while second column is P(Y=1|x). We are only interested in the second
# column for inference.
predictions_shape = predictions_tensor.get_shape()
predictions_rank = len(predictions_shape)
if predictions_rank != 2:
logging.fatal(
'Expected predictions to have rank 2, but received predictions with '
'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
if predictions_shape[1] != 2:
logging.fatal(
'Expected predictions to have 2nd dimension: 2, but received '
'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
'regression_signature_fn or classification_signature_fn_with_prob '
'instead?'.format(predictions_shape[1], predictions_shape))
positive_predictions = predictions_tensor[:, 1]
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=positive_predictions)
return default_signature, {}
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Deprecated, please use Estimator.export_savedmodel()."""
_export_estimator(estimator=estimator,
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
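# Illustrative sketch (assumption, not part of the original module): a custom
# `signature_fn` passed to `export_estimator` takes (examples, features,
# predictions) and returns (default_signature, named_graph_signatures), e.g.:
#
#   def my_signature_fn(examples, unused_features, predictions):
#     default_signature = exporter.regression_signature(
#         input_tensor=examples, output_tensor=predictions)
#     return default_signature, {}
#
#   export_estimator(estimator, export_dir, signature_fn=my_signature_fn)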
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
export_dir,
signature_fn,
input_fn,
default_batch_size,
exports_to_keep,
input_feature_key=None,
use_deprecated_input_fn=True,
prediction_key=None,
checkpoint_path=None):
if use_deprecated_input_fn:
input_fn = input_fn or _default_input_fn
elif input_fn is None:
raise ValueError('input_fn must be defined.')
# If checkpoint_path is specified, use the specified checkpoint path.
checkpoint_path = (checkpoint_path or
tf_saver.latest_checkpoint(estimator._model_dir))
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
if use_deprecated_input_fn:
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
else:
features, _ = input_fn()
examples = None
if input_feature_key is not None:
examples = features.pop(input_feature_key)
if (not features) and (examples is None):
raise ValueError('Either features or examples must be defined.')
predictions = estimator._get_predict_ops(features).predictions
if prediction_key is not None:
predictions = predictions[prediction_key]
# Explicit signature_fn takes priority
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
try:
# Some estimators provide a signature function.
# TODO(zakaria): check if the estimator has this function,
# raise helpful error if not
signature_fn = estimator._create_signature_fn()
default_signature, named_graph_signatures = (
signature_fn(examples, features, predictions))
except AttributeError:
logging.warn(
'Change warning: `signature_fn` will be required after'
'2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; '
'see cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
return _export_graph(
g,
_get_saver(),
checkpoint_path,
export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
| apache-2.0 | -3,850,988,037,189,057,000 | 37.652542 | 80 | 0.674121 | false | 4.425291 | false | false | false | 0.005774 |
bjolivot/ansible | lib/ansible/modules/cloud/amazon/efs.py | 12 | 21120 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs
short_description: create and maintain EFS file systems
description:
- Module allows create, search and destroy Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
- "Artem Kazakov (@akazakov)"
options:
state:
description:
- Allows to create, search and destroy Amazon EFS file system
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
required: false
default: None
id:
description:
- ID of Amazon EFS. Either name or ID required for delete.
required: false
default: None
performance_mode:
description:
- File system's performance mode to use. Only takes effect during creation.
required: false
default: 'general_purpose'
choices: ['general_purpose', 'max_io']
tags:
description:
- "List of tags of Amazon EFS. Should be defined as dictionary
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
This data may be modified for existing EFS using state 'present' and new list of mount targets."
required: false
default: None
wait:
description:
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
required: false
default: "no"
choices: ["yes", "no"]
wait_timeout:
description:
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
required: false
default: 0
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# EFS provisioning
- efs:
state: present
name: myTestEFS
tags:
name: myTestNameTag
purpose: file-storage
targets:
- subnet_id: subnet-748c5d03
security_groups: [ "sg-1a2b3c4d" ]
# Modifying EFS data
- efs:
state: present
name: myTestEFS
tags:
name: myAnotherTestTag
targets:
- subnet_id: subnet-7654fdca
security_groups: [ "sg-4c5d6f7a" ]
# Deleting EFS
- efs:
state: absent
name: myTestEFS
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned:
type: datetime
sample: 2015-11-16 07:30:57-05:00
creation_token:
description: EFS creation token
returned:
type: UUID
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned:
type: unique ID
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned:
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned:
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned:
type: list of dicts
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned:
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned:
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned:
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned:
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned:
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned:
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
import sys
from time import sleep
from time import time as timestamp
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
self.wait = module.params.get('wait')
self.wait_timeout = module.params.get('wait_timeout')
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def get_file_system_id(self, name):
"""
Returns ID of instance by instance name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name
))
return info and info['FileSystemId'] or None
def get_file_system_state(self, name, file_system_id=None):
"""
Returns state of filesystem by EFS id/name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name,
FileSystemId=file_system_id
))
return info and info['LifeCycleState'] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
Returns states of mount targets of selected EFS with selected state(s) (optional)
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
FileSystemId=file_system_id
)
if states:
if not isinstance(states, list):
states = [states]
targets = filter(lambda target: target['LifeCycleState'] in states, targets)
return list(targets)
def create_file_system(self, name, performance_mode):
"""
Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED
)
self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
changed = True
# we always wait for the state to be available when creating.
# if we try to take any actions on the file system before it's available
# we'll throw errors
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
self.wait_timeout
)
return changed
def converge_file_system(self, name, tags, targets):
"""
Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)
if tags_to_delete:
self.connection.delete_tags(
FileSystemId=fs_id,
TagKeys=[item[0] for item in tags_to_delete]
)
result = True
if tags_to_create:
self.connection.create_tags(
FileSystemId=fs_id,
Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
)
result = True
if targets is not None:
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items)
current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
targets = index_by_subnet_id(targets)
targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
targets, True)
""" To modify mount target it should be deleted and created again """
changed = filter(
lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
current_targets[sid], targets[sid]), intersection)
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
self.connection.delete_mount_target(
MountTargetId=current_targets[sid]['MountTargetId']
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
result = True
if targets_to_create:
for sid in targets_to_create:
self.connection.create_mount_target(
FileSystemId=fs_id,
**targets[sid]
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
self.wait_timeout
)
result = True
security_groups_to_update = filter(
lambda sid: 'SecurityGroups' in targets[sid] and
current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
intersection
)
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
MountTargetId=current_targets[sid]['MountTargetId'],
SecurityGroups=targets[sid]['SecurityGroups']
)
result = True
return result
def delete_file_system(self, name, file_system_id=None):
"""
Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
self.delete_mount_targets(file_system_id)
self.connection.delete_file_system(FileSystemId=file_system_id)
result = True
if self.wait:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
self.wait_timeout
)
return result
def delete_mount_targets(self, file_system_id):
"""
Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
0
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
0
)
return len(targets) > 0
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
else:
raise
def targets_equal(keys, a, b):
"""
Method compare two mount targets by specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
return False
return True
def dict_diff(dict1, dict2, by_key=False):
"""
Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
intersection = keys1 & keys2
return keys2 ^ intersection, intersection, keys1 ^ intersection
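# Worked example (illustrative, not part of the original module):
#   dict_diff({'a': 1, 'b': 2}, {'b': 2, 'c': 3})
#   returns ({('c', 3)}, {('b', 2)}, {('a', 1)}),
#   i.e. (only in dict2, in both, only in dict1); with by_key=True the same
#   comparison is done on keys alone, so
#   dict_diff({'a': 1, 'b': 2}, {'b': 9, 'c': 3}, by_key=True)
#   returns ({'c'}, {'b'}, {'a'}).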
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
else:
sleep(5)
continue
break
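# Worked example (illustrative, not part of the original module): block until a
# hypothetical pending_items() helper reports zero items, giving up after 30s:
#
#   wait_for(lambda: len(pending_items()), 0, timeout=30)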
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[]),
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
wait=dict(required=False, type="bool", default=False),
wait_timeout=dict(required=False, type="int", default=0)
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
target_translations = {
'ip_address': 'IpAddress',
'security_groups': 'SecurityGroups',
'subnet_id': 'SubnetId'
}
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
performance_mode_translations = {
'general_purpose': 'generalPurpose',
'max_io': 'maxIO'
}
performance_mode = performance_mode_translations[module.params.get('performance_mode')]
changed = False
state = str(module.params.get('state')).lower()
if state == 'present':
if not name:
module.fail_json(msg='Name parameter is required for create')
changed = connection.create_file_system(name, performance_mode)
changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
result = first_or_default(connection.get_file_systems(CreationToken=name))
elif state == 'absent':
if not name and not fs_id:
module.fail_json(msg='Either name or id parameter is required for delete')
changed = connection.delete_file_system(name, fs_id)
result = None
if result:
result = camel_dict_to_snake_dict(result)
module.exit_json(changed=changed, efs=result)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -6,992,776,658,636,993,000 | 32.312303 | 154 | 0.575758 | false | 4.212206 | false | false | false | 0.002604 |
mathjazz/pontoon | pontoon/sync/formats/po.py | 2 | 3755 | """
Parser for the pofile translation format.
"""
from datetime import datetime
from django.utils import timezone
import polib
from pontoon.sync import KEY_SEPARATOR
from pontoon.sync.exceptions import ParseError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs.models import VCSTranslation
class POEntity(VCSTranslation):
def __init__(self, po_entry, order):
self.po_entry = po_entry
if po_entry.msgstr_plural:
strings = po_entry.msgstr_plural
else:
strings = {None: po_entry.msgstr}
# Remove empty strings from the string dict.
strings = {key: value for key, value in strings.items() if value}
# Pofiles use the source as the key prepended with context if available.
key = po_entry.msgid
if po_entry.msgctxt:
key = po_entry.msgctxt + KEY_SEPARATOR + key
super().__init__(
key=key,
source_string=po_entry.msgid,
source_string_plural=po_entry.msgid_plural,
strings=strings,
comments=po_entry.comment.split("\n") if po_entry.comment else [],
fuzzy="fuzzy" in po_entry.flags,
order=order,
source=po_entry.occurrences,
)
def update_entry(self, locale):
"""Update the POEntry associated with this translation."""
if self.po_entry.msgstr_plural:
self.po_entry.msgstr_plural = {
plural_form: self.strings.get(plural_form, "")
for plural_form in range(locale.nplurals or 1)
}
else:
self.po_entry.msgstr = self.strings.get(None, "")
if self.fuzzy and "fuzzy" not in self.po_entry.flags:
self.po_entry.flags.append("fuzzy")
elif not self.fuzzy and "fuzzy" in self.po_entry.flags:
self.po_entry.flags.remove("fuzzy")
def __repr__(self):
return "<POEntity {key}>".format(key=self.key.encode("utf-8"))
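# Illustrative example (not part of the original source): a PO entry with
# msgctxt "menu" and msgid "Open" gets the key "menu" + KEY_SEPARATOR + "Open",
# while an entry without msgctxt is keyed by its msgid alone.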
class POResource(ParsedResource):
def __init__(self, pofile):
self.pofile = pofile
self.entities = [
POEntity(entry, k)
for k, entry in enumerate(self.pofile)
if not entry.obsolete
]
@property
def translations(self):
return self.entities
def save(self, locale):
for entity in self.translations:
entity.update_entry(locale)
metadata = self.pofile.metadata
if len(self.translations) > 0:
latest_translation = max(
self.translations,
key=lambda t: t.last_updated or timezone.make_aware(datetime.min),
)
if latest_translation.last_updated:
metadata["PO-Revision-Date"] = latest_translation.last_updated.strftime(
"%Y-%m-%d %H:%M%z"
)
if latest_translation.last_translator:
metadata[
"Last-Translator"
] = latest_translation.last_translator.display_name_and_email
metadata.update(
{
"Language": locale.code.replace("-", "_"),
"X-Generator": "Pontoon",
"Plural-Forms": (
"nplurals={locale.nplurals}; plural={locale.plural_rule};".format(
locale=locale
)
),
}
)
self.pofile.save()
def __repr__(self):
return f"<POResource {self.pofile.fpath}>"
def parse(path, source_path=None, locale=None):
try:
pofile = polib.pofile(path, wrapwidth=200)
except OSError as err:
raise ParseError(f"Failed to parse {path}: {err}")
return POResource(pofile)
| bsd-3-clause | 163,095,897,765,369,920 | 30.554622 | 88 | 0.56964 | false | 4.011752 | false | false | false | 0.001065 |
Southpaw-TACTIC/Team | src/python/Lib/weakref.py | 8 | 10442 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[wr.key]
self._remove = remove
UserDict.UserDict.__init__(self, *args, **kw)
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent was garbage
                # collected after the lookup; treat the entry as missing.
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
return self.data.iterkeys()
def __iter__(self):
return self.data.iterkeys()
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.itervalues()
def itervalues(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[k]
self._remove = remove
if dict is not None: self.update(dict)
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.iterkeys()
def iterkeys(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
def __iter__(self):
return self.iterkeys()
def itervalues(self):
return self.data.itervalues()
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
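# Minimal illustrative demo (not part of the original module): entries of a
# WeakValueDictionary disappear once the last strong reference to the value is
# dropped.  The Dummy class is only an example; plain ints and strings cannot
# be weakly referenced.
if __name__ == '__main__':
    class Dummy(object):
        pass
    d = WeakValueDictionary()
    obj = Dummy()
    d['key'] = obj
    print('key' in d)    # True while obj is alive
    del obj
    print('key' in d)    # False in CPython once the value has been collected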
| epl-1.0 | 200,770,221,042,562,020 | 27.414085 | 79 | 0.540893 | false | 4.481545 | false | false | false | 0.00067 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/eventlet-0.9.14/tests/stdlib/test_asyncore.py | 5 | 1969 | from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import select
from eventlet.green import socket
from eventlet.green import threading
from eventlet.green import time
patcher.inject("test.test_asyncore", globals())
def new_closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
# the only change we make is to not assign to asyncore.socket_map
# because doing so fails to assign to the real asyncore's socket_map
# and thus the test fails
socketmap = asyncore.socket_map.copy()
try:
asyncore.socket_map.clear()
asyncore.socket_map.update(testmap)
asyncore.close_all()
finally:
testmap = asyncore.socket_map.copy()
asyncore.socket_map.clear()
asyncore.socket_map.update(socketmap)
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
HelperFunctionTests.closeall_check = new_closeall_check
try:
# Eventlet's select() emulation doesn't support the POLLPRI flag,
# which this test relies on. Therefore, nuke it!
BaseTestAPI.test_handle_expt = lambda *a, **kw: None
except NameError:
pass
try:
# temporarily disabling these tests in the python2.7/pyevent configuration
from tests import using_pyevent
import sys
if using_pyevent(None) and sys.version_info >= (2, 7):
TestAPI_UseSelect.test_handle_accept = lambda *a, **kw: None
TestAPI_UseSelect.test_handle_close = lambda *a, **kw: None
TestAPI_UseSelect.test_handle_read = lambda *a, **kw: None
except NameError:
pass
if __name__ == "__main__":
test_main()
| apache-2.0 | -4,287,640,295,647,384,000 | 30.253968 | 78 | 0.655663 | false | 3.801158 | true | false | false | 0.002031 |
foreni-packages/golismero | thirdparty_libs/nltk/corpus/reader/propbank.py | 17 | 17074 | # Natural Language Toolkit: PropBank Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
import codecs
from nltk.tree import Tree
from xml.etree import ElementTree
from util import *
from api import *
class PropbankCorpusReader(CorpusReader):
"""
Corpus reader for the propbank corpus, which augments the Penn
Treebank with information about the predicate argument structure
of every verb instance. The corpus consists of two parts: the
predicate-argument annotations themselves, and a set of "frameset
files" which define the argument labels used by the annotations,
on a per-verb basis. Each "frameset file" contains one or more
predicates, such as ``'turn'`` or ``'turn_on'``, each of which is
divided into coarse-grained word senses called "rolesets". For
each "roleset", the frameset file provides descriptions of the
argument roles, along with examples.
"""
def __init__(self, root, propfile, framefiles='',
verbsfile=None, parse_fileid_xform=None,
parse_corpus=None, encoding=None):
"""
:param root: The root directory for this corpus.
:param propfile: The name of the file containing the predicate-
argument annotations (relative to ``root``).
:param framefiles: A list or regexp specifying the frameset
fileids for this corpus.
:param parse_fileid_xform: A transform that should be applied
to the fileids in this corpus. This should be a function
of one argument (a fileid) that returns a string (the new
fileid).
:param parse_corpus: The corpus containing the parse trees
corresponding to this corpus. These parse trees are
necessary to resolve the tree pointers used by propbank.
"""
# If framefiles is specified as a regexp, expand it.
if isinstance(framefiles, basestring):
framefiles = find_corpus_fileids(root, framefiles)
framefiles = list(framefiles)
        # Initialize the corpus reader.
CorpusReader.__init__(self, root, [propfile, verbsfile] + framefiles,
encoding)
# Record our frame fileids & prop file.
self._propfile = propfile
self._framefiles = framefiles
self._verbsfile = verbsfile
self._parse_fileid_xform = parse_fileid_xform
self._parse_corpus = parse_corpus
def raw(self, fileids=None):
"""
:return: the text contents of the given fileids, as a single string.
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def instances(self, baseform=None):
"""
:return: a corpus view that acts as a list of
``PropBankInstance`` objects, one for each noun in the corpus.
"""
kwargs = {}
if baseform is not None:
kwargs['instance_filter'] = lambda inst: inst.baseform==baseform
return StreamBackedCorpusView(self.abspath(self._propfile),
lambda stream: self._read_instance_block(stream, **kwargs),
encoding=self.encoding(self._propfile))
def lines(self):
"""
:return: a corpus view that acts as a list of strings, one for
each line in the predicate-argument annotation file.
"""
return StreamBackedCorpusView(self.abspath(self._propfile),
read_line_block,
encoding=self.encoding(self._propfile))
def roleset(self, roleset_id):
"""
:return: the xml description for the given roleset.
"""
baseform = roleset_id.split('.')[0]
framefile = 'frames/%s.xml' % baseform
if framefile not in self._framefiles:
raise ValueError('Frameset file for %s not found' %
roleset_id)
# n.b.: The encoding for XML fileids is specified by the file
# itself; so we ignore self._encoding here.
etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
for roleset in etree.findall('predicate/roleset'):
if roleset.attrib['id'] == roleset_id:
return roleset
else:
raise ValueError('Roleset %s not found in %s' %
(roleset_id, framefile))
def rolesets(self, baseform=None):
"""
:return: list of xml descriptions for rolesets.
"""
if baseform is not None:
framefile = 'frames/%s.xml' % baseform
if framefile not in self._framefiles:
raise ValueError('Frameset file for %s not found' %
baseform)
framefiles = [framefile]
else:
framefiles = self._framefiles
rsets = []
for framefile in framefiles:
# n.b.: The encoding for XML fileids is specified by the file
# itself; so we ignore self._encoding here.
etree = ElementTree.parse(self.abspath(framefile).open()).getroot()
rsets.append(etree.findall('predicate/roleset'))
return LazyConcatenation(rsets)
def verbs(self):
"""
:return: a corpus view that acts as a list of all verb lemmas
in this corpus (from the verbs.txt file).
"""
return StreamBackedCorpusView(self.abspath(self._verbsfile),
read_line_block,
encoding=self.encoding(self._verbsfile))
def _read_instance_block(self, stream, instance_filter=lambda inst: True):
block = []
# Read 100 at a time.
for i in range(100):
line = stream.readline().strip()
if line:
inst = PropbankInstance.parse(
line, self._parse_fileid_xform,
self._parse_corpus)
if instance_filter(inst):
block.append(inst)
return block
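# Illustrative usage sketch (assumes the standard ``nltk.corpus.propbank``
# corpus is installed; not part of the original module):
#
#   >>> from nltk.corpus import propbank
#   >>> inst = propbank.instances()[0]
#   >>> inst.fileid, inst.roleset, inst.arguments    # doctest: +SKIP
#   >>> propbank.roleset('turn.01')                  # frameset lookup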
######################################################################
#{ Propbank Instance & related datatypes
######################################################################
class PropbankInstance(object):
def __init__(self, fileid, sentnum, wordnum, tagger, roleset,
inflection, predicate, arguments, parse_corpus=None):
self.fileid = fileid
"""The name of the file containing the parse tree for this
instance's sentence."""
self.sentnum = sentnum
"""The sentence number of this sentence within ``fileid``.
Indexing starts from zero."""
self.wordnum = wordnum
"""The word number of this instance's predicate within its
containing sentence. Word numbers are indexed starting from
zero, and include traces and other empty parse elements."""
self.tagger = tagger
"""An identifier for the tagger who tagged this instance; or
        ``'gold'`` if this is an adjudicated instance."""
self.roleset = roleset
"""The name of the roleset used by this instance's predicate.
Use ``propbank.roleset() <PropbankCorpusReader.roleset>`` to
look up information about the roleset."""
self.inflection = inflection
"""A ``PropbankInflection`` object describing the inflection of
this instance's predicate."""
self.predicate = predicate
"""A ``PropbankTreePointer`` indicating the position of this
instance's predicate within its containing sentence."""
self.arguments = tuple(arguments)
"""A list of tuples (argloc, argid), specifying the location
and identifier for each of the predicate's argument in the
containing sentence. Argument identifiers are strings such as
``'ARG0'`` or ``'ARGM-TMP'``. This list does *not* contain
the predicate."""
self.parse_corpus = parse_corpus
"""A corpus reader for the parse trees corresponding to the
instances in this propbank corpus."""
@property
def baseform(self):
"""The baseform of the predicate."""
return self.roleset.split('.')[0]
@property
def sensenumber(self):
"""The sense number of the predicate."""
return self.roleset.split('.')[1]
@property
def predid(self):
"""Identifier of the predicate."""
return 'rel'
def __repr__(self):
return ('<PropbankInstance: %s, sent %s, word %s>' %
(self.fileid, self.sentnum, self.wordnum))
def __str__(self):
s = '%s %s %s %s %s %s' % (self.fileid, self.sentnum, self.wordnum,
self.tagger, self.roleset, self.inflection)
items = self.arguments + ((self.predicate, 'rel'),)
for (argloc, argid) in sorted(items):
s += ' %s-%s' % (argloc, argid)
return s
def _get_tree(self):
if self.parse_corpus is None: return None
if self.fileid not in self.parse_corpus.fileids(): return None
return self.parse_corpus.parsed_sents(self.fileid)[self.sentnum]
tree = property(_get_tree, doc="""
The parse tree corresponding to this instance, or None if
the corresponding tree is not available.""")
@staticmethod
def parse(s, parse_fileid_xform=None, parse_corpus=None):
pieces = s.split()
if len(pieces) < 7:
raise ValueError('Badly formatted propbank line: %r' % s)
# Divide the line into its basic pieces.
(fileid, sentnum, wordnum,
tagger, roleset, inflection) = pieces[:6]
rel = [p for p in pieces[6:] if p.endswith('-rel')]
args = [p for p in pieces[6:] if not p.endswith('-rel')]
if len(rel) != 1:
raise ValueError('Badly formatted propbank line: %r' % s)
# Apply the fileid selector, if any.
if parse_fileid_xform is not None:
fileid = parse_fileid_xform(fileid)
# Convert sentence & word numbers to ints.
sentnum = int(sentnum)
wordnum = int(wordnum)
# Parse the inflection
inflection = PropbankInflection.parse(inflection)
# Parse the predicate location.
predicate = PropbankTreePointer.parse(rel[0][:-4])
# Parse the arguments.
arguments = []
for arg in args:
argloc, argid = arg.split('-', 1)
arguments.append( (PropbankTreePointer.parse(argloc), argid) )
# Put it all together.
return PropbankInstance(fileid, sentnum, wordnum, tagger,
roleset, inflection, predicate,
arguments, parse_corpus)
class PropbankPointer(object):
"""
A pointer used by propbank to identify one or more constituents in
a parse tree. ``PropbankPointer`` is an abstract base class with
three concrete subclasses:
- ``PropbankTreePointer`` is used to point to single constituents.
- ``PropbankSplitTreePointer`` is used to point to 'split'
constituents, which consist of a sequence of two or more
``PropbankTreePointer`` pointers.
- ``PropbankChainTreePointer`` is used to point to entire trace
chains in a tree. It consists of a sequence of pieces, which
can be ``PropbankTreePointer`` or ``PropbankSplitTreePointer`` pointers.
"""
def __init__(self):
        if self.__class__ == PropbankPointer:
raise NotImplementedError()
class PropbankChainTreePointer(PropbankPointer):
def __init__(self, pieces):
self.pieces = pieces
"""A list of the pieces that make up this chain. Elements may
be either ``PropbankSplitTreePointer`` or
``PropbankTreePointer`` pointers."""
def __str__(self):
return '*'.join('%s' % p for p in self.pieces)
def __repr__(self):
return '<PropbankChainTreePointer: %s>' % self
def select(self, tree):
        if tree is None: raise ValueError('Parse tree not available')
return Tree('*CHAIN*', [p.select(tree) for p in self.pieces])
class PropbankSplitTreePointer(PropbankPointer):
def __init__(self, pieces):
self.pieces = pieces
"""A list of the pieces that make up this chain. Elements are
all ``PropbankTreePointer`` pointers."""
def __str__(self):
return ','.join('%s' % p for p in self.pieces)
def __repr__(self):
return '<PropbankSplitTreePointer: %s>' % self
def select(self, tree):
        if tree is None: raise ValueError('Parse tree not available')
return Tree('*SPLIT*', [p.select(tree) for p in self.pieces])
class PropbankTreePointer(PropbankPointer):
"""
wordnum:height*wordnum:height*...
wordnum:height,
"""
def __init__(self, wordnum, height):
self.wordnum = wordnum
self.height = height
@staticmethod
def parse(s):
# Deal with chains (xx*yy*zz)
pieces = s.split('*')
if len(pieces) > 1:
return PropbankChainTreePointer([PropbankTreePointer.parse(elt)
for elt in pieces])
# Deal with split args (xx,yy,zz)
pieces = s.split(',')
if len(pieces) > 1:
return PropbankSplitTreePointer([PropbankTreePointer.parse(elt)
for elt in pieces])
# Deal with normal pointers.
pieces = s.split(':')
if len(pieces) != 2: raise ValueError('bad propbank pointer %r' % s)
return PropbankTreePointer(int(pieces[0]), int(pieces[1]))
def __str__(self):
return '%s:%s' % (self.wordnum, self.height)
def __repr__(self):
return 'PropbankTreePointer(%d, %d)' % (self.wordnum, self.height)
def __cmp__(self, other):
while isinstance(other, (PropbankChainTreePointer,
PropbankSplitTreePointer)):
other = other.pieces[0]
if not isinstance(other, PropbankTreePointer):
return cmp(id(self), id(other))
return cmp( (self.wordnum, -self.height),
(other.wordnum, -other.height) )
def select(self, tree):
        if tree is None: raise ValueError('Parse tree not available')
return tree[self.treepos(tree)]
def treepos(self, tree):
"""
Convert this pointer to a standard 'tree position' pointer,
given that it points to the given tree.
"""
        if tree is None: raise ValueError('Parse tree not available')
stack = [tree]
treepos = []
wordnum = 0
while True:
#print treepos
#print stack[-1]
# tree node:
if isinstance(stack[-1], Tree):
# Select the next child.
if len(treepos) < len(stack):
treepos.append(0)
else:
treepos[-1] += 1
# Update the stack.
if treepos[-1] < len(stack[-1]):
stack.append(stack[-1][treepos[-1]])
else:
# End of node's child list: pop up a level.
stack.pop()
treepos.pop()
# word node:
else:
if wordnum == self.wordnum:
return tuple(treepos[:len(treepos)-self.height-1])
else:
wordnum += 1
stack.pop()
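# Illustrative example (not part of the original module): parsing pointer
# strings with PropbankTreePointer.parse():
#
#   >>> PropbankTreePointer.parse('7:2')
#   PropbankTreePointer(7, 2)
#   >>> PropbankTreePointer.parse('20:1*27:0')      # a trace chain
#   <PropbankChainTreePointer: 20:1*27:0>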
class PropbankInflection(object):
#{ Inflection Form
INFINITIVE = 'i'
GERUND = 'g'
PARTICIPLE = 'p'
FINITE = 'v'
#{ Inflection Tense
FUTURE = 'f'
PAST = 'p'
PRESENT = 'n'
#{ Inflection Aspect
PERFECT = 'p'
PROGRESSIVE = 'o'
PERFECT_AND_PROGRESSIVE = 'b'
#{ Inflection Person
THIRD_PERSON = '3'
#{ Inflection Voice
ACTIVE = 'a'
PASSIVE = 'p'
#{ Inflection
NONE = '-'
#}
def __init__(self, form='-', tense='-', aspect='-', person='-', voice='-'):
self.form = form
self.tense = tense
self.aspect = aspect
self.person = person
self.voice = voice
def __str__(self):
return self.form+self.tense+self.aspect+self.person+self.voice
def __repr__(self):
return '<PropbankInflection: %s>' % self
_VALIDATE = re.compile(r'[igpv\-][fpn\-][pob\-][3\-][ap\-]$')
@staticmethod
def parse(s):
if not isinstance(s, basestring):
raise TypeError('expected a string')
if (len(s) != 5 or
not PropbankInflection._VALIDATE.match(s)):
raise ValueError('Bad propbank inflection string %r' % s)
return PropbankInflection(*s)
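# Illustrative example (not part of the original module): the five-character
# inflection string 'vn-3a' parses to form=FINITE, tense=PRESENT, aspect NONE,
# person=THIRD_PERSON and voice=ACTIVE:
#
#   >>> infl = PropbankInflection.parse('vn-3a')
#   >>> str(infl)
#   'vn-3a'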
| gpl-2.0 | -4,994,588,967,922,254,000 | 36.279476 | 97 | 0.578013 | false | 4.15628 | false | false | false | 0.002343 |
pombredanne/mitmproxy | examples/tls_passthrough.py | 15 | 4470 | """
This inline script allows conditional TLS Interception based
on a user-defined strategy.
Example:
> mitmdump -s tls_passthrough.py
1. curl --proxy http://localhost:8080 https://example.com --insecure
// works - we'll also see the contents in mitmproxy
2. curl --proxy http://localhost:8080 https://example.com --insecure
// still works - we'll also see the contents in mitmproxy
3. curl --proxy http://localhost:8080 https://example.com
// fails with a certificate error, which we will also see in mitmproxy
4. curl --proxy http://localhost:8080 https://example.com
// works again, but mitmproxy does not intercept and we do *not* see the contents
Authors: Maximilian Hils, Matthew Tuusberg
"""
from __future__ import (absolute_import, print_function, division)
import collections
import random
from enum import Enum
from libmproxy.exceptions import TlsProtocolException
from libmproxy.protocol import TlsLayer, RawTCPLayer
class InterceptionResult(Enum):
success = True
failure = False
skipped = None
class _TlsStrategy(object):
"""
Abstract base class for interception strategies.
"""
def __init__(self):
# A server_address -> interception results mapping
self.history = collections.defaultdict(lambda: collections.deque(maxlen=200))
def should_intercept(self, server_address):
"""
Returns:
True, if we should attempt to intercept the connection.
False, if we want to employ pass-through instead.
"""
raise NotImplementedError()
def record_success(self, server_address):
self.history[server_address].append(InterceptionResult.success)
def record_failure(self, server_address):
self.history[server_address].append(InterceptionResult.failure)
def record_skipped(self, server_address):
self.history[server_address].append(InterceptionResult.skipped)
class ConservativeStrategy(_TlsStrategy):
"""
Conservative Interception Strategy - only intercept if there haven't been any failed attempts
in the history.
"""
def should_intercept(self, server_address):
if InterceptionResult.failure in self.history[server_address]:
return False
return True
class ProbabilisticStrategy(_TlsStrategy):
"""
Fixed probability that we intercept a given connection.
"""
def __init__(self, p):
self.p = p
super(ProbabilisticStrategy, self).__init__()
def should_intercept(self, server_address):
return random.uniform(0, 1) < self.p
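# The subclass below is an added example and is not wired into the hooks.
class LimitedFailuresStrategy(_TlsStrategy):
    """
    Example strategy (illustrative sketch, not part of the original script):
    keep intercepting an address until a fixed number of failed handshakes has
    been recorded for it, then fall back to pass-through. It relies only on
    the history kept by _TlsStrategy and on InterceptionResult defined above.
    """
    def __init__(self, max_failures=3):
        self.max_failures = max_failures
        super(LimitedFailuresStrategy, self).__init__()

    def should_intercept(self, server_address):
        failures = [r for r in self.history[server_address]
                    if r is InterceptionResult.failure]
        return len(failures) < self.max_failures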
class TlsFeedback(TlsLayer):
"""
Monkey-patch _establish_tls_with_client to get feedback if TLS could be established
successfully on the client connection (which may fail due to cert pinning).
"""
def _establish_tls_with_client(self):
server_address = self.server_conn.address
tls_strategy = self.script_context.tls_strategy
try:
super(TlsFeedback, self)._establish_tls_with_client()
except TlsProtocolException as e:
tls_strategy.record_failure(server_address)
raise e
else:
tls_strategy.record_success(server_address)
# inline script hooks below.
def start(context, argv):
if len(argv) == 2:
context.tls_strategy = ProbabilisticStrategy(float(argv[1]))
else:
context.tls_strategy = ConservativeStrategy()
def next_layer(context, next_layer):
"""
This hook does the actual magic - if the next layer is planned to be a TLS layer,
we check if we want to enter pass-through mode instead.
"""
if isinstance(next_layer, TlsLayer) and next_layer._client_tls:
server_address = next_layer.server_conn.address
if context.tls_strategy.should_intercept(server_address):
# We try to intercept.
# Monkey-Patch the layer to get feedback from the TLSLayer if interception worked.
next_layer.__class__ = TlsFeedback
next_layer.script_context = context
else:
# We don't intercept - reply with a pass-through layer and add a "skipped" entry.
context.log("TLS passthrough for %s" % repr(next_layer.server_conn.address), "info")
next_layer_replacement = RawTCPLayer(next_layer.ctx, logging=False)
next_layer.reply(next_layer_replacement)
context.tls_strategy.record_skipped(server_address)
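# Example invocation (a sketch; assumes this file is saved as tls_passthrough.py):
#   mitmdump -s "tls_passthrough.py 0.5"   # probabilistic strategy with p = 0.5
#   mitmdump -s tls_passthrough.py         # conservative strategy (default)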
| mit | 2,980,115,923,857,024,500 | 31.867647 | 97 | 0.677405 | false | 4.071038 | false | false | false | 0.00179 |
CLVsol/odoo_api | jcafb_2016.py | 1 | 71683 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from __future__ import print_function
from erppeek import *
import erppeek
from base import *
import argparse
import getpass
import sqlite3
import csv
def ir_model_data_get_instance(client, code):
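    # Resolve the ir.model.data external id "code" and return a
    # (name, model, res_id) tuple, or (False, False, False) when no record matches.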
ir_model_data = client.model('ir.model.data')
ir_model_data_browse = ir_model_data.browse([('name', '=', code), ])
if ir_model_data_browse.name != []:
instance = ir_model_data_browse.name[0], ir_model_data_browse.model[0], ir_model_data_browse.res_id[0]
return instance
else:
instance = False, False, False
return instance
def survey_question_user_input_line_values_sqlite(client, db_path, code):
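    # Build (or rebuild) a SQLite table holding one row per answer to the survey
    # question identified by the external id "code".  Handles simple_choice and
    # multiple_choice questions; multiple selected values are joined with ";".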
table_name = 'question_user_input_line_values' + '_' + code
# conn = sqlite3.connect(':memory:')
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute('''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
patient_code,
family_code TEXT,
question TEXT,
question_type TEXT,
value_suggested TEXT,
value_text TEXT
);
''')
instance = ir_model_data_get_instance(client, code)
print('------>', instance)
print()
survey_question = client.model('survey.question')
survey_question_browse = survey_question.browse([
('id', '=', instance[2]),
])
question = survey_question_browse[0].question.encode('utf-8')
question_type = survey_question_browse[0].type
survey_user_input_line = client.model('survey.user_input_line')
survey_user_input_line_browse = survey_user_input_line.browse([
('question_id', '=', instance[2]),
])
survey_user_input = client.model('survey.user_input')
survey_user_input_browse = survey_user_input.browse([
('survey_id', '=', survey_question_browse[0].survey_id.id),
])
clv_document = client.model('clv_document')
if question_type == 'simple_choice':
i = 0
for user_input_line in survey_user_input_line_browse:
if user_input_line.answer_type == 'suggestion':
i += 1
clv_document_browse = clv_document.browse([
('survey_user_input_id', '=', user_input_line.user_input_id.id),
])
patient_code = False
if clv_document_browse.patient_id.id != []:
patient_code = clv_document_browse.patient_id.patient_code[0]
if patient_code is False:
patient_code = None
family_code = False
if clv_document_browse.family_id.id != []:
family_code = clv_document_browse.family_id.code[0]
if family_code is False:
family_code = None
value_suggested = False
if user_input_line.value_suggested is not False:
value_suggested = user_input_line.value_suggested.value.encode('utf-8')
if value_suggested is False:
value_suggested = None
survey_user_input_line_2_browse = survey_user_input_line.browse([
('user_input_id', '=', user_input_line.user_input_id.id),
('question_id', '=', instance[2]),
('answer_type', '=', 'text'),
])
value_text = False
if survey_user_input_line_2_browse.id != []:
if survey_user_input_line_2_browse[0].value_text is not False:
value_text = survey_user_input_line_2_browse[0].value_text.encode('utf-8')
if value_text is False:
value_text = None
print(i,
patient_code,
family_code,
question,
question_type,
value_suggested,
value_text,
)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
if question_type == 'multiple_choice':
i = 0
for user_input in survey_user_input_browse:
i += 1
first_user_input_line = True
survey_user_input_line_3_browse = survey_user_input_line.browse([
('user_input_id', '=', user_input.id),
('question_id', '=', instance[2]),
])
for user_input_line_3 in survey_user_input_line_3_browse:
if user_input_line_3.answer_type == 'suggestion':
if first_user_input_line is True:
clv_document_browse = clv_document.browse([
('survey_user_input_id', '=', user_input_line_3.user_input_id.id),
])
patient_code = False
if clv_document_browse.patient_id.id != []:
patient_code = clv_document_browse.patient_id.patient_code[0]
if patient_code is False:
patient_code = None
family_code = False
if clv_document_browse.family_id.id != []:
family_code = clv_document_browse.family_id.code[0]
if family_code is False:
family_code = None
value_suggested_2 = False
if user_input_line_3.value_suggested is not False:
value_suggested_2 = user_input_line_3.value_suggested.value.encode('utf-8')
value_suggested = value_suggested_2
if value_suggested is False:
value_suggested = None
first_user_input_line = False
else:
value_suggested_2 = False
if user_input_line_3.value_suggested is not False:
value_suggested_2 = user_input_line_3.value_suggested.value.encode('utf-8')
value_suggested = value_suggested + ';' + value_suggested_2
survey_user_input_line_2_browse = survey_user_input_line.browse([
('user_input_id', '=', user_input.id),
('question_id', '=', instance[2]),
('answer_type', '=', 'text'),
])
value_text = False
if survey_user_input_line_2_browse.id != []:
if survey_user_input_line_2_browse[0].value_text is not False:
value_text = survey_user_input_line_2_browse[0].value_text.encode('utf-8')
if value_text is False:
value_text = None
print(i,
patient_code,
family_code,
question,
question_type,
value_suggested,
value_text,
)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
conn.commit()
conn.close()
print()
print('--> i: ', i)
def jcafb_2016_export(client, file_path, db_path, code):
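    # Dump the per-question SQLite table for "code" into a semicolon-separated
    # CSV file at file_path, including a header row.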
table_name = 'question_user_input_line_values' + '_' + code
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
csv_file = open(file_path, 'wb')
writer_csv_file = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_csv_file.writerow([field[0] for field in cursor.description])
writer_csv_file.writerows(data)
csv_file.close()
conn.close()
def jcafb_2016_export_2(client, file_path, db_path, code_1, code_2):
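    # Merge the answer tables of two questions into a single table keyed by
    # patient_code and export it as CSV; code_2 rows without a matching
    # patient_code are appended as new rows.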
table_name = 'question_user_input_line_values' + '_' + code_1 + '_' + code_2
table_name_1 = 'question_user_input_line_values' + '_' + code_1
table_name_2 = 'question_user_input_line_values' + '_' + code_2
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute('''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
patient_code,
family_code TEXT,
question_1 TEXT,
question_type_1 TEXT,
value_suggested_1 TEXT,
value_text_1 TEXT,
question_2 TEXT,
question_type_2 TEXT,
value_suggested_2 TEXT,
value_text_2 TEXT
);
''')
cursor_1 = conn.cursor()
cursor_1.execute('''
SELECT * FROM ''' + table_name_1 + ''';
''')
all_rows = cursor_1.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_1,
question_type_1,
value_suggested_1,
value_text_1
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_2 = conn.cursor()
cursor_2.execute('''
SELECT * FROM ''' + table_name_2 + ''';
''')
all_rows = cursor_2.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_2 = cursor.fetchone()
if row_2 is not None:
id_2 = row_2[0]
print('>>>>>', row_2, id_2)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_2 = ? WHERE id = ? ''',
(question, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_2 = ? WHERE id = ? ''',
(question_type, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_2 = ? WHERE id = ? ''',
(value_suggested, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_2 = ? WHERE id = ? ''',
(value_text, id_2))
else:
print('>>>>>', row_2)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_2,
question_type_2,
value_suggested_2,
value_text_2
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
csv_file = open(file_path, 'wb')
writer_csv_file = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_csv_file.writerow([field[0] for field in cursor.description])
writer_csv_file.writerows(data)
csv_file.close()
conn.close()
def jcafb_2016_export_3(client, file_path, db_path, code_1, code_2, code_3):
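    # Same idea as jcafb_2016_export_2, but merging three question tables.
    # Note that the combined table name only embeds code_1 and code_2.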
table_name = 'question_user_input_line_values' + '_' + code_1 + '_' + code_2
table_name_1 = 'question_user_input_line_values' + '_' + code_1
table_name_2 = 'question_user_input_line_values' + '_' + code_2
table_name_3 = 'question_user_input_line_values' + '_' + code_3
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute('''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
patient_code,
family_code TEXT,
question_1 TEXT,
question_type_1 TEXT,
value_suggested_1 TEXT,
value_text_1 TEXT,
question_2 TEXT,
question_type_2 TEXT,
value_suggested_2 TEXT,
value_text_2 TEXT,
question_3 TEXT,
question_type_3 TEXT,
value_suggested_3 TEXT,
value_text_3 TEXT
);
''')
cursor_1 = conn.cursor()
cursor_1.execute('''
SELECT * FROM ''' + table_name_1 + ''';
''')
all_rows = cursor_1.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_1,
question_type_1,
value_suggested_1,
value_text_1
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_2 = conn.cursor()
cursor_2.execute('''
SELECT * FROM ''' + table_name_2 + ''';
''')
all_rows = cursor_2.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_2 = cursor.fetchone()
if row_2 is not None:
id_2 = row_2[0]
print('>>>>>', row_2, id_2)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_2 = ? WHERE id = ? ''',
(question, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_2 = ? WHERE id = ? ''',
(question_type, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_2 = ? WHERE id = ? ''',
(value_suggested, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_2 = ? WHERE id = ? ''',
(value_text, id_2))
else:
print('>>>>>', row_2)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_2,
question_type_2,
value_suggested_2,
value_text_2
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_3 = conn.cursor()
cursor_3.execute('''
SELECT * FROM ''' + table_name_3 + ''';
''')
all_rows = cursor_3.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_3 = cursor.fetchone()
if row_3 is not None:
id_3 = row_3[0]
print('>>>>>', row_3, id_3)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_3 = ? WHERE id = ? ''',
(question, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_3 = ? WHERE id = ? ''',
(question_type, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_3 = ? WHERE id = ? ''',
(value_suggested, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_3 = ? WHERE id = ? ''',
(value_text, id_3))
else:
print('>>>>>', row_3)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_3,
question_type_3,
value_suggested_3,
value_text_3
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
csv_file = open(file_path, 'wb')
writer_csv_file = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_csv_file.writerow([field[0] for field in cursor.description])
writer_csv_file.writerows(data)
csv_file.close()
conn.close()
def jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4):
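    # Merge four question tables; rows that cannot be matched on patient_code
    # are matched on family_code before being appended as new rows.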
table_name = 'question_user_input_line_values' + '_' + code_1 + '_' + code_2
table_name_1 = 'question_user_input_line_values' + '_' + code_1
table_name_2 = 'question_user_input_line_values' + '_' + code_2
table_name_3 = 'question_user_input_line_values' + '_' + code_3
table_name_4 = 'question_user_input_line_values' + '_' + code_4
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute('''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
patient_code,
family_code TEXT,
question_1 TEXT,
question_type_1 TEXT,
value_suggested_1 TEXT,
value_text_1 TEXT,
question_2 TEXT,
question_type_2 TEXT,
value_suggested_2 TEXT,
value_text_2 TEXT,
question_3 TEXT,
question_type_3 TEXT,
value_suggested_3 TEXT,
value_text_3 TEXT,
question_4 TEXT,
question_type_4 TEXT,
value_suggested_4 TEXT,
value_text_4 TEXT
);
''')
cursor_1 = conn.cursor()
cursor_1.execute('''
SELECT * FROM ''' + table_name_1 + ''';
''')
all_rows = cursor_1.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_1,
question_type_1,
value_suggested_1,
value_text_1
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_2 = conn.cursor()
cursor_2.execute('''
SELECT * FROM ''' + table_name_2 + ''';
''')
all_rows = cursor_2.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_2 = cursor.fetchone()
if row_2 is not None:
id_2 = row_2[0]
print('>>>>>', row_2, id_2)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_2 = ? WHERE id = ? ''',
(question, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_2 = ? WHERE id = ? ''',
(question_type, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_2 = ? WHERE id = ? ''',
(value_suggested, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_2 = ? WHERE id = ? ''',
(value_text, id_2))
else:
cursor.execute('''SELECT id, family_code FROM ''' + table_name + ''' WHERE family_code=?''',
(family_code,))
row_2 = cursor.fetchone()
if row_2 is not None:
id_2 = row_2[0]
print('>>>>>', row_2, id_2)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_2 = ? WHERE id = ? ''',
(question, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_2 = ? WHERE id = ? ''',
(question_type, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_2 = ? WHERE id = ? ''',
(value_suggested, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_2 = ? WHERE id = ? ''',
(value_text, id_2))
else:
print('>>>>>', row_2)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_2,
question_type_2,
value_suggested_2,
value_text_2
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_3 = conn.cursor()
cursor_3.execute('''
SELECT * FROM ''' + table_name_3 + ''';
''')
all_rows = cursor_3.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_3 = cursor.fetchone()
if row_3 is not None:
id_3 = row_3[0]
print('>>>>>', row_3, id_3)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_3 = ? WHERE id = ? ''',
(question, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_3 = ? WHERE id = ? ''',
(question_type, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_3 = ? WHERE id = ? ''',
(value_suggested, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_3 = ? WHERE id = ? ''',
(value_text, id_3))
else:
cursor.execute('''SELECT id, family_code FROM ''' + table_name + ''' WHERE family_code=?''',
(family_code,))
row_3 = cursor.fetchone()
if row_3 is not None:
id_3 = row_3[0]
print('>>>>>', row_3, id_3)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_3 = ? WHERE id = ? ''',
(question, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_3 = ? WHERE id = ? ''',
(question_type, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_3 = ? WHERE id = ? ''',
(value_suggested, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_3 = ? WHERE id = ? ''',
(value_text, id_3))
else:
print('>>>>>', row_3)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_3,
question_type_3,
value_suggested_3,
value_text_3
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_4 = conn.cursor()
cursor_4.execute('''
SELECT * FROM ''' + table_name_4 + ''';
''')
all_rows = cursor_4.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_4 = cursor.fetchone()
if row_4 is not None:
id_4 = row_4[0]
print('>>>>>', row_4, id_4)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_4 = ? WHERE id = ? ''',
(question, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_4 = ? WHERE id = ? ''',
(question_type, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_4 = ? WHERE id = ? ''',
(value_suggested, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_4 = ? WHERE id = ? ''',
(value_text, id_4))
else:
cursor.execute('''SELECT id, family_code FROM ''' + table_name + ''' WHERE family_code=?''',
(family_code,))
row_4 = cursor.fetchone()
if row_4 is not None:
id_4 = row_4[0]
print('>>>>>', row_4, id_4)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_4 = ? WHERE id = ? ''',
(question, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_4 = ? WHERE id = ? ''',
(question_type, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_4 = ? WHERE id = ? ''',
(value_suggested, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_4 = ? WHERE id = ? ''',
(value_text, id_4))
else:
print('>>>>>', row_4)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_4,
question_type_4,
value_suggested_4,
value_text_4
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
csv_file = open(file_path, 'wb')
writer_csv_file = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_csv_file.writerow([field[0] for field in cursor.description])
writer_csv_file.writerows(data)
csv_file.close()
conn.close()
def jcafb_2016_export_5(client, file_path, db_path, code_1, code_2, code_3, code_4, code_5):
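    # Merge five question tables keyed by patient_code and export the result as CSV.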
table_name = 'question_user_input_line_values' + '_' + code_1 + '_' + code_2
table_name_1 = 'question_user_input_line_values' + '_' + code_1
table_name_2 = 'question_user_input_line_values' + '_' + code_2
table_name_3 = 'question_user_input_line_values' + '_' + code_3
table_name_4 = 'question_user_input_line_values' + '_' + code_4
table_name_5 = 'question_user_input_line_values' + '_' + code_5
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute('''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
patient_code,
family_code TEXT,
question_1 TEXT,
question_type_1 TEXT,
value_suggested_1 TEXT,
value_text_1 TEXT,
question_2 TEXT,
question_type_2 TEXT,
value_suggested_2 TEXT,
value_text_2 TEXT,
question_3 TEXT,
question_type_3 TEXT,
value_suggested_3 TEXT,
value_text_3 TEXT,
question_4 TEXT,
question_type_4 TEXT,
value_suggested_4 TEXT,
value_text_4 TEXT,
question_5 TEXT,
question_type_5 TEXT,
value_suggested_5 TEXT,
value_text_5 TEXT
);
''')
cursor_1 = conn.cursor()
cursor_1.execute('''
SELECT * FROM ''' + table_name_1 + ''';
''')
all_rows = cursor_1.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_1,
question_type_1,
value_suggested_1,
value_text_1
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_2 = conn.cursor()
cursor_2.execute('''
SELECT * FROM ''' + table_name_2 + ''';
''')
all_rows = cursor_2.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_2 = cursor.fetchone()
if row_2 is not None:
id_2 = row_2[0]
print('>>>>>', row_2, id_2)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_2 = ? WHERE id = ? ''',
(question, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_2 = ? WHERE id = ? ''',
(question_type, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_2 = ? WHERE id = ? ''',
(value_suggested, id_2))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_2 = ? WHERE id = ? ''',
(value_text, id_2))
else:
print('>>>>>', row_2)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_2,
question_type_2,
value_suggested_2,
value_text_2
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_3 = conn.cursor()
cursor_3.execute('''
SELECT * FROM ''' + table_name_3 + ''';
''')
all_rows = cursor_3.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_3 = cursor.fetchone()
if row_3 is not None:
id_3 = row_3[0]
print('>>>>>', row_3, id_3)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_3 = ? WHERE id = ? ''',
(question, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_3 = ? WHERE id = ? ''',
(question_type, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_3 = ? WHERE id = ? ''',
(value_suggested, id_3))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_3 = ? WHERE id = ? ''',
(value_text, id_3))
else:
print('>>>>>', row_3)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_3,
question_type_3,
value_suggested_3,
value_text_3
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_4 = conn.cursor()
cursor_4.execute('''
SELECT * FROM ''' + table_name_4 + ''';
''')
all_rows = cursor_4.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_4 = cursor.fetchone()
if row_4 is not None:
id_4 = row_4[0]
print('>>>>>', row_4, id_4)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_4 = ? WHERE id = ? ''',
(question, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_4 = ? WHERE id = ? ''',
(question_type, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_4 = ? WHERE id = ? ''',
(value_suggested, id_4))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_4 = ? WHERE id = ? ''',
(value_text, id_4))
else:
print('>>>>>', row_4)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_4,
question_type_4,
value_suggested_4,
value_text_4
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
cursor_5 = conn.cursor()
cursor_5.execute('''
SELECT * FROM ''' + table_name_5 + ''';
''')
all_rows = cursor_5.fetchall()
for row in all_rows:
patient_code = row[1]
family_code = row[2]
question = row[3]
question_type = row[4]
value_suggested = row[5]
value_text = row[6]
cursor.execute('''SELECT id, patient_code FROM ''' + table_name + ''' WHERE patient_code=?''', (patient_code,))
row_5 = cursor.fetchone()
if row_5 is not None:
id_5 = row_5[0]
print('>>>>>', row_5, id_5)
cursor.execute('''UPDATE ''' + table_name + ''' SET question_5 = ? WHERE id = ? ''',
(question, id_5))
cursor.execute('''UPDATE ''' + table_name + ''' SET question_type_5 = ? WHERE id = ? ''',
(question_type, id_5))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_suggested_5 = ? WHERE id = ? ''',
(value_suggested, id_5))
cursor.execute('''UPDATE ''' + table_name + ''' SET value_text_5 = ? WHERE id = ? ''',
(value_text, id_5))
else:
            print('>>>>>', row_5)
cursor.execute('''
INSERT INTO ''' + table_name + '''(
patient_code,
family_code,
question_5,
question_type_5,
value_suggested_5,
value_text_5
)
VALUES(?,?,?,?,?,?)''',
(patient_code,
family_code,
question,
question_type,
value_suggested,
value_text
)
)
conn.commit()
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
for row in cursor:
print(row)
data = cursor.execute('''
SELECT * FROM ''' + table_name + ''';
''')
csv_file = open(file_path, 'wb')
writer_csv_file = csv.writer(csv_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
writer_csv_file.writerow([field[0] for field in cursor.description])
writer_csv_file.writerows(data)
csv_file.close()
conn.close()
def get_arguments():
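    # Read --user, --pw and --db from the command line, falling back to
    # interactive prompts when the module-level defaults are left as '*'.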
global username
global password
global dbname
parser = argparse.ArgumentParser()
parser.add_argument('--user', action="store", dest="username")
parser.add_argument('--pw', action="store", dest="password")
parser.add_argument('--db', action="store", dest="dbname")
args = parser.parse_args()
print('%s%s' % ('--> ', args))
if args.dbname is not None:
dbname = args.dbname
elif dbname == '*':
dbname = raw_input('dbname: ')
if args.username is not None:
username = args.username
elif username == '*':
username = raw_input('username: ')
if args.password is not None:
password = args.password
elif password == '*':
password = getpass.getpass('password: ')
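# Example command line (hypothetical values):
#   python jcafb_2016.py --db odoo --user admin --pw secret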
if __name__ == '__main__':
server = 'http://localhost:8069'
# username = 'username'
username = '*'
    # password = 'password'
    password = '*'
dbname = 'odoo'
# dbname = '*'
print()
get_arguments()
from time import time
start = time()
    print('--> jcafb_2016.py...')
client = erppeek.Client(server, dbname, username, password)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'CSE16_03_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'CSE16_03_04'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'CSE16_06_01'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_05_01'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_05_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_05_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_05_04'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_06_06'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_07_04'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_07_05'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_08_01'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'ISE16_03_06'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'ISE16_06_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'ISE16_08_01'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'ISE16_09_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QAN16_04_05'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QAN16_05_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_03_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_04_07'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_04_10'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_05_05'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_06_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_06_06'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_01'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_06'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_07'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_08'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_11'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_08_12'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_09_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_10_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_10_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_10_06'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_11_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_11_03'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QMD16_03_02'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QMD16_03_08'
# print('-->', client, db_path, code)
# print('--> survey_question_user_input_line_values_sqlite()...')
# survey_question_user_input_line_values_sqlite(client, db_path, code)
#####################################
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_02_QDH16_03_02_QDH16_06_03.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_03_02'
# code_2 = 'QDH16_06_03'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_03_QDH16_06_03_QDH16_08_06_QDH16_08_01.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_06_03'
# code_2 = 'QDH16_08_06'
# code_3 = 'QDH16_08_01'
# print('-->', client, file_path, db_path, code_1, code_2, code_3)
# print('--> jcafb_2016_export_3()...')
# jcafb_2016_export_3(client, file_path, db_path, code_1, code_2, code_3)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_04_QDH16_06_03_QDH16_08_03.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_06_03'
# code_2 = 'QDH16_08_03'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_05_QDH16_06_03_QDH16_08_03.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_06_03'
# code_2 = 'QDH16_08_03'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_06_QDH16_05_05.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_05_05'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_07_QDH16_05_05_QDH16_08_08.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_05_05'
# code_2 = 'QDH16_08_08'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_08_QDH16_04_10.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_04_10'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_09_QDH16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_06_06'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_10_QDH16_05_05_QDH16_10_02.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_05_05'
# code_2 = 'QDH16_10_02'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_11_QDH16_05_05_QDH16_11_02_QDH16_11_03.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_05_05'
# code_2 = 'QDH16_11_02'
# code_3 = 'QDH16_11_03'
# print('-->', client, file_path, db_path, code_1, code_2, code_3)
# print('--> jcafb_2016_export_3()...')
# jcafb_2016_export_3(client, file_path, db_path, code_1, code_2, code_3)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_12_QDH16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_06_06'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_13_QDH16_10_02_QDH16_10_03_QDH16_05_05_QDH16_06_03_QDH16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_10_02'
# code_2 = 'QDH16_10_03'
# code_3 = 'QDH16_05_05'
# code_4 = 'QDH16_06_03'
# code_5 = 'QDH16_06_06'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4, code_5)
# print('--> jcafb_2016_export_5()...')
# jcafb_2016_export_5(client, file_path, db_path, code_1, code_2, code_3, code_4, code_5)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_14_QDH16_10_06_QDH16_05_05_QDH16_06_03_QDH16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_10_06'
# code_2 = 'QDH16_05_05'
# code_3 = 'QDH16_06_03'
# code_4 = 'QDH16_06_06'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_15_QDH16_05_05_QDH16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_05_05'
# code_2 = 'QDH16_06_06'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_16_QDH16_04_07.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QDH16_04_07'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_17_QAN16_04_05.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QAN16_04_05'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_18_QAN16_04_05_QAN16_05_02.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QAN16_04_05'
# code_2 = 'QAN16_05_02'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_19_QAN16_04_05.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QAN16_04_05'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_20_FSE16_06_06.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_06_06'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_21_FSE16_05_01_FSE16_05_02_FSE16_05_03_FSE16_05_04.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'FSE16_05_01'
# code_2 = 'FSE16_05_02'
# code_3 = 'FSE16_05_03'
# code_4 = 'FSE16_05_04'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_22_FSE16_08_01.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_08_01'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_23_FSE16_08_01.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_08_01'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_24_CSE16_06_01_ISE16_08_01.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'CSE16_06_01'
# code_2 = 'ISE16_08_01'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_25_QAN16_04_05.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QAN16_04_05'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_27_CSE16_03_03_CSE16_03_04.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'CSE16_03_03'
# code_2 = 'CSE16_03_04'
# print('-->', client, file_path, db_path, code_1, code_2)
# print('--> jcafb_2016_export_2()...')
# jcafb_2016_export_2(client, file_path, db_path, code_1, code_2)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_28_QMD16_03_02.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'QMD16_03_02'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_29_FSE16_07_04.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_07_04'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_30_FSE16_07_05.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code = 'FSE16_07_05'
# print('-->', client, file_path, db_path, code)
# print('--> jcafb_2016_export()...')
# jcafb_2016_export(client, file_path, db_path, code)
########################################
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_33_ISE16_03_06_QDH16_08_02_QDH16_08_07_QDH16_08_12.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'ISE16_03_06'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_34_ISE16_09_03_QDH16_08_02_QDH16_08_07_QDH16_08_12.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'ISE16_09_03'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_35_QMD16_03_08_QDH16_08_02_QDH16_08_07_QDH16_08_12.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QMD16_03_08'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_36_ISE16_06_02_QDH16_08_02_QDH16_08_07_QDH16_08_12.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'ISE16_06_02'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_37_QDH16_04_07_QDH16_08_02_QDH16_08_07_QDH16_08_12.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_04_07'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4)
# print('--> jcafb_2016_export_4()...')
# jcafb_2016_export_4(client, file_path, db_path, code_1, code_2, code_3, code_4)
# file_path = '/opt/openerp/jcafb/data/jcafb_2016_38_QDH16_04_10_QDH16_08_02_QDH16_08_07_QDH16_08_12_QDH16_09_03.csv'
# db_path = '/opt/openerp/jcafb/data/jcafb_2016.sqlite'
# code_1 = 'QDH16_04_10'
# code_2 = 'QDH16_08_02'
# code_3 = 'QDH16_08_07'
# code_4 = 'QDH16_08_12'
# code_5 = 'QDH16_09_03'
# print('-->', client, file_path, db_path, code_1, code_2, code_3, code_4, code_5)
# print('--> jcafb_2016_export_5()...')
# jcafb_2016_export_5(client, file_path, db_path, code_1, code_2, code_3, code_4, code_5)
print()
    print('--> jcafb_2016.py')
print('--> Execution time:', secondsToStr(time() - start))
print()
| agpl-3.0 | 8,307,978,204,282,883,000 | 38.669618 | 121 | 0.483141 | false | 3.588636 | false | false | false | 0.001702 |
pvagner/orca | src/orca/scripts/apps/notification-daemon/script.py | 1 | 1912 | # Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" Custom script for The notification daemon."""
__id__ = ""
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.messages as messages
import orca.scripts.default as default
import orca.speech as speech
########################################################################
# #
# The notification-daemon script class. #
# #
########################################################################
class Script(default.Script):
def onWindowCreated(self, event):
"""Callback for window:create accessibility events."""
hasRole = lambda x: x and x.getRole() == pyatspi.ROLE_LABEL
allLabels = pyatspi.findAllDescendants(event.source, hasRole)
texts = [self.utilities.displayedText(acc) for acc in allLabels]
text = '%s %s' % (messages.NOTIFICATION, ' '.join(texts))
speech.speak(text, None, True)
| lgpl-2.1 | -5,968,642,574,940,419,000 | 37.24 | 72 | 0.591004 | false | 4.446512 | false | false | false | 0.004707 |
madan96/sympy | sympy/solvers/pde.py | 13 | 35505 | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
        - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import (reduce, is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
        ``f(x,y)`` is a function of two variables whose derivatives in those
            variables make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
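
        For example, a specific solving method can also be requested
        explicitly instead of relying on "default"::

            pdsolve(eq, f(x, y), hint="1st_linear_constant_coeff_Integral")
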
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
to get an explicit solution for f(x, y) as in the case of ODE's
        - Do help(pde.pde_hintname) to get more information on a
specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
given_hint = hint # hint given by the user.
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func,
hint=hint, simplify=True, type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'],
hints['func'], hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
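
    For example, one of the returned classifications can be fed back to
    pdsolve() as its ``hint`` argument::

        pdsolve(eq, hint=classify_pde(eq)[0])
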
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
prep = kwargs.pop('prep', True)
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols, diff
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), (_xi_1,), (4*x - 3*y,)))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy.solvers.pde import (
... pde_1st_linear_constant_coeff_homogeneous)
>>> from sympy import pdsolve
>>> from sympy import Function, diff, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*u + b*ux + c*uy - G(x,y)
>>> pprint(genform)
d d
a*f(x, y) + b*--(f(x, y)) + c*--(f(x, y)) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// b*x + c*y \
|| / |
|| | |
|| | a*xi |
|| | ------- |
|| | 2 2 |
|| | /b*xi + c*eta -b*eta + c*xi\ b + c |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ b + c b + c / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ b + c /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-a*xi ||
-------||
2 2||
b + c ||
e ||
||
/|eta=-b*y + c*x, xi=b*x + c*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial differential equation is
    .. math:: a(x, y) f(x, y) + b(x, y) \frac{df(x, y)}{dx}
                + c(x, y) \frac{df(x, y)}{dy} - G(x, y) = 0
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary functions
in `x` and `y`. This PDE is converted into an ODE by making the following transformation.
1] `\xi` as `x`
2] `\eta` as the constant in the solution to the differential equation
    `\frac{dy}{dx} = \frac{c}{b}`
Making the following substitutions reduces it to the linear ODE
    .. math:: b(\xi, \eta)\frac{du}{d\xi} + a(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using dsolve.
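
    For instance, for the example at the end of this docstring,
    `x \frac{df}{dx} - y \frac{df}{dy} + y^2 f = y^2`, the characteristic
    equation is `\frac{dy}{dx} = -y/x`, which integrates to `x y = \eta`;
    this is why the solution involves an arbitrary function of `x*y`.
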
The general form of this PDE is::
>>> from sympy.solvers.pde import pdsolve
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
fname = func.__name__
for key, sym in enumerate(syms):
tempfun = Function(fname + str(key))
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), x, x)/X(x), Derivative(T(t), t, t)/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
        raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation of two independent variables x, y and a dependent
    variable w; we look for the sum of two functions depending on different
arguments:
    `w(x, y, z) = X(x) + u(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation of two independent variables x, y and a dependent
    variable w; we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), x, x)/X(x), Derivative(Y(y), y, y)/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
# Check, whether we have already term with independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
| bsd-3-clause | -4,996,668,224,187,043,000 | 34.293241 | 133 | 0.543501 | false | 3.631482 | false | false | false | 0.002394 |
rfdougherty/dipy | doc/examples/segment_extending_clustering_framework.py | 4 | 8495 | """
==========================================================
Enhancing QuickBundles with different metrics and features
==========================================================
QuickBundles is a flexible algorithm that requires only a distance metric and
an adjacency threshold to perform clustering. There is a wide variety of metrics
that could be uses to cluster streamlines.
The purpose of this tutorial is to show how to easily create new `Feature` and
new `Metric` classes that can be used by QuickBundles.
Clustering framework
====================
Dipy provides a simple, flexible and fast framework to do clustering of
sequential data (e.g. streamlines).
A *sequential datum* in Dipy is represented as a numpy array of size
:math:`(N \times D)` where each row of the array represents a D dimensional
point of the sequence. A set of these sequences is represented as a list of
numpy arrays of size :math:`(N_i \times D)` for :math:`i=1:M` where $M$ is the
number of sequences in the set.
This clustering framework is modular and divided in three parts:
1) feature extraction
2) distance computation
3) clustering algorithm
The **feature extraction** part includes any preprocessing needed to be done on
the data before computing distances between them (e.g. resampling the number of
points of a streamline). To define a new way of extracting features, one has to
subclass `Feature` (see below).
The **distance computation** part includes any metric capable of evaluating a
distance between two sets of features previously extracted from the data. To
define a new way of computing distances, one has to subclass `Metric` (see below).
The **clustering algorithm** part represents the clustering algorithm itself
(e.g. QuickBundles, K-means, Hierarchical Clustering). More precisely, it
includes any algorithms taking as input a list of sequential data and
outputting a `ClusterMap` object.
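
As a rough sketch of how these three parts fit together (using only components
that already ship with Dipy, and assuming `streamlines` is a list of numpy
arrays as described above)::

    from dipy.segment.clustering import QuickBundles
    from dipy.segment.metric import SumPointwiseEuclideanMetric

    metric = SumPointwiseEuclideanMetric()            # distance computation
    qb = QuickBundles(threshold=10., metric=metric)   # clustering algorithm
    clusters = qb.cluster(streamlines)                # returns a ClusterMap
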
Extending `Feature`
===================
This section will guide you through the creation of a new feature extraction
method that can be used in the context of this clustering framework.
Assuming a set of streamlines, the type of features we want to extract is the
arc length (i.e. the sum of the length of each segment for a given streamline).
Let's start by importing the necessary modules.
"""
from dipy.segment.metric import Feature
from dipy.tracking.streamline import length
"""
We now define the class 'ArcLengthFeature' that will perform the desired
feature extraction. When subclassing `Feature`, two methods have to be
redefined: `infer_shape` and `extract`.
Also, an important property about feature extraction is whether or not
its process is invariant to the order of the points within a streamline.
This is needed as there is no way one can tell which extremity of a
streamline is the beginning and which one is the end.
"""
class ArcLengthFeature(Feature):
""" Computes the arc length of a streamline. """
def __init__(self):
# The arc length stays the same even if the streamline is reversed.
super(ArcLengthFeature, self).__init__(is_order_invariant=True)
def infer_shape(self, streamline):
""" Infers the shape of features extracted from `streamline`. """
# Arc length is a scalar
return 1
def extract(self, streamline):
""" Extracts features from `streamline`. """
# return np.sum(np.sqrt(np.sum((streamline[1:] - streamline[:-1]) ** 2)))
# or use a Dipy's function that computes the arc length of a streamline.
return length(streamline)
"""
The new feature extraction `ArcLengthFeature` is ready to be used. Let's use
it to cluster a set of streamlines by their arc length. For educational
purposes we will try to cluster a small streamline bundle known from
neuroanatomy as the fornix.
We start by loading the fornix streamlines.
"""
import numpy as np
from nibabel import trackvis as tv
from dipy.data import get_data
from dipy.viz import fvtk
fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
"""
Perform QuickBundles clustering using the metric `SumPointwiseEuclideanMetric`
and our `ArcLengthFeature`.
"""
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import SumPointwiseEuclideanMetric
metric = SumPointwiseEuclideanMetric(feature=ArcLengthFeature())
qb = QuickBundles(threshold=2., metric=metric)
clusters = qb.cluster(streamlines)
"""
We will now visualize the clustering result.
"""
# Color each streamline according to the cluster they belong to.
colormap = fvtk.create_colormap(np.ravel(clusters.centroids))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='fornix_clusters_arclength.png', size=(600, 600))
"""
.. figure:: fornix_clusters_arclength.png
:align: center
**Showing the different clusters obtained by using the arc length**.
Extending `Metric`
==================
This section will guide you through the creation of a new metric that can be
used in the context of this clustering framework.
Assuming a set of streamlines, we want a metric that computes the cosine
distance given the vector between endpoints of each streamline (i.e. one
minus the cosine of the angle between two vectors). For more information
about this distance check `<http://en.wikipedia.org/wiki/Cosine_similarity>`_.
Let's start by importing the necessary modules.
"""
from dipy.segment.metric import Metric
from dipy.segment.metric import VectorBetweenEndpointsFeature
"""
We now define the class `CosineMetric` that will perform the desired
distance computation. When subclassing `Metric`, two methods have to be
redefined: `are_compatible` and `dist`. Moreover, when implementing the
`dist` method, one needs to make sure the distance returned is symmetric
(i.e. `dist(A, B) == dist(B, A)`).
"""
class CosineMetric(Metric):
""" Computes the cosine distance between two streamlines. """
def __init__(self):
# For simplicity, features will be the vector between endpoints of a streamline.
super(CosineMetric, self).__init__(feature=VectorBetweenEndpointsFeature())
def are_compatible(self, shape1, shape2):
""" Checks if two features are vectors of same dimension.
Basically this method exists so we don't have to do this check
inside the `dist` method (speedup).
"""
return shape1 == shape2 and shape1[0] == 1
def dist(self, v1, v2):
""" Computes a the cosine distance between two vectors. """
norm = lambda x: np.sqrt(np.sum(x**2))
cos_theta = np.dot(v1, v2.T) / (norm(v1)*norm(v2))
# Make sure it's in [-1, 1], i.e. within domain of arccosine
cos_theta = np.minimum(cos_theta, 1.)
cos_theta = np.maximum(cos_theta, -1.)
return np.arccos(cos_theta) / np.pi # Normalized cosine distance
"""
The new distance `CosineMetric` is ready to be used. Let's use
it to cluster a set of streamlines according to the cosine distance of the
vector between their endpoints. For educational purposes we will try to
cluster a small streamline bundle known from neuroanatomy as the fornix.
We start by loading the fornix streamlines.
"""
import numpy as np
from nibabel import trackvis as tv
from dipy.data import get_data
from dipy.viz import fvtk
fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
"""
Perform QuickBundles clustering using our metric `CosineMetric`.
"""
from dipy.segment.clustering import QuickBundles
metric = CosineMetric()
qb = QuickBundles(threshold=0.1, metric=metric)
clusters = qb.cluster(streamlines)
"""
We will now visualize the clustering result.
"""
# Color each streamline according to the cluster they belong to.
colormap = fvtk.create_colormap(np.arange(len(clusters)))
colormap_full = np.ones((len(streamlines), 3))
for cluster, color in zip(clusters, colormap):
colormap_full[cluster.indices] = color
ren = fvtk.ren()
ren.SetBackground(1, 1, 1)
fvtk.add(ren, fvtk.streamtube(streamlines, colormap_full))
fvtk.record(ren, n_frames=1, out_path='fornix_clusters_cosine.png', size=(600, 600))
"""
.. figure:: fornix_clusters_cosine.png
:align: center
**Showing the different clusters obtained by using the cosine metric**.
"""
| bsd-3-clause | -1,496,577,652,725,179,100 | 34.543933 | 88 | 0.728311 | false | 3.800895 | false | false | false | 0.002707 |
jfryman/python-packer | setup.py | 1 | 2049 | from setuptools import setup
# from setuptools import find_packages
from setuptools.command.test import test as testcommand
import sys
import re
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
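    # Expect a line of the form: __version__ = '0.1.0' (single or double quotes)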
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
print('VERSION: ', version_match.group(1))
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
class Tox(testcommand):
def finalize_options(self):
testcommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='python-packer',
version=find_version('packer', '__init__.py'),
url='https://github.com/nir0s/python-packer',
author='nir0s',
author_email='nir36g@gmail.com',
license='LICENSE',
platforms='All',
description='A Python interface for Hashicorp\'s Packer',
long_description=read('README.rst'),
packages=['packer'],
install_requires=[
"sh==1.11",
],
tests_require=['nose', 'tox'],
cmdclass={'test': Tox},
classifiers=[
'Programming Language :: Python',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| apache-2.0 | -4,369,394,990,713,598,000 | 29.58209 | 71 | 0.622743 | false | 3.940385 | true | false | false | 0.000488 |
wd5/jangr | django/contrib/admin/actions.py | 160 | 3285 | """
Built-in, globally-available admin actions.
"""
from django import template
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user does not have permission to delete one
    of the related child objects (foreignkeys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_unicode(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"root_path": modeladmin.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return render_to_response(modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, context_instance=template.RequestContext(request))
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| bsd-3-clause | -7,116,303,832,504,067,000 | 37.647059 | 96 | 0.675495 | false | 4.179389 | false | false | false | 0.001826 |
lampwins/netbox | netbox/dcim/migrations/0044_virtualization.py | 2 | 1144 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 14:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('virtualization', '0001_virtualization'),
('dcim', '0043_device_component_name_lengths'),
]
operations = [
migrations.AddField(
model_name='device',
name='cluster',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='devices', to='virtualization.Cluster'),
),
migrations.AddField(
model_name='interface',
name='virtual_machine',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interfaces', to='virtualization.VirtualMachine'),
),
migrations.AlterField(
model_name='interface',
name='device',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='interfaces', to='dcim.Device'),
),
]
| apache-2.0 | 6,566,227,937,492,917,000 | 37.133333 | 167 | 0.632867 | false | 4.028169 | false | false | false | 0.002622 |
cselis86/edx-platform | common/djangoapps/student/roles.py | 30 | 11451 | """
Classes used to model the roles used in the courseware. Each role is responsible for checking membership,
adding users, removing users, and listing members
"""
from abc import ABCMeta, abstractmethod
from django.contrib.auth.models import User
import logging
from student.models import CourseAccessRole
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)
# A list of registered access roles.
REGISTERED_ACCESS_ROLES = {}
def register_access_role(cls):
"""
Decorator that allows access roles to be registered within the roles module and referenced by their
string values.
Assumes that the decorated class has a "ROLE" attribute, defining its type.
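
    A minimal sketch (the class and role name below are hypothetical, shown
    only to illustrate the expected shape)::

        @register_access_role
        class ExampleRole(CourseRole):
            # hypothetical role, for illustration only
            ROLE = 'example_role'

            def __init__(self, *args, **kwargs):
                super(ExampleRole, self).__init__(self.ROLE, *args, **kwargs)
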
"""
try:
role_name = getattr(cls, 'ROLE')
REGISTERED_ACCESS_ROLES[role_name] = cls
except AttributeError:
log.exception(u"Unable to register Access Role with attribute 'ROLE'.")
return cls
class RoleCache(object):
"""
A cache of the CourseAccessRoles held by a particular user
"""
def __init__(self, user):
self._roles = set(
CourseAccessRole.objects.filter(user=user).all()
)
def has_role(self, role, course_id, org):
"""
Return whether this RoleCache contains a role with the specified role, course_id, and org
"""
return any(
access_role.role == role and
access_role.course_id == course_id and
access_role.org == org
for access_role in self._roles
)
class AccessRole(object):
"""
Object representing a role with particular access to a resource
"""
__metaclass__ = ABCMeta
@abstractmethod
def has_user(self, user): # pylint: disable=unused-argument
"""
Return whether the supplied django user has access to this role.
"""
return False
@abstractmethod
def add_users(self, *users):
"""
Add the role to the supplied django users.
"""
pass
@abstractmethod
def remove_users(self, *users):
"""
Remove the role from the supplied django users.
"""
pass
@abstractmethod
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
return User.objects.none()
class GlobalStaff(AccessRole):
"""
The global staff role
"""
def has_user(self, user):
return user.is_staff
def add_users(self, *users):
for user in users:
if (user.is_authenticated() and user.is_active):
user.is_staff = True
user.save()
def remove_users(self, *users):
for user in users:
# don't check is_authenticated nor is_active on purpose
user.is_staff = False
user.save()
def users_with_role(self):
raise Exception("This operation is un-indexed, and shouldn't be used")
class RoleBase(AccessRole):
"""
Roles by type (e.g., instructor, beta_user) and optionally org, course_key
"""
def __init__(self, role_name, org='', course_key=None):
"""
Create role from required role_name w/ optional org and course_key. You may just provide a role
name if it's a global role (not constrained to an org or course). Provide org if constrained to
an org. Provide org and course if constrained to a course. Although, you should use the subclasses
for all of these.
"""
super(RoleBase, self).__init__()
self.org = org
self.course_key = course_key
self._role_name = role_name
def has_user(self, user):
"""
Return whether the supplied django user has access to this role.
"""
if not (user.is_authenticated() and user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(user, '_roles'):
# Cache a list of tuples identifying the particular roles that a user has
# Stored as tuples, rather than django models, to make it cheaper to construct objects for comparison
user._roles = RoleCache(user)
return user._roles.has_role(self._role_name, self.course_key, self.org)
def add_users(self, *users):
"""
Add the supplied django users to this role.
"""
# silently ignores anonymous and inactive users so that any that are
# legit get updated.
from student.models import CourseAccessRole
for user in users:
            if user.is_authenticated() and user.is_active and not self.has_user(user):
entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org)
entry.save()
if hasattr(user, '_roles'):
del user._roles
def remove_users(self, *users):
"""
Remove the supplied django users from this role.
"""
entries = CourseAccessRole.objects.filter(
user__in=users, role=self._role_name, org=self.org, course_id=self.course_key
)
entries.delete()
for user in users:
if hasattr(user, '_roles'):
del user._roles
def users_with_role(self):
"""
Return a django QuerySet for all of the users with this role
"""
# Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query
if self.course_key is None:
self.course_key = CourseKeyField.Empty
entries = User.objects.filter(
courseaccessrole__role=self._role_name,
courseaccessrole__org=self.org,
courseaccessrole__course_id=self.course_key
)
return entries
class CourseRole(RoleBase):
"""
A named role in a particular course
"""
def __init__(self, role, course_key):
"""
Args:
course_key (CourseKey)
"""
super(CourseRole, self).__init__(role, course_key.org, course_key)
@classmethod
def course_group_already_exists(self, course_key):
return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists()
class OrgRole(RoleBase):
"""
A named role in a particular org independent of course
"""
def __init__(self, role, org):
super(OrgRole, self).__init__(role, org)
@register_access_role
class CourseStaffRole(CourseRole):
"""A Staff member of a course"""
ROLE = 'staff'
def __init__(self, *args, **kwargs):
super(CourseStaffRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseInstructorRole(CourseRole):
"""A course Instructor"""
ROLE = 'instructor'
def __init__(self, *args, **kwargs):
super(CourseInstructorRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseFinanceAdminRole(CourseRole):
"""A course staff member with privileges to review financial data."""
ROLE = 'finance_admin'
def __init__(self, *args, **kwargs):
super(CourseFinanceAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseSalesAdminRole(CourseRole):
"""A course staff member with privileges to perform sales operations. """
ROLE = 'sales_admin'
def __init__(self, *args, **kwargs):
super(CourseSalesAdminRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseBetaTesterRole(CourseRole):
"""A course Beta Tester"""
ROLE = 'beta_testers'
def __init__(self, *args, **kwargs):
super(CourseBetaTesterRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class LibraryUserRole(CourseRole):
"""
A user who can view a library and import content from it, but not edit it.
Used in Studio only.
"""
ROLE = 'library_user'
def __init__(self, *args, **kwargs):
super(LibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
class CourseCcxCoachRole(CourseRole):
"""A CCX Coach"""
ROLE = 'ccx_coach'
def __init__(self, *args, **kwargs):
super(CourseCcxCoachRole, self).__init__(self.ROLE, *args, **kwargs)
class OrgStaffRole(OrgRole):
"""An organization staff member"""
def __init__(self, *args, **kwargs):
super(OrgStaffRole, self).__init__('staff', *args, **kwargs)
class OrgInstructorRole(OrgRole):
"""An organization instructor"""
def __init__(self, *args, **kwargs):
super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs)
class OrgLibraryUserRole(OrgRole):
"""
A user who can view any libraries in an org and import content from them, but not edit them.
Used in Studio only.
"""
ROLE = LibraryUserRole.ROLE
def __init__(self, *args, **kwargs):
super(OrgLibraryUserRole, self).__init__(self.ROLE, *args, **kwargs)
@register_access_role
class CourseCreatorRole(RoleBase):
"""
This is the group of people who have permission to create new courses (we may want to eventually
make this an org based role).
"""
ROLE = "course_creator_group"
def __init__(self, *args, **kwargs):
super(CourseCreatorRole, self).__init__(self.ROLE, *args, **kwargs)
class UserBasedRole(object):
"""
Backward mapping: given a user, manipulate the courses and roles
"""
def __init__(self, user, role):
"""
Create a UserBasedRole accessor: for a given user and role (e.g., "instructor")
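
        For example (illustrative only), granting a user staff access to a
        course could look like::

            UserBasedRole(user, CourseStaffRole.ROLE).add_course(course_key)
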
"""
self.user = user
self.role = role
def has_course(self, course_key):
"""
Return whether the role's user has the configured role access to the passed course
"""
if not (self.user.is_authenticated() and self.user.is_active):
return False
# pylint: disable=protected-access
if not hasattr(self.user, '_roles'):
self.user._roles = RoleCache(self.user)
return self.user._roles.has_role(self.role, course_key, course_key.org)
def add_course(self, *course_keys):
"""
Grant this object's user the object's role for the supplied courses
"""
        if self.user.is_authenticated() and self.user.is_active:
for course_key in course_keys:
entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org)
entry.save()
if hasattr(self.user, '_roles'):
del self.user._roles
else:
raise ValueError("user is not active. Cannot grant access to courses")
def remove_courses(self, *course_keys):
"""
Remove the supplied courses from this user's configured role.
"""
entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys)
entries.delete()
if hasattr(self.user, '_roles'):
del self.user._roles
def courses_with_role(self):
"""
Return a django QuerySet for all of the courses with this user x role. You can access
any of these properties on each result record:
* user (will be self.user--thus uninteresting)
* org
* course_id
* role (will be self.role--thus uninteresting)
"""
return CourseAccessRole.objects.filter(role=self.role, user=self.user)
| agpl-3.0 | -3,798,410,386,038,660,000 | 30.116848 | 114 | 0.617413 | false | 4.072191 | false | false | false | 0.001921 |
RNAer/micronota | micronota/database/uniprot.py | 1 | 5889 | # ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
from sqlite3 import connect, IntegrityError
from xml.etree import ElementTree as ET
logger = getLogger(__name__)
def add_metadata(xml_fh, db_fp):
'''Add to the database the metadata of records from the UniProt xml file.
Parameters
----------
xml_fh : file object
The file of either UniProtKB Swiss-Prot or TrEMBLE.
db_fp : str
The output database file. See ``Notes``.
Returns
-------
int
The number of records processed.
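
    Examples
    --------
    A minimal sketch (the file paths here are hypothetical)::

        with open('uniprot_sprot.xml') as xml_fh:
            n = add_metadata(xml_fh, 'uniprot.db')
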
'''
logger.info('Adding UniProt metadata to db from %r' % xml_fh)
# this is the namespace for uniprot xml files.
ns_map = {'xmlns': 'http://uniprot.org/uniprot',
'xsi': 'http://WWW.w3.org/2001/XMLSchema-instance'}
entry_tag_path = ['{{{ns}}}{path}'.format(ns=ns_map['xmlns'], path=tag) for tag in 'entry'.split('/')]
paths = {'EC_number': './/xmlns:ecNumber', # E.C. number
'GO': './xmlns:dbReference[@type="GO"]', # GO
'KEGG': './xmlns:dbReference[@type="KEGG"]', # KEGG,
'Pfam': './xmlns:dbReference[@type="Pfam"]',
'eggNOG': './xmlns:dbReference[@type="eggNOG"]',
'TIGRFAM': './xmlns:dbReference[@type="TIGRFAMs"]'}
inserts = {}
with connect(db_fp) as conn:
c = conn.cursor()
# The INTEGER PRIMARY KEY column created is simply an
# alias for ROWID or _ROWID_ or OID.
            # You can't ignore this column because ROWID can't serve
# as foreign key.
c.execute('CREATE TABLE IF NOT EXISTS UniProt ('
' id INTEGER PRIMARY KEY,'
' accn TEXT UNIQUE,'
' name TEXT NOT NULL);')
insert = 'INSERT OR IGNORE INTO UniProt (id, accn, name) VALUES (?,?,?);'
for other_table in paths:
ct, it, clt, ilt = _cross_ref_table(other_table)
c.execute(ct)
c.execute(clt)
inserts[other_table] = [it, ilt]
for n, entry in enumerate(_parse_xml(xml_fh, entry_tag_path), 1):
try:
# get the primary accession number
accn = entry.find('./xmlns:accession', ns_map).text
# get the protein product name
name = entry.find('.//xmlns:fullName', ns_map).text
except AttributeError as e:
# customize with more informative error msg
raise AttributeError('failed to get accession and name for record %d' % n) from e
try:
# `None` to increment the id column automatically
c.execute(insert, (None, accn, name))
except IntegrityError as e:
raise IntegrityError('failed to insert {}'.format((accn, name))) from e
# get the ID for UniProt entry just inserted into the table
uniprot_id = c.execute(
'SELECT id FROM UniProt WHERE accn = ?;', (accn, )).fetchone()[0]
for other_table, path in paths.items():
it, ilt = inserts[other_table]
select = 'SELECT id FROM %s WHERE accn = ?;' % other_table
for elem in entry.findall(path, ns_map):
if other_table == 'EC_number':
other_accn = elem.text
else:
other_accn = elem.attrib['id']
c.execute(it, (None, other_accn, None))
other_table_id = c.execute(select, (other_accn, )).fetchone()[0]
c.execute(ilt, (uniprot_id, other_table_id))
conn.commit()
return n
def _cross_ref_table(name):
'''sqlite3 statement to create table for cross ref database.
name: other database name
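
    For example, ``_cross_ref_table('GO')`` builds the CREATE/INSERT statements
    for a ``GO`` table plus a ``uniprot_GO`` junction table linking it to UniProt.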
'''
create = ('CREATE TABLE IF NOT EXISTS {} ('
' id INTEGER PRIMARY KEY,'
' accn TEXT UNIQUE,'
' name TEXT);').format(name)
insert = ('INSERT OR IGNORE INTO {} (id, accn, name)'
' VALUES (?,?,?);').format(name)
# junction table from uniprot to other reference databases
create_link_table = ('CREATE TABLE IF NOT EXISTS uniprot_{0} ('
' uniprot_id INTEGER,'
' {0}_id INTEGER,'
' PRIMARY KEY (uniprot_id, {0}_id),'
' FOREIGN KEY (uniprot_id) REFERENCES UniProt(id),'
' FOREIGN KEY ({0}_id) REFERENCES {0}(id));').format(
name)
insert_link_table = ('INSERT OR IGNORE INTO uniprot_{0} (uniprot_id, {0}_id)'
' VALUES (?,?);').format(name)
return create, insert, create_link_table, insert_link_table
def _parse_xml(xml_fh, tag):
'''Return the elem with specified tag.
Parameters
----------
xml_fh : xml file object or file path
'''
# it is very important to set the events to 'end'; otherwise,
# elem would be an incomplete record.
context = ET.iterparse(xml_fh, events=('start', 'end'))
# Skip the root element
next(context)
tag_stack = []
elem_stack = []
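    # Keep a stack of the currently open tags so that, at each 'end' event,
    # the full path can be compared against ``tag`` (a list of tag names).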
for event, elem in context:
if event == 'start':
tag_stack.append(elem.tag)
elem_stack.append(elem)
elif event == 'end':
if tag_stack == tag:
yield elem
try:
tag_stack.pop()
elem_stack.pop()
# for the rest 'end' events, the stacks are empty
except IndexError:
pass
| bsd-3-clause | 7,270,220,736,356,510,000 | 38.52349 | 106 | 0.525726 | false | 4.044643 | false | false | false | 0.001189 |
kevinlondon/sentry | src/sentry/db/postgres/base.py | 25 | 2640 | from __future__ import absolute_import
import psycopg2 as Database
# Some of these imports are unused, but they are inherited from other engines
# and should be available as part of the backend ``base.py`` namespace.
from django.db.backends.postgresql_psycopg2.base import ( # NOQA
DatabaseWrapper, DatabaseFeatures, DatabaseOperations, DatabaseClient,
DatabaseCreation, DatabaseIntrospection
)
from .decorators import (
capture_transaction_exceptions, auto_reconnect_cursor,
auto_reconnect_connection, less_shitty_error_messages
)
__all__ = ('DatabaseWrapper', 'DatabaseFeatures', 'DatabaseOperations',
'DatabaseOperations', 'DatabaseClient', 'DatabaseCreation',
'DatabaseIntrospection')
class CursorWrapper(object):
"""
A wrapper around the postgresql_psycopg2 backend which handles various events
from cursors, such as auto reconnects and lazy time zone evaluation.
"""
def __init__(self, db, cursor):
self.db = db
self.cursor = cursor
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
@capture_transaction_exceptions
@auto_reconnect_cursor
@less_shitty_error_messages
def execute(self, sql, params=None):
if params is not None:
return self.cursor.execute(sql, params)
return self.cursor.execute(sql)
@capture_transaction_exceptions
@auto_reconnect_cursor
@less_shitty_error_messages
def executemany(self, sql, paramlist=()):
return self.cursor.executemany(sql, paramlist)
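# Illustrative sketch (not part of the original module): application code never
# builds CursorWrapper directly; DatabaseWrapper._cursor() below wraps the
# psycopg2 cursor, so a plain
#
#     cursor = connection.cursor()
#     cursor.execute('SELECT 1')
#
# call goes through the reconnect/error-message decorators, while attribute
# access such as cursor.fetchall() falls through __getattr__ to the real
# cursor. The variable names here are assumptions made only for this note.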
class DatabaseWrapper(DatabaseWrapper):
@auto_reconnect_connection
def _set_isolation_level(self, level):
return super(DatabaseWrapper, self)._set_isolation_level(level)
@auto_reconnect_connection
def _cursor(self, *args, **kwargs):
cursor = super(DatabaseWrapper, self)._cursor()
return CursorWrapper(self, cursor)
def close(self, reconnect=False):
"""
        This ensures we don't error if the connection has already been closed.
"""
if self.connection is not None:
if not self.connection.closed:
try:
self.connection.close()
except Database.InterfaceError:
# connection was already closed by something
# like pgbouncer idle timeout.
pass
self.connection = None
class DatabaseFeatures(DatabaseFeatures):
can_return_id_from_insert = True
def __init__(self, connection):
self.connection = connection
| bsd-3-clause | 3,322,223,451,610,241,500 | 30.807229 | 81 | 0.664773 | false | 4.467005 | false | false | false | 0.000379 |
sirgogo/TheAwesomeSimulator | AwSimLib-v0.5.0/fileIO/DFISE.py | 1 | 31496 | ####################################################################
####################################################################
#### The Awesome Simulator, DF-ISE FileIO Module ####
#### ####
#### Author: Abhejit Rajagopal, abhejit@ucla.edu ####
#### ####
#### (DFISE.py) is part of the AwSimLib software package ####
#### module DFISE.py: /fileIO/DFISE.py ####
#### ####
#### This software is free to use, subjct to terms and ####
#### conditions in the AwSimLib license (LGPLv3). ####
#### AwSimLib license: ../_LICENSE.TXT ####
####################################################################
####################################################################
####################################################################
#### Version History ####
####################################################################
#### 0.1 09/01/2013 - classes, combine by line ####
#### 0.2 12/12/2013 - combine v2.py ####
#### 0.3 04/10/2014 - read.py, unified ####
#### 0.4 05/20/2014 - lumeric, csv, ####
#### 0.5 09/02/2014 - AwSimLib initial release ####
#### ####
#### Part of the AwSimLib software package. ####
#### Copyright (C) 2014 Abhejit Rajagopal ####
####################################################################
####################################################################
####################################################################
#### Helper Functions: --> Debug ####
####################################################################
## system libs
import sys
orig_stdout = sys.stdout
def printHEADER(string):
pass
#string = string
#print (string)
def printINFO(string):
pass
#string = string
#print (string)
def printDATA(string):
pass
#string = string
#print (string)
def printADD(string):
pass
#string = string
#print (string)
####################################################################
####################################################################
####################################################################
#### Class Definitions:--> Make a DFISE_File object ####
####################################################################
class DFISE_DATfile:
## DF-ISE Data ('.dat') File handler
def __init__(self, *filenames):
self.filename = 'FilenameNotSpecified.noext'
self.INFO = Info()
self.DATA = Data()
# optional filename options to pre-load data
if len(filenames)==0:
print ('Empty DFISE_DATFile: no filename specified.')
elif len(filenames)==1:
self.filename = str(filenames[0])
#
## end DF-ISE object class
class Info:
## class to represent header in a '.dat' file
def __init__(self):
self.version = []
self.type = []
self.dimension = []
self.nb_vertices= []
self.nb_edges = []
self.nb_faces = []
self.nb_elements= []
self.nb_regions = []
self.datasets = []
self.functions = []
def setField(self, field, value):
#applies a value to a field
if (field == "version"):
self.version = value
elif (field == "type"):
self.type = value
elif (field == "dimension"):
self.dimension = value
elif (field == "nb_vertices"):
self.nb_vertices = value
elif (field == "nb_edges"):
self.nb_edges = value
elif (field == "nb_faces"):
self.nb_faces = value
elif (field == "nb_elements"):
self.nb_elements = value
elif (field == "nb_regions"):
self.nb_regions = value
elif (field == "datasets"):
self.datasets = value
elif (field == "functions"):
self.functions = value
## end Info class
class Data:
## class to represent data in a '.dat' file
def __init__(self):
self.numDatasets= [] # of datasets
self.datasets = [] #list of datasets
def setNum(self, number):
#sets numDatasets
#makes appropriate number of Dataset variables to store in datasets
self.numDatasets = number
setX = []
for i in range(number) :
setX.append(Dataset())
self.datasets = setX
def setField (self, counter, field, value):
#sets value of field in datasets[counter]
#print "field== " + str(field.strip()) + " value== " + str(value) + " length== " + str(len(self.datasets))
self.datasets[counter].setField(field.strip(),value)
def retData(self,counter):
return self.datasets[counter]
## end Data class
class Dataset:
## class to represent each dataset within data in a '.dat' file
def __init__(self):
self.dataname = []
self.function = []
self.type = []
self.dimension = []
self.location = []
self.validity = []
self.numValues = []
self.Values = []
def setField(self, field, value):
#applies a value to a field
if (field == "dataname"):
self.dataname = value
elif (field == "function"):
self.function = value
elif (field == "type"):
self.type = value
elif (field == "dimension"):
self.dimension = value
elif (field == "location"):
self.location = value
elif (field == "validity"):
self.validity = value
elif (field == "numValues"):
#print "NumVALUES! == " + str(value)
self.numValues = value
elif (field == "Values"):
#self.Values.append(value)
self.Values = value
## end Dataset class
####################################################################
####################################################################
####################################################################
#### Functions: --> File Ops ####
####################################################################
def readDAT(filenames):
#### File Parser for DF-ISE data ('.dat') files
####
#### In: List of filenames, e.g.: filenames=['testgrid1.dat'(, ...)]
####
#### Out: List of DF-ISE objects, e.g.: to_return=[dfise_object1,(, ...)]
####
####
## libraries provided by system
import numpy as np
import sys
import glob
##
to_return = [] #list of DFISE_DATfile
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE readDAT')
if type(filenames)!=list: #correct if only 1 item provided
filenames = [filenames]
#
i = 0
for filename in filenames:
grabNull = 0
grabHead = 1
grabInfo = 0
grabData = 0
grabDataVALS = 0
print ('~~~~~~~~~~~~~~~~~~~~')
f = open (filename,"r")
print ("processing: " + str(filename))
DFISE_obj = DFISE_DATfile(str(filename))
j = -1 #dataset counter
for line in f:
if grabHead == 1: ## check file type/header on first line ##
split_space = line.split(' ')
#good
if split_space[0] == "DF-ISE":
print ("-->Header OK: DF-ISE text")
grabHead = 0
grabNull = 1
continue
#bad
else:
print ("~~~~")
print (" was expecting a DF-ISE file, check file header")
sys.exit(0)
elif grabNull == 1:
split_space = line.split(' ')
if split_space[0] == 'Info':
print ("-->Info section identified")
grabInfo = 1
grabNull = 0
continue
elif split_space[0] == 'Data':
print ("-->Data section identified")
grabData = 1
grabNull = 0
continue
elif split_space[0].strip() == '':
printHEADER( "..blankline.." )
continue
else:
print ("~~~~")
print ("ERROR SHOULD NOT BE HERE -- grabNull == 1")
sys.exit(0)
elif grabInfo == 1:
split_equ = line.split('=')
field = split_equ[0].strip()
if len(split_equ) > 1:
quantity = split_equ[1]
elif split_equ[0].strip() == '}': #end criteria
print ("--end of Info section.")
grabInfo = 0
grabNull = 1
else:
print ("~~~~")
print ("ERROR SHOULD NOT BE HERE -- grabInfo == 1")
sys.exit(0)
if field == "version":
Info_version = float(quantity.strip()) #float
DFISE_obj.INFO.setField(field, Info_version)
printINFO( "version = " + str(Info_version) )
elif field == "type":
Info_type = quantity.strip() #string
DFISE_obj.INFO.setField(field, Info_type)
printINFO( "type = " + str(Info_type))
elif field == "dimension":
Info_dimension = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_dimension)
printINFO( "dimension = " + str(Info_dimension) )
elif field == "nb_vertices":
Info_nb_vertices = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_vertices)
printINFO( "nb_vertices = " + str(Info_nb_vertices) )
elif field == "nb_edges":
Info_nb_edges = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_edges)
printINFO( "nb_edges = " + str(Info_nb_edges) )
elif field == "nb_faces":
Info_nb_faces = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_faces)
printINFO( "nb_faces = " + str(Info_nb_faces) )
elif field == "nb_elements":
Info_nb_elements = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_elements)
printINFO( "nb_elements = " + str(Info_nb_elements) )
elif field == "nb_regions":
Info_nb_regions = int(quantity.strip()) #int
DFISE_obj.INFO.setField(field, Info_nb_regions)
printINFO( "nb_regions = " + str(Info_nb_regions) )
elif field == "datasets":
Info_nb_datasets = quantity.split('[ "')[1].split('" ]')[0].split('" "') #list of str
Info_num_datasets = int(len(Info_nb_datasets)) #int
DFISE_obj.INFO.setField(field, Info_nb_datasets)
#INFO.setField("version", Info_num_datasets)
printINFO( "nb_datasets (" + str(Info_num_datasets) + ") = " + str(Info_nb_datasets) )
elif field == "functions":
Info_nb_functions = quantity.split('[ ')[1].split(' ]')[0].split(' ') #list of str
Info_num_functions = int(len(Info_nb_functions)) #int
DFISE_obj.INFO.setField(field, Info_nb_functions)
#INFO.setField("version", Info_num_functions)
printINFO( "nb_functions (" + str(Info_num_functions) + ") = " + str(Info_nb_functions) )
if Info_num_functions == Info_num_datasets:
print ("number of datasets matches number of functions, ok!")
DFISE_obj.DATA.setNum(Info_num_datasets)
else:
print ("number of datasets does not match number of functions, check file!")
sys.exit(0)
elif grabData == 1:
split_equ = line.split('=')
split_space = line.split(' ')
#print (split_space)
#print (split_equ)
field = None
quantity = None
if grabDataVALS == 0:
for each in split_space:
field = each.strip()
if field == '':
#print ("..blankspace or blankline.."),
continue
elif field == "Dataset":
j = j+1
printDATA( "**NEW DATASET, j = " + str(j) + " **" )
Data_name = str(line.split(' ("')[1].split('") ')[0]) #str
DFISE_obj.DATA.setField(j, "dataname", Data_name)
printDATA( "name = " + Data_name )
elif field == "function":
Data_function = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_function)
printDATA( "function = " + Data_function )
elif field == "type":
Data_type = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_type)
printDATA( "type = " + Data_type )
elif field == "dimension":
Data_dimension = str(int(split_equ[1].strip())) #int
DFISE_obj.DATA.setField(j, field, Data_dimension)
printDATA( "dimension = " + str(Data_dimension) )
elif field == "location":
Data_location = str(split_equ[1].strip()) #str
DFISE_obj.DATA.setField(j, field, Data_location)
printDATA( "location = " + Data_location )
elif field == "validity":
Data_validity = str(split_equ[1].split('[ "')[1].split('" ]')[0]) #str
DFISE_obj.DATA.setField(j, field, Data_validity)
printDATA( "validity = " + Data_validity )
elif field == "Values":
Data_num_values = int(line.split(' (')[1].split(') ')[0]) #int
DFISE_obj.DATA.setField(j, "numValues", Data_num_values)
printDATA( "num_values = " + str(Data_num_values) )
grabDataVALS = 1
datasetvals = [] # 1D list later converted to numpy array
elif grabDataVALS == 1:
## READ VALS BY LINE (DEPRICATED)###
# if line.strip() == '}':
# #print(datasetvals)
# DFISE_obj.DATA.setField(j, "Values", datasetvals)
# grabDataVALS = 0
# continue
#
# quantities = line.split(' ')
# linevals = []
# for each in quantities:
# if each.strip() == '':
# continue
# else:
# linevals.append(float(each))
#
# linevals = np.array(linevals) #each line is stored as an array
# datasetvals.append(linevals) #inside a list for each dataset
# #print ("length = " + str(len(datasetvals)) + " values = " + str(datasetvals))
## READ VALS BY SECTION (array of dataset values)###
if line.strip() == '}': #ending brace
#print(datasetvals)
datasetvals = np.array(datasetvals) #cast as numpy array (modify for alternate datatype)
#print ("length = " + str(len(datasetvals)) )#+ " values = " + str(datasetvals))
DFISE_obj.DATA.setField(j, "Values", datasetvals)
grabDataVALS = 0
continue
quantities = line.split(' ')
linevals = []
for each in quantities:
if each.strip() == '':
continue
else:
datasetvals.append(float(each))
#
#
# # #
## Done collecting all the data, close file and append to data list
f.close()
to_return.append(DFISE_obj)
i = i+1 #file number counter
# end file
print ("~~~~~~~~~~~~~~~~~~~~")
print ('')
return to_return
## END FUNCTION
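## Illustrative sketch (not part of the original AwSimLib package): a minimal
## call of readDAT followed by printDAT. The file name 'testgrid1.dat' is only
## an assumption taken from the docstring above.
def _example_readDAT():
	files = readDAT(['testgrid1.dat'])
	for dfise in files:
		printDAT(dfise)
	return files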
def printDAT(dat):
#### Print dataset info from DF-ISE file object
####
#### In: DF-ISE object, , e.g.: dat=DFISE.readDAT(filenames)
####
	####	Out:	Prints info to console.
	####			Returns 1 if successful
####
print ('')
print ("~~~~~~~~~~~~~~~~~~~~")
print ('DF-ISE printDAT')
print ("~~~~~~~~~~~~~~~~~~~~")
print ('Dataset verification:')
i=0
for dataset in dat.DATA.datasets:
print (' '+ str(i) +' '+ dataset.dataname +' '+ dataset.validity +' '+ dataset.dimension)
i = i+1
#
print ("~~~~~~~~~~~~~~~~~~~~")
print ('')
####
def writeDAT(data, output_filename):
#### File Writer for DF-ISE data ('.dat') files
####
#### In: List of DF-ISE objects, e.g.: data=[DFISE.readDAT(filenames)]
#### Output filename string, e.g.: output_filename=str('PythonDFISEdatOUTPUT.dat')
####
	####	Out:	Print '.dat' with specified filename, e.g.: program should exit if not success
#### Return 1 if completed
####
####
## libraries provided by system
import numpy as np
import sys
##
if type(data)!=list: #correct if only 1 item provided
data = [data]
#
if len(data) > 1:
print ("ERROR: You must provide only 1 valid object data=[DFISE_DATfile1(, ...)]")
return 0
#print ("...... using 1st item only...") #feature depricated
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE writeDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('printing file: ' + output_filename)
FILE0 = data[0] # first data object
orig_stdout = sys.stdout # save the pointer for standard out
to_write = open(output_filename, 'wb')
sys.stdout = to_write # set the standard out pointer to the to_write file
infos = FILE0.INFO
dats = FILE0.DATA
#header
print ("DF-ISE text")
print ("")
#info
print ("Info {")
print (" " + "version = " + str(infos.version))
print (" " + "type = " + str(infos.type))
print (" " + "dimension = " + str(infos.dimension))
print (" " + "nb_vertices = " + str(infos.nb_vertices))
print (" " + "nb_edges = " + str(infos.nb_edges))
print (" " + "nb_faces = " + str(infos.nb_faces))
print (" " + "nb_elements = " + str(infos.nb_elements))
print (" " + "nb_regions = " + str(infos.nb_regions))
print (" " + "datasets = ["),
for each in infos.datasets:
print ('"'+each+'"'),
print ("]")
print (" " + "functions = ["),
for each in infos.functions:
print (each),
print ("]")
print ("}")
print ("")
#data
print ("Data {")
print ("")
for dataset in dats.datasets:
print (' Dataset ("' + dataset.dataname + '") {')
print (' function = ' + dataset.function)
print (' type = ' + dataset.type)
print (' dimension = ' + dataset.dimension)
print (' location = ' + dataset.location)
print (' validity = [ "' + dataset.validity + '" ]')
print (' Values (' + str(dataset.numValues) + ') {')
valNum = 0
for val in np.nditer(dataset.Values):
if valNum%10==0 and valNum!=0: # every ten items
print (' ') #space+newline
#elif valNum%10==0 and valNum==0: # every ten items
#print (' '),
print (''),
print ('%.15e' % float(val)),
print (''),
valNum = valNum+1
#
print(' ')
print (' }')
print (' }')
print ('')
print ('')
print ('}')
sys.stdout = orig_stdout #reset sys standard out pointer
to_write.close()
print ('~~~~~~~~~~~~~~~~~~~~')
return 1
## END FUNCTION
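## Illustrative sketch (not part of the original AwSimLib package): read a
## '.dat' file and write it back out. Both file names are assumptions taken
## from the docstrings above.
def _example_writeDAT():
	dat = readDAT(['testgrid1.dat'])
	return writeDAT(dat, 'PythonDFISEdatOUTPUT.dat')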
def combineDAT(FILEmaster, FILE0, regions0, FILE1, regions1, field_names, field_dimensions, field_location):
#### Combine DF-ISE datasets
####
#### In: Object to store resulting DF-ISE, e.g.: Filemaster=FILE0
#### File0, e.g.: FILE0=DFISE.readDAT(filename0)
#### Regions to consider in FILE0, regions0=['regA0'(, ...)]
#### File1, e.g.: FILE1=DFISE.readDAT(filename1)
#### Regions to consider in FILE1, e.g.: regions1=['regA1'(, ...)]
#### Fields to combine, e.g.: field_names=[ "PMIUserField0"(, ...)]
#### Dimensions of those fields, e.g.: field_dimensions=[ "1"(, ...)]
#### Location of the points, e.g.: field_location=[ "vertex"(, ...)]
####
	####	Out:	Print '.dat' with specified filename, e.g.: program should exit if not success
#### Return 1 if completed
####
#### Note: # must verify numValues are the same for two datasets, obviously
#### # must verify dataname, (function, type,) location, validity
#### # ()==warning
####
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE combineDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
if len(regions0)!=len(regions1):
print ('Number of regions in regions0 and regions1 do not match!')
if len(field_names)!=len(field_dimensions) or len(field_names)!=len(field_location):
print ('Number of regions in field_<info> do not match!!')
print('files: '+FILE0.filename+' '+FILE1.filename)
#regionNUM = 0
#for region in regions0:
fieldNUM = 0
for field in field_names:
regionNUM = 0
for region in regions0:
print('--> looking for '+region)
# find dataset indices in File0
dataset0_index = []
indexNUM = 0
for dataset in FILE0.DATA.datasets:
if dataset.validity==regions0[regionNUM] and dataset.dataname==field_names[fieldNUM] and dataset.dimension==field_dimensions[fieldNUM] and dataset.location==field_location[fieldNUM]:
dataset0_index.append(indexNUM)
print(' '+'File0: @'+str(indexNUM)+' found '+dataset.dataname+' in '+dataset.validity)
else:
#print(' '+'File0: @'+str(indexNUM)+' !!!! '+dataset.dataname+' in '+dataset.validity)
pass
#
indexNUM = indexNUM+1
#
# find dataset indices in File1
dataset1_index = []
indexNUM = 0
			for dataset in FILE1.DATA.datasets:
if dataset.validity==regions1[regionNUM] and dataset.dataname==field_names[fieldNUM] and dataset.dimension==field_dimensions[fieldNUM] and dataset.location==field_location[fieldNUM]:
dataset1_index.append(indexNUM)
print(' '+'File1: @'+str(indexNUM)+' found '+dataset.dataname+' in '+dataset.validity)
else:
#print(' '+'File1: @'+str(indexNUM)+' !!!! '+dataset.dataname+' in '+dataset.validity)
pass
#
indexNUM = indexNUM+1
#
## now we have two lists, (hopefully of same length), where each element corresponds to dataset# to compare in DATA.datasets[#] --> in this case add .Values
if len(dataset0_index)!=len(dataset1_index):
print (' ERROR: data files provided have some redundancy in validity/dataname.')
#
if len(dataset0_index)>1:
print (' ERROR: more than 1 dataset found for given region/info')
print(len(dataset0_index))
else:
#print(len(dataset0_index))
pass
indexNUM = 0
for each in dataset0_index:
if FILE0.DATA.datasets[dataset0_index[indexNUM]].function!=FILE1.DATA.datasets[dataset1_index[indexNUM]].function:
print('Warning: the sets being combined do not match in functionname')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].function))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].function))
pass
if FILE0.DATA.datasets[dataset0_index[indexNUM]].type!=FILE1.DATA.datasets[dataset1_index[indexNUM]].type:
print('Warning: the sets being combined do not match in type')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].type))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].type))
pass
if FILE0.DATA.datasets[dataset0_index[indexNUM]].numValues!=FILE1.DATA.datasets[dataset1_index[indexNUM]].numValues:
print('ERROR: the sets being combined do not match in numValues')
print(' --> file0: '+str(FILE0.DATA.datasets[dataset0_index[indexNUM]].numValues))
print(' --> file1: '+str(FILE1.DATA.datasets[dataset1_index[indexNUM]].numValues))
continue
#
## identifying info
print(' adding @'+str(each)+' '+FILE0.DATA.datasets[dataset1_index[indexNUM]].validity),
print (FILE0.DATA.datasets[dataset0_index[indexNUM]].dataname +'0 '+ FILE0.DATA.datasets[dataset1_index[indexNUM]].dataname+'1'),
## great, now just add them already!
tmp = np.add(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values, FILE1.DATA.datasets[dataset1_index[indexNUM]].Values)
FILEmaster.DATA.setField (dataset0_index[indexNUM], 'Values', tmp)
if all(tmp == FILEmaster.DATA.datasets[dataset0_index[indexNUM]].Values):
print('Sucess!')
else:
print('hmmph '),
print(type(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)),
print(' '),
print(type(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values))
print(' '),
print(len(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)),
print(' '),
print(len(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values))
print(' '),
print((FILE0.DATA.datasets[dataset0_index[indexNUM]].Values)[0]),
print(' '),
print((FILE1.DATA.datasets[dataset1_index[indexNUM]].Values)[0])
print(' '),
print(type(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values[0])),
print(' '),
print(type(FILE1.DATA.datasets[dataset1_index[indexNUM]].Values[0])),
print(np.add(FILE0.DATA.datasets[dataset0_index[indexNUM]].Values[0],FILE1.DATA.datasets[dataset1_index[indexNUM]].Values[0]))
indexNUM = indexNUM+1
# endADD
regionNUM = regionNUM+1
# endField
fieldNUM = fieldNUM+1
#endRegion
print ('~~~~~~~~~~~~~~~~~~~~')
return FILEmaster
## END FUNCTION
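## Illustrative sketch (not part of the original AwSimLib package): add one
## scalar vertex field of two files region by region. The file, region and
## field names are assumptions made only for this example.
def _example_combineDAT():
	file0 = readDAT(['device0.dat'])[0]
	file1 = readDAT(['device1.dat'])[0]
	merged = combineDAT(file0, file0, ['Silicon'], file1, ['Silicon'],
						['PMIUserField0'], ['1'], ['vertex'])
	return writeDAT([merged], 'combined.dat')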
def extractDAT2matrix(extract_file, extract_regions, extract_fields, extract_dimensions):
#### Extract datasets from a DF-ISE object to a matrix
####
#### In: DF-ISE file object with data, e.g.: extract_file=DFISE.readDAT(filename)
#### Regions to consider in FILE0, extract_regions=['regA0'(, ...)]
#### Datasets to extract, e.g.: extract_fields=[ "PMIUserField0"(, ...)]
#### Dimensions of those fields, e.g.: extract_dimensions=[ "1"(, ...)]
####
#### Out: A list of matrices, where each matrix is data corresponding to a region
####
#### Note: # datasets are extracted in order, size of matrix may vary...
#### ... depending on available data in file
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE extractDAT')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('processing: ' + extract_file.filename)
# print ('~~~')
print ('')
data = []
for region in extract_regions:
print ("Region: ~" + region + "~")
coords = []
fieldnum = 0
for field in extract_fields:
#print field[fieldnum]
for dataset in extract_file.DATA.datasets:
if dataset.dataname==field and dataset.validity==region and dataset.dimension==extract_dimensions[fieldnum]:
## scalar quantities
if dataset.dimension == "1":
## GRAB BY VALUE ##
coords.append(dataset.Values)
##vector quantities
elif dataset.dimension == "3":
pntsX = []
pntsY = []
pntsZ = []
## GRAB BY VALUE ##
valNUM = 1
for each in dataset.Values:
if valNUM == 1:
pntsX.append(each)
valNUM = 2
elif valNUM == 2:
pntsY.append(each)
valNUM = 3
elif valNUM == 3:
pntsZ.append(each)
valNUM = 1
#
#important!!! for dim=3, append 3 lists
coords.append(pntsX)
coords.append(pntsY)
coords.append(pntsZ)
#endif
print ("---> retrieved: '" + field + "' in: " + region +" as dim"+str(dataset.dimension))
#endmatch
#end
fieldnum = fieldnum+1
#end
#now we have all the coords and datapoints
#break
coords = np.asarray(coords)
coords = np.asmatrix(coords)
coords = coords.transpose()
data.append(coords)
# print ("~~~")
# print ("~~~ ~~~~~~~ ~~~")
print ("")
#
print ("Succesfully grabbed (" + str(len(data)) + ") regions of data with (1-3)d profiles.")
print ('~~~~~~~~~~~~~~~~~~~~')
return data
## END FUNCTION
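## Illustrative sketch (not part of the original AwSimLib package): pull one
## scalar field of one region into a numpy matrix. The file, region and field
## names are assumptions made only for this example.
def _example_extractDAT2matrix():
	dat = readDAT(['device0.dat'])[0]
	return extractDAT2matrix(dat, ['Silicon'], ['DopingConcentration'], ['1'])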
def write2csv(data, output_prefix, regions):
#### Print extracte data to CSV
####
#### In: List of data matrices, e.g.: data=extract2matrix(extract_file, extract_regions, extract_fields, extract_dimensions)
#### Output file prexis, e.g.: output_prefix='_ASL_write2csv'
#### Regions to print (filenames), e.g.: regions=['Reg0'(, ...)]
####
#### Out: Prints a CSV file for each region with all data in data
	####			Returns 1 if successful
####
#### Note: # Printing will overwrite for any filename collissions
####
## libraries provided by system
import numpy as np
##
if len(data) != len(regions):
print ("~~~~")
print (" length of 'data' and 'regions' not equal; they should be. exiting..")
sys.exit(0)
print ("Printing output files for each region (will overwrite) ...")
i=0
for item in data:
name = output_prefix+'_Reg'+str(i)+'_'+str(regions[i])+'.csv'
print (".... " + name),
np.savetxt( name, item, delimiter=',', fmt="%e")
#d.tofile(name, ",")
print (" OK")
i = i+1
# end
print (" ")
print ("Job completed.")
print (" ")
return 1
#print "Printing output files for each region (will overwrite) ..."
## END FUNCTION
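## Illustrative sketch (not part of the original AwSimLib package): dump the
## extracted region data to CSV files. The names are assumptions only; the
## prefix follows the example given in the docstring above.
def _example_write2csv():
	dat = readDAT(['device0.dat'])[0]
	data = extractDAT2matrix(dat, ['Silicon'], ['DopingConcentration'], ['1'])
	return write2csv(data, '_ASL_write2csv', ['Silicon'])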
def buildDAT(insert_data, info, insert_filename, insert_regions, insert_dataname, insert_function, insert_type, insert_dimension, insert_location):
#### Build a DF-ISE file object from data and info provided
####
#### In: DF-ISE file object with data, e.g.: extract_file=DFISE.readDAT(filename)
#### Regions to consider in FILE0, extract_regions=['regA0'(, ...)]
#### Datasets to extract, e.g.: extract_fields=[ "PMIUserField0"(, ...)]
#### Dimensions of those fields, e.g.: extract_dimensions=[ "1"(, ...)]
####
#### Out: A list of matrices, where each matrix is data corresponding to a region
####
#### Note: # Currently: (fdgi) builds DAT from data and given a DFISE.INFO() object
#### # Currently: capable of building from scalar and vector data
####
## libraries provided by system
import numpy as np
##
print ('')
print ('~~~~~~~~~~~~~~~~~~~~')
print ('DF-ISE buildDAT_fdgi') #from data given info ## from info given data, from info&data given 0, from 0 given info&data
print ('~~~~~~~~~~~~~~~~~~~~')
print ('building file: ' + insert_filename)
print ('')
#### FROM DATA ####
newDAT = DFISE_DATfile(insert_filename)
if len(insert_data)!=len(insert_regions):
print ("ERROR: len of regions should match len of data (list type)")
if len(sum(insert_dataname,[]))!=len(sum(insert_function,[])) or len(sum(insert_function,[]))!=len(sum(insert_type,[])) or len(sum(insert_type,[]))!=len(sum(insert_dimension,[])) or len(sum(insert_dimension,[]))!=len(sum(insert_location,[])):
print ("ERROR: list of lists should have same # of elements (necessary but not sufficient condition)")
numDatasets = len(sum(insert_dataname,[])) #count datasets
#newDAT.DATA.setNum(numDatasets)
numDatasets = 0
i=0
for region in insert_regions:
#when manipulating, easier operate on transpose, whose shape is (#datasets,#rows)
tempRegionData = insert_data[i].transpose()
print ("Region: ~" + region + "~")
datacol = 0
j=0
for dataset in insert_dataname[i]:
tempD = Dataset()
tempD.dataname = insert_dataname[i][j]
tempD.function = insert_function[i][j]
tempD.type = insert_type[i][j]
tempD.dimension = insert_dimension[i][j]
tempD.location = insert_location[i][j]
tempD.validity = region
if int(tempD.dimension)==1:
#values = tempRegionData[datacol:datacol+int(tempD.dimension)].transpose()
values = tempRegionData[datacol].transpose()
#values = []
#for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
# values.append(each[0,0])
#
elif int(tempD.dimension)==2:
values = []
for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
values.append(each[0,0])
values.append(each[0,1])
#
elif int(tempD.dimension)==3:
values = []
for each in tempRegionData[datacol:datacol+int(tempD.dimension)].transpose():
values.append(each[0,0])
values.append(each[0,1])
values.append(each[0,2])
#
else:
print ("ERROR: DIMENSION NOT VALID")
#
tempD.Values = np.asarray(values)
tempD.numValues = (tempD.Values).size
newDAT.DATA.datasets.append(tempD)
# print tempD.dataname,
# print tempD.function,
# print tempD.type,
# print tempD.dimension,
# print tempD.location,
# print tempD.validity,
# print tempD.numValues
# print str(datacol), str(datacol+int(tempD.dimension))
# print i, j,
print numDatasets,
print newDAT.DATA.datasets[numDatasets].dataname,
print newDAT.DATA.datasets[numDatasets].function,
print newDAT.DATA.datasets[numDatasets].type,
print newDAT.DATA.datasets[numDatasets].dimension,
print newDAT.DATA.datasets[numDatasets].location,
print newDAT.DATA.datasets[numDatasets].validity,
print newDAT.DATA.datasets[numDatasets].numValues
# print newDAT.DATA.datasets[numDatasets].Values.shape
datacol = datacol+int(insert_dimension[i][j])
j=j+1
numDatasets = numDatasets+1
#
i=i+1
#
newDAT.DATA.numDatasets = numDatasets #not setNum, which makes empty Dataset() objects
print ("")
if newDAT.DATA.numDatasets == len(newDAT.DATA.datasets):
print ("Collected "+str(newDAT.DATA.numDatasets)+" datasets ...ok!")
else:
print ("ERROR: numDatasets and len(datasets) do not match!")
print ('~~~~~~~~~~~~~~~~~~~~')
#### GIVEN INFO ####
#info = info
newDAT.INFO.version = info.version
newDAT.INFO.type = info.type
newDAT.INFO.dimension = info.dimension
newDAT.INFO.nb_vertices = info.nb_vertices
newDAT.INFO.nb_edges = info.nb_edges
newDAT.INFO.nb_faces = info.nb_faces
newDAT.INFO.nb_elements = info.nb_elements
newDAT.INFO.nb_regions = info.nb_regions
newDAT.INFO.setField('datasets', sum(insert_dataname,[]))
newDAT.INFO.setField('functions', sum(insert_function,[]))
return newDAT
## END FUNCTION
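## Illustrative sketch (not part of the original AwSimLib package): rebuild a
## '.dat' object from a matrix extracted above, reusing the INFO block of the
## source file. All file, region and field names are assumptions only, and the
## per-region lists of lists follow the argument layout expected by buildDAT.
def _example_buildDAT():
	src = readDAT(['device0.dat'])[0]
	data = extractDAT2matrix(src, ['Silicon'], ['DopingConcentration'], ['1'])
	new = buildDAT(data, src.INFO, 'rebuilt.dat', ['Silicon'],
				   [['DopingConcentration']], [['DopingConcentration']],
				   [['scalar']], [['1']], [['vertex']])
	return writeDAT([new], 'rebuilt.dat')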
####################################################################
####################################################################
| lgpl-3.0 | 8,652,145,147,275,161,000 | 31.980105 | 243 | 0.590551 | false | 3.124293 | false | false | false | 0.046958 |
haiyangd/Gelatin | src/Gelatin/generator/Builder.py | 2 | 3593 | # Copyright (C) 2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import re
import shutil
from tempfile import NamedTemporaryFile
from urlparse import urlparse
from cgi import parse_qs
value = r'"(?:\\.|[^"])*"'
attrib = r'(?:[\$\w\-]+=%s)' % value
path_re = re.compile(r'^[^/"\?]+(?:\?%s?(?:&%s?)*)?' % (attrib, attrib))
class Builder(object):
"""
Abstract base class for all generators.
"""
def __init__(self):
raise NotImplementedError('abstract method')
def serialize(self):
raise NotImplementedError('abstract method')
def serialize_to_file(self, filename):
with NamedTemporaryFile(delete = False) as thefile:
thefile.write(self.serialize())
if os.path.exists(filename):
os.unlink(filename)
shutil.move(thefile.name, filename)
def dump(self):
raise NotImplementedError('abstract method')
def _splitpath(self, path):
match = path_re.match(path)
result = []
while match is not None:
result.append(match.group(0))
path = path[len(match.group(0)) + 1:]
match = path_re.match(path)
return result
def _splittag(self, tag):
url = urlparse(tag)
attribs = []
for key, value in parse_qs(url.query).iteritems():
value = value[0]
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
attribs.append((str(key.lower()), value))
return url.path.replace(' ', '-').lower(), attribs
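    # Illustrative sketch (not part of the original module): given the regular
    # expressions at the top of the file, the two helpers split a match path
    # like this (the example string is an assumption made only for this
    # illustration):
    #
    #   self._splitpath('person?name="Bob"/address')
    #       -> ['person?name="Bob"', 'address']
    #   self._splittag('person?name="Bob"')
    #       -> ('person', [('name', 'Bob')])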
def create(self, path, data = None):
"""
Creates the given node, regardless of whether or not it already
exists.
Returns the new node.
"""
raise NotImplementedError('abstract method')
def add(self, path, data = None, replace = False):
"""
Creates the given node if it does not exist.
Returns the (new or existing) node.
"""
raise NotImplementedError('abstract method')
def add_attribute(self, path, name, value):
"""
Creates the given attribute and sets it to the given value.
Returns the (new or existing) node to which the attribute was added.
"""
raise NotImplementedError('abstract method')
def open(self, path):
"""
Creates and enters the given node, regardless of whether it already
exists.
Returns the new node.
"""
raise NotImplementedError('abstract method')
def enter(self, path):
"""
Enters the given node. Creates it if it does not exist.
Returns the node.
"""
raise NotImplementedError('abstract method')
def leave(self):
"""
Returns to the node that was selected before the last call to enter().
The history is a stack, to the method may be called multiple times.
"""
raise NotImplementedError('abstract method')
| gpl-2.0 | 4,521,645,221,967,713,000 | 32.579439 | 78 | 0.618981 | false | 4.381707 | false | false | false | 0.003896 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/extra_modules/pyxmpp2/stanza.py | 46 | 17258 | #
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""General XMPP Stanza handling.
Normative reference:
- `RFC 6120 <http://www.ietf.org/rfc/rfc6120.txt>`__
"""
__docformat__ = "restructuredtext en"
from .etree import ElementTree, ElementClass
import random
import weakref
from .exceptions import BadRequestProtocolError, JIDMalformedProtocolError
from .jid import JID
from .stanzapayload import XMLPayload, payload_factory
from .stanzapayload import payload_class_for_element_name
from .xmppserializer import serialize
from .constants import STANZA_NAMESPACES, STANZA_CLIENT_NS, XML_LANG_QNAME
from .error import StanzaErrorElement
from .interfaces import StanzaPayload
random.seed()
class Stanza(object):
"""Base class for all XMPP stanzas.
:Ivariables:
- `_payload`: the stanza payload
        - `_error`: error associated with a stanza of type "error"
- `_namespace`: namespace of this stanza element
- `_return_path`: weak reference to the return route object
:Types:
- `_payload`: `list` of (`str`, `StanzaPayload`)
- `_error`: `pyxmpp2.error.StanzaErrorElement`
- `_namespace`: `str`
- `_return_path`: weakref to `StanzaRoute`
"""
# pylint: disable-msg=R0902
element_name = "Unknown"
def __init__(self, element, from_jid = None, to_jid = None,
stanza_type = None, stanza_id = None,
error = None, error_cond = None,
return_path = None, language = None):
"""Initialize a Stanza object.
:Parameters:
- `element`: XML element of this stanza, or element name for a new
stanza. If element is given it must not be modified later,
unless `decode_payload()` and `mark_dirty()` methods are called
                first (the element changes won't affect the stanza then).
- `from_jid`: sender JID.
- `to_jid`: recipient JID.
            - `stanza_type`: stanza type: one of: "get", "set", "result" or
"error".
- `stanza_id`: stanza id -- value of stanza's "id" attribute. If
not given, then unique for the session value is generated.
- `error`: error object. Ignored if `stanza_type` is not "error".
- `error_cond`: error condition name. Ignored if `stanza_type` is
not "error" or `error` is not None.
- `return_path`: route for sending responses to this stanza. Will
be weakly referenced.
- `language`: default language for the stanza content
:Types:
- `element`: `str` or :etree:`ElementTree.Element`
- `from_jid`: `JID`
- `to_jid`: `JID`
- `stanza_type`: `str`
- `stanza_id`: `str`
- `error`: `pyxmpp.error.StanzaErrorElement`
- `error_cond`: `str`
- `return_path`: `StanzaRoute`
- `language`: `str`
"""
# pylint: disable-msg=R0913
self._error = None
self._from_jid = None
self._to_jid = None
self._stanza_type = None
self._stanza_id = None
self._language = language
if isinstance(element, ElementClass):
self._element = element
self._dirty = False
self._decode_attributes()
if not element.tag.startswith("{"):
raise ValueError("Element has no namespace")
else:
self._namespace, self.element_name = element.tag[1:].split("}")
if self._namespace not in STANZA_NAMESPACES:
raise BadRequestProtocolError("Wrong stanza namespace")
self._payload = None
else:
self._element = None
self._dirty = True
self.element_name = str(element)
self._namespace = STANZA_CLIENT_NS
self._payload = []
self._ns_prefix = "{{{0}}}".format(self._namespace)
self._element_qname = self._ns_prefix + self.element_name
if from_jid is not None:
self.from_jid = from_jid
if to_jid is not None:
self.to_jid = to_jid
if stanza_type:
self.stanza_type = stanza_type
if stanza_id:
self.stanza_id = stanza_id
if self.stanza_type == "error":
if error:
self._error = StanzaErrorElement(error)
elif error_cond:
self._error = StanzaErrorElement(error_cond)
else:
self._decode_error()
if return_path is not None:
self._return_path = weakref.ref(return_path)
def _decode_attributes(self):
"""Decode attributes of the stanza XML element
and put them into the stanza properties."""
try:
from_jid = self._element.get('from')
if from_jid:
self._from_jid = JID(from_jid)
to_jid = self._element.get('to')
if to_jid:
self._to_jid = JID(to_jid)
except ValueError:
raise JIDMalformedProtocolError
self._stanza_type = self._element.get('type')
self._stanza_id = self._element.get('id')
lang = self._element.get(XML_LANG_QNAME)
if lang:
self._language = lang
def _decode_error(self):
"""Decode error element of the stanza."""
error_qname = self._ns_prefix + "error"
for child in self._element:
if child.tag == error_qname:
self._error = StanzaErrorElement(child)
return
raise BadRequestProtocolError("Error element missing in"
" an error stanza")
def copy(self):
"""Create a deep copy of the stanza.
:returntype: `Stanza`"""
result = Stanza(self.element_name, self.from_jid, self.to_jid,
self.stanza_type, self.stanza_id, self.error,
self._return_path())
if self._payload is None:
self.decode_payload()
for payload in self._payload:
result.add_payload(payload.copy())
return result
def serialize(self):
"""Serialize the stanza into a Unicode XML string.
:return: serialized stanza.
:returntype: `str`"""
return serialize(self.get_xml())
def as_xml(self):
"""Return the XML stanza representation.
Always return an independent copy of the stanza XML representation,
which can be freely modified without affecting the stanza.
:returntype: :etree:`ElementTree.Element`"""
attrs = {}
if self._from_jid:
attrs['from'] = str(self._from_jid)
if self._to_jid:
attrs['to'] = str(self._to_jid)
if self._stanza_type:
attrs['type'] = self._stanza_type
if self._stanza_id:
attrs['id'] = self._stanza_id
if self._language:
attrs[XML_LANG_QNAME] = self._language
element = ElementTree.Element(self._element_qname, attrs)
if self._payload is None:
self.decode_payload()
for payload in self._payload:
element.append(payload.as_xml())
if self._error:
element.append(self._error.as_xml(
stanza_namespace = self._namespace))
return element
def get_xml(self):
"""Return the XML stanza representation.
This returns the original or cached XML representation, which
may be much more efficient than `as_xml`.
Result of this function should never be modified.
:returntype: :etree:`ElementTree.Element`"""
if not self._dirty:
return self._element
element = self.as_xml()
self._element = element
self._dirty = False
return element
def decode_payload(self, specialize = False):
"""Decode payload from the element passed to the stanza constructor.
Iterates over stanza children and creates StanzaPayload objects for
them. Called automatically by `get_payload()` and other methods that
access the payload.
For the `Stanza` class stanza namespace child elements will also be
        included as the payload. For subclasses these are not considered
payload."""
if self._payload is not None:
# already decoded
return
if self._element is None:
            raise ValueError("This stanza has no element to decode")
payload = []
if specialize:
factory = payload_factory
else:
factory = XMLPayload
for child in self._element:
if self.__class__ is not Stanza:
if child.tag.startswith(self._ns_prefix):
continue
payload.append(factory(child))
self._payload = payload
@property
def from_jid(self): # pylint: disable-msg=E0202
"""Source JID of the stanza.
:returntype: `JID`
"""
return self._from_jid
@from_jid.setter # pylint: disable-msg=E1101
def from_jid(self, from_jid): # pylint: disable-msg=E0202,E0102,C0111
if from_jid is None:
self._from_jid = None
else:
self._from_jid = JID(from_jid)
self._dirty = True
@property
def to_jid(self): # pylint: disable-msg=E0202
"""Destination JID of the stanza.
:returntype: `JID`
"""
return self._to_jid
@to_jid.setter # pylint: disable-msg=E1101
def to_jid(self, to_jid): # pylint: disable-msg=E0202,E0102,C0111
if to_jid is None:
self._to_jid = None
else:
self._to_jid = JID(to_jid)
self._dirty = True
@property
def stanza_type(self): # pylint: disable-msg=C0111,E0202
"""Stanza type, one of: "get", "set", "result" or "error".
:returntype: `str`
"""
return self._stanza_type
@stanza_type.setter # pylint: disable-msg=E1101
def stanza_type(self, stanza_type): # pylint: disable-msg=E0202,E0102,C0111
self._stanza_type = str(stanza_type)
self._dirty = True
@property
def stanza_id(self): # pylint: disable-msg=C0111,E0202
"""Stanza id.
:returntype: `str`
"""
return self._stanza_id
@stanza_id.setter # pylint: disable-msg=E1101
def stanza_id(self, stanza_id): # pylint: disable-msg=E0202,E0102,C0111
self._stanza_id = str(stanza_id)
self._dirty = True
@property
def error(self): # pylint: disable-msg=E0202
"""Stanza error element.
:returntype: `StanzaErrorElement`
"""
return self._error
@error.setter # pylint: disable-msg=E1101
def error(self, error): # pylint: disable-msg=E0202,E0102,C0111
self._error = error
self._dirty = True
@property
def return_path(self): # pylint: disable-msg=E0202
"""Stream the stanza was received from.
:returntype: `StanzaRoute`
"""
return self._return_path()
def mark_dirty(self):
"""Mark the stanza 'dirty' so the XML representation will be
re-built the next time it is requested.
This should be called each time the payload attached to the stanza is
        modified."""
self._dirty = True
def set_payload(self, payload):
"""Set stanza payload to a single item.
        All current stanza content will be dropped.
Marks the stanza dirty.
:Parameters:
- `payload`: XML element or stanza payload object to use
:Types:
- `payload`: :etree:`ElementTree.Element` or `StanzaPayload`
"""
if isinstance(payload, ElementClass):
self._payload = [ XMLPayload(payload) ]
elif isinstance(payload, StanzaPayload):
self._payload = [ payload ]
else:
raise TypeError("Bad payload type")
self._dirty = True
def add_payload(self, payload):
"""Add new the stanza payload.
Marks the stanza dirty.
:Parameters:
- `payload`: XML element or stanza payload object to add
:Types:
- `payload`: :etree:`ElementTree.Element` or `StanzaPayload`
"""
if self._payload is None:
self.decode_payload()
if isinstance(payload, ElementClass):
self._payload.append(XMLPayload(payload))
elif isinstance(payload, StanzaPayload):
self._payload.append(payload)
else:
raise TypeError("Bad payload type")
self._dirty = True
def get_all_payload(self, specialize = False):
"""Return list of stanza payload objects.
:Parameters:
- `specialize`: If `True`, then return objects of specialized
`StanzaPayload` classes whenever possible, otherwise the
representation already available will be used (often
`XMLPayload`)
:Returntype: `list` of `StanzaPayload`
"""
if self._payload is None:
self.decode_payload(specialize)
elif specialize:
for i, payload in enumerate(self._payload):
if isinstance(payload, XMLPayload):
klass = payload_class_for_element_name(
payload.element.tag)
if klass is not XMLPayload:
payload = klass.from_xml(payload.element)
self._payload[i] = payload
return list(self._payload)
def get_payload(self, payload_class, payload_key = None,
specialize = False):
"""Get the first payload item matching the given class
and optional key.
Payloads may be addressed using a specific payload class or
via the generic `XMLPayload` element, though the `XMLPayload`
representation is available only as long as the element is not
requested by a more specific type.
:Parameters:
- `payload_class`: requested payload class, a subclass of
`StanzaPayload`. If `None` get the first payload in whatever
class is available.
- `payload_key`: optional key for additional match. When used
with `payload_class` = `XMLPayload` this selects the element to
match
- `specialize`: If `True`, and `payload_class` is `None` then
return object of a specialized `StanzaPayload` subclass whenever
possible
:Types:
- `payload_class`: `StanzaPayload`
- `specialize`: `bool`
:Return: payload element found or `None`
:Returntype: `StanzaPayload`
"""
if self._payload is None:
self.decode_payload()
if payload_class is None:
if self._payload:
payload = self._payload[0]
if specialize and isinstance(payload, XMLPayload):
klass = payload_class_for_element_name(
payload.element.tag)
if klass is not XMLPayload:
payload = klass.from_xml(payload.element)
self._payload[0] = payload
return payload
else:
return None
# pylint: disable=W0212
elements = payload_class._pyxmpp_payload_element_name
for i, payload in enumerate(self._payload):
if isinstance(payload, XMLPayload):
if payload_class is not XMLPayload:
if payload.xml_element_name not in elements:
continue
payload = payload_class.from_xml(payload.element)
elif not isinstance(payload, payload_class):
continue
if payload_key is not None and payload_key != payload.handler_key():
continue
self._payload[i] = payload
return payload
return None
last_id = random.randrange(1000000)
@classmethod
def gen_id(cls):
"""Generate stanza id unique for the session.
:return: the new id."""
cls.last_id += 1
return str(cls.last_id)
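# Illustrative sketch (not part of the original module): building a bare stanza
# by hand and serializing it. The JIDs are assumptions made only for this
# example; real code would normally use the Message/Iq/Presence subclasses
# rather than Stanza directly.
def _example_stanza():
    stanza = Stanza("message", from_jid = "alice@example.org",
                    to_jid = "bob@example.org", stanza_id = Stanza.gen_id())
    return stanza.serialize()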
# vi: sts=4 et sw=4
| apache-2.0 | 1,678,497,764,299,650,800 | 35.029228 | 80 | 0.569649 | false | 4.335092 | false | false | false | 0.003998 |
theintencity/vvowproject | server/restserver.py | 1 | 9342 | # This is the Python restserver module used in the websocket server by
# restserver_wsh.py and AJAX server by ajaxserver.py.
import logging, sqlite3, json, time, random, re
logger = logging.getLogger('restserver')
class Database():
def __init__(self, filename="restserver.db"):
self.conn = sqlite3.connect(filename, check_same_thread=False)
self.cursor = self.conn.cursor()
self._create()
def _create(self):
try:
self.commit('''CREATE TABLE resource (
rid varchar(1024) PRIMARY KEY NOT NULL DEFAULT '',
prid varchar(1024) NOT NULL DEFAULT '',
type varchar(64) NOT NULL DEFAULT 'application/json',
entity blob,
cid varchar(25)
)''')
self.commit('''create table subscribe (
rid varchar(1024) NOT NULL DEFAULT '',
cid varchar(25) NOT NULL DEFAULT '',
PRIMARY KEY (rid, cid)
)''')
logger.debug('Database created')
except sqlite3.OperationalError:
logger.debug('Database already created')
def reset(self):
        # clean up the subscribe table, since there are no subscriptions at startup
self.commit("DELETE FROM subscribe");
self.commit("DELETE FROM resource WHERE cid != ''");
def close(self):
if self.cursor:
self.cursor.close()
self.cursor = None
def commit(self, *args):
logger.debug('commit%r', args)
self.cursor.execute(*args)
self.conn.commit()
def iterate(self, *args):
logger.debug('iterate%r', args)
return self.cursor.execute(*args)
def fetchone(self, *args):
logger.debug('fetchone%r', args)
self.cursor.execute(*args)
result = self.cursor.fetchone()
logger.debug('fetchone%r=>\n %r', args, result)
return result
def fetchall(self, *args):
logger.debug('fetchall%r', args)
self.cursor.execute(*args)
result = self.cursor.fetchall()
logger.debug('fetchall%r=>\n %s', args, '\n '.join(['%r'%(x,) for x in result]))
return result
def uniqid():
return str(int(time.time()) * 1000 + random.randint(0, 999))
class Handler():
def __init__(self, db):
self.db = db
def POST(self, request):
parent, ctype, entity, persistent = request['resource'], request.get('type', 'application/json'), \
json.dumps(request.get('entity', {})), request.get('persistent', False)
rid = request['id'] if 'id' in request else uniqid()
resource = parent + '/' + rid
cid = '' if persistent else self.id
try:
self.db.commit('INSERT INTO resource (rid, prid, type, entity, cid) VALUES (?, ?, ?, ?, ?)',
(resource, parent, ctype, entity, cid))
except:
logger.exception('failed to insert resource')
return dict(code='failed', reason='failed to insert this resource')
self.NOTIFY(resource, 'POST')
return dict(code='success', id=rid)
def PUT(self, request):
resource, attr, ignore = self._parse(request['resource'])
ctype, entity, persistent = request.get('type', 'application/json'), \
json.dumps(request.get('entity', {})), request.get('persistent', False)
if attr:
result = None
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
except:
logger.exception('failed to get resource')
if not result:
return dict(code='failed', reason='failed to get the resource')
result = json.loads(result[1])
result[attr] = request.get('entity', None);
entity = json.dumps(result)
try:
self.db.commit('UPDATE resource SET entity=? WHERE rid=?', (entity, resource))
except:
logger.exception('failed to replace resource attribute')
return dict(code='failed', reason='failed to replace resource attribute')
else:
parent = self.get_parent(resource)
cid = '' if persistent else self.id
try:
self.db.commit('REPLACE INTO resource (rid, prid, type, entity, cid) VALUES (?, ?, ?, ?, ?)',
(resource, parent, ctype, entity, cid))
except:
logger.exception('failed to replace resource')
return dict(code='failed', reason='failed to replace this resource')
self.NOTIFY(resource, 'PUT')
return dict(code='success')
def GET(self, request):
resource, attr, params = self._parse(request['resource'])
if attr:
result = None
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
entity = json.loads(result[1])
if attr in entity:
return dict(code="success", resource=request['resource'], entity=json.dumps(entity[attr]))
else:
return dict(code="failed", reason="failed to get this resource attribute")
except:
logger.exception('failed to read resource')
return dict(code='failed', reason='failed to get this resource')
elif params:
try:
query, attrs = 'SELECT rid FROM resource WHERE prid=?', [resource]
if 'like' in params:
query += " AND rid LIKE ?"
attrs.append(params['like'])
if 'limit' in params:
query += " LIMIT " + params['limit']
if 'offset' in params:
query += " OFFSET " + params['offset']
if 'order' in params:
query += " " + params['order']
result = self.db.fetchall(query, attrs)
except:
logger.exception('failed to read parent resource')
return dict(code='failed', reason='failed to get child resources')
response = [(row[0][len(resource)+1:] if row[0].startswith(resource) else row[0]) for row in result]
else:
try:
result = self.db.fetchone('SELECT type, entity FROM resource WHERE rid=?', (resource,))
except:
logger.exception('failed to read resource')
return dict(code='failed', reason='failed to get this resource')
if result:
ctype, entity = result[0], json.loads(result[1])
entity = dict([(k, v) for k, v in entity.iteritems() if not k or k[0] != "_"])
return dict(code='success', resource=resource, type=ctype, entity=entity)
try:
result = self.db.fetchall('SELECT rid FROM resource WHERE prid=?', (resource,))
except:
logger.exception('failed to read parent resource')
return dict(code='failed', reason='failed to get child resources')
response = [(row[0][len(resource)+1:] if row[0].startswith(resource) else row[0]) for row in result]
if response:
return dict(code='success', resource=resource, type='application/json', entity=response)
return dict(code='failed', reason='no value found for this resource')
def DELETE(self, request):
resource = request['resource']
result = self.db.fetchone('SELECT count(rid) FROM resource WHERE prid=?', (resource,))
if result[0]:
return dict(code='failed', reason='this parent resource has children')
self.db.commit('DELETE FROM resource WHERE rid=?', (resource,))
self.NOTIFY(resource, 'DELETE')
return dict(code='success')
def SUBSCRIBE(self, request):
resource = request['resource']
try:
self.db.commit('REPLACE INTO subscribe (rid, cid) VALUES (?, ?)', (resource, self.id))
except:
logger.exception('failed to replace subscribe')
return dict(code='failed', reason='failed to subscribe the client to the resource')
return dict(code='success')
def UNSUBSCRIBE(self, request):
resource = request['resource']
try:
self.db.commit('DELETE FROM subscribe WHERE rid=? AND cid=?', (resource, self.id))
except:
logger.exception('failed to delete subscribe')
return dict(code='failed', reason='failed to unsubscribe the client from the resource')
return dict(code='success')
# to be overridden by the sub-class if it supports NOTIFY
def NOTIFY(self, request, method=None):
pass
def get_parent(self, resource):
index = resource.rfind('/')
return resource[:index] if index >= 0 else ''
def _parse(self, value):
match = re.match(r'([^\[\?]+)(\[([^\]\?]*)\])?(\?.*)?$', value)
if not match: return (value, None, None)
groups = match.groups()
return (groups[0], groups[2], dict([x.split('=', 1) for x in groups[3][1:].split('&')]) if groups[3] else None)
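# Illustrative sketch (not part of the original module): exercising the Handler
# verbs directly against an in-memory database. The resource name, entity and
# client id are assumptions made only for this example; the websocket and AJAX
# front-ends normally build the request dicts from the wire protocol and assign
# the client id.
def _example_handler():
    db = Database(filename=':memory:')
    handler = Handler(db)
    handler.id = 'client-1'
    handler.POST(dict(resource='/rooms', id='lobby',
                      entity={'title': 'Lobby'}, persistent=True))
    return handler.GET(dict(resource='/rooms/lobby'))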
| lgpl-3.0 | -6,567,930,026,937,399,000 | 42.654206 | 119 | 0.555449 | false | 4.442225 | false | false | false | 0.007814 |
libscie/liberator | liberator/lib/python3.6/site-packages/django/contrib/admin/widgets.py | 41 | 13382 | """
Form Widget classes specific to the Django admin site.
"""
from __future__ import unicode_literals
import copy
from django import forms
from django.db.models.deletion import CASCADE
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils import six
from django.utils.encoding import force_text
from django.utils.html import smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
"""
A SelectMultiple with a JavaScript filter interface.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
@property
def media(self):
js = ["core.js", "SelectBox.js", "SelectFilter2.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(FilteredSelectMultiple, self).__init__(attrs, choices)
def get_context(self, name, value, attrs):
context = super(FilteredSelectMultiple, self).get_context(name, value, attrs)
context['widget']['attrs']['class'] = 'selectfilter'
if self.is_stacked:
context['widget']['attrs']['class'] += 'stacked'
context['widget']['attrs']['data-field-name'] = self.verbose_name
context['widget']['attrs']['data-is-stacked'] = int(self.is_stacked)
return context
class AdminDateWidget(forms.DateInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vDateField', 'size': '10'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
@property
def media(self):
js = ["calendar.js", "admin/DateTimeShortcuts.js"]
return forms.Media(js=["admin/js/%s" % path for path in js])
def __init__(self, attrs=None, format=None):
final_attrs = {'class': 'vTimeField', 'size': '8'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
template_name = 'admin/widgets/split_datetime.html'
def __init__(self, attrs=None):
widgets = [AdminDateWidget, AdminTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
forms.MultiWidget.__init__(self, widgets, attrs)
def get_context(self, name, value, attrs):
context = super(AdminSplitDateTime, self).get_context(name, value, attrs)
context['date_label'] = _('Date:')
context['time_label'] = _('Time:')
return context
class AdminRadioSelect(forms.RadioSelect):
template_name = 'admin/widgets/radio.html'
class AdminFileWidget(forms.ClearableFileInput):
template_name = 'admin/widgets/clearable_file_input.html'
def url_params_from_lookup_dict(lookups):
"""
Converts the type of lookups specified in a ForeignKey limit_choices_to
attribute to a dictionary of query parameters
"""
params = {}
if lookups and hasattr(lookups, 'items'):
items = []
for k, v in lookups.items():
if callable(v):
v = v()
if isinstance(v, (tuple, list)):
v = ','.join(str(x) for x in v)
elif isinstance(v, bool):
v = ('0', '1')[v]
else:
v = six.text_type(v)
items.append((k, v))
params.update(dict(items))
return params
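# Hedged usage sketch (values invented for illustration, not taken from
# Django's test suite): the helper above flattens a ``limit_choices_to``
# mapping into plain query-string parameters.
#
#     url_params_from_lookup_dict({'is_active': True, 'groups__in': [1, 2]})
#     # -> {'is_active': '1', 'groups__in': '1,2'}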
class ForeignKeyRawIdWidget(forms.TextInput):
"""
A Widget for displaying ForeignKeys in the "raw_id" interface rather than
in a <select> box.
"""
template_name = 'admin/widgets/foreign_key_raw_id.html'
def __init__(self, rel, admin_site, attrs=None, using=None):
self.rel = rel
self.admin_site = admin_site
self.db = using
super(ForeignKeyRawIdWidget, self).__init__(attrs)
def get_context(self, name, value, attrs):
context = super(ForeignKeyRawIdWidget, self).get_context(name, value, attrs)
rel_to = self.rel.model
if rel_to in self.admin_site._registry:
# The related object is registered with the same AdminSite
related_url = reverse(
'admin:%s_%s_changelist' % (
rel_to._meta.app_label,
rel_to._meta.model_name,
),
current_app=self.admin_site.name,
)
params = self.url_parameters()
if params:
related_url += '?' + '&'.join(
                    '%s=%s' % (k, v) for k, v in params.items()
)
context['related_url'] = mark_safe(related_url)
context['link_title'] = _('Lookup')
# The JavaScript code looks for this class.
context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField')
if context['widget']['value']:
context['link_label'], context['link_url'] = self.label_and_url_for_value(value)
return context
def base_url_parameters(self):
limit_choices_to = self.rel.limit_choices_to
if callable(limit_choices_to):
limit_choices_to = limit_choices_to()
return url_params_from_lookup_dict(limit_choices_to)
def url_parameters(self):
from django.contrib.admin.views.main import TO_FIELD_VAR
params = self.base_url_parameters()
params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
return params
def label_and_url_for_value(self, value):
key = self.rel.get_related_field().name
try:
obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
except (ValueError, self.rel.model.DoesNotExist):
return '', ''
try:
url = reverse(
'%s:%s_%s_change' % (
self.admin_site.name,
obj._meta.app_label,
obj._meta.object_name.lower(),
),
args=(obj.pk,)
)
except NoReverseMatch:
url = '' # Admin not registered for target model.
return Truncator(obj).words(14, truncate='...'), url
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
"""
A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
in a <select multiple> box.
"""
template_name = 'admin/widgets/many_to_many_raw_id.html'
def get_context(self, name, value, attrs):
context = super(ManyToManyRawIdWidget, self).get_context(name, value, attrs)
if self.rel.model in self.admin_site._registry:
# The related object is registered with the same AdminSite
context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'
return context
def url_parameters(self):
return self.base_url_parameters()
def label_and_url_for_value(self, value):
return '', ''
def value_from_datadict(self, data, files, name):
value = data.get(name)
if value:
return value.split(',')
def format_value(self, value):
return ','.join(force_text(v) for v in value) if value else ''
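    # Hedged round-trip sketch for the two methods above (field name and ids
    # invented for illustration):
    #
    #     widget.format_value([1, 2, 3])                               # -> '1,2,3'
    #     widget.value_from_datadict({'users': '1,2,3'}, {}, 'users')  # -> ['1', '2', '3']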
class RelatedFieldWidgetWrapper(forms.Widget):
"""
This class is a wrapper to a given widget to add the add icon for the
admin interface.
"""
template_name = 'admin/widgets/related_widget_wrapper.html'
def __init__(self, widget, rel, admin_site, can_add_related=None,
can_change_related=False, can_delete_related=False):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
# Backwards compatible check for whether a user can add related
# objects.
if can_add_related is None:
can_add_related = rel.model in admin_site._registry
self.can_add_related = can_add_related
# XXX: The UX does not support multiple selected values.
multiple = getattr(widget, 'allow_multiple_selected', False)
self.can_change_related = not multiple and can_change_related
# XXX: The deletion UX can be confusing when dealing with cascading deletion.
cascade = getattr(rel, 'on_delete', None) is CASCADE
self.can_delete_related = not multiple and not cascade and can_delete_related
# so we can check if the related object is registered with this AdminSite
self.admin_site = admin_site
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
return reverse("admin:%s_%s_%s" % (info + (action,)),
current_app=self.admin_site.name, args=args)
def get_context(self, name, value, attrs):
from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.model_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(TO_FIELD_VAR, self.rel.get_related_field().name),
(IS_POPUP_VAR, 1),
])
context = {
'rendered_widget': self.widget.render(name, value, attrs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
if self.can_change_related:
change_related_template_url = self.get_related_url(info, 'change', '__fk__')
context.update(
can_change_related=True,
change_related_template_url=change_related_template_url,
)
if self.can_add_related:
add_related_url = self.get_related_url(info, 'add')
context.update(
can_add_related=True,
add_related_url=add_related_url,
)
if self.can_delete_related:
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
can_delete_related=True,
delete_related_template_url=delete_related_template_url,
)
return context
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def value_omitted_from_data(self, data, files, name):
return self.widget.value_omitted_from_data(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
def __init__(self, attrs=None):
final_attrs = {'class': 'vLargeTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextareaWidget, self).__init__(attrs=final_attrs)
class AdminTextInputWidget(forms.TextInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminTextInputWidget, self).__init__(attrs=final_attrs)
class AdminEmailInputWidget(forms.EmailInput):
def __init__(self, attrs=None):
final_attrs = {'class': 'vTextField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminEmailInputWidget, self).__init__(attrs=final_attrs)
class AdminURLFieldWidget(forms.URLInput):
template_name = 'admin/widgets/url.html'
def __init__(self, attrs=None):
final_attrs = {'class': 'vURLField'}
if attrs is not None:
final_attrs.update(attrs)
super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)
def get_context(self, name, value, attrs):
context = super(AdminURLFieldWidget, self).get_context(name, value, attrs)
context['current_label'] = _('Currently:')
context['change_label'] = _('Change:')
context['widget']['href'] = smart_urlquote(context['widget']['value']) if value else ''
return context
class AdminIntegerFieldWidget(forms.NumberInput):
class_name = 'vIntegerField'
def __init__(self, attrs=None):
final_attrs = {'class': self.class_name}
if attrs is not None:
final_attrs.update(attrs)
super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
class_name = 'vBigIntegerField'
| cc0-1.0 | -2,025,432,190,253,747,200 | 35.36413 | 95 | 0.611867 | false | 3.920891 | false | false | false | 0.001121 |
erdc/proteus | proteus/tests/HotStart_3P/pressureincrement_p.py | 1 | 2823 | from __future__ import absolute_import
from builtins import object
from math import *
from proteus import *
from proteus.default_p import *
from .NS_hotstart import *
#domain = ctx.domain
#nd = ctx.nd
name = "pressureincrement"
from proteus.mprans import PresInc
coefficients=PresInc.Coefficients(rho_f_min = (1.0-1.0e-8)*rho_1,
rho_s_min = (1.0-1.0e-8)*rho_s,
nd = nd,
modelIndex=1,
fluidModelIndex=0,
fixNullSpace=fixNullSpace_PresInc,
INTEGRATE_BY_PARTS_DIV_U=INTEGRATE_BY_PARTS_DIV_U_PresInc)
LevelModelType = PresInc.LevelModel
def getDBC_phi(x,flag):
    return None
def getAdvectiveFlux_qt(x,flag):
if manufactured_solution==1: #u.n!=0
if (flag==1): #left boundary
return lambda x,t: -np.sin(x[0])*np.sin(x[1]+t)
elif (flag==2): # right boundary
return lambda x,t: np.sin(x[0])*np.sin(x[1]+t)
elif (flag==3): # bottom boundary
return lambda x,t: -np.cos(x[0])*np.cos(x[1]+t)
else:
return lambda x,t: np.cos(x[0])*np.cos(x[1]+t)
else: #u.n=0
return lambda x,t: 0.
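# Clarifying note (an inference from the fluxes above, not a statement from
# the original authors): for the manufactured solution these values appear to
# be the outward normal component u.n of the velocity field
# u = (sin(x)*sin(y+t), cos(x)*cos(y+t)), with flags 1-4 corresponding to the
# left, right, bottom and top boundaries respectively.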
def getDiffusiveFlux_phi(x,flag):
return lambda x,t: 0.
class getIBC_phi(object):
def __init__(self):
pass
def uOfXT(self,x,t):
return 0.0
initialConditions = {0:getIBC_phi()}
dirichletConditions = {0:getDBC_phi}
advectiveFluxBoundaryConditions = {0:getAdvectiveFlux_qt}
diffusiveFluxBoundaryConditions = {0:{0:getDiffusiveFlux_phi}}
| mit | -5,260,060,366,232,125,000 | 59.06383 | 275 | 0.334396 | false | 5.095668 | false | false | false | 0.022671 |
bankonmeOS/cjdns | node_build/dependencies/cnacl/crypto_sign/ed25519/ref10/base2.py | 77 | 1231 | b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def expmod(b,e,m):
if e == 0: return 1
t = expmod(b,e/2,m)**2 % m
if e & 1: t = (t*b) % m
return t
def inv(x):
return expmod(x,q-2,q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)/4,q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = expmod(xx,(q+3)/8,q)
if (x*x - xx) % q != 0: x = (x*I) % q
if x % 2 != 0: x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
def edwards(P,Q):
x1 = P[0]
y1 = P[1]
x2 = Q[0]
y2 = Q[1]
x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
return [x3 % q,y3 % q]
def radix255(x):
x = x % q
if x + x > q: x -= q
x = [x,0,0,0,0,0,0,0,0,0]
bits = [26,25,26,25,26,25,26,25,26,25]
for i in range(9):
carry = (x[i] + 2**(bits[i]-1)) / 2**bits[i]
x[i] -= carry * 2**bits[i]
x[i + 1] += carry
result = ""
for i in range(9):
result = result+str(x[i])+","
result = result+str(x[9])
return result
Bi = B
for i in range(8):
print " {"
print " {",radix255(Bi[1]+Bi[0]),"},"
print " {",radix255(Bi[1]-Bi[0]),"},"
print " {",radix255(2*d*Bi[0]*Bi[1]),"},"
print " },"
Bi = edwards(B,edwards(B,Bi))
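# Hedged sanity check added for illustration (not in the original script):
# the base point should satisfy the twisted Edwards curve equation
# -x^2 + y^2 = 1 + d*x^2*y^2 (mod q).
assert (-Bx*Bx + By*By - 1 - d*Bx*Bx*By*By) % q == 0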
| gpl-3.0 | -2,945,460,050,342,223,000 | 19.516667 | 51 | 0.495532 | false | 1.890937 | false | false | false | 0.069862 |
paulruvolo/ThinkStats2 | code/hinc2.py | 68 | 1622 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import hinc
import thinkplot
import thinkstats2
def InterpolateSample(df, log_upper=6.0):
"""Makes a sample of log10 household income.
Assumes that log10 income is uniform in each range.
df: DataFrame with columns income and freq
log_upper: log10 of the assumed upper bound for the highest range
returns: NumPy array of log10 household income
"""
# compute the log10 of the upper bound for each range
df['log_upper'] = np.log10(df.income)
# get the lower bounds by shifting the upper bound and filling in
# the first element
df['log_lower'] = df.log_upper.shift(1)
df.log_lower[0] = 3.0
# plug in a value for the unknown upper bound of the highest range
df.log_upper[41] = log_upper
# use the freq column to generate the right number of values in
# each range
arrays = []
for _, row in df.iterrows():
vals = np.linspace(row.log_lower, row.log_upper, row.freq)
arrays.append(vals)
# collect the arrays into a single sample
log_sample = np.concatenate(arrays)
return log_sample
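# Hedged usage sketch (mirrors main() below; the upper bound of 6.0 is the
# same assumption used there):
#
#     df = hinc.ReadData()
#     log_sample = InterpolateSample(df, log_upper=6.0)
#     incomes = np.power(10, log_sample)  # back from log10 dollars to dollars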
def main():
df = hinc.ReadData()
log_sample = InterpolateSample(df, log_upper=6.0)
log_cdf = thinkstats2.Cdf(log_sample)
thinkplot.Cdf(log_cdf)
thinkplot.Show(xlabel='household income',
ylabel='CDF')
if __name__ == "__main__":
main()
| gpl-3.0 | 7,326,676,600,089,581,000 | 25.590164 | 70 | 0.674476 | false | 3.488172 | false | false | false | 0 |
rhdedgar/openshift-tools | openshift/secrets/create-secrets.py | 12 | 1139 | #!/usr/bin/python
# vim: expandtab:tabstop=4:shiftwidth=4
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name
''' Generate a single combined yaml file to load into
OpenShift as a secret '''
import base64
import yaml
def main():
    ''' Create a single generated-secrets.yml that can be loaded
as a secret into OpenShift '''
yaml_files = ['scriptrunner.yml', 'zabbix-server-vars.yml',
'zagg-server-vars.yml']
yaml_dict = {}
gen_yaml = None
with open('generated-secrets.yml', 'r') as f:
gen_yaml = yaml.safe_load(f)
for yfile in yaml_files:
yfile_yaml = open(yfile, 'r').read()
yfile_b64 = base64.b64encode(yfile_yaml)
yaml_dict[yfile] = yfile_b64
for yfile, yfile_b64 in yaml_dict.iteritems():
gen_yaml['data'][yfile] = yfile_b64
with open('generated-secrets.yml', 'w') as f:
yaml.dump(gen_yaml, f, default_flow_style=False)
print "Saved as generated-secrets.yml"
print "Load into OpenShift with 'oc create -f generated-secrets.yml'"
if __name__ == '__main__':
main()
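# Hedged round-trip sketch (not part of the original command): each embedded
# file can be recovered from the generated secret by reversing the base64
# encoding, e.g.
#
#     gen = yaml.safe_load(open('generated-secrets.yml'))
#     original_yaml = base64.b64decode(gen['data']['scriptrunner.yml'])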
| apache-2.0 | -3,608,903,229,379,511,300 | 28.973684 | 73 | 0.634767 | false | 3.282421 | false | false | false | 0.003512 |
huangchuchuan/Spider | MouserSpider/myselenium.py | 1 | 1329 | # -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
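# Hedged alternative (not in the original script): instead of assembling a raw
# Cookie header string, the (name, value) pairs could be loaded into the
# requests session's cookie jar, e.g.
#
#     for name, value in cookies:
#         s.cookies.set(name, value, domain='www.mouser.cn')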
| apache-2.0 | -6,929,995,332,538,899,000 | 27.276596 | 97 | 0.693755 | false | 2.626482 | false | false | false | 0.008277 |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/django/contrib/gis/tests/test_measure.py | 221 | 8307 | """
Distance and Area objects to allow for sensible and convenient calculation
and conversions. Here are some tests.
"""
from django.contrib.gis.measure import Distance, Area, D, A
from django.utils import unittest
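# Quick reference for the API under test (values chosen for illustration;
# the assertions below exercise the same behaviour):
#
#     D(m=100).km == 0.1
#     (D(m=100) + D(km=1)).m == 1100
#     A(sq_m=100).sq_km == 0.0001
#     isinstance(D(m=10) * D(m=10), Area)  # Distance * Distance gives an Area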
class DistanceTest(unittest.TestCase):
"Testing the Distance object"
def testInit(self):
"Testing initialisation from valid units"
d = Distance(m=100)
self.assertEqual(d.m, 100)
d1, d2, d3 = D(m=100), D(meter=100), D(metre=100)
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
y1, y2, y3 = D(yd=100), D(yard=100), D(Yard=100)
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
mm1, mm2 = D(millimeter=1000), D(MiLLiMeTeR=1000)
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
def testInitInvalid(self):
"Testing initialisation from invalid units"
self.assertRaises(AttributeError, D, banana=100)
def testAccess(self):
"Testing access in different units"
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
def testAccessInvalid(self):
"Testing access in invalid units"
d = D(m=100)
self.assertFalse(hasattr(d, 'banana'))
def testAddition(self):
"Test addition & subtraction"
d1 = D(m=100)
d2 = D(m=200)
d3 = d1 + d2
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = d1 - d2
self.assertEqual(d4.m, -100)
d4 -= d1
self.assertEqual(d4.m, -200)
with self.assertRaises(TypeError):
d5 = d1 + 1
self.fail('Distance + number should raise TypeError')
with self.assertRaises(TypeError):
d5 = d1 - 1
self.fail('Distance - number should raise TypeError')
with self.assertRaises(TypeError):
d1 += 1
self.fail('Distance += number should raise TypeError')
with self.assertRaises(TypeError):
d1 -= 1
self.fail('Distance -= number should raise TypeError')
def testMultiplication(self):
"Test multiplication & division"
d1 = D(m=100)
d3 = d1 * 2
self.assertEqual(d3.m, 200)
d3 = 2 * d1
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = d1 / 2
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
d5 = d1 / D(m=2)
self.assertEqual(d5, 50)
a5 = d1 * D(m=10)
self.assertTrue(isinstance(a5, Area))
self.assertEqual(a5.sq_m, 100*10)
with self.assertRaises(TypeError):
d1 *= D(m=1)
self.fail('Distance *= Distance should raise TypeError')
with self.assertRaises(TypeError):
d1 /= D(m=1)
self.fail('Distance /= Distance should raise TypeError')
def testUnitConversions(self):
"Testing default units during maths"
d1 = D(m=100)
d2 = D(km=1)
d3 = d1 + d2
self.assertEqual(d3._default_unit, 'm')
d4 = d2 + d1
self.assertEqual(d4._default_unit, 'km')
d5 = d1 * 2
self.assertEqual(d5._default_unit, 'm')
d6 = d1 / 2
self.assertEqual(d6._default_unit, 'm')
def testComparisons(self):
"Testing comparisons"
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertTrue(d2 > d1)
self.assertTrue(d1 == d1)
self.assertTrue(d1 < d2)
self.assertFalse(d3)
def testUnitsStr(self):
"Testing conversion to strings"
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
def testUnitAttName(self):
"Testing the `unit_attname` class method"
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'),
('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for nm, att in unit_tuple:
self.assertEqual(att, D.unit_attname(nm))
class AreaTest(unittest.TestCase):
"Testing the Area object"
def testInit(self):
"Testing initialisation from valid units"
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
def testInitInvaliA(self):
"Testing initialisation from invalid units"
self.assertRaises(AttributeError, A, banana=100)
def testAccess(self):
"Testing access in different units"
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
def testAccessInvaliA(self):
"Testing access in invalid units"
a = A(sq_m=100)
self.assertFalse(hasattr(a, 'banana'))
def testAddition(self):
"Test addition & subtraction"
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = a1 + a2
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = a1 - a2
self.assertEqual(a4.sq_m, -100)
a4 -= a1
self.assertEqual(a4.sq_m, -200)
with self.assertRaises(TypeError):
a5 = a1 + 1
self.fail('Area + number should raise TypeError')
with self.assertRaises(TypeError):
a5 = a1 - 1
self.fail('Area - number should raise TypeError')
with self.assertRaises(TypeError):
a1 += 1
self.fail('Area += number should raise TypeError')
with self.assertRaises(TypeError):
a1 -= 1
self.fail('Area -= number should raise TypeError')
def testMultiplication(self):
"Test multiplication & division"
a1 = A(sq_m=100)
a3 = a1 * 2
self.assertEqual(a3.sq_m, 200)
a3 = 2 * a1
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = a1 / 2
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
a5 = a1 * A(sq_m=1)
self.fail('Area * Area should raise TypeError')
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
self.fail('Area *= Area should raise TypeError')
with self.assertRaises(TypeError):
a5 = a1 / A(sq_m=1)
self.fail('Area / Area should raise TypeError')
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
self.fail('Area /= Area should raise TypeError')
def testUnitConversions(self):
"Testing default units during maths"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = a1 + a2
self.assertEqual(a3._default_unit, 'sq_m')
a4 = a2 + a1
self.assertEqual(a4._default_unit, 'sq_km')
a5 = a1 * 2
self.assertEqual(a5._default_unit, 'sq_m')
a6 = a1 / 2
self.assertEqual(a6._default_unit, 'sq_m')
def testComparisons(self):
"Testing comparisons"
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertTrue(a2 > a1)
self.assertTrue(a1 == a1)
self.assertTrue(a1 < a2)
self.assertFalse(a3)
def testUnitsStr(self):
"Testing conversion to strings"
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DistanceTest))
s.addTest(unittest.makeSuite(AreaTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
| gpl-3.0 | -3,748,618,297,115,475,500 | 27.644828 | 105 | 0.554713 | false | 3.329459 | true | false | false | 0.000963 |
fedora-conary/conary | conary_test/cvctest/newtrovetest.py | 2 | 15484 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from StringIO import StringIO
from conary_test import rephelp
from conary import changelog
from conary import trove
from conary import versions
from conary.conaryclient import filetypes
from conary.deps import deps
from conary.repository import changeset
class ClientNewTroveTest(rephelp.RepositoryHelper):
def testNewFileRegularFile(self):
repos = self.openRepository()
client = self.getConaryClient()
foo1 = filetypes.RegularFile(contents = 'foo1')
foo2 = filetypes.RegularFile(contents = StringIO('foo2' * 8192))
foo3 = filetypes.RegularFile(contents = StringIO('foo3\n'))
files = {'/foo1': foo1, '/foo2': foo2, '/foo3': foo3}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0',
files, changelog.ChangeLog('foo', 'bar'))
repos.commitChangeSet(cs)
n,v,f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
results = client.getFilesFromTrove(n,v,f, ['/foo1', '/foo3'])
assert(not '/foo2' in results)
contents = foo1.getContents()
assert(contents.read() == 'foo1')
contents = foo3.getContents()
assert(contents.read() == 'foo3\n')
def testNewFileContents(self):
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
dir = filetypes.Directory()
sym = filetypes.Symlink('../file')
files = {'file': fil, '/dir': dir, '/dir/sym': sym}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0',
files, changelog.ChangeLog('foo', 'bar'))
repos.commitChangeSet(cs)
n,v,f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
trv = repos.getTrove(n, v, f)
processed = 0
for pathId, path, fileId, vers in trv.iterFileList():
fileObj = repos.getFileVersion(pathId, fileId, vers)
if path == '/dir':
processed += 1
self.assertEquals(fileObj.lsTag, 'd')
self.assertEquals(fileObj.inode.perms(), 0755)
self.assertEquals(fileObj.hasContents, False)
elif path == '/dir/sym':
processed += 1
self.assertEquals(fileObj.lsTag, 'l')
self.assertEquals(fileObj.target(), '../file')
self.assertEquals(fileObj.hasContents, False)
elif path == 'file':
processed += 1
self.assertEquals(fileObj.lsTag, '-')
self.assertEquals(fileObj.inode.perms(), 0644)
self.assertEquals(fileObj.hasContents, True)
# make sure we looked at all the files
self.assertEquals(processed, 3)
fileDict = client.getFilesFromTrove(n, v, f)
# we don't want to see dir or sym in the list. they don't have contents
# that can be retrieved
self.assertEquals(fileDict.keys(), ['file'])
self.assertEquals(fileDict['file'].read(), 'foo')
def testNewFileTwice(self):
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
fil2 = filetypes.RegularFile(contents = 'foo')
files = {'file': fil, 'file2': fil2}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v), '/localhost@rpl:linux/1.0-1')
repos.commitChangeSet(cs)
files = {'file': fil}
# repeat the creation to show the source count gets bumped
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
n2, v2, f2 = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v2), '/localhost@rpl:linux/1.0-2')
repos.commitChangeSet(cs)
# repeat the creation to show the source count gets bumped
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
n2, v2, f2 = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v2), '/localhost@rpl:linux/1.0-3')
repos.commitChangeSet(cs)
# prove that the source count gets reset for a new upstream version
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.1', files,
changelog.ChangeLog('foo', 'bar'))
n2, v2, f2 = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v2), '/localhost@rpl:linux/1.1-1')
def testNewFileNotSource(self):
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
files = {'file': fil}
self.assertRaises(RuntimeError, client.createSourceTrove, \
'foo:runtime', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
def testNewTroveNotSource(self):
class DummyTrove(object):
def getNameVersionFlavor(self):
return 'foo:runtime', None, None
client = self.getConaryClient()
self.assertRaises(RuntimeError, client._targetNewTroves, [DummyTrove()])
def testNewTroveDupVersion(self):
class DummyTrove(object):
def getNameVersionFlavor(self):
return 'foo:source', self.getVersion(), None
def getVersion(self):
return versions.VersionFromString('/localhost@rpl:linux/1.0-1')
def changeVersion(*args, **kwargs):
pass
repos = self.openRepository()
client = self.getConaryClient()
res = self.assertRaises(RuntimeError, client._targetNewTroves,
[DummyTrove(), DummyTrove()])
def testNewFileFlavor(self):
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo',
flavor = deps.parseFlavor('xen,domU, is:x86'))
files = {'file': fil}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
# source troves don't have a flavor
self.assertEquals(f, deps.Flavor())
def testRemoveOldPathIds(self):
class DummyTroveObj(object):
def __init__(x):
x.tracked = []
def iterFileList(x):
return [['a'], ['b'], ['c']]
def removePath(x, pathId):
x.tracked.append(pathId)
client = self.getConaryClient()
trv = DummyTroveObj()
client._removeOldPathIds(trv)
self.assertEquals(trv.tracked, ['a', 'b', 'c'])
def testPreservePathIds(self):
self.openRepository()
client = self.getConaryClient()
repos = client.getRepos()
fil = filetypes.RegularFile(contents = 'foo')
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', {'file': fil},
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
fileList1 = list(trv.iterFileList())
# repeat without changing the file, but bump the upstream version
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.1', {'file': fil},
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
fileList2 = list(trv.iterFileList())
# repeat but change the file, also bump the upstream version
fil = filetypes.RegularFile(contents = 'bar')
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.2', {'file': fil},
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
fileList3 = list(trv.iterFileList())
self.assertEquals(fileList1[0][0], fileList2[0][0])
self.assertEquals(fileList2[0][0], fileList3[0][0])
def testNewFactory(self):
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
files = {'file': fil}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'), factory = 'factory-foo')
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v), '/localhost@rpl:linux/1.0-1')
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
self.assertEquals(trv.troveInfo.factory(), 'factory-foo')
# repeat without factory
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('foo', 'bar'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v), '/localhost@rpl:linux/1.0-2')
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
self.assertEquals(trv.troveInfo.factory(), '')
def testChangelog(self):
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
files = {'file': fil}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('user', 'foo'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
self.assertEquals(str(v), '/localhost@rpl:linux/1.0-1')
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
self.assertEquals(trv.changeLog.freeze(),
changelog.ChangeLog('user', 'foo').freeze())
def testDuplicateFileObj(self):
# re-use the exact same fileoj and prove that it gets tracked properly
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo')
files = {'file1': fil, 'file2': fil}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('user', 'foo'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
self.assertEquals(sorted([x[1] for x in trv.iterFileList()]),
['file1', 'file2'])
def testSourceFlag(self):
# prove that the createSourceTrove process marks each file as source
repos = self.openRepository()
client = self.getConaryClient()
fil = filetypes.RegularFile(contents = 'foo', config = True)
fileObj = fil.get('1234567890ABCDEF')
self.assertEquals(bool(fileObj.flags.isConfig()), True)
files = {'file1': fil}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0', files,
changelog.ChangeLog('user', 'foo'))
n, v, f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
repos.commitChangeSet(cs)
trv = repos.getTrove(n, v, f)
pathId, path, fileId, fileVersion = list(trv.iterFileList())[0]
fileObj = repos.getFileVersion(pathId, fileId, fileVersion)
self.assertEquals(bool(fileObj.flags.isConfig()), True)
def testPackageCreatorData(self):
repos = self.openRepository()
client = self.getConaryClient()
cs = client.createSourceTrove(
'foo:source', self.cfg.buildLabel, '1.0', {},
changelog.ChangeLog('user', 'foo'),
pkgCreatorData = 'FOO')
repos.commitChangeSet(cs)
cs = client.createSourceTrove(
'bar:source', self.cfg.buildLabel, '1.0', {},
changelog.ChangeLog('user', 'foo'))
repos.commitChangeSet(cs)
l = repos.getPackageCreatorTroves('localhost')
assert(len(l) == 1)
assert(l[0][0][0] == 'foo:source')
assert(l[0][1] == 'FOO')
def testNewTroveVersionSelection(self):
# CNY-3028 - make sure version selection
# picks the right version, given our constraints.
repos = self.openRepository()
client = self.getConaryClient()
self.addComponent('foo:source', '1.0-1')
self.addComponent('foo:source', '/localhost@rpl:shadow//linux/1.0-2')
self.addComponent('foo:source', '2.0-1')
cs = client.createSourceTrove(
'foo:source', self.cfg.buildLabel, '1.0', {},
changelog.ChangeLog('user', 'foo'))
trvCs = cs.iterNewTroveList().next()
assert(str(trvCs.getNewVersion().trailingRevision()) == '1.0-3')
def testCreateSourceTroveWithMetadata(self):
repos = self.openRepository()
client = self.getConaryClient()
metadata = dict(key1="val1", key2="val2")
cs = client.createSourceTrove(
'foo:source', self.cfg.buildLabel, '1.0', {},
changelog.ChangeLog('user', 'foo'),
metadata=metadata)
trvCs = cs.iterNewTroveList().next()
trv = trove.Trove(trvCs)
self.assertEqual(
dict(trv.troveInfo.metadata.flatten()[0].keyValue),
metadata)
def testCreateSourceTroveRemoved(self):
repos = self.openRepository()
client = self.getConaryClient()
foo1 = filetypes.RegularFile(contents = 'foo1')
files = {'/foo1': foo1}
cs = client.createSourceTrove( \
'foo:source', self.cfg.buildLabel, '1.0',
files, changelog.ChangeLog('foo', 'bar'))
repos.commitChangeSet(cs)
n,v,f = cs.iterNewTroveList().next().getNewNameVersionFlavor()
# markremove it
cs = changeset.ChangeSet()
trv = trove.Trove(n, v, f, type=trove.TROVE_TYPE_REMOVED)
trv.computeDigests()
cs.newTrove(trv.diff(None, absolute=True)[0])
repos.commitChangeSet(cs)
# try again
cs = client.createSourceTrove('foo:source', self.cfg.buildLabel, '1.0',
files, changelog.ChangeLog('foo', 'bar'))
repos.commitChangeSet(cs)
| apache-2.0 | 2,835,800,922,083,413,000 | 40.623656 | 80 | 0.597778 | false | 3.661386 | true | false | false | 0.005425 |
nparley/mylatitude | lib/webob/acceptparse.py | 1 | 205846 | """
Parse four ``Accept*`` headers used in server-driven content negotiation.
The four headers are ``Accept``, ``Accept-Charset``, ``Accept-Encoding`` and
``Accept-Language``.
"""
import re
import textwrap
import warnings
# RFC 7230 Section 3.2.3 "Whitespace"
# OWS = *( SP / HTAB )
# ; optional whitespace
OWS_re = '[ \t]*'
# RFC 7230 Section 3.2.6 "Field Value Components":
# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
# / DIGIT / ALPHA
tchar_re = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]"
# token = 1*tchar
token_re = tchar_re + '+'
token_compiled_re = re.compile('^' + token_re + '$')
# RFC 7231 Section 5.3.1 "Quality Values"
# qvalue = ( "0" [ "." 0*3DIGIT ] )
# / ( "1" [ "." 0*3("0") ] )
qvalue_re = (
r'(?:0(?:\.[0-9]{0,3})?)'
'|'
r'(?:1(?:\.0{0,3})?)'
)
# weight = OWS ";" OWS "q=" qvalue
weight_re = OWS_re + ';' + OWS_re + '[qQ]=(' + qvalue_re + ')'
def _item_n_weight_re(item_re):
return '(' + item_re + ')(?:' + weight_re + ')?'
def _item_qvalue_pair_to_header_element(pair):
item, qvalue = pair
if qvalue == 1.0:
element = item
elif qvalue == 0.0:
element = '{};q=0'.format(item)
else:
element = '{};q={}'.format(item, qvalue)
return element
def _list_0_or_more__compiled_re(element_re):
# RFC 7230 Section 7 "ABNF List Extension: #rule":
# #element => [ ( "," / element ) *( OWS "," [ OWS element ] ) ]
return re.compile(
'^(?:$)|' +
'(?:' +
'(?:,|(?:' + element_re + '))' +
'(?:' + OWS_re + ',(?:' + OWS_re + element_re + ')?)*' +
')$',
)
def _list_1_or_more__compiled_re(element_re):
# RFC 7230 Section 7 "ABNF List Extension: #rule":
# 1#element => *( "," OWS ) element *( OWS "," [ OWS element ] )
# and RFC 7230 Errata ID: 4169
return re.compile(
'^(?:,' + OWS_re + ')*' + element_re +
'(?:' + OWS_re + ',(?:' + OWS_re + element_re + ')?)*$',
)
class Accept(object):
"""
Represent an ``Accept`` header.
Base class for :class:`AcceptValidHeader`, :class:`AcceptNoHeader`, and
:class:`AcceptInvalidHeader`.
"""
# RFC 6838 describes syntax rules for media types that are different to
# (and stricter than) those in RFC 7231, but if RFC 7231 intended us to
# follow the rules in RFC 6838 for media ranges, it would not have
# specified its own syntax rules for media ranges, so it appears we should
# use the rules in RFC 7231 for now.
# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR = %x21-7E
# ; visible (printing) characters
vchar_re = '\x21-\x7e'
# RFC 7230 Section 3.2.6 "Field Value Components":
# quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
# qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
# obs-text = %x80-FF
# quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
obs_text_re = '\x80-\xff'
qdtext_re = '[\t \x21\x23-\x5b\\\x5d-\x7e' + obs_text_re + ']'
# The '\\' between \x5b and \x5d is needed to escape \x5d (']')
quoted_pair_re = r'\\' + '[\t ' + vchar_re + obs_text_re + ']'
quoted_string_re = \
'"(?:(?:' + qdtext_re + ')|(?:' + quoted_pair_re + '))*"'
# RFC 7231 Section 3.1.1.1 "Media Type":
# type = token
# subtype = token
# parameter = token "=" ( token / quoted-string )
type_re = token_re
subtype_re = token_re
parameter_re = token_re + '=' + \
'(?:(?:' + token_re + ')|(?:' + quoted_string_re + '))'
# Section 5.3.2 "Accept":
# media-range = ( "*/*"
# / ( type "/" "*" )
# / ( type "/" subtype )
# ) *( OWS ";" OWS parameter )
media_range_re = (
'(' +
'(?:' + type_re + '/' + subtype_re + ')' +
# '*' is included through type_re and subtype_re, so this covers */*
# and type/*
')' +
'(' +
'(?:' + OWS_re + ';' + OWS_re +
'(?![qQ]=)' + # media type parameter cannot be named "q"
parameter_re + ')*' +
')'
)
# accept-params = weight *( accept-ext )
# accept-ext = OWS ";" OWS token [ "=" ( token / quoted-string ) ]
accept_ext_re = (
OWS_re + ';' + OWS_re + token_re + '(?:' +
'=(?:' +
'(?:' + token_re + ')|(?:' + quoted_string_re + ')' +
')' +
')?'
)
accept_params_re = weight_re + '((?:' + accept_ext_re + ')*)'
media_range_n_accept_params_re = media_range_re + '(?:' + \
accept_params_re + ')?'
media_range_n_accept_params_compiled_re = re.compile(
media_range_n_accept_params_re,
)
accept_compiled_re = _list_0_or_more__compiled_re(
element_re=media_range_n_accept_params_re,
)
# For parsing repeated groups within the media type parameters and
# extension parameters segments
parameters_compiled_re = re.compile(
OWS_re + ';' + OWS_re + '(' + token_re + ')=(' + token_re + '|' +
quoted_string_re + ')',
)
accept_ext_compiled_re = re.compile(
OWS_re + ';' + OWS_re + '(' + token_re + ')' +
'(?:' +
'=(' +
'(?:' +
'(?:' + token_re + ')|(?:' + quoted_string_re + ')' +
')' +
')' +
')?',
)
# For parsing the media types in the `offers` argument to
# .acceptable_offers(), we re-use the media range regex for media types.
# This is not intended to be a validation of the offers; its main purpose
# is to extract the media type and any media type parameters.
media_type_re = media_range_re
media_type_compiled_re = re.compile('^' + media_type_re + '$')
@classmethod
def _escape_and_quote_parameter_value(cls, param_value):
"""
Escape and quote parameter value where necessary.
For media type and extension parameter values.
"""
if param_value == '':
param_value = '""'
else:
param_value = param_value.replace('\\', '\\\\').replace(
'"', r'\"',
)
if not token_compiled_re.match(param_value):
param_value = '"' + param_value + '"'
return param_value
@classmethod
def _form_extension_params_segment(cls, extension_params):
"""
Convert iterable of extension parameters to str segment for header.
`extension_params` is an iterable where each item is either a parameter
string or a (name, value) tuple.
"""
extension_params_segment = ''
for item in extension_params:
try:
extension_params_segment += (';' + item)
except TypeError:
param_name, param_value = item
param_value = cls._escape_and_quote_parameter_value(
param_value=param_value,
)
extension_params_segment += (
';' + param_name + '=' + param_value
)
return extension_params_segment
@classmethod
def _form_media_range(cls, type_subtype, media_type_params):
"""
Combine `type_subtype` and `media_type_params` to form a media range.
`type_subtype` is a ``str``, and `media_type_params` is an iterable of
(parameter name, parameter value) tuples.
"""
media_type_params_segment = ''
for param_name, param_value in media_type_params:
param_value = cls._escape_and_quote_parameter_value(
param_value=param_value,
)
media_type_params_segment += (';' + param_name + '=' + param_value)
return type_subtype + media_type_params_segment
@classmethod
def _iterable_to_header_element(cls, iterable):
"""
Convert iterable of tuples into header element ``str``.
Each tuple is expected to be in one of two forms: (media_range, qvalue,
extension_params_segment), or (media_range, qvalue).
"""
try:
media_range, qvalue, extension_params_segment = iterable
except ValueError:
media_range, qvalue = iterable
extension_params_segment = ''
if qvalue == 1.0:
if extension_params_segment:
element = '{};q=1{}'.format(
media_range, extension_params_segment,
)
else:
element = media_range
elif qvalue == 0.0:
element = '{};q=0{}'.format(media_range, extension_params_segment)
else:
element = '{};q={}{}'.format(
media_range, qvalue, extension_params_segment,
)
return element
@classmethod
def _parse_media_type_params(cls, media_type_params_segment):
"""
Parse media type parameters segment into list of (name, value) tuples.
"""
media_type_params = cls.parameters_compiled_re.findall(
media_type_params_segment,
)
for index, (name, value) in enumerate(media_type_params):
if value.startswith('"') and value.endswith('"'):
value = cls._process_quoted_string_token(token=value)
media_type_params[index] = (name, value)
return media_type_params
@classmethod
def _process_quoted_string_token(cls, token):
"""
Return unescaped and unquoted value from quoted token.
"""
# RFC 7230, section 3.2.6 "Field Value Components": "Recipients that
# process the value of a quoted-string MUST handle a quoted-pair as if
# it were replaced by the octet following the backslash."
return re.sub(r'\\(?![\\])', '', token[1:-1]).replace('\\\\', '\\')
@classmethod
def _python_value_to_header_str(cls, value):
"""
Convert Python value to header string for __add__/__radd__.
"""
if isinstance(value, str):
return value
if hasattr(value, 'items'):
if value == {}:
value = []
else:
value_list = []
for media_range, item in value.items():
# item is either (media range, (qvalue, extension
# parameters segment)), or (media range, qvalue) (supported
# for backward compatibility)
if isinstance(item, (float, int)):
value_list.append((media_range, item, ''))
else:
value_list.append((media_range, item[0], item[1]))
value = sorted(
value_list,
key=lambda item: item[1], # qvalue
reverse=True,
)
if isinstance(value, (tuple, list)):
header_elements = []
for item in value:
if isinstance(item, (tuple, list)):
item = cls._iterable_to_header_element(iterable=item)
header_elements.append(item)
header_str = ', '.join(header_elements)
else:
header_str = str(value)
return header_str
@classmethod
def parse(cls, value):
"""
Parse an ``Accept`` header.
:param value: (``str``) header value
:return: If `value` is a valid ``Accept`` header, returns an iterator
of (*media_range*, *qvalue*, *media_type_params*,
*extension_params*) tuples, as parsed from the header from
left to right.
| *media_range* is the media range, including any media type
parameters. The media range is returned in a canonicalised
                   form (except that the case of the characters is unchanged):
unnecessary spaces around the semicolons before media type
parameters are removed; the parameter values are returned in
a form where only the '``\``' and '``"``' characters are
escaped, and the values are quoted with double quotes only
if they need to be quoted.
| *qvalue* is the quality value of the media range.
| *media_type_params* is the media type parameters, as a list
of (parameter name, value) tuples.
| *extension_params* is the extension parameters, as a list
where each item is either a parameter string or a (parameter
name, value) tuple.
:raises ValueError: if `value` is an invalid header
"""
# Check if header is valid
# Using Python stdlib's `re` module, there is currently no way to check
# the match *and* get all the groups using the same regex, so we have
# to do this in steps using multiple regexes.
if cls.accept_compiled_re.match(value) is None:
raise ValueError('Invalid value for an Accept header.')
def generator(value):
for match in (
cls.media_range_n_accept_params_compiled_re.finditer(value)
):
groups = match.groups()
type_subtype = groups[0]
media_type_params = cls._parse_media_type_params(
media_type_params_segment=groups[1],
)
media_range = cls._form_media_range(
type_subtype=type_subtype,
media_type_params=media_type_params,
)
# qvalue (groups[2]) and extension_params (groups[3]) are both
# None if neither qvalue or extension parameters are found in
# the match.
qvalue = groups[2]
qvalue = float(qvalue) if qvalue else 1.0
extension_params = groups[3]
if extension_params:
extension_params = cls.accept_ext_compiled_re.findall(
extension_params,
)
for index, (token_key, token_value) in enumerate(
extension_params
):
if token_value:
if (
token_value.startswith('"') and
token_value.endswith('"')
):
token_value = cls._process_quoted_string_token(
token=token_value,
)
extension_params[index] = (
token_key, token_value,
)
else:
extension_params[index] = token_key
else:
extension_params = []
yield (
media_range, qvalue, media_type_params, extension_params,
)
return generator(value=value)
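    # Hedged usage sketch for parse() (the header value is invented for
    # illustration):
    #
    #     list(Accept.parse('text/html;level=1;q=0.5, */*'))
    #     # -> [('text/html;level=1', 0.5, [('level', '1')], []),
    #     #     ('*/*', 1.0, [], [])]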
class AcceptValidHeader(Accept):
"""
Represent a valid ``Accept`` header.
A valid header is one that conforms to :rfc:`RFC 7231, section 5.3.2
<7231#section-5.3.2>`.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptValidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
A list of (*media_range*, *qvalue*, *media_type_params*,
*extension_params*) tuples, where
*media_range* is the media range, including any media type parameters.
The media range is returned in a canonicalised form (except the case of
the characters are unchanged): unnecessary spaces around the semicolons
before media type parameters are removed; the parameter values are
returned in a form where only the '``\``' and '``"``' characters are
escaped, and the values are quoted with double quotes only if they need
to be quoted.
*qvalue* is the quality value of the media range.
*media_type_params* is the media type parameters, as a list of
(parameter name, value) tuples.
*extension_params* is the extension parameters, as a list where each
item is either a parameter string or a (parameter name, value) tuple.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptValidHeader` instance.
:param header_value: (``str``) header value.
:raises ValueError: if `header_value` is an invalid value for an
``Accept`` header.
"""
self._header_value = header_value
self._parsed = list(self.parse(header_value))
self._parsed_nonzero = [item for item in self.parsed if item[1]]
# item[1] is the qvalue
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with media ranges ``str``\ s (including any media type
parameters) as keys, and either qvalues ``float``\ s or (*qvalues*,
*extension_params*) tuples as values, where *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (*media_range*, *qvalue*, *extension_params*) ``tuple``
or ``list`` where *media_range* is a ``str`` of the media range
including any media type parameters, and *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* an :class:`AcceptValidHeader`, :class:`AcceptNoHeader`, or
:class:`AcceptInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or another
:class:`AcceptValidHeader` instance, and the header value it represents
is not `''`, then the two header values are joined with ``', '``, and a
new :class:`AcceptValidHeader` instance with the new header value is
returned.
If `other` is a valid header value or another
:class:`AcceptValidHeader` instance representing a header value of
`''`; or if it is ``None`` or an :class:`AcceptNoHeader` instance; or
if it is an invalid header value, or an :class:`AcceptInvalidHeader`
instance, then a new :class:`AcceptValidHeader` instance with the same
header value as ``self`` is returned.
"""
if isinstance(other, AcceptValidHeader):
if other.header_value == '':
return self.__class__(header_value=self.header_value)
else:
return create_accept_header(
header_value=self.header_value + ', ' + other.header_value,
)
if isinstance(other, (AcceptNoHeader, AcceptInvalidHeader)):
return self.__class__(header_value=self.header_value)
return self._add_instance_and_non_accept_type(
instance=self, other=other,
)
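    # Hedged illustration of the addition semantics described in __add__
    # (header values invented for the example):
    #
    #     AcceptValidHeader('text/html') + 'text/plain;q=0.5'
    #     # -> <AcceptValidHeader ('text/html, text/plain;q=0.5')>
    #     AcceptValidHeader('text/html') + None
    #     # -> <AcceptValidHeader ('text/html')>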
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``True``.
"""
return True
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of :meth:`AcceptValidHeader.__contains__` is currently
being maintained for backward compatibility, but it will change in
the future to better conform to the RFC.
:param offer: (``str``) media type offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
This uses the old criterion of a match in
:meth:`AcceptValidHeader._old_match`, which is not as specified in
:rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>`. It does not
correctly take into account media type parameters:
>>> 'text/html;p=1' in AcceptValidHeader('text/html')
False
or media ranges with ``q=0`` in the header::
>>> 'text/html' in AcceptValidHeader('text/*, text/html;q=0')
True
>>> 'text/html' in AcceptValidHeader('text/html;q=0, */*')
True
(See the docstring for :meth:`AcceptValidHeader._old_match` for other
problems with the old criterion for matching.)
"""
warnings.warn(
'The behavior of AcceptValidHeader.__contains__ is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
for (
media_range, quality, media_type_params, extension_params
) in self._parsed_nonzero:
if self._old_match(media_range, offer):
return True
return False
def __iter__(self):
"""
Return all the ranges with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the media ranges in the header with non-0
qvalues, in descending order of qvalue. If two ranges have the
same qvalue, they are returned in the order of their positions
in the header, from left to right.
Please note that this is a simple filter for the ranges in the header
with non-0 qvalues, and is not necessarily the same as what the client
prefers, e.g. ``'audio/basic;q=0, */*'`` means 'everything but
audio/basic', but ``list(instance)`` would return only ``['*/*']``.
"""
warnings.warn(
            'The behavior of AcceptValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
for media_range, qvalue, media_type_params, extension_params in sorted(
self._parsed_nonzero,
key=lambda i: i[1],
reverse=True
):
yield media_range
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptValidHeader.__add__`.
"""
return self._add_instance_and_non_accept_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{} ({!r})>'.format(self.__class__.__name__, str(self))
def __str__(self):
r"""
Return a tidied up version of the header value.
e.g. If ``self.header_value`` is ``r',,text/html ; p1="\"\1\"" ;
q=0.50; e1=1 ;e2 , text/plain ,'``, ``str(instance)`` returns
``r'text/html;p1="\"1\"";q=0.5;e1=1;e2, text/plain'``.
"""
# self.parsed tuples are in the form: (media_range, qvalue,
# media_type_params, extension_params)
# self._iterable_to_header_element() requires iterable to be in the
# form: (media_range, qvalue, extension_params_segment).
return ', '.join(
self._iterable_to_header_element(
iterable=(
tuple_[0], # media_range
tuple_[1], # qvalue
self._form_extension_params_segment(
extension_params=tuple_[3], # extension_params
)
),
) for tuple_ in self.parsed
)
def _add_instance_and_non_accept_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return self.__class__(header_value=instance.header_value)
other_header_value = self._python_value_to_header_str(value=other)
if other_header_value == '':
# if ``other`` is an object whose type we don't recognise, and
# str(other) returns ''
return self.__class__(header_value=instance.header_value)
try:
self.parse(value=other_header_value)
except ValueError: # invalid header value
return self.__class__(header_value=instance.header_value)
new_header_value = (
(other_header_value + ', ' + instance.header_value)
if instance_on_the_right
else (instance.header_value + ', ' + other_header_value)
)
return self.__class__(header_value=new_header_value)
def _old_match(self, mask, offer):
"""
Check if the offer is covered by the mask
``offer`` may contain wildcards to facilitate checking if a ``mask``
would match a 'permissive' offer.
Wildcard matching forces the match to take place against the type or
subtype of the mask and offer (depending on where the wildcard matches)
.. warning::
This is maintained for backward compatibility, and will be
deprecated in the future.
This method was WebOb's old criterion for deciding whether a media type
matches a media range, used in
- :meth:`AcceptValidHeader.__contains__`
- :meth:`AcceptValidHeader.best_match`
- :meth:`AcceptValidHeader.quality`
It allows offers of *, */*, type/*, */subtype and types with no
subtypes, which are not media types as specified in :rfc:`RFC 7231,
section 5.3.2 <7231#section-5.3.2>`. This is also undocumented in any
of the public APIs that uses this method.
"""
# Match if comparisons are the same or either is a complete wildcard
if (mask.lower() == offer.lower() or
'*/*' in (mask, offer) or
'*' == offer):
return True
# Set mask type with wildcard subtype for malformed masks
try:
mask_type, mask_subtype = [x.lower() for x in mask.split('/')]
except ValueError:
mask_type = mask
mask_subtype = '*'
# Set offer type with wildcard subtype for malformed offers
try:
offer_type, offer_subtype = [x.lower() for x in offer.split('/')]
except ValueError:
offer_type = offer
offer_subtype = '*'
if mask_subtype == '*':
# match on type only
if offer_type == '*':
return True
else:
return mask_type.lower() == offer_type.lower()
if mask_type == '*':
# match on subtype only
if offer_subtype == '*':
return True
else:
return mask_subtype.lower() == offer_subtype.lower()
if offer_subtype == '*':
# match on type only
return mask_type.lower() == offer_type.lower()
if offer_type == '*':
# match on subtype only
return mask_subtype.lower() == offer_subtype.lower()
return offer.lower() == mask.lower()
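    # Hedged examples of the old criterion implemented above (offers invented
    # for illustration); note that it accepts offers that are not valid media
    # types under RFC 7231:
    #
    #     self._old_match('text/*', 'text')           # True (offer missing subtype)
    #     self._old_match('image/jpeg', '*')          # True (bare wildcard offer)
    #     self._old_match('text/html', 'text/plain')  # False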
def accept_html(self):
"""
Return ``True`` if any HTML-like type is accepted.
The HTML-like types are 'text/html', 'application/xhtml+xml',
'application/xml' and 'text/xml'.
"""
return bool(
self.acceptable_offers(
offers=[
'text/html',
'application/xhtml+xml',
'application/xml',
'text/xml',
],
)
)
accepts_html = property(fget=accept_html, doc=accept_html.__doc__)
# note the plural
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
The offers are returned in descending order of preference, where
preference is indicated by the qvalue of the media range in the header
that best matches the offer.
This uses the matching rules described in :rfc:`RFC 7231, section 5.3.2
<7231#section-5.3.2>`.
:param offers: ``iterable`` of ``str`` media types (media types can
include media type parameters)
:return: A list of tuples of the form (media type, qvalue), in
descending order of qvalue. Where two offers have the same
qvalue, they are returned in the same order as their order in
`offers`.
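An illustrative sketch of the matching rules described above::
>>> instance = AcceptValidHeader('text/html;q=0.9, text/*;q=0.5')
>>> instance.acceptable_offers(['text/html', 'text/plain', 'image/png'])
[('text/html', 0.9), ('text/plain', 0.5)]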
"""
parsed = self.parsed
# RFC 7231, section 3.1.1.1 "Media Type":
# "The type, subtype, and parameter name tokens are case-insensitive.
# Parameter values might or might not be case-sensitive, depending on
# the semantics of the parameter name."
lowercased_ranges = [
(
media_range.partition(';')[0].lower(), qvalue,
[(name.lower(), value) for name, value in media_type_params],
[(name.lower(), value) for name, value in extension_params],
)
for media_range, qvalue, media_type_params, extension_params in
parsed
]
lowercased_offers = [offer.lower() for offer in offers]
lowercased_offers_parsed = []
for offer in lowercased_offers:
match = self.media_type_compiled_re.match(offer)
# The regex here is only used for parsing, and not intended to
# validate the offer
if not match:
raise ValueError(repr(offer) + ' is not a media type.')
lowercased_offers_parsed.append(match.groups())
acceptable_offers_n_quality_factors = {}
for (
offer_index, (offer_type_subtype, offer_media_type_params)
) in enumerate(lowercased_offers_parsed):
offer_media_type_params = self._parse_media_type_params(
media_type_params_segment=offer_media_type_params,
)
for (
range_type_subtype, range_qvalue, range_media_type_params, __,
) in lowercased_ranges:
# The specificity values below are based on the list in the
# example in RFC 7231 section 5.3.2 explaining how "media
# ranges can be overridden by more specific media ranges or
# specific media types". We assign specificity to the list
# items in reverse order, so specificity 4, 3, 2, 1 correspond
# to 1, 2, 3, 4 in the list, respectively (so that higher
# specificity has higher precedence).
if offer_type_subtype == range_type_subtype:
if range_media_type_params == []:
# If offer_media_type_params == [], the offer and the
# range match exactly, with neither having media type
# parameters.
# If offer_media_type_params is not [], the offer and
# the range are a match. See the table towards the end
# of RFC 7231 section 5.3.2, where the media type
# 'text/html;level=3' matches the range 'text/html' in
# the header.
# Both cases are a match with a specificity of 3.
specificity = 3
elif offer_media_type_params == range_media_type_params:
specificity = 4
else: # pragma: no cover
# no cover because of
# https://bitbucket.org/ned/coveragepy/issues/254/incorrect-coverage-on-continue-statement
continue
else:
offer_type = offer_type_subtype.split('/')[0]
range_type, range_subtype = range_type_subtype.split('/')
if range_subtype == '*' and offer_type == range_type:
specificity = 2
elif range_type_subtype == '*/*':
specificity = 1
else: # pragma: no cover
# no cover because of
# https://bitbucket.org/ned/coveragepy/issues/254/incorrect-coverage-on-continue-statement
continue
try:
if specificity <= acceptable_offers_n_quality_factors[
offers[offer_index]
][2]:
continue
except KeyError:
# the entry for the offer is not already in
# acceptable_offers_n_quality_factors
pass
acceptable_offers_n_quality_factors[offers[offer_index]] = (
range_qvalue, # qvalue of matched range
offer_index,
specificity, # specificity of matched range
)
acceptable_offers_n_quality_factors = [
# key is offer, value[0] is qvalue, value[1] is offer_index
(key, value[0], value[1])
for key, value in acceptable_offers_n_quality_factors.items()
if value[0] # != 0.0
# We have to filter out the offers with qvalues of 0 here instead
# of just skipping them early in the large ``for`` loop because
# that would not work for e.g. when the header is 'text/html;q=0,
# text/html' (which does not make sense, but is nonetheless valid),
# and offers is ['text/html']
]
# sort by offer_index, ascending
acceptable_offers_n_quality_factors.sort(key=lambda tuple_: tuple_[2])
# (stable) sort by qvalue, descending
acceptable_offers_n_quality_factors.sort(
key=lambda tuple_: tuple_[1], reverse=True,
)
# drop offer_index
acceptable_offers_n_quality_factors = [
(item[0], item[1]) for item in acceptable_offers_n_quality_factors
]
return acceptable_offers_n_quality_factors
# If a media range is repeated in the header (which would not make
# sense, but would be valid according to the rules in the RFC), an
# offer for which the media range is the most specific match would take
# its qvalue from the first appearance of the range in the header.
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of media type `offers`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:meth:`AcceptValidHeader.best_match` uses its own algorithm (one not
specified in :rfc:`RFC 7231 <7231>`) to determine what is a best
match. The algorithm has many issues, and does not conform to
:rfc:`RFC 7231 <7231>`.
Each media type in `offers` is checked against each non-``q=0`` range
in the header. If the two are a match according to WebOb's old
criterion for a match, the quality value of the match is the qvalue of
the media range from the header multiplied by the server quality value
of the offer (if the server quality value is not supplied, it is 1).
The offer in the match with the highest quality value is the best
match. If there is more than one match with the highest qvalue, the
match where the media range has a lower number of '*'s is the best
match. If the two have the same number of '*'s, the one that shows up
first in `offers` is the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` media type,
or a (media type, server quality value) ``tuple`` or
``list``. (The two may be mixed in the iterable.)
:param default_match: (optional, any type) the value to be returned if
there is no match
:return: (``str``, or the type of `default_match`)
| The offer that is the best match. If there is no match, the
value of `default_match` is returned.
This uses the old criterion of a match in
:meth:`AcceptValidHeader._old_match`, which is not as specified in
:rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>`. It does not
correctly take into account media type parameters:
>>> instance = AcceptValidHeader('text/html')
>>> instance.best_match(offers=['text/html;p=1']) is None
True
or media ranges with ``q=0`` in the header::
>>> instance = AcceptValidHeader('text/*, text/html;q=0')
>>> instance.best_match(offers=['text/html'])
'text/html'
>>> instance = AcceptValidHeader('text/html;q=0, */*')
>>> instance.best_match(offers=['text/html'])
'text/html'
(See the docstring for :meth:`AcceptValidHeader._old_match` for other
problems with the old criterion for matching.)
Another issue is that this method considers the best matching range for
an offer to be the matching range with the highest quality value (where
quality values are tied, the most specific media range is chosen);
whereas :rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>` specifies
that we should consider the best matching range for a media type offer
to be the most specific matching range::
>>> instance = AcceptValidHeader('text/html;q=0.5, text/*')
>>> instance.best_match(offers=['text/html', 'text/plain'])
'text/html'
"""
warnings.warn(
'The behavior of AcceptValidHeader.best_match is currently being '
'maintained for backward compatibility, but it will be deprecated'
' in the future, as it does not conform to the RFC.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
matched_by = '*/*'
for offer in offers:
if isinstance(offer, (tuple, list)):
offer, server_quality = offer
else:
server_quality = 1
for item in self._parsed_nonzero:
mask = item[0]
quality = item[1]
possible_quality = server_quality * quality
if possible_quality < best_quality:
continue
elif possible_quality == best_quality:
# 'text/plain' overrides 'message/*' overrides '*/*'
# (if all match w/ the same q=)
if matched_by.count('*') <= mask.count('*'):
continue
if self._old_match(mask, offer):
best_quality = possible_quality
best_offer = offer
matched_by = mask
return best_offer
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:param offer: (``str``) media type offer
:return: (``float`` or ``None``)
| The highest quality value from the media range(s) that match
the `offer`, or ``None`` if there is no match.
This uses the old criterion of a match in
:meth:`AcceptValidHeader._old_match`, which is not as specified in
:rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>`. It does not
correctly take into account media type parameters:
>>> instance = AcceptValidHeader('text/html')
>>> instance.quality('text/html;p=1') is None
True
or media ranges with ``q=0`` in the header::
>>> instance = AcceptValidHeader('text/*, text/html;q=0')
>>> instance.quality('text/html')
1.0
>>> AcceptValidHeader('text/html;q=0, */*').quality('text/html')
1.0
(See the docstring for :meth:`AcceptValidHeader._old_match` for other
problems with the old criterion for matching.)
Another issue is that this method considers the best matching range for
an offer to be the matching range with the highest quality value,
whereas :rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>` specifies
that we should consider the best matching range for a media type offer
to be the most specific matching range::
>>> instance = AcceptValidHeader('text/html;q=0.5, text/*')
>>> instance.quality('text/html')
1.0
"""
warnings.warn(
'The behavior of AcceptValidHeader.quality is currently being '
'maintained for backward compatibility, but it will be deprecated '
'in the future, as it does not conform to the RFC.',
DeprecationWarning,
)
bestq = 0
for item in self.parsed:
media_range = item[0]
qvalue = item[1]
if self._old_match(media_range, offer):
bestq = max(bestq, qvalue)
return bestq or None
class MIMEAccept(Accept):
"""
Backwards compatibility shim for the new functionality provided by
AcceptValidHeader, AcceptInvalidHeader, or AcceptNoHeader, that acts like
the old MIMEAccept from WebOb version 1.7 or lower.
This shim does use the newer Accept header parsing, which will mean your
application may be less liberal in what Accept headers are correctly
parsed. It is recommended that user agents be updated to send appropriate
Accept headers that are valid according to :rfc:`RFC 7231, section 5.3.2
<7231#section-5.3.2>`.
.. deprecated:: 1.8
Instead of directly creating the Accept object, please see:
:func:`create_accept_header(header_value)
<webob.acceptparse.create_accept_header>`, which will create the
appropriate object.
This shim has an extended deprecation period to allow for application
developers to switch to the new API.
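A brief usage sketch (illustrative only; the deprecation warnings emitted
on construction and on each call are not shown)::
>>> accept = MIMEAccept('text/html, application/json;q=0.5')
>>> accept.best_match(['application/json', 'text/html'])
'text/html'
>>> accept.quality('application/json')
0.5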
"""
def __init__(self, header_value):
warnings.warn(
'The MIMEAccept class has been replaced by '
'webob.acceptparse.create_accept_header. This compatibility shim '
'will be deprecated in a future version of WebOb.',
DeprecationWarning
)
self._accept = create_accept_header(header_value)
if self._accept.parsed:
self._parsed = [(media, q) for (media, q, _, _) in self._accept.parsed]
self._parsed_nonzero = [(m, q) for (m, q) in self._parsed if q]
else:
self._parsed = []
self._parsed_nonzero = []
@staticmethod
def parse(value):
try:
parsed_accepted = Accept.parse(value)
for (media, q, _, _) in parsed_accepted:
yield (media, q)
except ValueError:
pass
def __repr__(self):
return self._accept.__repr__()
def __iter__(self):
return self._accept.__iter__()
def __str__(self):
return self._accept.__str__()
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(str(self._accept.__add__(other._accept)))
else:
return self.__class__(str(self._accept.__add__(other)))
def __radd__(self, other):
return self.__class__(str(self._accept.__radd__(other)))
def __contains__(self, offer):
return offer in self._accept
def quality(self, offer):
return self._accept.quality(offer)
def best_match(self, offers, default_match=None):
return self._accept.best_match(offers, default_match=default_match)
def accept_html(self):
return self._accept.accept_html()
class _AcceptInvalidOrNoHeader(Accept):
"""
Represent when an ``Accept`` header is invalid or not in request.
This is the base class for the behaviour that :class:`.AcceptInvalidHeader`
and :class:`.AcceptNoHeader` have in common.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept`` header has an invalid value. This implementation disregards the
header when the header is invalid, so :class:`.AcceptInvalidHeader` and
:class:`.AcceptNoHeader` have much behaviour in common.
"""
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``False``.
"""
return False
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of ``.__contains__`` for the ``Accept`` classes is
currently being maintained for backward compatibility, but it will
change in the future to better conform to the RFC.
:param offer: (``str``) media type offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
For this class, either there is no ``Accept`` header in the request, or
the header is invalid, so any media type is acceptable, and this always
returns ``True``.
"""
warnings.warn(
'The behavior of .__contains__ for the Accept classes is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
return True
def __iter__(self):
"""
Return all the ranges with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the media ranges in the header with non-0
qvalues, in descending order of qvalue. If two ranges have the
same qvalue, they are returned in the order of their positions
in the header, from left to right.
When there is no ``Accept`` header in the request or the header is
invalid, there are no media ranges, so this always returns an empty
iterator.
"""
warnings.warn(
'The behavior of AcceptValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
return iter(())
def accept_html(self):
"""
Return ``True`` if any HTML-like type is accepted.
The HTML-like types are 'text/html', 'application/xhtml+xml',
'application/xml' and 'text/xml'.
When the header is invalid, or there is no `Accept` header in the
request, all `offers` are considered acceptable, so this always returns
``True``.
"""
return bool(
self.acceptable_offers(
offers=[
'text/html',
'application/xhtml+xml',
'application/xml',
'text/xml',
],
)
)
accepts_html = property(fget=accept_html, doc=accept_html.__doc__)
# note the plural
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
:param offers: ``iterable`` of ``str`` media types (media types can
include media type parameters)
:return: When the header is invalid, or there is no ``Accept`` header
in the request, all `offers` are considered acceptable, so
this method returns a list of (media type, qvalue) tuples
where each offer in `offers` is paired with the qvalue of 1.0,
in the same order as in `offers`.
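For example (an illustrative sketch, using the no-header case)::
>>> AcceptNoHeader().acceptable_offers(['text/html', 'image/png'])
[('text/html', 1.0), ('image/png', 1.0)]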
"""
return [(offer, 1.0) for offer in offers]
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of language tag `offers`.
This is the ``.best_match()`` method for when the header is invalid or
not found in the request, corresponding to
:meth:`AcceptValidHeader.best_match`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptValidHeader.best_match`).
When the header is invalid, or there is no `Accept` header in the
request, all `offers` are considered acceptable, so the best match is
the media type in `offers` with the highest server quality value (if
the server quality value is not supplied for a media type, it is 1).
If more than one media type in `offers` has the same highest server
quality value, then the one that shows up first in `offers` is the best
match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` media type,
or a (media type, server quality value) ``tuple`` or
``list``. (The two may be mixed in the iterable.)
:param default_match: (optional, any type) the value to be returned if
`offers` is empty.
:return: (``str``, or the type of `default_match`)
| The offer that has the highest server quality value. If
`offers` is empty, the value of `default_match` is returned.
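For example (an illustrative sketch; the deprecation warning is not shown)::
>>> AcceptNoHeader().best_match(offers=['text/html', ('image/png', 0.8)])
'text/html'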
"""
warnings.warn(
'The behavior of .best_match for the Accept classes is currently '
'being maintained for backward compatibility, but the method will'
' be deprecated in the future, as its behavior is not specified '
'in (and currently does not conform to) RFC 7231.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
for offer in offers:
if isinstance(offer, (list, tuple)):
offer, quality = offer
else:
quality = 1
if quality > best_quality:
best_offer = offer
best_quality = quality
return best_offer
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
This is the ``.quality()`` method for when the header is invalid or not
found in the request, corresponding to
:meth:`AcceptValidHeader.quality`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptValidHeader.quality`).
:param offer: (``str``) media type offer
:return: (``float``) ``1.0``.
When the ``Accept`` header is invalid or not in the request, all offers
are equally acceptable, so 1.0 is always returned.
"""
warnings.warn(
'The behavior of .quality for the Accept classes is currently '
'being maintained for backward compatibility, but the method will'
' be deprecated in the future, as its behavior does not conform to '
'RFC 7231.',
DeprecationWarning,
)
return 1.0
class AcceptNoHeader(_AcceptInvalidOrNoHeader):
"""
Represent when there is no ``Accept`` header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptNoHeader.__add__`).
"""
@property
def header_value(self):
"""
(``str`` or ``None``) The header value.
As there is no header in the request, this is ``None``.
"""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As there is no header in the request, this is ``None``.
"""
return self._parsed
def __init__(self):
"""
Create an :class:`AcceptNoHeader` instance.
"""
self._header_value = None
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with media ranges ``str``\ s (including any media type
parameters) as keys, and either qvalues ``float``\ s or (*qvalues*,
*extension_params*) tuples as values, where *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (*media_range*, *qvalue*, *extension_params*) ``tuple``
or ``list`` where *media_range* is a ``str`` of the media range
including any media type parameters, and *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* an :class:`AcceptValidHeader`, :class:`AcceptNoHeader`, or
:class:`AcceptInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an :class:`AcceptValidHeader`
instance, a new :class:`AcceptValidHeader` instance with the valid
header value is returned.
If `other` is ``None``, an :class:`AcceptNoHeader` instance, an invalid
header value, or an :class:`AcceptInvalidHeader` instance, a new
:class:`AcceptNoHeader` instance is returned.
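An illustrative sketch of the addition behaviour::
>>> AcceptNoHeader() + 'text/html;q=0.5'
<AcceptValidHeader ('text/html;q=0.5')>
>>> AcceptNoHeader() + None
<AcceptNoHeader>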
"""
if isinstance(other, AcceptValidHeader):
return AcceptValidHeader(header_value=other.header_value)
if isinstance(other, (AcceptNoHeader, AcceptInvalidHeader)):
return self.__class__()
return self._add_instance_and_non_accept_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptNoHeader.__add__`.
"""
return self.__add__(other=other)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
"""Return the ``str`` ``'<no header in request>'``."""
return '<no header in request>'
def _add_instance_and_non_accept_type(self, instance, other):
if other is None:
return self.__class__()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return self.__class__()
class AcceptInvalidHeader(_AcceptInvalidOrNoHeader):
"""
Represent an invalid ``Accept`` header.
An invalid header is one that does not conform to
:rfc:`7231#section-5.3.2`.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept`` header has an invalid value. This implementation disregards the
header, and treats it as if there is no ``Accept`` header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptInvalidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As the header is invalid and cannot be parsed, this is ``None``.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptInvalidHeader` instance.
"""
self._header_value = header_value
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with media ranges ``str``\ s (including any media type
parameters) as keys, and either qvalues ``float``\ s or (*qvalues*,
*extension_params*) tuples as values, where *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (*media_range*, *qvalue*, *extension_params*) ``tuple``
or ``list`` where *media_range* is a ``str`` of the media range
including any media type parameters, and *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* an :class:`AcceptValidHeader`, :class:`AcceptNoHeader`, or
:class:`AcceptInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an :class:`AcceptValidHeader`
instance, then a new :class:`AcceptValidHeader` instance with the valid
header value is returned.
If `other` is ``None``, an :class:`AcceptNoHeader` instance, an invalid
header value, or an :class:`AcceptInvalidHeader` instance, a new
:class:`AcceptNoHeader` instance is returned.
"""
if isinstance(other, AcceptValidHeader):
return AcceptValidHeader(header_value=other.header_value)
if isinstance(other, (AcceptNoHeader, AcceptInvalidHeader)):
return AcceptNoHeader()
return self._add_instance_and_non_accept_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptInvalidHeader.__add__`.
"""
return self._add_instance_and_non_accept_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
# We do not display the header_value, as it is untrusted input. The
# header_value could always be easily obtained from the .header_value
# property.
def __str__(self):
"""Return the ``str`` ``'<invalid header value>'``."""
return '<invalid header value>'
def _add_instance_and_non_accept_type(
self, instance, other, instance_on_the_right=False,
):
if other is None:
return AcceptNoHeader()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return AcceptNoHeader()
def create_accept_header(header_value):
"""
Create an object representing the ``Accept`` header in a request.
:param header_value: (``str``) header value
:return: If `header_value` is ``None``, an :class:`AcceptNoHeader`
instance.
| If `header_value` is a valid ``Accept`` header, an
:class:`AcceptValidHeader` instance.
| If `header_value` is an invalid ``Accept`` header, an
:class:`AcceptInvalidHeader` instance.
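An illustrative sketch of the dispatch (``'noslash'`` is simply an example
of a value that does not parse as an ``Accept`` header)::
>>> create_accept_header(None)
<AcceptNoHeader>
>>> create_accept_header('text/html, text/plain;q=0.5')
<AcceptValidHeader ('text/html, text/plain;q=0.5')>
>>> create_accept_header('noslash')
<AcceptInvalidHeader>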
"""
if header_value is None:
return AcceptNoHeader()
try:
return AcceptValidHeader(header_value=header_value)
except ValueError:
return AcceptInvalidHeader(header_value=header_value)
def accept_property():
doc = """
Property representing the ``Accept`` header.
(:rfc:`RFC 7231, section 5.3.2 <7231#section-5.3.2>`)
The header value in the request environ is parsed and a new object
representing the header is created every time we *get* the value of the
property. (*set* and *del* change the header value in the request
environ, and do not involve parsing.)
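An illustrative sketch, assuming the property is attached to the request
class as ``accept`` (as it is on WebOb's ``Request``)::
>>> from webob import Request
>>> request = Request.blank('/')
>>> request.accept = 'text/html;q=0.9'
>>> request.accept
<AcceptValidHeader ('text/html;q=0.9')>
>>> request.accept = None
>>> request.accept
<AcceptNoHeader>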
"""
ENVIRON_KEY = 'HTTP_ACCEPT'
def fget(request):
"""Get an object representing the header in the request."""
return create_accept_header(
header_value=request.environ.get(ENVIRON_KEY)
)
def fset(request, value):
"""
Set the corresponding key in the request environ.
`value` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with media ranges ``str``\ s (including any media type
parameters) as keys, and either qvalues ``float``\ s or (*qvalues*,
*extension_params*) tuples as values, where *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (*media_range*, *qvalue*, *extension_params*) ``tuple``
or ``list`` where *media_range* is a ``str`` of the media range
including any media type parameters, and *extension_params* is a
``str`` of the extension parameters segment of the header element,
starting with the first '``;``'
* an :class:`AcceptValidHeader`, :class:`AcceptNoHeader`, or
:class:`AcceptInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
"""
if value is None or isinstance(value, AcceptNoHeader):
fdel(request=request)
else:
if isinstance(value, (AcceptValidHeader, AcceptInvalidHeader)):
header_value = value.header_value
else:
header_value = Accept._python_value_to_header_str(value=value)
request.environ[ENVIRON_KEY] = header_value
def fdel(request):
"""Delete the corresponding key from the request environ."""
try:
del request.environ[ENVIRON_KEY]
except KeyError:
pass
return property(fget, fset, fdel, textwrap.dedent(doc))
class AcceptCharset(object):
"""
Represent an ``Accept-Charset`` header.
Base class for :class:`AcceptCharsetValidHeader`,
:class:`AcceptCharsetNoHeader`, and :class:`AcceptCharsetInvalidHeader`.
"""
# RFC 7231 Section 3.1.1.2 "Charset":
# charset = token
charset_re = token_re
# RFC 7231 Section 5.3.3 "Accept-Charset":
# Accept-Charset = 1#( ( charset / "*" ) [ weight ] )
charset_n_weight_re = _item_n_weight_re(item_re=charset_re)
charset_n_weight_compiled_re = re.compile(charset_n_weight_re)
accept_charset_compiled_re = _list_1_or_more__compiled_re(
element_re=charset_n_weight_re,
)
@classmethod
def _python_value_to_header_str(cls, value):
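# Convert a Python value into an ``Accept-Charset``-style header string:
# a ``str`` is used as-is; a dict mapping charsets to qvalues is sorted
# by qvalue, descending; a tuple/list may mix charset strings with
# (charset, qvalue) pairs; anything else falls back to ``str(value)``.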
if isinstance(value, str):
header_str = value
else:
if hasattr(value, 'items'):
value = sorted(
value.items(),
key=lambda item: item[1],
reverse=True,
)
if isinstance(value, (tuple, list)):
result = []
for item in value:
if isinstance(item, (tuple, list)):
item = _item_qvalue_pair_to_header_element(pair=item)
result.append(item)
header_str = ', '.join(result)
else:
header_str = str(value)
return header_str
@classmethod
def parse(cls, value):
"""
Parse an ``Accept-Charset`` header.
:param value: (``str``) header value
:return: If `value` is a valid ``Accept-Charset`` header, returns an
iterator of (charset, quality value) tuples, as parsed from
the header from left to right.
:raises ValueError: if `value` is an invalid header
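For example (an illustrative sketch)::
>>> list(AcceptCharset.parse('iso-8859-5, unicode-1-1;q=0.8'))
[('iso-8859-5', 1.0), ('unicode-1-1', 0.8)]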
"""
# Check if header is valid
# Using Python stdlib's `re` module, there is currently no way to check
# the match *and* get all the groups using the same regex, so we have
# to use one regex to check the match, and another to get the groups.
if cls.accept_charset_compiled_re.match(value) is None:
raise ValueError('Invalid value for an Accept-Charset header.')
def generator(value):
for match in (cls.charset_n_weight_compiled_re.finditer(value)):
charset = match.group(1)
qvalue = match.group(2)
qvalue = float(qvalue) if qvalue else 1.0
yield (charset, qvalue)
return generator(value=value)
class AcceptCharsetValidHeader(AcceptCharset):
"""
Represent a valid ``Accept-Charset`` header.
A valid header is one that conforms to :rfc:`RFC 7231, section 5.3.3
<7231#section-5.3.3>`.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptCharsetValidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list``) Parsed form of the header.
A list of (charset, quality value) tuples.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptCharsetValidHeader` instance.
:param header_value: (``str``) header value.
:raises ValueError: if `header_value` is an invalid value for an
``Accept-Charset`` header.
"""
self._header_value = header_value
self._parsed = list(self.parse(header_value))
self._parsed_nonzero = [
item for item in self.parsed if item[1] # item[1] is the qvalue
]
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, where keys are charsets and values are qvalues
* a ``tuple`` or ``list``, where each item is a charset ``str`` or a
``tuple`` or ``list`` (charset, qvalue) pair (``str``\ s and pairs
can be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptCharsetValidHeader`, :class:`AcceptCharsetNoHeader`,
or :class:`AcceptCharsetInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or another
:class:`AcceptCharsetValidHeader` instance, the two header values are
joined with ``', '``, and a new :class:`AcceptCharsetValidHeader`
instance with the new header value is returned.
If `other` is ``None``, an :class:`AcceptCharsetNoHeader` instance, an
invalid header value, or an :class:`AcceptCharsetInvalidHeader`
instance, a new :class:`AcceptCharsetValidHeader` instance with the
same header value as ``self`` is returned.
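An illustrative sketch of adding a ``str`` header value::
>>> AcceptCharsetValidHeader('utf-8') + 'iso-8859-5;q=0.5'
<AcceptCharsetValidHeader ('utf-8, iso-8859-5;q=0.5')>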
"""
if isinstance(other, AcceptCharsetValidHeader):
return create_accept_charset_header(
header_value=self.header_value + ', ' + other.header_value,
)
if isinstance(
other, (AcceptCharsetNoHeader, AcceptCharsetInvalidHeader)
):
return self.__class__(header_value=self.header_value)
return self._add_instance_and_non_accept_charset_type(
instance=self, other=other,
)
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept-Charset`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``True``.
"""
return True
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of :meth:`AcceptCharsetValidHeader.__contains__` is
currently being maintained for backward compatibility, but it will
change in the future to better conform to the RFC.
:param offer: (``str``) charset offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
This does not fully conform to :rfc:`RFC 7231, section 5.3.3
<7231#section-5.3.3>`: it incorrectly interprets ``*`` to mean 'match any
charset in the header', rather than 'match any charset that is not
mentioned elsewhere in the header'::
>>> 'UTF-8' in AcceptCharsetValidHeader('UTF-8;q=0, *')
True
"""
warnings.warn(
'The behavior of AcceptCharsetValidHeader.__contains__ is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
for mask, quality in self._parsed_nonzero:
if self._old_match(mask, offer):
return True
return False
def __iter__(self):
"""
Return all the items with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the items (charset or ``*``) in the header
with non-0 qvalues, in descending order of qvalue. If two
items have the same qvalue, they are returned in the order of
their positions in the header, from left to right.
Please note that this is a simple filter for the items in the header
with non-0 qvalues, and is not necessarily the same as what the client
prefers, e.g. ``'utf-7;q=0, *'`` means 'everything but utf-7', but
``list(instance)`` would return only ``['*']``.
"""
warnings.warn(
'The behavior of AcceptCharsetValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
for m, q in sorted(
self._parsed_nonzero,
key=lambda i: i[1],
reverse=True
):
yield m
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptCharsetValidHeader.__add__`.
"""
return self._add_instance_and_non_accept_charset_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{} ({!r})>'.format(self.__class__.__name__, str(self))
def __str__(self):
r"""
Return a tidied up version of the header value.
e.g. If the ``header_value`` is ``', \t,iso-8859-5;q=0.000 \t,
utf-8;q=1.000, UTF-7, unicode-1-1;q=0.210 ,'``, ``str(instance)``
returns ``'iso-8859-5;q=0, utf-8, UTF-7, unicode-1-1;q=0.21'``.
"""
return ', '.join(
_item_qvalue_pair_to_header_element(pair=tuple_)
for tuple_ in self.parsed
)
def _add_instance_and_non_accept_charset_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return self.__class__(header_value=instance.header_value)
other_header_value = self._python_value_to_header_str(value=other)
try:
self.parse(value=other_header_value)
except ValueError: # invalid header value
return self.__class__(header_value=instance.header_value)
new_header_value = (
(other_header_value + ', ' + instance.header_value)
if instance_on_the_right
else (instance.header_value + ', ' + other_header_value)
)
return self.__class__(header_value=new_header_value)
def _old_match(self, mask, offer):
"""
Return whether charset offer matches header item (charset or ``*``).
.. warning::
This is maintained for backward compatibility, and will be
deprecated in the future.
This method was WebOb's old criterion for deciding whether a charset
matches a header item (charset or ``*``), used in
- :meth:`AcceptCharsetValidHeader.__contains__`
- :meth:`AcceptCharsetValidHeader.best_match`
- :meth:`AcceptCharsetValidHeader.quality`
It does not conform to :rfc:`RFC 7231, section 5.3.3
<7231#section-5.3.3>` in that it does not interpret ``*`` values in the
header correctly: ``*`` should only match charsets not mentioned
elsewhere in the header.
"""
return mask == '*' or offer.lower() == mask.lower()
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
The offers are returned in descending order of preference, where
preference is indicated by the qvalue of the charset or ``*`` in the
header matching the offer.
This uses the matching rules described in :rfc:`RFC 7231, section 5.3.3
<7231#section-5.3.3>`.
:param offers: ``iterable`` of ``str`` charsets
:return: A list of tuples of the form (charset, qvalue), in descending
order of qvalue. Where two offers have the same qvalue, they
are returned in the same order as their order in `offers`.
"""
"""
lowercased_parsed = [
(charset.lower(), qvalue) for (charset, qvalue) in self.parsed
]
lowercased_offers = [offer.lower() for offer in offers]
not_acceptable_charsets = set()
acceptable_charsets = dict()
asterisk_qvalue = None
for charset, qvalue in lowercased_parsed:
if charset == '*':
if asterisk_qvalue is None:
asterisk_qvalue = qvalue
elif (
charset not in acceptable_charsets and charset not in
not_acceptable_charsets
# if we have not already encountered this charset in the header
):
if qvalue == 0.0:
not_acceptable_charsets.add(charset)
else:
acceptable_charsets[charset] = qvalue
acceptable_charsets = list(acceptable_charsets.items())
# Sort acceptable_charsets by qvalue, descending order
acceptable_charsets.sort(key=lambda tuple_: tuple_[1], reverse=True)
filtered_offers = []
for index, offer in enumerate(lowercased_offers):
# If offer matches a non-* charset with q=0, it is filtered out
if any((
(offer == charset) for charset in not_acceptable_charsets
)):
continue
matched_charset_qvalue = None
for charset, qvalue in acceptable_charsets:
if offer == charset:
matched_charset_qvalue = qvalue
break
else:
if asterisk_qvalue:
matched_charset_qvalue = asterisk_qvalue
if matched_charset_qvalue is not None: # if there was a match
filtered_offers.append((
offers[index], matched_charset_qvalue, index
))
# sort by position in `offers` argument, ascending
filtered_offers.sort(key=lambda tuple_: tuple_[2])
# When qvalues are tied, position in `offers` is the tiebreaker.
# sort by qvalue, descending
filtered_offers.sort(key=lambda tuple_: tuple_[1], reverse=True)
return [(item[0], item[1]) for item in filtered_offers]
# (offer, qvalue), dropping the position
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of charset `offers`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:meth:`AcceptCharsetValidHeader.best_match` has many issues, and
does not conform to :rfc:`RFC 7231 <7231>`.
Each charset in `offers` is checked against each non-``q=0`` item
(charset or ``*``) in the header. If the two are a match according to
WebOb's old criterion for a match, the quality value of the match is
the qvalue of the item from the header multiplied by the server quality
value of the offer (if the server quality value is not supplied, it is
1).
The offer in the match with the highest quality value is the best
match. If there is more than one match with the highest qvalue, the one
that shows up first in `offers` is the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` charset, or
a (charset, server quality value) ``tuple`` or
``list``. (The two may be mixed in the iterable.)
:param default_match: (optional, any type) the value to be returned if
there is no match
:return: (``str``, or the type of `default_match`)
| The offer that is the best match. If there is no match, the
value of `default_match` is returned.
The algorithm behind this method was written for the ``Accept`` header
rather than the ``Accept-Charset`` header. It uses the old criterion of
a match in :meth:`AcceptCharsetValidHeader._old_match`, which does not
conform to :rfc:`RFC 7231, section 5.3.3 <7231#section-5.3.3>`, in that
it does not interpret ``*`` values in the header correctly: ``*``
should only match charsets not mentioned elsewhere in the header::
>>> AcceptCharsetValidHeader('utf-8;q=0, *').best_match(['utf-8'])
'utf-8'
"""
warnings.warn(
'The behavior of AcceptCharsetValidHeader.best_match is currently'
' being maintained for backward compatibility, but it will be '
'deprecated in the future, as it does not conform to the RFC.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
matched_by = '*/*'
for offer in offers:
if isinstance(offer, (tuple, list)):
offer, server_quality = offer
else:
server_quality = 1
for mask, quality in self._parsed_nonzero:
possible_quality = server_quality * quality
if possible_quality < best_quality:
continue
elif possible_quality == best_quality:
# 'text/plain' overrides 'message/*' overrides '*/*'
# (if all match w/ the same q=)
# [We can see that this was written for the Accept header,
# not the Accept-Charset header.]
if matched_by.count('*') <= mask.count('*'):
continue
if self._old_match(mask, offer):
best_quality = possible_quality
best_offer = offer
matched_by = mask
return best_offer
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:param offer: (``str``) charset offer
:return: (``float`` or ``None``)
| The quality value from the charset that matches the `offer`,
or ``None`` if there is no match.
This uses the old criterion of a match in
:meth:`AcceptCharsetValidHeader._old_match`, which does not conform to
:rfc:`RFC 7231, section 5.3.3 <7231#section-5.3.3>`, in that it does
not interpret ``*`` values in the header correctly: ``*`` should only
match charsets not mentioned elsewhere in the header::
>>> AcceptCharsetValidHeader('utf-8;q=0, *').quality('utf-8')
1.0
>>> AcceptCharsetValidHeader('utf-8;q=0.9, *').quality('utf-8')
1.0
"""
warnings.warn(
'The behavior of AcceptCharsetValidHeader.quality is currently '
'being maintained for backward compatibility, but it will be '
'deprecated in the future, as it does not conform to the RFC.',
DeprecationWarning,
)
bestq = 0
for mask, q in self.parsed:
if self._old_match(mask, offer):
bestq = max(bestq, q)
return bestq or None
class _AcceptCharsetInvalidOrNoHeader(AcceptCharset):
"""
Represent when an ``Accept-Charset`` header is invalid or not in request.
This is the base class for the behaviour that
:class:`.AcceptCharsetInvalidHeader` and :class:`.AcceptCharsetNoHeader`
have in common.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept-Charset`` header has an invalid value. This implementation
disregards the header when the header is invalid, so
:class:`.AcceptCharsetInvalidHeader` and :class:`.AcceptCharsetNoHeader`
have much behaviour in common.
"""
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept-Charset`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``False``.
"""
return False
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of ``.__contains__`` for the ``AcceptCharset`` classes
is currently being maintained for backward compatibility, but it
will change in the future to better conform to the RFC.
:param offer: (``str``) charset offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
For this class, either there is no ``Accept-Charset`` header in the
request, or the header is invalid, so any charset is acceptable, and
this always returns ``True``.
"""
warnings.warn(
'The behavior of .__contains__ for the AcceptCharset classes is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
return True
def __iter__(self):
"""
Return all the items with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the items (charset or ``*``) in the header
with non-0 qvalues, in descending order of qvalue. If two
items have the same qvalue, they are returned in the order of
their positions in the header, from left to right.
When there is no ``Accept-Charset`` header in the request or the header
is invalid, there are no items, and this always returns an empty
iterator.
"""
warnings.warn(
'The behavior of AcceptCharsetValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
return iter(())
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
The offers are returned in descending order of preference, where
preference is indicated by the qvalue of the charset or ``*`` in the
header matching the offer.
This uses the matching rules described in :rfc:`RFC 7231, section 5.3.3
<7231#section-5.3.3>`.
:param offers: ``iterable`` of ``str`` charsets
:return: A list of tuples of the form (charset, qvalue), in descending
order of qvalue. Where two offers have the same qvalue, they
are returned in the same order as their order in `offers`.
| When the header is invalid or there is no ``Accept-Charset``
header in the request, all `offers` are considered
acceptable, so this method returns a list of (charset,
qvalue) tuples where each offer in `offers` is paired with
the qvalue of 1.0, in the same order as `offers`.
"""
return [(offer, 1.0) for offer in offers]
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of charset `offers`.
This is the ``.best_match()`` method for when the header is invalid or
not found in the request, corresponding to
:meth:`AcceptCharsetValidHeader.best_match`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptCharsetValidHeader.best_match`).
When the header is invalid, or there is no `Accept-Charset` header in
the request, all the charsets in `offers` are considered acceptable, so
the best match is the charset in `offers` with the highest server
quality value (if the server quality value is not supplied, it is 1).
If more than one charset in `offers` has the same highest server
quality value, then the one that shows up first in `offers` is the best
match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` charset, or
a (charset, server quality value) ``tuple`` or
``list``. (The two may be mixed in the iterable.)
:param default_match: (optional, any type) the value to be returned if
`offers` is empty.
:return: (``str``, or the type of `default_match`)
| The charset that has the highest server quality value. If
`offers` is empty, the value of `default_match` is returned.
"""
warnings.warn(
'The behavior of .best_match for the AcceptCharset classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior is not '
'specified in (and currently does not conform to) RFC 7231.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
for offer in offers:
if isinstance(offer, (list, tuple)):
offer, quality = offer
else:
quality = 1
if quality > best_quality:
best_offer = offer
best_quality = quality
return best_offer
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
This is the ``.quality()`` method for when the header is invalid or not
found in the request, corresponding to
:meth:`AcceptCharsetValidHeader.quality`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptCharsetValidHeader.quality`).
:param offer: (``str``) charset offer
:return: (``float``) ``1.0``.
When the ``Accept-Charset`` header is invalid or not in the request,
all offers are equally acceptable, so 1.0 is always returned.
"""
warnings.warn(
'The behavior of .quality for the Accept-Charset classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior does not'
' conform to RFC 7231.',
DeprecationWarning,
)
return 1.0
class AcceptCharsetNoHeader(_AcceptCharsetInvalidOrNoHeader):
"""
Represent when there is no ``Accept-Charset`` header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptCharsetNoHeader.__add__`).
"""
@property
def header_value(self):
"""
(``str`` or ``None``) The header value.
As there is no header in the request, this is ``None``.
"""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As there is no header in the request, this is ``None``.
"""
return self._parsed
def __init__(self):
"""
Create an :class:`AcceptCharsetNoHeader` instance.
"""
self._header_value = None
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, where keys are charsets and values are qvalues
* a ``tuple`` or ``list``, where each item is a charset ``str`` or a
``tuple`` or ``list`` (charset, qvalue) pair (``str``\ s and pairs
can be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptCharsetValidHeader`, :class:`AcceptCharsetNoHeader`,
or :class:`AcceptCharsetInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptCharsetValidHeader` instance, a new
:class:`AcceptCharsetValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptCharsetNoHeader` instance, an
invalid header value, or an :class:`AcceptCharsetInvalidHeader`
instance, a new :class:`AcceptCharsetNoHeader` instance is returned.
"""
if isinstance(other, AcceptCharsetValidHeader):
return AcceptCharsetValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptCharsetNoHeader, AcceptCharsetInvalidHeader)
):
return self.__class__()
return self._add_instance_and_non_accept_charset_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptCharsetNoHeader.__add__`.
"""
return self.__add__(other=other)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
"""Return the ``str`` ``'<no header in request>'``."""
return '<no header in request>'
def _add_instance_and_non_accept_charset_type(self, instance, other):
if not other:
return self.__class__()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptCharsetValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return self.__class__()
class AcceptCharsetInvalidHeader(_AcceptCharsetInvalidOrNoHeader):
"""
Represent an invalid ``Accept-Charset`` header.
An invalid header is one that does not conform to
:rfc:`7231#section-5.3.3`. As specified in the RFC, an empty header is an
invalid ``Accept-Charset`` header.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept-Charset`` header has an invalid value. This implementation
disregards the header, and treats it as if there is no ``Accept-Charset``
header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptCharsetInvalidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As the header is invalid and cannot be parsed, this is ``None``.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptCharsetInvalidHeader` instance.
"""
self._header_value = header_value
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, where keys are charsets and values are qvalues
* a ``tuple`` or ``list``, where each item is a charset ``str`` or a
``tuple`` or ``list`` (charset, qvalue) pair (``str``\ s and pairs
can be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptCharsetValidHeader`, :class:`AcceptCharsetNoHeader`,
or :class:`AcceptCharsetInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptCharsetValidHeader` instance, a new
:class:`AcceptCharsetValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptCharsetNoHeader` instance, an
invalid header value, or an :class:`AcceptCharsetInvalidHeader`
instance, a new :class:`AcceptCharsetNoHeader` instance is returned.
"""
if isinstance(other, AcceptCharsetValidHeader):
return AcceptCharsetValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptCharsetNoHeader, AcceptCharsetInvalidHeader)
):
return AcceptCharsetNoHeader()
return self._add_instance_and_non_accept_charset_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptCharsetInvalidHeader.__add__`.
"""
return self._add_instance_and_non_accept_charset_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
# We do not display the header_value, as it is untrusted input. The
# header_value could always be easily obtained from the .header_value
# property.
def __str__(self):
"""Return the ``str`` ``'<invalid header value>'``."""
return '<invalid header value>'
def _add_instance_and_non_accept_charset_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return AcceptCharsetNoHeader()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptCharsetValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return AcceptCharsetNoHeader()
def create_accept_charset_header(header_value):
"""
Create an object representing the ``Accept-Charset`` header in a request.
:param header_value: (``str``) header value
:return: If `header_value` is ``None``, an :class:`AcceptCharsetNoHeader`
instance.
| If `header_value` is a valid ``Accept-Charset`` header, an
:class:`AcceptCharsetValidHeader` instance.
| If `header_value` is an invalid ``Accept-Charset`` header, an
:class:`AcceptCharsetInvalidHeader` instance.
"""
if header_value is None:
return AcceptCharsetNoHeader()
try:
return AcceptCharsetValidHeader(header_value=header_value)
except ValueError:
return AcceptCharsetInvalidHeader(header_value=header_value)
def accept_charset_property():
doc = """
Property representing the ``Accept-Charset`` header.
(:rfc:`RFC 7231, section 5.3.3 <7231#section-5.3.3>`)
The header value in the request environ is parsed and a new object
representing the header is created every time we *get* the value of the
property. (*set* and *del* change the header value in the request
environ, and do not involve parsing.)
"""
ENVIRON_KEY = 'HTTP_ACCEPT_CHARSET'
def fget(request):
"""Get an object representing the header in the request."""
return create_accept_charset_header(
header_value=request.environ.get(ENVIRON_KEY)
)
def fset(request, value):
"""
Set the corresponding key in the request environ.
`value` can be:
* ``None``
* a ``str`` header value
* a ``dict``, where keys are charsets and values are qvalues
* a ``tuple`` or ``list``, where each item is a charset ``str`` or a
``tuple`` or ``list`` (charset, qvalue) pair (``str``\ s and pairs
can be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptCharsetValidHeader`, :class:`AcceptCharsetNoHeader`,
or :class:`AcceptCharsetInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
"""
if value is None or isinstance(value, AcceptCharsetNoHeader):
fdel(request=request)
else:
if isinstance(
value, (AcceptCharsetValidHeader, AcceptCharsetInvalidHeader)
):
header_value = value.header_value
else:
header_value = AcceptCharset._python_value_to_header_str(
value=value,
)
request.environ[ENVIRON_KEY] = header_value
def fdel(request):
"""Delete the corresponding key from the request environ."""
try:
del request.environ[ENVIRON_KEY]
except KeyError:
pass
return property(fget, fset, fdel, textwrap.dedent(doc))
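# A sketch of how the property factory is typically wired up, assuming a
# minimal Request-like class that exposes the WSGI environ (the class below
# is only an illustration, not the module's actual Request implementation):
#
#   class _Request(object):
#       def __init__(self, environ):
#           self.environ = environ
#       accept_charset = accept_charset_property()
#
#   req = _Request({'HTTP_ACCEPT_CHARSET': 'utf-8'})
#   req.accept_charset          # AcceptCharsetValidHeader instance
#   req.accept_charset = None   # removes HTTP_ACCEPT_CHARSET from the environ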
class AcceptEncoding(object):
"""
Represent an ``Accept-Encoding`` header.
Base class for :class:`AcceptEncodingValidHeader`,
:class:`AcceptEncodingNoHeader`, and :class:`AcceptEncodingInvalidHeader`.
"""
# RFC 7231 Section 3.1.2.1 "Content Codings":
# content-coding = token
# Section 5.3.4 "Accept-Encoding":
# Accept-Encoding = #( codings [ weight ] )
# codings = content-coding / "identity" / "*"
codings_re = token_re
# "identity" (case-insensitive) and "*" are both already included in token
# rule
codings_n_weight_re = _item_n_weight_re(item_re=codings_re)
codings_n_weight_compiled_re = re.compile(codings_n_weight_re)
accept_encoding_compiled_re = _list_0_or_more__compiled_re(
element_re=codings_n_weight_re,
)
@classmethod
def _python_value_to_header_str(cls, value):
if isinstance(value, str):
header_str = value
else:
if hasattr(value, 'items'):
value = sorted(
value.items(),
key=lambda item: item[1],
reverse=True,
)
if isinstance(value, (tuple, list)):
result = []
for item in value:
if isinstance(item, (tuple, list)):
item = _item_qvalue_pair_to_header_element(pair=item)
result.append(item)
header_str = ', '.join(result)
else:
header_str = str(value)
return header_str
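    # A sketch of the conversions performed above (illustrative values; the
    # exact qvalue formatting comes from _item_qvalue_pair_to_header_element,
    # which omits a qvalue of 1.0):
    #
    #   AcceptEncoding._python_value_to_header_str('gzip, br;q=0.5')
    #   # -> 'gzip, br;q=0.5'
    #   AcceptEncoding._python_value_to_header_str({'br': 1.0, 'gzip': 0.5})
    #   # -> 'br, gzip;q=0.5'
    #   AcceptEncoding._python_value_to_header_str([('gzip', 0.5), 'identity'])
    #   # -> 'gzip;q=0.5, identity'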
@classmethod
def parse(cls, value):
"""
Parse an ``Accept-Encoding`` header.
:param value: (``str``) header value
:return: If `value` is a valid ``Accept-Encoding`` header, returns an
iterator of (codings, quality value) tuples, as parsed from
the header from left to right.
:raises ValueError: if `value` is an invalid header
"""
# Check if header is valid
# Using Python stdlib's `re` module, there is currently no way to check
# the match *and* get all the groups using the same regex, so we have
# to use one regex to check the match, and another to get the groups.
if cls.accept_encoding_compiled_re.match(value) is None:
raise ValueError('Invalid value for an Accept-Encoding header.')
def generator(value):
for match in (cls.codings_n_weight_compiled_re.finditer(value)):
codings = match.group(1)
qvalue = match.group(2)
qvalue = float(qvalue) if qvalue else 1.0
yield (codings, qvalue)
return generator(value=value)
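    # A quick sketch of what parse() yields (illustrative header values):
    #
    #   list(AcceptEncoding.parse('gzip;q=0.8, identity, *;q=0'))
    #   # -> [('gzip', 0.8), ('identity', 1.0), ('*', 0.0)]
    #   AcceptEncoding.parse('gzip;;q=')
    #   # -> raises ValueError (the value does not match the grammar)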
class AcceptEncodingValidHeader(AcceptEncoding):
"""
Represent a valid ``Accept-Encoding`` header.
A valid header is one that conforms to :rfc:`RFC 7231, section 5.3.4
<7231#section-5.3.4>`.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptEncodingValidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
A list of (*codings*, *qvalue*) tuples, where
*codings* (``str``) is a content-coding, the string "``identity``", or
"``*``"; and
*qvalue* (``float``) is the quality value of the codings.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptEncodingValidHeader` instance.
:param header_value: (``str``) header value.
:raises ValueError: if `header_value` is an invalid value for an
``Accept-Encoding`` header.
"""
self._header_value = header_value
self._parsed = list(self.parse(header_value))
self._parsed_nonzero = [item for item in self.parsed if item[1]]
# item[1] is the qvalue
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with content-coding, ``identity`` or ``*`` ``str``\ s as
keys, and qvalue ``float``\ s as values
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (content-coding/``identity``/``*``, qvalue) ``tuple``
or ``list``
* an :class:`AcceptEncodingValidHeader`,
:class:`AcceptEncodingNoHeader`, or
:class:`AcceptEncodingInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or another
:class:`AcceptEncodingValidHeader` instance, and the header value it
represents is not ``''``, then the two header values are joined with
``', '``, and a new :class:`AcceptEncodingValidHeader` instance with
the new header value is returned.
If `other` is a valid header value or another
:class:`AcceptEncodingValidHeader` instance representing a header value
of ``''``; or if it is ``None`` or an :class:`AcceptEncodingNoHeader`
instance; or if it is an invalid header value, or an
:class:`AcceptEncodingInvalidHeader` instance, then a new
:class:`AcceptEncodingValidHeader` instance with the same header value
as ``self`` is returned.
"""
if isinstance(other, AcceptEncodingValidHeader):
if other.header_value == '':
return self.__class__(header_value=self.header_value)
else:
return create_accept_encoding_header(
header_value=self.header_value + ', ' + other.header_value,
)
if isinstance(
other, (AcceptEncodingNoHeader, AcceptEncodingInvalidHeader)
):
return self.__class__(header_value=self.header_value)
return self._add_instance_and_non_accept_encoding_type(
instance=self, other=other,
)
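    # A short sketch of the addition behaviour described above (illustrative
    # header values):
    #
    #   AcceptEncodingValidHeader('gzip') + 'br;q=0.5'
    #   # -> AcceptEncodingValidHeader with header_value 'gzip, br;q=0.5'
    #   AcceptEncodingValidHeader('gzip') + None
    #   # -> AcceptEncodingValidHeader with header_value 'gzip' (``None`` and
    #   #    invalid values yield a new instance with the same header value)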
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept-Encoding`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``True``.
"""
return True
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of :meth:`AcceptEncodingValidHeader.__contains__` is
currently being maintained for backward compatibility, but it will
change in the future to better conform to the RFC.
:param offer: (``str``) a content-coding or ``identity`` offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
The behavior of this method does not fully conform to :rfc:`7231`.
It does not correctly interpret ``*``::
>>> 'gzip' in AcceptEncodingValidHeader('gzip;q=0, *')
True
and does not handle the ``identity`` token correctly::
>>> 'identity' in AcceptEncodingValidHeader('gzip')
False
"""
warnings.warn(
'The behavior of AcceptEncodingValidHeader.__contains__ is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
        for mask, quality in self._parsed_nonzero:
            if self._old_match(mask, offer):
                return True
        return False
def __iter__(self):
"""
Return all the ranges with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the (content-coding/``identity``/``*``) items
in the header with non-0 qvalues, in descending order of
qvalue. If two items have the same qvalue, they are returned
in the order of their positions in the header, from left to
right.
Please note that this is a simple filter for the items in the header
with non-0 qvalues, and is not necessarily the same as what the client
prefers, e.g. ``'gzip;q=0, *'`` means 'everything but gzip', but
``list(instance)`` would return only ``['*']``.
"""
warnings.warn(
            'The behavior of AcceptEncodingValidHeader.__iter__ is '
'currently maintained for backward compatibility, but will change'
' in the future.',
DeprecationWarning,
)
        for m, q in sorted(
self._parsed_nonzero,
key=lambda i: i[1],
reverse=True
):
yield m
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptEncodingValidHeader.__add__`.
"""
return self._add_instance_and_non_accept_encoding_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{} ({!r})>'.format(self.__class__.__name__, str(self))
def __str__(self):
r"""
Return a tidied up version of the header value.
e.g. If the ``header_value`` is ``",\t, a ;\t q=0.20 , b ,',"``,
``str(instance)`` returns ``"a;q=0.2, b, '"``.
"""
return ', '.join(
_item_qvalue_pair_to_header_element(pair=tuple_)
for tuple_ in self.parsed
)
def _add_instance_and_non_accept_encoding_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return self.__class__(header_value=instance.header_value)
other_header_value = self._python_value_to_header_str(value=other)
if other_header_value == '':
# if ``other`` is an object whose type we don't recognise, and
# str(other) returns ''
return self.__class__(header_value=instance.header_value)
try:
self.parse(value=other_header_value)
except ValueError: # invalid header value
return self.__class__(header_value=instance.header_value)
new_header_value = (
(other_header_value + ', ' + instance.header_value)
if instance_on_the_right
else (instance.header_value + ', ' + other_header_value)
)
return self.__class__(header_value=new_header_value)
def _old_match(self, mask, offer):
"""
Return whether content-coding offer matches codings header item.
.. warning::
This is maintained for backward compatibility, and will be
deprecated in the future.
This method was WebOb's old criterion for deciding whether a
content-coding offer matches a header item (content-coding,
``identity`` or ``*``), used in
        - :meth:`AcceptEncodingValidHeader.__contains__`
        - :meth:`AcceptEncodingValidHeader.best_match`
        - :meth:`AcceptEncodingValidHeader.quality`
It does not conform to :rfc:`RFC 7231, section 5.3.4
<7231#section-5.3.4>` in that it does not interpret ``*`` values in the
header correctly: ``*`` should only match content-codings not mentioned
elsewhere in the header.
"""
return mask == '*' or offer.lower() == mask.lower()
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
The offers are returned in descending order of preference, where
preference is indicated by the qvalue of the item (content-coding,
"identity" or "*") in the header that matches the offer.
This uses the matching rules described in :rfc:`RFC 7231, section 5.3.4
<7231#section-5.3.4>`.
:param offers: ``iterable`` of ``str``s, where each ``str`` is a
content-coding or the string ``identity`` (the token
used to represent "no encoding")
:return: A list of tuples of the form (content-coding or "identity",
qvalue), in descending order of qvalue. Where two offers have
the same qvalue, they are returned in the same order as their
order in `offers`.
Use the string ``'identity'`` (without the quotes) in `offers` to
indicate an offer with no content-coding. From the RFC: 'If the
representation has no content-coding, then it is acceptable by default
unless specifically excluded by the Accept-Encoding field stating
either "identity;q=0" or "\*;q=0" without a more specific entry for
"identity".' The RFC does not specify the qvalue that should be
assigned to the representation/offer with no content-coding; this
implementation assigns it a qvalue of 1.0.
"""
lowercased_parsed = [
(codings.lower(), qvalue) for (codings, qvalue) in self.parsed
]
lowercased_offers = [offer.lower() for offer in offers]
not_acceptable_codingss = set()
acceptable_codingss = dict()
asterisk_qvalue = None
for codings, qvalue in lowercased_parsed:
if codings == '*':
if asterisk_qvalue is None:
asterisk_qvalue = qvalue
elif (
codings not in acceptable_codingss and codings not in
not_acceptable_codingss
# if we have not already encountered this codings in the header
):
if qvalue == 0.0:
not_acceptable_codingss.add(codings)
else:
acceptable_codingss[codings] = qvalue
acceptable_codingss = list(acceptable_codingss.items())
# Sort acceptable_codingss by qvalue, descending order
acceptable_codingss.sort(key=lambda tuple_: tuple_[1], reverse=True)
filtered_offers = []
for index, offer in enumerate(lowercased_offers):
# If offer matches a non-* codings with q=0, it is filtered out
if any((
(offer == codings) for codings in not_acceptable_codingss
)):
continue
matched_codings_qvalue = None
for codings, qvalue in acceptable_codingss:
if offer == codings:
matched_codings_qvalue = qvalue
break
else:
if asterisk_qvalue:
matched_codings_qvalue = asterisk_qvalue
elif asterisk_qvalue != 0.0 and offer == 'identity':
matched_codings_qvalue = 1.0
if matched_codings_qvalue is not None: # if there was a match
filtered_offers.append((
offers[index], matched_codings_qvalue, index
))
# sort by position in `offers` argument, ascending
filtered_offers.sort(key=lambda tuple_: tuple_[2])
# When qvalues are tied, position in `offers` is the tiebreaker.
# sort by qvalue, descending
filtered_offers.sort(key=lambda tuple_: tuple_[1], reverse=True)
return [(item[0], item[1]) for item in filtered_offers]
# (offer, qvalue), dropping the position
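    # A sketch of acceptable_offers in action (illustrative header value):
    #
    #   header = AcceptEncodingValidHeader('gzip;q=0.9, identity;q=0.1')
    #   header.acceptable_offers(['identity', 'gzip', 'br'])
    #   # -> [('gzip', 0.9), ('identity', 0.1)]
    #   # 'br' matches neither a codings item nor '*', so it is dropped;
    #   # 'identity' keeps its explicit qvalue instead of the 1.0 default.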
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of `offers`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:meth:`AcceptEncodingValidHeader.best_match` uses its own algorithm
(one not specified in :rfc:`RFC 7231 <7231>`) to determine what is a
best match. The algorithm has many issues, and does not conform to
the RFC.
Each offer in `offers` is checked against each non-``q=0`` item
(content-coding/``identity``/``*``) in the header. If the two are a
match according to WebOb's old criterion for a match, the quality value
of the match is the qvalue of the item from the header multiplied by
the server quality value of the offer (if the server quality value is
not supplied, it is 1).
The offer in the match with the highest quality value is the best
match. If there is more than one match with the highest qvalue, the one
that shows up first in `offers` is the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` *codings*,
or a (*codings*, server quality value) ``tuple`` or
``list``, where *codings* is either a content-coding,
or the string ``identity`` (which represents *no
encoding*). ``str`` and ``tuple``/``list`` elements
may be mixed within the iterable.
:param default_match: (optional, any type) the value to be returned if
there is no match
:return: (``str``, or the type of `default_match`)
| The offer that is the best match. If there is no match, the
value of `default_match` is returned.
This method does not conform to :rfc:`RFC 7231, section 5.3.4
<7231#section-5.3.4>`, in that it does not correctly interpret ``*``::
>>> AcceptEncodingValidHeader('gzip;q=0, *').best_match(['gzip'])
'gzip'
and does not handle the ``identity`` token correctly::
>>> instance = AcceptEncodingValidHeader('gzip')
>>> instance.best_match(['identity']) is None
True
"""
warnings.warn(
'The behavior of AcceptEncodingValidHeader.best_match is '
'currently being maintained for backward compatibility, but it '
'will be deprecated in the future, as it does not conform to the'
' RFC.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
matched_by = '*/*'
for offer in offers:
if isinstance(offer, (tuple, list)):
offer, server_quality = offer
else:
server_quality = 1
for item in self._parsed_nonzero:
mask = item[0]
quality = item[1]
possible_quality = server_quality * quality
if possible_quality < best_quality:
continue
elif possible_quality == best_quality:
# 'text/plain' overrides 'message/*' overrides '*/*'
# (if all match w/ the same q=)
# [We can see that this was written for the Accept header,
# not the Accept-Encoding header.]
if matched_by.count('*') <= mask.count('*'):
continue
if self._old_match(mask, offer):
best_quality = possible_quality
best_offer = offer
matched_by = mask
return best_offer
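    # A sketch of best_match with server quality values (illustrative):
    #
    #   header = AcceptEncodingValidHeader('gzip;q=0.5, br')
    #   header.best_match([('gzip', 1), ('br', 0.4)])
    #   # -> 'gzip'  (0.5 * 1 = 0.5 beats 1.0 * 0.4 = 0.4)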
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:param offer: (``str``) A content-coding, or ``identity``.
:return: (``float`` or ``None``)
| The quality value from the header item
(content-coding/``identity``/``*``) that matches the
`offer`, or ``None`` if there is no match.
The behavior of this method does not conform to :rfc:`RFC 7231, section
5.3.4<7231#section-5.3.4>`, in that it does not correctly interpret
``*``::
>>> AcceptEncodingValidHeader('gzip;q=0, *').quality('gzip')
1.0
and does not handle the ``identity`` token correctly::
>>> AcceptEncodingValidHeader('gzip').quality('identity') is None
True
"""
warnings.warn(
'The behavior of AcceptEncodingValidHeader.quality is currently '
'being maintained for backward compatibility, but it will be '
'deprecated in the future, as it does not conform to the RFC.',
DeprecationWarning,
)
bestq = 0
for mask, q in self.parsed:
if self._old_match(mask, offer):
bestq = max(bestq, q)
return bestq or None
class _AcceptEncodingInvalidOrNoHeader(AcceptEncoding):
"""
Represent when an ``Accept-Encoding`` header is invalid or not in request.
This is the base class for the behaviour that
:class:`.AcceptEncodingInvalidHeader` and :class:`.AcceptEncodingNoHeader`
have in common.
:rfc:`7231` does not provide any guidance on what should happen if the
    ``Accept-Encoding`` header has an invalid value. This implementation
disregards the header when the header is invalid, so
:class:`.AcceptEncodingInvalidHeader` and :class:`.AcceptEncodingNoHeader`
have much behaviour in common.
"""
def __bool__(self):
"""
Return whether ``self`` represents a valid ``Accept-Encoding`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``False``.
"""
return False
__nonzero__ = __bool__ # Python 2
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of ``.__contains__`` for the ``Accept-Encoding``
classes is currently being maintained for backward compatibility,
but it will change in the future to better conform to the RFC.
:param offer: (``str``) a content-coding or ``identity`` offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
For this class, either there is no ``Accept-Encoding`` header in the
request, or the header is invalid, so any content-coding is acceptable,
and this always returns ``True``.
"""
warnings.warn(
'The behavior of .__contains__ for the Accept-Encoding classes is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
return True
def __iter__(self):
"""
Return all the header items with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the (content-coding/``identity``/``*``) items
in the header with non-0 qvalues, in descending order of
qvalue. If two items have the same qvalue, they are returned
in the order of their positions in the header, from left to
right.
When there is no ``Accept-Encoding`` header in the request or the
header is invalid, there are no items in the header, so this always
returns an empty iterator.
"""
warnings.warn(
            'The behavior of .__iter__ for the Accept-Encoding classes is '
            'currently maintained for backward compatibility, but will '
            'change in the future.',
DeprecationWarning,
)
return iter(())
def acceptable_offers(self, offers):
"""
Return the offers that are acceptable according to the header.
:param offers: ``iterable`` of ``str``s, where each ``str`` is a
content-coding or the string ``identity`` (the token
used to represent "no encoding")
:return: When the header is invalid, or there is no ``Accept-Encoding``
header in the request, all `offers` are considered acceptable,
so this method returns a list of (content-coding or
"identity", qvalue) tuples where each offer in `offers` is
paired with the qvalue of 1.0, in the same order as in
`offers`.
"""
return [(offer, 1.0) for offer in offers]
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of `offers`.
This is the ``.best_match()`` method for when the header is invalid or
not found in the request, corresponding to
:meth:`AcceptEncodingValidHeader.best_match`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptEncodingValidHeader.best_match`).
        When the header is invalid, or there is no ``Accept-Encoding`` header in
the request, all `offers` are considered acceptable, so the best match
is the offer in `offers` with the highest server quality value (if the
server quality value is not supplied for a media type, it is 1).
If more than one offer in `offers` have the same highest server quality
value, then the one that shows up first in `offers` is the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` *codings*,
or a (*codings*, server quality value) ``tuple`` or
``list``, where *codings* is either a content-coding,
or the string ``identity`` (which represents *no
encoding*). ``str`` and ``tuple``/``list`` elements
may be mixed within the iterable.
:param default_match: (optional, any type) the value to be returned if
`offers` is empty.
:return: (``str``, or the type of `default_match`)
| The offer that has the highest server quality value. If
`offers` is empty, the value of `default_match` is returned.
"""
warnings.warn(
'The behavior of .best_match for the Accept-Encoding classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior is not '
'specified in (and currently does not conform to) RFC 7231.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
for offer in offers:
if isinstance(offer, (list, tuple)):
offer, quality = offer
else:
quality = 1
if quality > best_quality:
best_offer = offer
best_quality = quality
return best_offer
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
This is the ``.quality()`` method for when the header is invalid or not
found in the request, corresponding to
:meth:`AcceptEncodingValidHeader.quality`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptEncodingValidHeader.quality`).
:param offer: (``str``) A content-coding, or ``identity``.
:return: (``float``) ``1.0``.
When the ``Accept-Encoding`` header is invalid or not in the request,
all offers are equally acceptable, so 1.0 is always returned.
"""
warnings.warn(
'The behavior of .quality for the Accept-Encoding classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior does '
'not conform to RFC 7231.',
DeprecationWarning,
)
return 1.0
class AcceptEncodingNoHeader(_AcceptEncodingInvalidOrNoHeader):
"""
Represent when there is no ``Accept-Encoding`` header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptEncodingNoHeader.__add__`).
"""
@property
def header_value(self):
"""
(``str`` or ``None``) The header value.
As there is no header in the request, this is ``None``.
"""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As there is no header in the request, this is ``None``.
"""
return self._parsed
def __init__(self):
"""
Create an :class:`AcceptEncodingNoHeader` instance.
"""
self._header_value = None
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with content-coding, ``identity`` or ``*`` ``str``\ s as
keys, and qvalue ``float``\ s as values
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (content-coding/``identity``/``*``, qvalue) ``tuple``
or ``list``
* an :class:`AcceptEncodingValidHeader`,
:class:`AcceptEncodingNoHeader`, or
:class:`AcceptEncodingInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptEncodingValidHeader` instance, a new
:class:`AcceptEncodingValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptEncodingNoHeader` instance, an
invalid header value, or an :class:`AcceptEncodingInvalidHeader`
instance, a new :class:`AcceptEncodingNoHeader` instance is returned.
"""
if isinstance(other, AcceptEncodingValidHeader):
return AcceptEncodingValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptEncodingNoHeader, AcceptEncodingInvalidHeader)
):
return self.__class__()
return self._add_instance_and_non_accept_encoding_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptEncodingNoHeader.__add__`.
"""
return self.__add__(other=other)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
"""Return the ``str`` ``'<no header in request>'``."""
return '<no header in request>'
def _add_instance_and_non_accept_encoding_type(self, instance, other):
if other is None:
return self.__class__()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptEncodingValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return self.__class__()
class AcceptEncodingInvalidHeader(_AcceptEncodingInvalidOrNoHeader):
"""
Represent an invalid ``Accept-Encoding`` header.
An invalid header is one that does not conform to
:rfc:`7231#section-5.3.4`.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept-Encoding`` header has an invalid value. This implementation
disregards the header, and treats it as if there is no ``Accept-Encoding``
header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptEncodingInvalidHeader.__add__`).
"""
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As the header is invalid and cannot be parsed, this is ``None``.
"""
return self._parsed
def __init__(self, header_value):
"""
Create an :class:`AcceptEncodingInvalidHeader` instance.
"""
self._header_value = header_value
self._parsed = None
self._parsed_nonzero = None
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with content-coding, ``identity`` or ``*`` ``str``\ s as
keys, and qvalue ``float``\ s as values
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (content-coding/``identity``/``*``, qvalue) ``tuple``
or ``list``
* an :class:`AcceptEncodingValidHeader`,
:class:`AcceptEncodingNoHeader`, or
:class:`AcceptEncodingInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptEncodingValidHeader` instance, then a new
:class:`AcceptEncodingValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptEncodingNoHeader` instance, an
invalid header value, or an :class:`AcceptEncodingInvalidHeader`
instance, a new :class:`AcceptEncodingNoHeader` instance is returned.
"""
if isinstance(other, AcceptEncodingValidHeader):
return AcceptEncodingValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptEncodingNoHeader, AcceptEncodingInvalidHeader)
):
return AcceptEncodingNoHeader()
return self._add_instance_and_non_accept_encoding_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
        See the docstring for :meth:`AcceptEncodingInvalidHeader.__add__`.
"""
return self._add_instance_and_non_accept_encoding_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
# We do not display the header_value, as it is untrusted input. The
# header_value could always be easily obtained from the .header_value
# property.
def __str__(self):
"""Return the ``str`` ``'<invalid header value>'``."""
return '<invalid header value>'
def _add_instance_and_non_accept_encoding_type(
self, instance, other, instance_on_the_right=False,
):
if other is None:
return AcceptEncodingNoHeader()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptEncodingValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return AcceptEncodingNoHeader()
def create_accept_encoding_header(header_value):
"""
Create an object representing the ``Accept-Encoding`` header in a request.
:param header_value: (``str``) header value
:return: If `header_value` is ``None``, an :class:`AcceptEncodingNoHeader`
instance.
| If `header_value` is a valid ``Accept-Encoding`` header, an
:class:`AcceptEncodingValidHeader` instance.
| If `header_value` is an invalid ``Accept-Encoding`` header, an
:class:`AcceptEncodingInvalidHeader` instance.
"""
if header_value is None:
return AcceptEncodingNoHeader()
try:
return AcceptEncodingValidHeader(header_value=header_value)
except ValueError:
return AcceptEncodingInvalidHeader(header_value=header_value)
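# A minimal usage sketch of the factory above (illustrative header values):
#
#   create_accept_encoding_header(None)
#   # -> AcceptEncodingNoHeader()
#   create_accept_encoding_header('gzip, br;q=0.5')
#   # -> AcceptEncodingValidHeader instance
#   create_accept_encoding_header('gzip;;q=')
#   # -> AcceptEncodingInvalidHeader instance (the value does not parse)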
def accept_encoding_property():
doc = """
Property representing the ``Accept-Encoding`` header.
(:rfc:`RFC 7231, section 5.3.4 <7231#section-5.3.4>`)
The header value in the request environ is parsed and a new object
representing the header is created every time we *get* the value of the
property. (*set* and *del* change the header value in the request
environ, and do not involve parsing.)
"""
ENVIRON_KEY = 'HTTP_ACCEPT_ENCODING'
def fget(request):
"""Get an object representing the header in the request."""
return create_accept_encoding_header(
header_value=request.environ.get(ENVIRON_KEY)
)
def fset(request, value):
"""
Set the corresponding key in the request environ.
`value` can be:
* ``None``
* a ``str`` header value
* a ``dict``, with content-coding, ``identity`` or ``*`` ``str``\ s as
keys, and qvalue ``float``\ s as values
* a ``tuple`` or ``list``, where each item is either a header element
``str``, or a (content-coding/``identity``/``*``, qvalue) ``tuple``
or ``list``
* an :class:`AcceptEncodingValidHeader`,
:class:`AcceptEncodingNoHeader`, or
:class:`AcceptEncodingInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
"""
if value is None or isinstance(value, AcceptEncodingNoHeader):
fdel(request=request)
else:
if isinstance(
value, (AcceptEncodingValidHeader, AcceptEncodingInvalidHeader)
):
header_value = value.header_value
else:
header_value = AcceptEncoding._python_value_to_header_str(
value=value,
)
request.environ[ENVIRON_KEY] = header_value
def fdel(request):
"""Delete the corresponding key from the request environ."""
try:
del request.environ[ENVIRON_KEY]
except KeyError:
pass
return property(fget, fset, fdel, textwrap.dedent(doc))
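# A sketch of typical wiring for the property factory, assuming a minimal
# Request-like class exposing the WSGI environ (illustrative only, not the
# module's actual Request implementation):
#
#   class _Request(object):
#       def __init__(self, environ):
#           self.environ = environ
#       accept_encoding = accept_encoding_property()
#
#   req = _Request({'HTTP_ACCEPT_ENCODING': 'gzip, br;q=0.5'})
#   req.accept_encoding.acceptable_offers(['gzip', 'br'])
#   # -> [('gzip', 1.0), ('br', 0.5)]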
class AcceptLanguage(object):
"""
Represent an ``Accept-Language`` header.
Base class for :class:`AcceptLanguageValidHeader`,
:class:`AcceptLanguageNoHeader`, and :class:`AcceptLanguageInvalidHeader`.
"""
# RFC 7231 Section 5.3.5 "Accept-Language":
# Accept-Language = 1#( language-range [ weight ] )
# language-range =
# <language-range, see [RFC4647], Section 2.1>
# RFC 4647 Section 2.1 "Basic Language Range":
# language-range = (1*8ALPHA *("-" 1*8alphanum)) / "*"
# alphanum = ALPHA / DIGIT
lang_range_re = (
r'\*|'
'(?:'
'[A-Za-z]{1,8}'
'(?:-[A-Za-z0-9]{1,8})*'
')'
)
lang_range_n_weight_re = _item_n_weight_re(item_re=lang_range_re)
lang_range_n_weight_compiled_re = re.compile(lang_range_n_weight_re)
accept_language_compiled_re = _list_1_or_more__compiled_re(
element_re=lang_range_n_weight_re,
)
@classmethod
def _python_value_to_header_str(cls, value):
if isinstance(value, str):
header_str = value
else:
if hasattr(value, 'items'):
value = sorted(
value.items(),
key=lambda item: item[1],
reverse=True,
)
if isinstance(value, (tuple, list)):
result = []
for element in value:
if isinstance(element, (tuple, list)):
element = _item_qvalue_pair_to_header_element(
pair=element
)
result.append(element)
header_str = ', '.join(result)
else:
header_str = str(value)
return header_str
@classmethod
def parse(cls, value):
"""
Parse an ``Accept-Language`` header.
:param value: (``str``) header value
:return: If `value` is a valid ``Accept-Language`` header, returns an
iterator of (language range, quality value) tuples, as parsed
from the header from left to right.
:raises ValueError: if `value` is an invalid header
"""
# Check if header is valid
# Using Python stdlib's `re` module, there is currently no way to check
# the match *and* get all the groups using the same regex, so we have
# to use one regex to check the match, and another to get the groups.
if cls.accept_language_compiled_re.match(value) is None:
raise ValueError('Invalid value for an Accept-Language header.')
def generator(value):
for match in (
cls.lang_range_n_weight_compiled_re.finditer(value)
):
lang_range = match.group(1)
qvalue = match.group(2)
qvalue = float(qvalue) if qvalue else 1.0
yield (lang_range, qvalue)
return generator(value=value)
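    # A quick sketch of what parse() yields (illustrative header values):
    #
    #   list(AcceptLanguage.parse('de-CH, en;q=0.8, *;q=0.1'))
    #   # -> [('de-CH', 1.0), ('en', 0.8), ('*', 0.1)]
    #   AcceptLanguage.parse('')
    #   # -> raises ValueError (at least one language range is required)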
class AcceptLanguageValidHeader(AcceptLanguage):
"""
Represent a valid ``Accept-Language`` header.
A valid header is one that conforms to :rfc:`RFC 7231, section 5.3.5
<7231#section-5.3.5>`.
We take the reference from the ``language-range`` syntax rule in :rfc:`RFC
7231, section 5.3.5 <7231#section-5.3.5>` to :rfc:`RFC 4647, section 2.1
<4647#section-2.1>` to mean that only basic language ranges (and not
extended language ranges) are expected in the ``Accept-Language`` header.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptLanguageValidHeader.__add__`).
"""
def __init__(self, header_value):
"""
Create an :class:`AcceptLanguageValidHeader` instance.
:param header_value: (``str``) header value.
:raises ValueError: if `header_value` is an invalid value for an
``Accept-Language`` header.
"""
self._header_value = header_value
self._parsed = list(self.parse(header_value))
self._parsed_nonzero = [item for item in self.parsed if item[1]]
# item[1] is the qvalue
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
A list of (language range, quality value) tuples.
"""
return self._parsed
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str``
* a ``dict``, with language ranges as keys and qvalues as values
* a ``tuple`` or ``list``, of language range ``str``\ s or of ``tuple``
or ``list`` (language range, qvalue) pairs (``str``\ s and pairs can
be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptLanguageValidHeader`,
:class:`AcceptLanguageNoHeader`, or
:class:`AcceptLanguageInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or another
:class:`AcceptLanguageValidHeader` instance, the two header values are
joined with ``', '``, and a new :class:`AcceptLanguageValidHeader`
instance with the new header value is returned.
If `other` is ``None``, an :class:`AcceptLanguageNoHeader` instance, an
invalid header value, or an :class:`AcceptLanguageInvalidHeader`
instance, a new :class:`AcceptLanguageValidHeader` instance with the
same header value as ``self`` is returned.
"""
if isinstance(other, AcceptLanguageValidHeader):
return create_accept_language_header(
header_value=self.header_value + ', ' + other.header_value,
)
if isinstance(
other, (AcceptLanguageNoHeader, AcceptLanguageInvalidHeader)
):
return self.__class__(header_value=self.header_value)
return self._add_instance_and_non_accept_language_type(
instance=self, other=other,
)
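    # A short sketch of the addition behaviour described above (illustrative
    # header values):
    #
    #   AcceptLanguageValidHeader('en-gb') + 'fr;q=0.5'
    #   # -> AcceptLanguageValidHeader with header_value 'en-gb, fr;q=0.5'
    #   AcceptLanguageValidHeader('en-gb') + None
    #   # -> AcceptLanguageValidHeader with header_value 'en-gb'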
def __nonzero__(self):
"""
Return whether ``self`` represents a valid ``Accept-Language`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``True``.
"""
return True
__bool__ = __nonzero__ # Python 3
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of :meth:`AcceptLanguageValidHeader.__contains__` is
currently being maintained for backward compatibility, but it will
change in the future to better conform to the RFC.
What is 'acceptable' depends on the needs of your application.
:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>` suggests three
matching schemes from :rfc:`RFC 4647 <4647>`, two of which WebOb
supports with :meth:`AcceptLanguageValidHeader.basic_filtering` and
:meth:`AcceptLanguageValidHeader.lookup` (we interpret the RFC to
mean that Extended Filtering cannot apply for the
``Accept-Language`` header, as the header only accepts basic
language ranges.) If these are not suitable for the needs of your
application, you may need to write your own matching using
:attr:`AcceptLanguageValidHeader.parsed`.
:param offer: (``str``) language tag offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
This uses the old criterion of a match in
:meth:`AcceptLanguageValidHeader._old_match`, which does not conform to
:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>` or any of the
matching schemes suggested there. It also does not properly take into
account ranges with ``q=0`` in the header::
>>> 'en-gb' in AcceptLanguageValidHeader('en, en-gb;q=0')
True
>>> 'en' in AcceptLanguageValidHeader('en;q=0, *')
True
(See the docstring for :meth:`AcceptLanguageValidHeader._old_match` for
other problems with the old criterion for a match.)
"""
warnings.warn(
'The behavior of AcceptLanguageValidHeader.__contains__ is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
for mask, quality in self._parsed_nonzero:
if self._old_match(mask, offer):
return True
return False
def __iter__(self):
"""
Return all the ranges with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the language ranges in the header with non-0
qvalues, in descending order of qvalue. If two ranges have the
same qvalue, they are returned in the order of their positions
in the header, from left to right.
Please note that this is a simple filter for the ranges in the header
with non-0 qvalues, and is not necessarily the same as what the client
prefers, e.g. ``'en-gb;q=0, *'`` means 'everything but British
English', but ``list(instance)`` would return only ``['*']``.
"""
warnings.warn(
'The behavior of AcceptLanguageValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
for m, q in sorted(
self._parsed_nonzero,
key=lambda i: i[1],
reverse=True
):
yield m
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptLanguageValidHeader.__add__`.
"""
return self._add_instance_and_non_accept_language_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{} ({!r})>'.format(self.__class__.__name__, str(self))
def __str__(self):
r"""
Return a tidied up version of the header value.
e.g. If the ``header_value`` is ``', \t,de;q=0.000 \t, es;q=1.000, zh,
jp;q=0.210 ,'``, ``str(instance)`` returns ``'de;q=0, es, zh,
jp;q=0.21'``.
"""
return ', '.join(
_item_qvalue_pair_to_header_element(pair=tuple_)
for tuple_ in self.parsed
)
def _add_instance_and_non_accept_language_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return self.__class__(header_value=instance.header_value)
other_header_value = self._python_value_to_header_str(value=other)
try:
self.parse(value=other_header_value)
except ValueError: # invalid header value
return self.__class__(header_value=instance.header_value)
new_header_value = (
(other_header_value + ', ' + instance.header_value)
if instance_on_the_right
else (instance.header_value + ', ' + other_header_value)
)
return self.__class__(header_value=new_header_value)
def _old_match(self, mask, item):
"""
Return whether a language tag matches a language range.
.. warning::
This is maintained for backward compatibility, and will be
deprecated in the future.
This method was WebOb's old criterion for deciding whether a language
tag matches a language range, used in
- :meth:`AcceptLanguageValidHeader.__contains__`
- :meth:`AcceptLanguageValidHeader.best_match`
- :meth:`AcceptLanguageValidHeader.quality`
It does not conform to :rfc:`RFC 7231, section 5.3.5
<7231#section-5.3.5>`, or any of the matching schemes suggested there.
:param mask: (``str``)
| language range
:param item: (``str``)
| language tag. Subtags in language tags are separated by
``-`` (hyphen). If there are underscores (``_``) in this
argument, they will be converted to hyphens before
checking the match.
:return: (``bool``) whether the tag in `item` matches the range in
`mask`.
`mask` and `item` are a match if:
- ``mask == *``.
- ``mask == item``.
- If the first subtag of `item` equals `mask`, or if the first subtag
of `mask` equals `item`.
This means that::
>>> instance._old_match(mask='en-gb', item='en')
True
>>> instance._old_match(mask='en', item='en-gb')
True
Which is different from any of the matching schemes suggested in
:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>`, in that none of
those schemes match both more *and* less specific tags.
However, this method appears to be only designed for language tags
and ranges with at most two subtags. So with an `item`/language tag
with more than two subtags like ``zh-Hans-CN``::
>>> instance._old_match(mask='zh', item='zh-Hans-CN')
True
>>> instance._old_match(mask='zh-Hans', item='zh-Hans-CN')
False
From commit history, this does not appear to have been from a
decision to match only the first subtag, but rather because only
language ranges and tags with at most two subtags were expected.
"""
item = item.replace('_', '-').lower()
mask = mask.lower()
return (mask == '*'
or item == mask
or item.split('-')[0] == mask
or item == mask.split('-')[0]
)
def basic_filtering(self, language_tags):
"""
Return the tags that match the header, using Basic Filtering.
This is an implementation of the Basic Filtering matching scheme,
suggested as a matching scheme for the ``Accept-Language`` header in
:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>`, and defined in
:rfc:`RFC 4647, section 3.3.1 <4647#section-3.3.1>`. It filters the
tags in the `language_tags` argument and returns the ones that match
the header according to the matching scheme.
:param language_tags: (``iterable``) language tags
:return: A list of tuples of the form (language tag, qvalue), in
descending order of qvalue. If two or more tags have the same
qvalue, they are returned in the same order as that in the
header of the ranges they matched. If the matched range is the
same for two or more tags (i.e. their matched ranges have the
same qvalue and the same position in the header), then they
are returned in the same order as that in the `language_tags`
argument. If `language_tags` is unordered, e.g. if it is a set
or a dict, then that order may not be reliable.
For each tag in `language_tags`:
1. If the tag matches a non-``*`` language range in the header with
``q=0`` (meaning "not acceptable", see :rfc:`RFC 7231, section 5.3.1
<7231#section-5.3.1>`), the tag is filtered out.
2. The non-``*`` language ranges in the header that do not have ``q=0``
are considered in descending order of qvalue; where two or more
language ranges have the same qvalue, they are considered in the
order in which they appear in the header.
3. A language range 'matches a particular language tag if, in a
case-insensitive comparison, it exactly equals the tag, or if it
exactly equals a prefix of the tag such that the first character
following the prefix is "-".' (:rfc:`RFC 4647, section 3.3.1
<4647#section-3.3.1>`)
4. If the tag does not match any of the non-``*`` language ranges, and
there is a ``*`` language range in the header, then if the ``*``
language range has ``q=0``, the language tag is filtered out,
otherwise the tag is considered a match.
(If a range (``*`` or non-``*``) appears in the header more than once
-- this would not make sense, but is nonetheless a valid header
according to the RFC -- the first in the header is used for matching,
and the others are ignored.)
"""
# The Basic Filtering matching scheme as applied to the Accept-Language
# header is very under-specified by RFCs 7231 and 4647. This
# implementation combines the description of the matching scheme in RFC
# 4647 and the rules of the Accept-Language header in RFC 7231 to
# arrive at an algorithm for Basic Filtering as applied to the
# Accept-Language header.
lowercased_parsed = [
(range_.lower(), qvalue) for (range_, qvalue) in self.parsed
]
lowercased_tags = [tag.lower() for tag in language_tags]
not_acceptable_ranges = set()
acceptable_ranges = dict()
asterisk_qvalue = None
for position_in_header, (range_, qvalue) in enumerate(
lowercased_parsed
):
if range_ == '*':
if asterisk_qvalue is None:
asterisk_qvalue = qvalue
asterisk_position = position_in_header
elif (
range_ not in acceptable_ranges and range_ not in
not_acceptable_ranges
# if we have not already encountered this range in the header
):
if qvalue == 0.0:
not_acceptable_ranges.add(range_)
else:
acceptable_ranges[range_] = (qvalue, position_in_header)
acceptable_ranges = [
(range_, qvalue, position_in_header)
for range_, (qvalue, position_in_header)
in acceptable_ranges.items()
]
# Sort acceptable_ranges by position_in_header, ascending order
acceptable_ranges.sort(key=lambda tuple_: tuple_[2])
# Sort acceptable_ranges by qvalue, descending order
acceptable_ranges.sort(key=lambda tuple_: tuple_[1], reverse=True)
# Sort guaranteed to be stable with Python >= 2.2, so position in
# header is tiebreaker when two ranges have the same qvalue
def match(tag, range_):
# RFC 4647, section 2.1: 'A language range matches a particular
# language tag if, in a case-insensitive comparison, it exactly
# equals the tag, or if it exactly equals a prefix of the tag such
# that the first character following the prefix is "-".'
return (tag == range_) or tag.startswith(range_ + '-')
# We can assume here that the language tags are valid tags, so we
# do not have to worry about them being malformed and ending with
# '-'.
filtered_tags = []
for index, tag in enumerate(lowercased_tags):
# If tag matches a non-* range with q=0, it is filtered out
if any((
match(tag=tag, range_=range_)
for range_ in not_acceptable_ranges
)):
continue
matched_range_qvalue = None
for range_, qvalue, position_in_header in acceptable_ranges:
# acceptable_ranges is in descending order of qvalue, and tied
# ranges are in ascending order of position_in_header, so the
# first range_ that matches the tag is the best match
if match(tag=tag, range_=range_):
matched_range_qvalue = qvalue
matched_range_position = position_in_header
break
else:
if asterisk_qvalue:
# From RFC 4647, section 3.3.1: '...HTTP/1.1 [RFC2616]
# specifies that the range "*" matches only languages not
# matched by any other range within an "Accept-Language"
# header.' (Though RFC 2616 is obsolete, and there is no
# mention of the meaning of "*" in RFC 7231, as the
# ``language-range`` syntax rule in RFC 7231 section 5.3.1
# directs us to RFC 4647, we can only assume that the
# meaning of "*" in the Accept-Language header remains the
# same).
matched_range_qvalue = asterisk_qvalue
matched_range_position = asterisk_position
if matched_range_qvalue is not None: # if there was a match
filtered_tags.append((
language_tags[index], matched_range_qvalue,
matched_range_position
))
# sort by matched_range_position, ascending
filtered_tags.sort(key=lambda tuple_: tuple_[2])
# When qvalues are tied, matched range position in the header is the
# tiebreaker.
# sort by qvalue, descending
filtered_tags.sort(key=lambda tuple_: tuple_[1], reverse=True)
return [(item[0], item[1]) for item in filtered_tags]
# (tag, qvalue), dropping the matched_range_position
# We return a list of tuples with qvalues, instead of just a set or
# a list of language tags, because
# RFC 4647 section 3.3: "If the language priority list contains more
# than one range, the content returned is typically ordered in
# descending level of preference, but it MAY be unordered, according to
# the needs of the application or protocol."
# We return the filtered tags in order of preference, each paired with
# the qvalue of the range that was their best match, as the ordering
# and the qvalues may well be needed in some applications, and a simple
# set or list of language tags can always be easily obtained from the
# returned list if the qvalues are not required. One use for qvalues,
# for example, would be to indicate that two tags are equally preferred
# (same qvalue), which we would not be able to do easily with a set or
# a list without e.g. making a member of the set or list a sequence.
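    # A sketch of basic_filtering (illustrative header value and tags):
    #
    #   header = AcceptLanguageValidHeader('en;q=0.9, de;q=0.5, fr;q=0')
    #   header.basic_filtering(['de-DE', 'en-GB', 'fr', 'ja'])
    #   # -> [('en-GB', 0.9), ('de-DE', 0.5)]
    #   # 'fr' matches a q=0 range and is excluded; 'ja' matches nothing and
    #   # there is no '*' range, so it is dropped as well.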
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of language tag `offers`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:meth:`AcceptLanguageValidHeader.best_match` uses its own algorithm
(one not specified in :rfc:`RFC 7231 <7231>`) to determine what is a
best match. The algorithm has many issues, and does not conform to
:rfc:`RFC 7231 <7231>`.
:meth:`AcceptLanguageValidHeader.lookup` is a possible alternative
for finding a best match -- it conforms to, and is suggested as a
matching scheme for the ``Accept-Language`` header in, :rfc:`RFC
7231, section 5.3.5 <7231#section-5.3.5>` -- but please be aware
that there are differences in how it determines what is a best
match. If that is not suitable for the needs of your application,
you may need to write your own matching using
:attr:`AcceptLanguageValidHeader.parsed`.
Each language tag in `offers` is checked against each non-0 range in
the header. If the two are a match according to WebOb's old criterion
for a match, the quality value of the match is the qvalue of the
language range from the header multiplied by the server quality value
of the offer (if the server quality value is not supplied, it is 1).
The offer in the match with the highest quality value is the best
match. If there is more than one match with the highest qvalue, the
match where the language range has a lower number of '*'s is the best
match. If the two have the same number of '*'s, the one that shows up
first in `offers` is the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` language
tag, or a (language tag, server quality value)
``tuple`` or ``list``. (The two may be mixed in the
iterable.)
:param default_match: (optional, any type) the value to be returned if
there is no match
:return: (``str``, or the type of `default_match`)
| The language tag that is the best match. If there is no
match, the value of `default_match` is returned.
**Issues**:
- Incorrect tiebreaking when quality values of two matches are the same
(https://github.com/Pylons/webob/issues/256)::
>>> header = AcceptLanguageValidHeader(
... header_value='en-gb;q=1, en;q=0.8'
... )
>>> header.best_match(offers=['en', 'en-GB'])
'en'
>>> header.best_match(offers=['en-GB', 'en'])
'en-GB'
>>> header = AcceptLanguageValidHeader(header_value='en-gb, en')
>>> header.best_match(offers=['en', 'en-gb'])
'en'
>>> header.best_match(offers=['en-gb', 'en'])
'en-gb'
- Incorrect handling of ``q=0``::
>>> header = AcceptLanguageValidHeader(header_value='en;q=0, *')
>>> header.best_match(offers=['en'])
'en'
>>> header = AcceptLanguageValidHeader(header_value='fr, en;q=0')
>>> header.best_match(offers=['en-gb'], default_match='en')
'en'
- Matching only takes into account the first subtag when matching a
range with more specific or less specific tags::
>>> header = AcceptLanguageValidHeader(header_value='zh')
>>> header.best_match(offers=['zh-Hans-CN'])
'zh-Hans-CN'
>>> header = AcceptLanguageValidHeader(header_value='zh-Hans')
>>> header.best_match(offers=['zh-Hans-CN'])
>>> header.best_match(offers=['zh-Hans-CN']) is None
True
>>> header = AcceptLanguageValidHeader(header_value='zh-Hans-CN')
>>> header.best_match(offers=['zh'])
'zh'
>>> header.best_match(offers=['zh-Hans'])
>>> header.best_match(offers=['zh-Hans']) is None
True
"""
warnings.warn(
'The behavior of AcceptLanguageValidHeader.best_match is '
'currently being maintained for backward compatibility, but it '
'will be deprecated in the future as it does not conform to the '
'RFC.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
matched_by = '*/*'
# [We can see that this was written for the ``Accept`` header and not
# the ``Accept-Language`` header, as there are no '/'s in a valid
# ``Accept-Language`` header.]
for offer in offers:
if isinstance(offer, (tuple, list)):
offer, server_quality = offer
else:
server_quality = 1
for mask, quality in self._parsed_nonzero:
possible_quality = server_quality * quality
if possible_quality < best_quality:
continue
elif possible_quality == best_quality:
# 'text/plain' overrides 'message/*' overrides '*/*'
# (if all match w/ the same q=)
if matched_by.count('*') <= mask.count('*'):
continue
# [This tiebreaking was written for the `Accept` header. A
# basic language range in a valid ``Accept-Language``
# header can only be either '*' or a range with no '*' in
# it. This happens to work here, but is not sufficient as a
# tiebreaker.
#
# A best match here, given this algorithm uses
# self._old_match() which matches both more *and* less
# specific tags, should be the match where the absolute
# value of the difference between the subtag counts of
# `mask` and `offer` is the lowest.]
if self._old_match(mask, offer):
best_quality = possible_quality
best_offer = offer
matched_by = mask
return best_offer
def lookup(
self, language_tags, default_range=None, default_tag=None,
default=None,
):
"""
Return the language tag that best matches the header, using Lookup.
This is an implementation of the Lookup matching scheme,
suggested as a matching scheme for the ``Accept-Language`` header in
:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>`, and described in
:rfc:`RFC 4647, section 3.4 <4647#section-3.4>`.
Each language range in the header is considered in turn, by descending
order of qvalue; where qvalues are tied, ranges are considered from
left to right.
Each language range in the header represents the most specific tag that
is an acceptable match: Lookup progressively truncates subtags from the
end of the range until a matching language tag is found. An example is
given in :rfc:`RFC 4647, section 3.4 <4647#section-3.4>`, under
"Example of a Lookup Fallback Pattern":
::
Range to match: zh-Hant-CN-x-private1-private2
1. zh-Hant-CN-x-private1-private2
2. zh-Hant-CN-x-private1
3. zh-Hant-CN
4. zh-Hant
5. zh
6. (default)
:param language_tags: (``iterable``) language tags
:param default_range: (optional, ``None`` or ``str``)
| If Lookup finds no match using the ranges in
the header, and this argument is not None,
Lookup will next attempt to match the range in
this argument, using the same subtag
truncation.
| `default_range` cannot be '*', as '*' is
skipped in Lookup. See :ref:`note
<acceptparse-lookup-asterisk-note>`.
| This parameter corresponds to the functionality
described in :rfc:`RFC 4647, section 3.4.1
<4647#section-3.4.1>`, in the paragraph
starting with "One common way to provide for a
default is to allow a specific language range
to be set as the default..."
:param default_tag: (optional, ``None`` or ``str``)
| At least one of `default_tag` or `default` must
be supplied as an argument to the method, to
define the defaulting behaviour.
| If Lookup finds no match using the ranges in the
header and `default_range`, this argument is not
``None``, and it does not match any range in the
header with ``q=0`` (exactly, with no subtag
truncation), then this value is returned.
| This parameter corresponds to "return a
particular language tag designated for the
operation", one of the examples of "defaulting
behavior" described in :rfc:`RFC 4647, section
3.4.1 <4647#section-3.4.1>`.
:param default: (optional, ``None`` or any type, including a callable)
| At least one of `default_tag` or `default` must be
supplied as an argument to the method, to define the
defaulting behaviour.
| If Lookup finds no match using the ranges in the
header and `default_range`, and `default_tag` is
``None`` or not acceptable because it matches a
``q=0`` range in the header, then Lookup will next
examine the `default` argument.
| If `default` is a callable, it will be called, and
the callable's return value will be returned.
| If `default` is not a callable, the value itself will
be returned.
| The difference between supplying a ``str`` to
`default_tag` and `default` is that `default_tag` is
checked against ``q=0`` ranges in the header to see
if it matches one of the ranges specified as not
acceptable, whereas a ``str`` for the `default`
argument is simply returned.
| This parameter corresponds to the "defaulting
behavior" described in :rfc:`RFC 4647, section 3.4.1
<4647#section-3.4.1>`
:return: (``str``, ``None``, or any type)
| The best match according to the Lookup matching scheme, or a
return value from one of the default arguments.
**Notes**:
.. _acceptparse-lookup-asterisk-note:
- Lookup's behaviour with '*' language ranges in the header may be
surprising. From :rfc:`RFC 4647, section 3.4 <4647#section-3.4>`:
In the lookup scheme, this range does not convey enough
information by itself to determine which language tag is most
appropriate, since it matches everything. If the language range
"*" is followed by other language ranges, it is skipped. If the
language range "*" is the only one in the language priority list
or if no other language range follows, the default value is
computed and returned.
So
::
>>> header = AcceptLanguageValidHeader('de, zh, *')
>>> header.lookup(language_tags=['ja', 'en'], default='default')
'default'
- Any tags in `language_tags` and `default_tag` and any tag matched
during the subtag truncation search for `default_range`, that are an
exact match for a non-``*`` range with ``q=0`` in the header, are
considered not acceptable and ruled out.
- If there is a ``*;q=0`` in the header, then `default_range` and
`default_tag` have no effect, as ``*;q=0`` means that all languages
not already matched by other ranges within the header are
unacceptable.
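        A brief illustrative example (the header value and language tags below
        are arbitrary, chosen only to show subtag truncation and defaulting):
        ::
            >>> header = AcceptLanguageValidHeader('zh-Hant-CN;q=0.9, en;q=0.4')
            >>> header.lookup(language_tags=['zh-Hant', 'en'], default_tag='en')
            'zh-Hant'
            >>> header.lookup(language_tags=['ja'], default_tag='en')
            'en'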
"""
if default_tag is None and default is None:
raise TypeError(
'`default_tag` and `default` arguments cannot both be None.'
)
# We need separate `default_tag` and `default` arguments because if we
# only had the `default` argument, there would be no way to tell
# whether a str is a language tag (in which case we have to check
# whether it has been specified as not acceptable with a q=0 range in
# the header) or not (in which case we can just return the value).
if default_range == '*':
raise ValueError('default_range cannot be *.')
parsed = list(self.parsed)
tags = language_tags
not_acceptable_ranges = []
acceptable_ranges = []
asterisk_non0_found = False
# Whether there is a '*' range in the header with q={not 0}
asterisk_q0_found = False
# Whether there is a '*' range in the header with q=0
# While '*' is skipped in Lookup because it "does not convey enough
# information by itself to determine which language tag is most
# appropriate" (RFC 4647, section 3.4), '*;q=0' is clear in meaning:
# languages not matched by any other range within the header are not
# acceptable.
for range_, qvalue in parsed:
if qvalue == 0.0:
if range_ == '*': # *;q=0
asterisk_q0_found = True
else: # {non-* range};q=0
not_acceptable_ranges.append(range_.lower())
elif not asterisk_q0_found and range_ == '*': # *;q={not 0}
asterisk_non0_found = True
# if asterisk_q0_found, then it does not matter whether
# asterisk_non0_found
else: # {non-* range};q={not 0}
acceptable_ranges.append((range_, qvalue))
# Sort acceptable_ranges by qvalue, descending order
acceptable_ranges.sort(key=lambda tuple_: tuple_[1], reverse=True)
# Sort guaranteed to be stable with Python >= 2.2, so position in
# header is tiebreaker when two ranges have the same qvalue
acceptable_ranges = [tuple_[0] for tuple_ in acceptable_ranges]
lowered_tags = [tag.lower() for tag in tags]
def best_match(range_):
subtags = range_.split('-')
while True:
for index, tag in enumerate(lowered_tags):
if tag in not_acceptable_ranges:
continue
# We think a non-'*' range with q=0 represents only
# itself as a tag, and there should be no falling back
# with subtag truncation. For example, with
# 'en-gb;q=0', it should not mean 'en;q=0': the client
# is unlikely to expect that specifying 'en-gb' as not
# acceptable would mean that 'en' is also not
# acceptable. There is no guidance on this at all in
# the RFCs, so it is left to us to decide how it should
# work.
if tag == range_:
return tags[index] # return the pre-lowered tag
try:
subtag_before_this = subtags[-2]
except IndexError: # len(subtags) == 1
break
# len(subtags) >= 2
if len(subtag_before_this) == 1 and (
subtag_before_this.isdigit() or
subtag_before_this.isalpha()
): # if subtag_before_this is a single-letter or -digit subtag
subtags.pop(-1) # pop twice instead of once
                subtags.pop(-1)  # always pop at least the last subtag
range_ = '-'.join(subtags)
for range_ in acceptable_ranges:
match = best_match(range_=range_.lower())
if match is not None:
return match
if not asterisk_q0_found:
if default_range is not None:
lowered_default_range = default_range.lower()
match = best_match(range_=lowered_default_range)
if match is not None:
return match
if default_tag is not None:
lowered_default_tag = default_tag.lower()
if lowered_default_tag not in not_acceptable_ranges:
return default_tag
try:
return default()
except TypeError: # default is not a callable
return default
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future.
:meth:`AcceptLanguageValidHeader.quality` uses its own algorithm
(one not specified in :rfc:`RFC 7231 <7231>`) to determine what is a
best match. The algorithm has many issues, and does not conform to
:rfc:`RFC 7231 <7231>`.
What should be considered a match depends on the needs of your
application (for example, should a language range in the header
match a more specific language tag offer, or a less specific tag
offer?) :rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>` suggests
three matching schemes from :rfc:`RFC 4647 <4647>`, two of which
WebOb supports with
:meth:`AcceptLanguageValidHeader.basic_filtering` and
:meth:`AcceptLanguageValidHeader.lookup` (we interpret the RFC to
mean that Extended Filtering cannot apply for the
``Accept-Language`` header, as the header only accepts basic
language ranges.) :meth:`AcceptLanguageValidHeader.basic_filtering`
returns quality values with the matched language tags.
:meth:`AcceptLanguageValidHeader.lookup` returns a language tag
without the quality value, but the quality value is less likely to
be useful when we are looking for a best match.
If these are not suitable or sufficient for the needs of your
application, you may need to write your own matching using
:attr:`AcceptLanguageValidHeader.parsed`.
:param offer: (``str``) language tag offer
:return: (``float`` or ``None``)
| The highest quality value from the language range(s) that
match the `offer`, or ``None`` if there is no match.
**Issues**:
- Incorrect handling of ``q=0`` and ``*``::
>>> header = AcceptLanguageValidHeader(header_value='en;q=0, *')
>>> header.quality(offer='en')
1.0
- Matching only takes into account the first subtag when matching a
range with more specific or less specific tags::
>>> header = AcceptLanguageValidHeader(header_value='zh')
>>> header.quality(offer='zh-Hans-CN')
1.0
>>> header = AcceptLanguageValidHeader(header_value='zh-Hans')
>>> header.quality(offer='zh-Hans-CN')
>>> header.quality(offer='zh-Hans-CN') is None
True
>>> header = AcceptLanguageValidHeader(header_value='zh-Hans-CN')
>>> header.quality(offer='zh')
1.0
>>> header.quality(offer='zh-Hans')
>>> header.quality(offer='zh-Hans') is None
True
"""
warnings.warn(
            'The behavior of AcceptLanguageValidHeader.quality is '
'currently being maintained for backward compatibility, but it '
'will be deprecated in the future as it does not conform to the '
'RFC.',
DeprecationWarning,
)
bestq = 0
for mask, q in self.parsed:
if self._old_match(mask, offer):
bestq = max(bestq, q)
return bestq or None
class _AcceptLanguageInvalidOrNoHeader(AcceptLanguage):
"""
Represent when an ``Accept-Language`` header is invalid or not in request.
This is the base class for the behaviour that
:class:`.AcceptLanguageInvalidHeader` and :class:`.AcceptLanguageNoHeader`
have in common.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept-Language`` header has an invalid value. This implementation
disregards the header when the header is invalid, so
:class:`.AcceptLanguageInvalidHeader` and :class:`.AcceptLanguageNoHeader`
have much behaviour in common.
"""
def __nonzero__(self):
"""
Return whether ``self`` represents a valid ``Accept-Language`` header.
Return ``True`` if ``self`` represents a valid header, and ``False`` if
it represents an invalid header, or the header not being in the
request.
For this class, it always returns ``False``.
"""
return False
__bool__ = __nonzero__ # Python 3
def __contains__(self, offer):
"""
Return ``bool`` indicating whether `offer` is acceptable.
.. warning::
The behavior of ``.__contains__`` for the ``AcceptLanguage`` classes
is currently being maintained for backward compatibility, but it
will change in the future to better conform to the RFC.
:param offer: (``str``) language tag offer
:return: (``bool``) Whether ``offer`` is acceptable according to the
header.
For this class, either there is no ``Accept-Language`` header in the
request, or the header is invalid, so any language tag is acceptable,
and this always returns ``True``.
"""
warnings.warn(
'The behavior of .__contains__ for the AcceptLanguage classes is '
'currently being maintained for backward compatibility, but it '
'will change in the future to better conform to the RFC.',
DeprecationWarning,
)
return True
def __iter__(self):
"""
Return all the ranges with non-0 qvalues, in order of preference.
.. warning::
The behavior of this method is currently maintained for backward
compatibility, but will change in the future.
:return: iterator of all the language ranges in the header with non-0
qvalues, in descending order of qvalue. If two ranges have the
same qvalue, they are returned in the order of their positions
in the header, from left to right.
For this class, either there is no ``Accept-Language`` header in the
request, or the header is invalid, so there are no language ranges, and
this always returns an empty iterator.
"""
warnings.warn(
'The behavior of AcceptLanguageValidHeader.__iter__ is currently '
'maintained for backward compatibility, but will change in the '
'future.',
DeprecationWarning,
)
return iter(())
def basic_filtering(self, language_tags):
"""
Return the tags that match the header, using Basic Filtering.
:param language_tags: (``iterable``) language tags
:return: A list of tuples of the form (language tag, qvalue), in
descending order of preference.
When the header is invalid and when the header is not in the request,
there are no matches, so this method always returns an empty list.
"""
return []
def best_match(self, offers, default_match=None):
"""
Return the best match from the sequence of language tag `offers`.
This is the ``.best_match()`` method for when the header is invalid or
not found in the request, corresponding to
:meth:`AcceptLanguageValidHeader.best_match`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptLanguageValidHeader.best_match`).
When the header is invalid, or there is no `Accept-Language` header in
the request, any of the language tags in `offers` are considered
acceptable, so the best match is the tag in `offers` with the highest
server quality value (if the server quality value is not supplied, it
is 1).
        If more than one of the language tags in `offers` has the same highest
        server quality value, then the one that shows up first in `offers` is
        the best match.
:param offers: (iterable)
| Each item in the iterable may be a ``str`` language
tag, or a (language tag, server quality value)
``tuple`` or ``list``. (The two may be mixed in the
iterable.)
:param default_match: (optional, any type) the value to be returned if
`offers` is empty.
:return: (``str``, or the type of `default_match`)
| The language tag that has the highest server quality value.
If `offers` is empty, the value of `default_match` is
returned.
"""
warnings.warn(
'The behavior of .best_match for the AcceptLanguage classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior is not '
'specified in (and currently does not conform to) RFC 7231.',
DeprecationWarning,
)
best_quality = -1
best_offer = default_match
for offer in offers:
if isinstance(offer, (list, tuple)):
offer, quality = offer
else:
quality = 1
if quality > best_quality:
best_offer = offer
best_quality = quality
return best_offer
def lookup(
self, language_tags=None, default_range=None, default_tag=None,
default=None,
):
"""
Return the language tag that best matches the header, using Lookup.
When the header is invalid, or there is no ``Accept-Language`` header
in the request, all language tags are considered acceptable, so it is
as if the header is '*'. As specified for the Lookup matching scheme in
:rfc:`RFC 4647, section 3.4 <4647#section-3.4>`, when the header is
'*', the default value is to be computed and returned. So this method
will ignore the `language_tags` and `default_range` arguments, and
proceed to `default_tag`, then `default`.
:param language_tags: (optional, any type)
| This argument is ignored, and is only used as a
placeholder so that the method signature
corresponds to that of
:meth:`AcceptLanguageValidHeader.lookup`.
:param default_range: (optional, any type)
| This argument is ignored, and is only used as a
placeholder so that the method signature
corresponds to that of
:meth:`AcceptLanguageValidHeader.lookup`.
:param default_tag: (optional, ``None`` or ``str``)
| At least one of `default_tag` or `default` must
be supplied as an argument to the method, to
define the defaulting behaviour.
| If this argument is not ``None``, then it is
returned.
| This parameter corresponds to "return a
particular language tag designated for the
operation", one of the examples of "defaulting
behavior" described in :rfc:`RFC 4647, section
3.4.1 <4647#section-3.4.1>`.
:param default: (optional, ``None`` or any type, including a callable)
| At least one of `default_tag` or `default` must be
supplied as an argument to the method, to define the
defaulting behaviour.
| If `default_tag` is ``None``, then Lookup will next
examine the `default` argument.
| If `default` is a callable, it will be called, and
the callable's return value will be returned.
| If `default` is not a callable, the value itself will
be returned.
| This parameter corresponds to the "defaulting
behavior" described in :rfc:`RFC 4647, section 3.4.1
<4647#section-3.4.1>`
:return: (``str``, or any type)
| the return value from `default_tag` or `default`.
"""
if default_tag is None and default is None:
raise TypeError(
'`default_tag` and `default` arguments cannot both be None.'
)
if default_tag is not None:
return default_tag
try:
return default()
except TypeError: # default is not a callable
return default
def quality(self, offer):
"""
Return quality value of given offer, or ``None`` if there is no match.
This is the ``.quality()`` method for when the header is invalid or not
found in the request, corresponding to
:meth:`AcceptLanguageValidHeader.quality`.
.. warning::
This is currently maintained for backward compatibility, and will be
deprecated in the future (see the documentation for
:meth:`AcceptLanguageValidHeader.quality`).
:param offer: (``str``) language tag offer
:return: (``float``) ``1.0``.
When the ``Accept-Language`` header is invalid or not in the request,
all offers are equally acceptable, so 1.0 is always returned.
"""
warnings.warn(
'The behavior of .quality for the AcceptLanguage classes is '
'currently being maintained for backward compatibility, but the '
'method will be deprecated in the future, as its behavior is not '
'specified in (and currently does not conform to) RFC 7231.',
DeprecationWarning,
)
return 1.0
class AcceptLanguageNoHeader(_AcceptLanguageInvalidOrNoHeader):
"""
Represent when there is no ``Accept-Language`` header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptLanguageNoHeader.__add__`).
"""
def __init__(self):
"""
Create an :class:`AcceptLanguageNoHeader` instance.
"""
self._header_value = None
self._parsed = None
self._parsed_nonzero = None
@property
def header_value(self):
"""
(``str`` or ``None``) The header value.
As there is no header in the request, this is ``None``.
"""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As there is no header in the request, this is ``None``.
"""
return self._parsed
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str``
* a ``dict``, with language ranges as keys and qvalues as values
* a ``tuple`` or ``list``, of language range ``str``\ s or of ``tuple``
or ``list`` (language range, qvalue) pairs (``str``\ s and pairs can be
mixed within the ``tuple`` or ``list``)
* an :class:`AcceptLanguageValidHeader`,
:class:`AcceptLanguageNoHeader`, or
:class:`AcceptLanguageInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptLanguageValidHeader` instance, a new
:class:`AcceptLanguageValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptLanguageNoHeader` instance, an
invalid header value, or an :class:`AcceptLanguageInvalidHeader`
instance, a new :class:`AcceptLanguageNoHeader` instance is returned.
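        A short illustration (the header strings are arbitrary; a plain ``str``
        is assumed to be taken as the header value as-is):
        ::
            >>> no_header = AcceptLanguageNoHeader()
            >>> (no_header + 'en-gb;q=0.9, fr').header_value
            'en-gb;q=0.9, fr'
            >>> isinstance(no_header + '', AcceptLanguageNoHeader)
            True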
"""
if isinstance(other, AcceptLanguageValidHeader):
return AcceptLanguageValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptLanguageNoHeader, AcceptLanguageInvalidHeader)
):
return self.__class__()
return self._add_instance_and_non_accept_language_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptLanguageNoHeader.__add__`.
"""
return self.__add__(other=other)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def __str__(self):
"""Return the ``str`` ``'<no header in request>'``."""
return '<no header in request>'
def _add_instance_and_non_accept_language_type(self, instance, other):
if not other:
return self.__class__()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptLanguageValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return self.__class__()
class AcceptLanguageInvalidHeader(_AcceptLanguageInvalidOrNoHeader):
"""
Represent an invalid ``Accept-Language`` header.
An invalid header is one that does not conform to
:rfc:`7231#section-5.3.5`. As specified in the RFC, an empty header is an
invalid ``Accept-Language`` header.
:rfc:`7231` does not provide any guidance on what should happen if the
``Accept-Language`` header has an invalid value. This implementation
disregards the header, and treats it as if there is no ``Accept-Language``
header in the request.
This object should not be modified. To add to the header, we can use the
addition operators (``+`` and ``+=``), which return a new object (see the
docstring for :meth:`AcceptLanguageInvalidHeader.__add__`).
"""
def __init__(self, header_value):
"""
Create an :class:`AcceptLanguageInvalidHeader` instance.
"""
self._header_value = header_value
self._parsed = None
self._parsed_nonzero = None
@property
def header_value(self):
"""(``str`` or ``None``) The header value."""
return self._header_value
@property
def parsed(self):
"""
(``list`` or ``None``) Parsed form of the header.
As the header is invalid and cannot be parsed, this is ``None``.
"""
return self._parsed
def __add__(self, other):
"""
Add to header, creating a new header object.
`other` can be:
* ``None``
* a ``str``
* a ``dict``, with language ranges as keys and qvalues as values
* a ``tuple`` or ``list``, of language range ``str``\ s or of ``tuple``
or ``list`` (language range, qvalue) pairs (``str``\ s and pairs can
be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptLanguageValidHeader`,
:class:`AcceptLanguageNoHeader`, or
:class:`AcceptLanguageInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
If `other` is a valid header value or an
:class:`AcceptLanguageValidHeader` instance, a new
:class:`AcceptLanguageValidHeader` instance with the valid header value
is returned.
If `other` is ``None``, an :class:`AcceptLanguageNoHeader` instance, an
invalid header value, or an :class:`AcceptLanguageInvalidHeader`
instance, a new :class:`AcceptLanguageNoHeader` instance is returned.
"""
if isinstance(other, AcceptLanguageValidHeader):
return AcceptLanguageValidHeader(header_value=other.header_value)
if isinstance(
other, (AcceptLanguageNoHeader, AcceptLanguageInvalidHeader)
):
return AcceptLanguageNoHeader()
return self._add_instance_and_non_accept_language_type(
instance=self, other=other,
)
def __radd__(self, other):
"""
Add to header, creating a new header object.
See the docstring for :meth:`AcceptLanguageValidHeader.__add__`.
"""
return self._add_instance_and_non_accept_language_type(
instance=self, other=other, instance_on_the_right=True,
)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
# We do not display the header_value, as it is untrusted input. The
# header_value could always be easily obtained from the .header_value
# property.
def __str__(self):
"""Return the ``str`` ``'<invalid header value>'``."""
return '<invalid header value>'
def _add_instance_and_non_accept_language_type(
self, instance, other, instance_on_the_right=False,
):
if not other:
return AcceptLanguageNoHeader()
other_header_value = self._python_value_to_header_str(value=other)
try:
return AcceptLanguageValidHeader(header_value=other_header_value)
except ValueError: # invalid header value
return AcceptLanguageNoHeader()
def create_accept_language_header(header_value):
"""
Create an object representing the ``Accept-Language`` header in a request.
:param header_value: (``str``) header value
:return: If `header_value` is ``None``, an :class:`AcceptLanguageNoHeader`
instance.
| If `header_value` is a valid ``Accept-Language`` header, an
:class:`AcceptLanguageValidHeader` instance.
| If `header_value` is an invalid ``Accept-Language`` header, an
:class:`AcceptLanguageInvalidHeader` instance.
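    A short illustration (the valid header value below is arbitrary; an empty
    string is used for the invalid case, as an empty ``Accept-Language``
    header is invalid):
    ::
        >>> isinstance(create_accept_language_header(None),
        ...            AcceptLanguageNoHeader)
        True
        >>> isinstance(create_accept_language_header('en-gb, fr;q=0.5'),
        ...            AcceptLanguageValidHeader)
        True
        >>> isinstance(create_accept_language_header(''),
        ...            AcceptLanguageInvalidHeader)
        True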
"""
if header_value is None:
return AcceptLanguageNoHeader()
try:
return AcceptLanguageValidHeader(header_value=header_value)
except ValueError:
return AcceptLanguageInvalidHeader(header_value=header_value)
def accept_language_property():
doc = """
Property representing the ``Accept-Language`` header.
(:rfc:`RFC 7231, section 5.3.5 <7231#section-5.3.5>`)
The header value in the request environ is parsed and a new object
representing the header is created every time we *get* the value of the
property. (*set* and *del* change the header value in the request
environ, and do not involve parsing.)
"""
ENVIRON_KEY = 'HTTP_ACCEPT_LANGUAGE'
def fget(request):
"""Get an object representing the header in the request."""
return create_accept_language_header(
header_value=request.environ.get(ENVIRON_KEY)
)
def fset(request, value):
"""
Set the corresponding key in the request environ.
`value` can be:
* ``None``
* a ``str``
* a ``dict``, with language ranges as keys and qvalues as values
* a ``tuple`` or ``list``, of language range ``str``\ s or of ``tuple``
or ``list`` (language range, qvalue) pairs (``str``\ s and pairs can
be mixed within the ``tuple`` or ``list``)
* an :class:`AcceptLanguageValidHeader`,
:class:`AcceptLanguageNoHeader`, or
:class:`AcceptLanguageInvalidHeader` instance
* object of any other type that returns a value for ``__str__``
"""
if value is None or isinstance(value, AcceptLanguageNoHeader):
fdel(request=request)
else:
if isinstance(
value, (AcceptLanguageValidHeader, AcceptLanguageInvalidHeader)
):
header_value = value.header_value
else:
header_value = AcceptLanguage._python_value_to_header_str(
value=value,
)
request.environ[ENVIRON_KEY] = header_value
def fdel(request):
"""Delete the corresponding key from the request environ."""
try:
del request.environ[ENVIRON_KEY]
except KeyError:
pass
return property(fget, fset, fdel, textwrap.dedent(doc))
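# A rough sketch of how this property factory is meant to be wired up; the
# ``Request`` class below is hypothetical and only illustrates the get/set/del
# behaviour defined above:
#
#     class Request(object):
#         environ = ...  # WSGI environ dict
#         accept_language = accept_language_property()
#
#     request.accept_language            # returns an AcceptLanguage*Header
#     request.accept_language = 'en-gb'  # sets HTTP_ACCEPT_LANGUAGE
#     del request.accept_language        # removes the key from the environ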
| mit | -3,862,957,837,787,617,000 | 38.707948 | 114 | 0.573982 | false | 4.464819 | false | false | false | 0.000209 |
hyOzd/kicad-python | kicad/3rdparty/enum/__init__.py | 4 | 31017 | """Python Enumerations"""
import sys as _sys
__all__ = ['Enum', 'IntEnum', 'unique']
version = 1, 1, 1
pyver = float('%s.%s' % _sys.version_info[:2])
try:
any
except NameError:
def any(iterable):
for element in iterable:
if element:
return True
return False
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
try:
basestring
except NameError:
# In Python 2 basestring is the ancestor of both str and unicode
# in Python 3 it's just str, but was missing in 3.1
basestring = str
try:
unicode
except NameError:
# In Python 3 unicode no longer exists (it's just str)
unicode = str
class _RouteClassAttributeToGetattr(object):
"""Route attribute access on a class to __getattr__.
This is a descriptor, used to define attributes that act differently when
accessed through an instance and through a class. Instance access remains
normal, but access to an attribute through a class will be routed to the
class's __getattr__ method; this is done by raising AttributeError.
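    A quick sketch of the effect (the ``Color`` enum here is hypothetical):
        >>> class Color(Enum):
        ...     red = 1
        >>> Color.red.name   # instance access is routed through the descriptor
        'red'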
"""
def __init__(self, fget=None):
self.fget = fget
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError()
return self.fget(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _is_descriptor(obj):
"""Returns True if obj is a descriptor, False otherwise."""
return (
hasattr(obj, '__get__') or
hasattr(obj, '__set__') or
hasattr(obj, '__delete__'))
def _is_dunder(name):
"""Returns True if a __dunder__ name, False otherwise."""
return (name[:2] == name[-2:] == '__' and
name[2:3] != '_' and
name[-3:-2] != '_' and
len(name) > 4)
def _is_sunder(name):
"""Returns True if a _sunder_ name, False otherwise."""
return (name[0] == name[-1] == '_' and
name[1:2] != '_' and
name[-2:-1] != '_' and
len(name) > 2)
def _make_class_unpicklable(cls):
"""Make the given class un-picklable."""
def _break_on_call_reduce(self, protocol=None):
raise TypeError('%r cannot be pickled' % self)
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
class _EnumDict(dict):
"""Track enum member order and ensure member names are not reused.
EnumMeta will use the names found in self._member_names as the
enumeration member names.
"""
def __init__(self):
super(_EnumDict, self).__init__()
self._member_names = []
def __setitem__(self, key, value):
"""Changes anything not dundered or not a descriptor.
If a descriptor is added with the same name as an enum member, the name
is removed from _member_names (this may leave a hole in the numerical
sequence of values).
If an enum member name is used twice, an error is raised; duplicate
values are not checked for.
Single underscore (sunder) names are reserved.
        Note: in 3.x __order__ is simply discarded as an unnecessary piece
        left over from 2.x
"""
if pyver >= 3.0 and key == '__order__':
return
if _is_sunder(key):
raise ValueError('_names_ are reserved for future Enum use')
elif _is_dunder(key):
pass
elif key in self._member_names:
# descriptor overwriting an enum?
raise TypeError('Attempted to reuse key: %r' % key)
elif not _is_descriptor(value):
if key in self:
# enum overwriting a descriptor?
raise TypeError('Key already defined as: %r' % self[key])
self._member_names.append(key)
super(_EnumDict, self).__setitem__(key, value)
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course until
# EnumMeta finishes running the first time the Enum class doesn't exist. This
# is also why there are checks in EnumMeta like `if Enum is not None`
Enum = None
class EnumMeta(type):
"""Metaclass for Enum"""
@classmethod
def __prepare__(metacls, cls, bases):
return _EnumDict()
def __new__(metacls, cls, bases, classdict):
# an Enum class is final once enumeration items have been defined; it
# cannot be mixed with other types (int, float, etc.) if it has an
# inherited __new__ unless a new __new__ is defined (or the resulting
# class will fail).
if type(classdict) is dict:
original_dict = classdict
classdict = _EnumDict()
for k, v in original_dict.items():
classdict[k] = v
member_type, first_enum = metacls._get_mixins_(bases)
__new__, save_new, use_args = metacls._find_new_(classdict, member_type,
first_enum)
# save enum items into separate mapping so they don't get baked into
# the new class
members = dict((k, classdict[k]) for k in classdict._member_names)
for name in classdict._member_names:
del classdict[name]
# py2 support for definition order
__order__ = classdict.get('__order__')
if __order__ is None:
if pyver < 3.0:
try:
__order__ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])]
except TypeError:
__order__ = [name for name in sorted(members.keys())]
else:
__order__ = classdict._member_names
else:
del classdict['__order__']
if pyver < 3.0:
__order__ = __order__.replace(',', ' ').split()
aliases = [name for name in members if name not in __order__]
__order__ += aliases
# check for illegal enum names (any others?)
invalid_names = set(members) & set(['mro'])
if invalid_names:
raise ValueError('Invalid enum member name(s): %s' % (
', '.join(invalid_names), ))
# save attributes from super classes so we know if we can take
# the shortcut of storing members in the class dict
base_attributes = set([a for b in bases for a in b.__dict__])
# create our new Enum type
enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict)
enum_class._member_names_ = [] # names in random order
if OrderedDict is not None:
enum_class._member_map_ = OrderedDict()
else:
enum_class._member_map_ = {} # name->value map
enum_class._member_type_ = member_type
# Reverse value->name map for hashable values.
enum_class._value2member_map_ = {}
# instantiate them, checking for duplicates as we go
# we instantiate first instead of checking for duplicates first in case
# a custom __new__ is doing something funky with the values -- such as
# auto-numbering ;)
if __new__ is None:
__new__ = enum_class.__new__
for member_name in __order__:
value = members[member_name]
if not isinstance(value, tuple):
args = (value, )
else:
args = value
if member_type is tuple: # special case for tuple enums
args = (args, ) # wrap it one more time
if not use_args or not args:
enum_member = __new__(enum_class)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = value
else:
enum_member = __new__(enum_class, *args)
if not hasattr(enum_member, '_value_'):
enum_member._value_ = member_type(*args)
value = enum_member._value_
enum_member._name_ = member_name
enum_member.__objclass__ = enum_class
enum_member.__init__(*args)
# If another member with the same value was already defined, the
# new member becomes an alias to the existing one.
for name, canonical_member in enum_class._member_map_.items():
if canonical_member.value == enum_member._value_:
enum_member = canonical_member
break
else:
# Aliases don't appear in member names (only in __members__).
enum_class._member_names_.append(member_name)
# performance boost for any member that would not shadow
# a DynamicClassAttribute (aka _RouteClassAttributeToGetattr)
if member_name not in base_attributes:
setattr(enum_class, member_name, enum_member)
# now add to _member_map_
enum_class._member_map_[member_name] = enum_member
try:
# This may fail if value is not hashable. We can't add the value
# to the map, and by-value lookups for this value will be
# linear.
enum_class._value2member_map_[value] = enum_member
except TypeError:
pass
# If a custom type is mixed into the Enum, and it does not know how
# to pickle itself, pickle.dumps will succeed but pickle.loads will
# fail. Rather than have the error show up later and possibly far
# from the source, sabotage the pickle protocol for this class so
# that pickle.dumps also fails.
#
# However, if the new class implements its own __reduce_ex__, do not
# sabotage -- it's on them to make sure it works correctly. We use
# __reduce_ex__ instead of any of the others as it is preferred by
# pickle over __reduce__, and it handles all pickle protocols.
unpicklable = False
if '__reduce_ex__' not in classdict:
if member_type is not object:
methods = ('__getnewargs_ex__', '__getnewargs__',
'__reduce_ex__', '__reduce__')
if not any(m in member_type.__dict__ for m in methods):
_make_class_unpicklable(enum_class)
unpicklable = True
# double check that repr and friends are not the mixin's or various
# things break (such as pickle)
for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
class_method = getattr(enum_class, name)
obj_method = getattr(member_type, name, None)
enum_method = getattr(first_enum, name, None)
if name not in classdict and class_method is not enum_method:
if name == '__reduce_ex__' and unpicklable:
continue
setattr(enum_class, name, enum_method)
        # method resolution and ints are not playing nicely
        # Pythons earlier than 2.6 use __cmp__
if pyver < 2.6:
if issubclass(enum_class, int):
setattr(enum_class, '__cmp__', getattr(int, '__cmp__'))
elif pyver < 3.0:
if issubclass(enum_class, int):
for method in (
'__le__',
'__lt__',
'__gt__',
'__ge__',
'__eq__',
'__ne__',
'__hash__',
):
setattr(enum_class, method, getattr(int, method))
# replace any other __new__ with our own (as long as Enum is not None,
# anyway) -- again, this is to support pickle
if Enum is not None:
# if the user defined their own __new__, save it before it gets
# clobbered in case they subclass later
if save_new:
setattr(enum_class, '__member_new__', enum_class.__dict__['__new__'])
setattr(enum_class, '__new__', Enum.__dict__['__new__'])
return enum_class
def __call__(cls, value, names=None, module=None, type=None, start=1):
"""Either returns an existing member, or creates a new enum class.
This method is used both when an enum class is given a value to match
to an enumeration member (i.e. Color(3)) and for the functional API
(i.e. Color = Enum('Color', names='red green blue')).
When used for the functional API: `module`, if set, will be stored in
the new class' __module__ attribute; `type`, if set, will be mixed in
as the first base class.
Note: if `module` is not set this routine will attempt to discover the
calling module by walking the frame stack; if this is unsuccessful
the resulting class will not be pickleable.
"""
if names is None: # simple value lookup
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
return cls._create_(value, names, module=module, type=type, start=start)
def __contains__(cls, member):
return isinstance(member, cls) and member.name in cls._member_map_
def __delattr__(cls, attr):
# nicer error message when someone tries to delete an attribute
# (see issue19025).
if attr in cls._member_map_:
raise AttributeError(
"%s: cannot delete Enum member." % cls.__name__)
super(EnumMeta, cls).__delattr__(attr)
def __dir__(self):
return (['__class__', '__doc__', '__members__', '__module__'] +
self._member_names_)
@property
def __members__(cls):
"""Returns a mapping of member name->value.
This mapping lists all enum members, including aliases. Note that this
is a copy of the internal mapping.
"""
return cls._member_map_.copy()
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
if _is_dunder(name):
raise AttributeError(name)
try:
return cls._member_map_[name]
except KeyError:
raise AttributeError(name)
def __getitem__(cls, name):
return cls._member_map_[name]
def __iter__(cls):
return (cls._member_map_[name] for name in cls._member_names_)
def __reversed__(cls):
return (cls._member_map_[name] for name in reversed(cls._member_names_))
def __len__(cls):
return len(cls._member_names_)
def __repr__(cls):
return "<enum %r>" % cls.__name__
def __setattr__(cls, name, value):
"""Block attempts to reassign Enum members.
A simple assignment to the class namespace only changes one of the
several possible ways to get an Enum member from the Enum class,
resulting in an inconsistent Enumeration.
"""
member_map = cls.__dict__.get('_member_map_', {})
if name in member_map:
raise AttributeError('Cannot reassign members.')
super(EnumMeta, cls).__setattr__(name, value)
def _create_(cls, class_name, names=None, module=None, type=None, start=1):
"""Convenience method to create a new Enum class.
`names` can be:
* A string containing member names, separated either with spaces or
commas. Values are auto-numbered from 1.
* An iterable of member names. Values are auto-numbered from 1.
* An iterable of (member name, value) pairs.
* A mapping of member name -> value.
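        For instance (an illustrative sketch; the class and member names are
        arbitrary), via the functional API which forwards to this method:
            >>> Color = Enum('Color', 'red green blue')
            >>> Color.red.value
            1
            >>> Season = Enum('Season', [('SPRING', 1), ('SUMMER', 2)])
            >>> Season.SUMMER.value
            2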
"""
if pyver < 3.0:
# if class_name is unicode, attempt a conversion to ASCII
if isinstance(class_name, unicode):
try:
class_name = class_name.encode('ascii')
except UnicodeEncodeError:
raise TypeError('%r is not representable in ASCII' % class_name)
metacls = cls.__class__
if type is None:
bases = (cls, )
else:
bases = (type, cls)
classdict = metacls.__prepare__(class_name, bases)
__order__ = []
# special processing needed for names?
if isinstance(names, basestring):
names = names.replace(',', ' ').split()
if isinstance(names, (tuple, list)) and isinstance(names[0], basestring):
names = [(e, i+start) for (i, e) in enumerate(names)]
# Here, names is either an iterable of (name, value) or a mapping.
item = None # in case names is empty
for item in names:
if isinstance(item, basestring):
member_name, member_value = item, names[item]
else:
member_name, member_value = item
classdict[member_name] = member_value
__order__.append(member_name)
# only set __order__ in classdict if name/value was not from a mapping
if not isinstance(item, basestring):
classdict['__order__'] = ' '.join(__order__)
enum_class = metacls.__new__(metacls, class_name, bases, classdict)
# TODO: replace the frame hack if a blessed way to know the calling
# module is ever developed
if module is None:
try:
module = _sys._getframe(2).f_globals['__name__']
except (AttributeError, ValueError):
pass
if module is None:
_make_class_unpicklable(enum_class)
else:
enum_class.__module__ = module
return enum_class
@staticmethod
def _get_mixins_(bases):
"""Returns the type for creating enum members, and the first inherited
enum class.
bases: the tuple of bases that was given to __new__
"""
if not bases or Enum is None:
return object, Enum
# double check that we are not subclassing a class with existing
# enumeration members; while we're at it, see if any other data
# type has been mixed in so we can use the correct __new__
member_type = first_enum = None
for base in bases:
if (base is not Enum and
issubclass(base, Enum) and
base._member_names_):
raise TypeError("Cannot extend enumerations")
# base is now the last base in bases
if not issubclass(base, Enum):
raise TypeError("new enumerations must be created as "
"`ClassName([mixin_type,] enum_type)`")
# get correct mix-in type (either mix-in type of Enum subclass, or
# first base if last base is Enum)
if not issubclass(bases[0], Enum):
member_type = bases[0] # first data type
first_enum = bases[-1] # enum type
else:
for base in bases[0].__mro__:
# most common: (IntEnum, int, Enum, object)
# possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>,
# <class 'int'>, <Enum 'Enum'>,
# <class 'object'>)
if issubclass(base, Enum):
if first_enum is None:
first_enum = base
else:
if member_type is None:
member_type = base
return member_type, first_enum
if pyver < 3.0:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
if __new__:
return None, True, True # __new__, save_new, use_args
N__new__ = getattr(None, '__new__')
O__new__ = getattr(object, '__new__')
if Enum is None:
E__new__ = N__new__
else:
E__new__ = Enum.__dict__['__new__']
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
try:
target = possible.__dict__[method]
except (AttributeError, KeyError):
target = getattr(possible, method, None)
if target not in [
None,
N__new__,
O__new__,
E__new__,
]:
if method == '__member_new__':
classdict['__new__'] = target
return None, False, True
if isinstance(target, staticmethod):
target = target.__get__(member_type)
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, False, use_args
else:
@staticmethod
def _find_new_(classdict, member_type, first_enum):
"""Returns the __new__ to be used for creating the enum members.
classdict: the class dictionary given to __new__
member_type: the data type whose __new__ will be used by default
first_enum: enumeration to check for an overriding __new__
"""
            # now find the correct __new__, checking to see if one was defined
# by the user; also check earlier enum classes in case a __new__ was
# saved as __member_new__
__new__ = classdict.get('__new__', None)
# should __new__ be saved as __member_new__ later?
save_new = __new__ is not None
if __new__ is None:
# check all possibles for __member_new__ before falling back to
# __new__
for method in ('__member_new__', '__new__'):
for possible in (member_type, first_enum):
target = getattr(possible, method, None)
if target not in (
None,
None.__new__,
object.__new__,
Enum.__new__,
):
__new__ = target
break
if __new__ is not None:
break
else:
__new__ = object.__new__
# if a non-object.__new__ is used then whatever value/tuple was
# assigned to the enum member name will be passed to __new__ and to the
# new enum member's __init__
if __new__ is object.__new__:
use_args = False
else:
use_args = True
return __new__, save_new, use_args
########################################################
# In order to support Python 2 and 3 with a single
# codebase we have to create the Enum methods separately
# and then use the `type(name, bases, dict)` method to
# create the class.
########################################################
temp_enum_dict = {}
temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n"
def __new__(cls, value):
# all enum instances are actually created during class construction
# without calling this method; this method is called by the metaclass'
# __call__ (i.e. Color(3) ), and by pickle
if type(value) is cls:
# For lookups like Color(Color.red)
value = value.value
#return value
# by-value search for a matching enum member
# see if it's in the reverse mapping (for hashable values)
try:
if value in cls._value2member_map_:
return cls._value2member_map_[value]
except TypeError:
# not there, now do long search -- O(n) behavior
for member in cls._member_map_.values():
if member.value == value:
return member
raise ValueError("%s is not a valid %s" % (value, cls.__name__))
temp_enum_dict['__new__'] = __new__
del __new__
def __repr__(self):
return "<%s.%s: %r>" % (
self.__class__.__name__, self._name_, self._value_)
temp_enum_dict['__repr__'] = __repr__
del __repr__
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._name_)
temp_enum_dict['__str__'] = __str__
del __str__
if pyver >= 3.0:
def __dir__(self):
added_behavior = [
m
for cls in self.__class__.mro()
for m in cls.__dict__
if m[0] != '_' and m not in self._member_map_
]
return (['__class__', '__doc__', '__module__', ] + added_behavior)
temp_enum_dict['__dir__'] = __dir__
del __dir__
def __format__(self, format_spec):
# mixed-in Enums should use the mixed-in type's __format__, otherwise
# we can get strange results with the Enum name showing up instead of
# the value
# pure Enum branch
if self._member_type_ is object:
cls = str
val = str(self)
# mix-in branch
else:
cls = self._member_type_
val = self.value
return cls.__format__(val, format_spec)
temp_enum_dict['__format__'] = __format__
del __format__
####################################
# Pythons earlier than 2.6 use __cmp__
if pyver < 2.6:
def __cmp__(self, other):
if type(other) is self.__class__:
if self is other:
return 0
return -1
return NotImplemented
raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__cmp__'] = __cmp__
del __cmp__
else:
def __le__(self, other):
raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__le__'] = __le__
del __le__
def __lt__(self, other):
raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__lt__'] = __lt__
del __lt__
def __ge__(self, other):
raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__ge__'] = __ge__
del __ge__
def __gt__(self, other):
raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__))
temp_enum_dict['__gt__'] = __gt__
del __gt__
def __eq__(self, other):
if type(other) is self.__class__:
return self is other
return NotImplemented
temp_enum_dict['__eq__'] = __eq__
del __eq__
def __ne__(self, other):
if type(other) is self.__class__:
return self is not other
return NotImplemented
temp_enum_dict['__ne__'] = __ne__
del __ne__
def __hash__(self):
return hash(self._name_)
temp_enum_dict['__hash__'] = __hash__
del __hash__
def __bool__(self):
return bool(self._value_)
if pyver < 3.0:
temp_enum_dict['__nonzero__'] = __bool__
else:
temp_enum_dict['__bool__'] = __bool__
del __bool__
def __reduce_ex__(self, proto):
return self.__class__, (self._value_, )
temp_enum_dict['__reduce_ex__'] = __reduce_ex__
del __reduce_ex__
# _RouteClassAttributeToGetattr is used to provide access to the `name`
# and `value` properties of enum members while keeping some measure of
# protection from modification, while still allowing for an enumeration
# to have members named `name` and `value`. This works because enumeration
# members are not set directly on the enum class -- __getattr__ is
# used to look them up.
@_RouteClassAttributeToGetattr
def name(self):
return self._name_
temp_enum_dict['name'] = name
del name
@_RouteClassAttributeToGetattr
def value(self):
return self._value_
temp_enum_dict['value'] = value
del value
@classmethod
def _convert(cls, name, module, filter, source=None):
"""
Create a new Enum subclass that replaces a collection of global constants
"""
# convert all constants from source (or module) that pass filter() to
# a new Enum called name, and export the enum and its members back to
# module;
# also, replace the __reduce_ex__ method so unpickling works in
# previous Python versions
module_globals = vars(_sys.modules[module])
if source:
source = vars(source)
else:
source = module_globals
members = dict((name, value) for name, value in source.items() if filter(name))
cls = cls(name, members, module=module)
cls.__reduce_ex__ = _reduce_ex_by_name
module_globals.update(cls.__members__)
module_globals[name] = cls
return cls
temp_enum_dict['_convert'] = _convert
del _convert
Enum = EnumMeta('Enum', (object, ), temp_enum_dict)
del temp_enum_dict
# Enum has now been created
###########################
class IntEnum(int, Enum):
"""Enum where members are also (and must be) ints"""
def _reduce_ex_by_name(self, proto):
return self.name
def unique(enumeration):
"""Class decorator that ensures only unique members exist in an enumeration."""
duplicates = []
for name, member in enumeration.__members__.items():
if name != member.name:
duplicates.append((name, member.name))
if duplicates:
duplicate_names = ', '.join(
["%s -> %s" % (alias, name) for (alias, name) in duplicates]
)
raise ValueError('duplicate names found in %r: %s' %
(enumeration, duplicate_names)
)
return enumeration
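# A small sketch of the intended use of ``unique`` (the enumeration below is
# hypothetical); the alias ``four = 3`` is what triggers the ValueError above:
#
#     @unique
#     class Mistake(IntEnum):
#         one = 1
#         two = 2
#         three = 3
#         four = 3  # alias of ``three`` -> ValueError: duplicate names found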
| gpl-2.0 | 5,680,790,833,359,392,000 | 36.190647 | 113 | 0.540768 | false | 4.321722 | false | false | false | 0.00187 |
wwdxfa/pymining | tests/itemmining_tests.py | 3 | 2334 | import unittest
from pymining import itemmining, perftesting
class TestItemSetAlgo(unittest.TestCase):
def test_relim(self):
ts1 = perftesting.get_default_transactions()
relim_input = itemmining.get_relim_input(ts1)
report = itemmining.relim(relim_input, 2)
self.assertEqual(17, len(report))
self.assertEqual(6, report[frozenset(['b', 'd'])])
ts2 = perftesting.get_default_transactions_alt()
relim_input = itemmining.get_relim_input(ts2)
report = itemmining.relim(relim_input, 2)
self.assertEqual(19, len(report))
self.assertEqual(5, report[frozenset(['a', 'b'])])
def test_sam(self):
ts1 = perftesting.get_default_transactions()
sam_input = itemmining.get_sam_input(ts1)
report = itemmining.sam(sam_input, 2)
self.assertEqual(17, len(report))
self.assertEqual(6, report[frozenset(['b', 'd'])])
ts2 = perftesting.get_default_transactions_alt()
sam_input = itemmining.get_sam_input(ts2)
report = itemmining.sam(sam_input, 2)
self.assertEqual(19, len(report))
self.assertEqual(5, report[frozenset(['a', 'b'])])
def test_fpgrowth_pruning_on(self):
ts1 = perftesting.get_default_transactions()
fp_input = itemmining.get_fptree(ts1)
report = itemmining.fpgrowth(fp_input, 2, pruning=True)
self.assertEqual(17, len(report))
self.assertEqual(6, report[frozenset(['b', 'd'])])
ts2 = perftesting.get_default_transactions_alt()
fp_input = itemmining.get_fptree(ts2)
report = itemmining.fpgrowth(fp_input, 2, pruning=True)
self.assertEqual(19, len(report))
self.assertEqual(5, report[frozenset(['a', 'b'])])
def test_fpgrowth_pruning_off(self):
ts1 = perftesting.get_default_transactions()
fp_input = itemmining.get_fptree(ts1)
report = itemmining.fpgrowth(fp_input, 2, pruning=False)
self.assertEqual(17, len(report))
self.assertEqual(6, report[frozenset(['b', 'd'])])
ts2 = perftesting.get_default_transactions_alt()
fp_input = itemmining.get_fptree(ts2)
report = itemmining.fpgrowth(fp_input, 2, pruning=False)
self.assertEqual(19, len(report))
self.assertEqual(5, report[frozenset(['a', 'b'])])
| bsd-3-clause | -8,573,681,148,220,578,000 | 39.947368 | 64 | 0.640103 | false | 3.320057 | true | false | false | 0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.0/Lib/plat-freebsd2/SOCKET.py | 4 | 2316 | # Generated by h2py from /usr/include/sys/socket.h
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_REUSEPORT = 0x0200
SO_TIMESTAMP = 0x0400
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_PRIVSTATE = 0x1009
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_LOCAL = 1
AF_UNIX = AF_LOCAL
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_ROUTE = 17
AF_LINK = 18
pseudo_AF_XTP = 19
AF_COIP = 20
AF_CNT = 21
pseudo_AF_RTIP = 22
AF_IPX = 23
AF_SIP = 24
pseudo_AF_PIP = 25
AF_ISDN = 26
AF_E164 = AF_ISDN
pseudo_AF_KEY = 27
AF_INET6 = 28
AF_MAX = 29
PF_UNSPEC = AF_UNSPEC
PF_LOCAL = AF_LOCAL
PF_UNIX = PF_LOCAL
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_ROUTE = AF_ROUTE
PF_LINK = AF_LINK
PF_XTP = pseudo_AF_XTP
PF_COIP = AF_COIP
PF_CNT = AF_CNT
PF_SIP = AF_SIP
PF_IPX = AF_IPX
PF_RTIP = pseudo_AF_RTIP
PF_PIP = pseudo_AF_PIP
PF_ISDN = AF_ISDN
PF_KEY = pseudo_AF_KEY
PF_INET6 = AF_INET6
PF_MAX = AF_MAX
NET_MAXID = AF_MAX
NET_RT_DUMP = 1
NET_RT_FLAGS = 2
NET_RT_IFLIST = 3
NET_RT_MAXID = 4
SOMAXCONN = 128
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_COMPAT = 0x8000
SCM_RIGHTS = 0x01
SCM_TIMESTAMP = 0x02
# Included from sys/cdefs.h
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __RCSID(s): return __IDSTRING(rcsid,s)
def __RCSID_SOURCE(s): return __IDSTRING(rcsid_source,s)
def __COPYRIGHT(s): return __IDSTRING(copyright,s)
| mit | 1,067,963,590,332,229,500 | 17.09375 | 56 | 0.685233 | false | 2.090253 | false | false | false | 0.007772 |
standage/tag | tag/cli/merge.py | 2 | 1169 | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Daniel Standage <daniel.standage@gmail.com>
#
# This file is part of tag (http://github.com/standage/tag) and is licensed
# under the BSD 3-clause license: see LICENSE.
# -----------------------------------------------------------------------------
import argparse
import tag
from tag import GFF3Reader, GFF3Writer
def subparser(subparsers):
subparser = subparsers.add_parser('merge')
subparser.add_argument(
'-o', '--out', metavar='FILE', help='write output in GFF3 to FILE; '
'default is terminal (stdout)'
)
subparser.add_argument(
'-r', '--relax', action='store_false', default=True, dest='strict',
help='relax parsing stringency'
)
subparser.add_argument(
'gff3', nargs='+', help='input files in GFF3 format'
)
def main(args):
instreams = [
GFF3Reader(infilename=fn, strict=args.strict, assumesorted=True)
for fn in args.gff3
]
merger = tag.select.merge(*instreams)
writer = tag.writer.GFF3Writer(merger, args.out)
writer.write()
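# Example invocation, assuming this subcommand is wired into a ``tag`` console
# entry point (the file names below are hypothetical):
#
#     tag merge --out merged.gff3 annotations1.gff3 annotations2.gff3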
| bsd-3-clause | 1,790,208,472,518,599,000 | 30.594595 | 79 | 0.567151 | false | 3.783172 | false | false | false | 0 |
zding5/Microblog-Flask | flask/lib/python2.7/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py | 21 | 37737 | # oracle/cx_oracle.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@host:port/dbname\
[?key=value&key=value...]
:url: http://cx-oracle.sourceforge.net/
Additional Connect Arguments
----------------------------
When connecting with ``dbname`` present, the host, port, and dbname tokens are
converted to a TNS name using
the cx_oracle ``makedsn()`` function. Otherwise, the host token is taken
directly as a TNS name.
Additional arguments which may be specified either as query string arguments
on the URL, or as keyword arguments to :func:`.create_engine()` are:
* ``allow_twophase`` - enable two-phase transactions. Defaults to ``True``.
* ``arraysize`` - set the cx_oracle.arraysize value on cursors, defaulted
to 50. This setting is significant with cx_Oracle as the contents of LOB
objects are only readable within a "live" row (e.g. within a batch of
50 rows).
* ``auto_convert_lobs`` - defaults to True; See :ref:`cx_oracle_lob`.
* ``auto_setinputsizes`` - the cx_oracle.setinputsizes() call is issued for
all bind parameters. This is required for LOB datatypes but can be
disabled to reduce overhead. Defaults to ``True``. Specific types
can be excluded from this process using the ``exclude_setinputsizes``
parameter.
* ``coerce_to_unicode`` - see :ref:`cx_oracle_unicode` for detail.
* ``coerce_to_decimal`` - see :ref:`cx_oracle_numeric` for detail.
* ``exclude_setinputsizes`` - a tuple or list of string DBAPI type names to
be excluded from the "auto setinputsizes" feature. The type names here
must match DBAPI types that are found in the "cx_Oracle" module namespace,
such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to
``(STRING, UNICODE)``.
.. versionadded:: 0.8 specific DBAPI types can be excluded from the
auto_setinputsizes feature via the exclude_setinputsizes attribute.
* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or
alternatively an integer value. This value is only available as a URL query
string argument.
* ``threaded`` - enable multithreaded access to cx_oracle connections.
Defaults to ``True``. Note that this is the opposite default of the
cx_Oracle DBAPI itself.
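For example, several of these arguments can be combined directly on the
connect string (an illustrative URL; the credentials, host and service name
are placeholders)::
    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@hostname:1521/dbname"
        "?threaded=False&mode=SYSDBA")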
.. _cx_oracle_unicode:
Unicode
-------
The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the
ability to return string results as Python unicode objects natively.
When used in Python 3, cx_Oracle returns all strings as Python unicode objects
(that is, plain ``str`` in Python 3). In Python 2, it will return as Python
unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For
column values that are of type ``VARCHAR`` or other non-unicode string types,
it will return values as Python strings (e.g. bytestrings).
The cx_Oracle SQLAlchemy dialect presents two different options for the use
case of returning ``VARCHAR`` column values as Python unicode objects under
Python 2:
* the cx_Oracle DBAPI has the ability to coerce all string results to Python
unicode objects unconditionally using output type handlers. This has
the advantage that the unicode conversion is global to all statements
at the cx_Oracle driver level, meaning it works with raw textual SQL
statements that have no typing information associated. However, this system
  has been observed to incur significant performance overhead, not only
because it takes effect for all string values unconditionally, but also
because cx_Oracle under Python 2 seems to use a pure-Python function call in
  order to do the decode operation, which under cPython can be orders of
  magnitude slower than doing it using C functions alone.
* SQLAlchemy has unicode-decoding services built in, and when using
SQLAlchemy's C extensions, these functions do not use any Python function
calls and are very fast. The disadvantage to this approach is that the
unicode conversion only takes effect for statements where the
:class:`.Unicode` type or :class:`.String` type with
``convert_unicode=True`` is explicitly associated with the result column.
This is the case for any ORM or Core query or SQL expression as well as for
a :func:`.text` construct that specifies output column types, so in the vast
majority of cases this is not an issue. However, when sending a completely
raw string to :meth:`.Connection.execute`, this typing information isn't
present, unless the string is handled within a :func:`.text` construct that
adds typing information.
As of version 0.9.2 of SQLAlchemy, the default approach is to use SQLAlchemy's
typing system. This keeps cx_Oracle's expensive Python 2 approach
disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
system is used.
To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
:func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_unicode=True)
Alternatively, to run a pure string SQL statement and get ``VARCHAR`` results
as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::
from sqlalchemy import text, Unicode
result = conn.execute(
text("select username from user").columns(username=Unicode))
.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
for unicode results of non-unicode datatypes in Python 2, after they were
identified as a major performance bottleneck. SQLAlchemy's own unicode
facilities are used instead.
.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.
.. _cx_oracle_returning:
RETURNING Support
-----------------
The cx_oracle DBAPI supports a limited subset of Oracle's already limited
RETURNING support. Typically, results can only be guaranteed for at most one
column being returned; this is the typical case when SQLAlchemy uses RETURNING
to get just the value of a primary-key-associated sequence value.
Additional column expressions will cause problems in a non-determinative way,
due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is
required for more complex RETURNING scenarios.
For this reason, stability may be enhanced by disabling RETURNING support
completely; SQLAlchemy otherwise will use RETURNING to fetch newly
sequence-generated primary keys. As illustrated in :ref:`oracle_returning`::
engine = create_engine("oracle://scott:tiger@dsn",
implicit_returning=False)
.. seealso::
http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693
- OCI documentation for RETURNING
http://sourceforge.net/mailarchive/message.php?msg_id=31338136
- cx_oracle developer commentary
.. _cx_oracle_lob:
LOB Objects
-----------
cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy
converts these to strings so that the interface of the Binary type is
consistent with that of other backends, and so that the linkage to a live
cursor is not needed in scenarios like result.fetchmany() and
result.fetchall(). This means that by default, LOB objects are fully fetched
unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.
To disable this processing, pass ``auto_convert_lobs=False`` to
:func:`.create_engine()`.
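For example (a minimal illustration; the DSN shown is a placeholder)::
    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@dsn", auto_convert_lobs=False)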
Two Phase Transaction Support
-----------------------------
Two Phase transactions are implemented using XA transactions, and are known
to work in a rudimental fashion with recent versions of cx_Oracle
as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
considered to be robust and should still be regarded as experimental.
In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
two phase which prevents
a particular DBAPI connection from being consistently usable in both
prepared transactions as well as traditional DBAPI usage patterns; therefore
once a particular connection is used via :meth:`.Connection.begin_prepared`,
all subsequent usages of the underlying DBAPI connection must be within
the context of prepared transactions.
The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
connections. Therefore, due to the above glitch, a DBAPI connection that has
been used in a two-phase operation, and is then returned to the pool, will
not be usable in a non-two-phase context. To avoid this situation,
the application can make one of several choices:
* Disable connection pooling using :class:`.NullPool`
* Ensure that the particular :class:`.Engine` in use is only used
for two-phase operations. A :class:`.Engine` bound to an ORM
:class:`.Session` which includes ``twophase=True`` will consistently
use the two-phase transaction style.
* For ad-hoc two-phase operations without disabling pooling, the DBAPI
connection in use can be evicted from the connection pool using the
:meth:`.Connection.detach` method.
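As a brief illustration of the latter two options (a sketch only; the
``engine`` object is assumed to be configured elsewhere)::
    from sqlalchemy.orm import sessionmaker
    # a Session dedicated to two-phase transactions
    TwoPhaseSession = sessionmaker(bind=engine, twophase=True)
    # ad-hoc two-phase work: evict the connection from the pool afterwards
    connection = engine.connect()
    connection.detach()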
.. versionchanged:: 0.8.0b2,0.7.10
Support for cx_oracle prepared transactions has been implemented
and tested.
.. _cx_oracle_numeric:
Precision Numerics
------------------
The SQLAlchemy dialect goes through a lot of steps to ensure
that decimal numbers are sent and received with full accuracy.
An "outputtypehandler" callable is associated with each
cx_oracle connection object which detects numeric types and
receives them as string values, instead of receiving a Python
``float`` directly, which is then passed to the Python
``Decimal`` constructor. The :class:`.Numeric` and
:class:`.Float` types under the cx_oracle dialect are aware of
this behavior, and will coerce the ``Decimal`` to ``float`` if
the ``asdecimal`` flag is ``False`` (default on :class:`.Float`,
optional on :class:`.Numeric`).
Because the handler coerces to ``Decimal`` in all cases first,
the feature can detract significantly from performance.
If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False)
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
attributes of the result set column to best determine if
subsequent incoming values should be received as ``Decimal`` as
opposed to int (in which case no processing is added). There are
several scenarios where OCI_ does not provide unambiguous data
as to the numeric type, including some situations where
individual rows may return a combination of floating point and
integer values. Certain values for "precision" and "scale" have
been observed to determine this scenario. When it occurs, the
outputtypehandler receives as string and then passes off to a
processing function which detects, for each returned value, if a
decimal point is present, and if so converts to ``Decimal``,
otherwise to int. The intention is that simple int-based
statements like "SELECT my_seq.nextval() FROM DUAL" continue to
return ints and not ``Decimal`` objects, and that any kind of
floating point value is received as a string so that there is no
floating point loss of precision.
The "decimal point is present" logic itself is also sensitive to
locale. Under OCI_, this is controlled by the NLS_LANG
environment variable. Upon first connection, the dialect runs a
test to determine the current "decimal" character, which can be
a comma "," for European locales. From that point forward the
outputtypehandler uses that character to represent a decimal
point. Note that cx_oracle 5.0.3 or greater is required
when dealing with numerics with locale settings that don't use
a period "." as the decimal character.
.. versionchanged:: 0.6.6
The outputtypehandler supports the case where the locale uses a
comma "," character to represent a decimal point.
.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
"""
from __future__ import absolute_import
from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
import random
import collections
import decimal
import re
class _OracleNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
# cx_oracle accepts Decimal objects and floats
return None
def result_processor(self, dialect, coltype):
# we apply a cx_oracle type handler to all connections
# that converts floating point strings to Decimal().
# However, in some subquery situations, Oracle doesn't
# give us enough information to determine int or Decimal.
# It could even be int/Decimal differently on each row,
# regardless of the scale given for the originating type.
# So we still need an old school isinstance() handler
# here for decimals.
if dialect.supports_native_decimal:
if self.asdecimal:
fstring = "%%.%df" % self._effective_decimal_return_scale
def to_decimal(value):
if value is None:
return None
elif isinstance(value, decimal.Decimal):
return value
else:
return decimal.Decimal(fstring % value)
return to_decimal
else:
if self.precision is None and self.scale is None:
return processors.to_float
elif not getattr(self, '_is_oracle_number', False) \
and self.scale is not None:
return processors.to_float
else:
return None
else:
# cx_oracle 4 behavior, will assume
# floats
return super(_OracleNumeric, self).\
result_processor(dialect, coltype)
class _OracleDate(sqltypes.Date):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return value.date()
else:
return value
return process
class _LOBMixin(object):
def result_processor(self, dialect, coltype):
if not dialect.auto_convert_lobs:
# return the cx_oracle.LOB directly.
return None
def process(value):
if value is not None:
return value.read()
else:
return value
return process
class _NativeUnicodeMixin(object):
if util.py2k:
def bind_processor(self, dialect):
if dialect._cx_oracle_with_unicode:
def process(value):
if value is None:
return value
else:
return unicode(value)
return process
else:
return super(
_NativeUnicodeMixin, self).bind_processor(dialect)
# we apply a connection output handler that returns
# unicode in all cases, so the "native_unicode" flag
# will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
def get_dbapi_type(self, dbapi):
return dbapi.FIXED_CHAR
class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR):
def get_dbapi_type(self, dbapi):
return getattr(dbapi, 'UNICODE', dbapi.STRING)
class _OracleText(_LOBMixin, sqltypes.Text):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleLong(oracle.LONG):
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
pass
class _OracleUnicodeText(
_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
def get_dbapi_type(self, dbapi):
return dbapi.NCLOB
def result_processor(self, dialect, coltype):
lob_processor = _LOBMixin.result_processor(self, dialect, coltype)
if lob_processor is None:
return None
string_processor = sqltypes.UnicodeText.result_processor(
self, dialect, coltype)
if string_processor is None:
return lob_processor
else:
def process(value):
return string_processor(lob_processor(value))
return process
class _OracleInteger(sqltypes.Integer):
def result_processor(self, dialect, coltype):
def to_int(val):
if val is not None:
val = int(val)
return val
return to_int
class _OracleBinary(_LOBMixin, sqltypes.LargeBinary):
def get_dbapi_type(self, dbapi):
return dbapi.BLOB
def bind_processor(self, dialect):
return None
class _OracleInterval(oracle.INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
class _OracleRaw(oracle.RAW):
pass
class _OracleRowid(oracle.ROWID):
def get_dbapi_type(self, dbapi):
return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
def bindparam_string(self, name, **kw):
quote = getattr(name, 'quote', None)
if quote is True or quote is not False and \
self.preparer._bindparam_requires_quotes(name):
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
else:
return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
def pre_exec(self):
quoted_bind_names = \
getattr(self.compiled, '_quoted_bind_names', None)
if quoted_bind_names:
if not self.dialect.supports_unicode_statements:
# if DBAPI doesn't accept unicode statements,
# keys in self.parameters would have been encoded
# here. so convert names in quoted_bind_names
# to encoded as well.
quoted_bind_names = \
dict(
(fromname.encode(self.dialect.encoding),
toname.encode(self.dialect.encoding))
for fromname, toname in
quoted_bind_names.items()
)
for param in self.parameters:
for fromname, toname in quoted_bind_names.items():
param[toname] = param[fromname]
del param[fromname]
if self.dialect.auto_setinputsizes:
# cx_oracle really has issues when you setinputsizes
# on String, including that outparams/RETURNING
# breaks for varchars
self.set_input_sizes(
quoted_bind_names,
exclude_types=self.dialect.exclude_setinputsizes
)
# if a single execute, check for outparams
if len(self.compiled_parameters) == 1:
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
dbtype = bindparam.type.dialect_impl(self.dialect).\
get_dbapi_type(self.dialect.dbapi)
if not hasattr(self, 'out_parameters'):
self.out_parameters = {}
if dbtype is None:
raise exc.InvalidRequestError(
"Cannot create out parameter for parameter "
"%r - its type %r is not supported by"
" cx_oracle" %
(bindparam.key, bindparam.type)
)
name = self.compiled.bind_names[bindparam]
self.out_parameters[name] = self.cursor.var(dbtype)
self.parameters[0][quoted_bind_names.get(name, name)] = \
self.out_parameters[name]
def create_cursor(self):
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def get_result_proxy(self):
if hasattr(self, 'out_parameters') and self.compiled.returning:
returning_params = dict(
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
return ReturningResultProxy(self, returning_params)
result = None
if self.cursor.description is not None:
for column in self.cursor.description:
type_code = column[1]
if type_code in self.dialect._cx_oracle_binary_types:
result = _result.BufferedColumnResultProxy(self)
if result is None:
result = _result.ResultProxy(self)
if hasattr(self, 'out_parameters'):
if self.compiled_parameters is not None and \
len(self.compiled_parameters) == 1:
result.out_parameters = out_parameters = {}
for bind, name in self.compiled.bind_names.items():
if name in self.out_parameters:
type = bind.type
impl_type = type.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(
self.dialect.dbapi)
result_processor = impl_type.\
result_processor(self.dialect,
dbapi_type)
if result_processor is not None:
out_parameters[name] = \
result_processor(
self.out_parameters[name].getvalue())
else:
out_parameters[name] = self.out_parameters[
name].getvalue()
else:
result.out_parameters = dict(
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
return result
class OracleExecutionContext_cx_oracle_with_unicode(
OracleExecutionContext_cx_oracle):
"""Support WITH_UNICODE in Python 2.xx.
WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
behavior under Python 2.x. This mode in some cases disallows
and in other cases silently passes corrupted data when
non-Python-unicode strings (a.k.a. plain old Python strings)
are passed as arguments to connect(), the statement sent to execute(),
or any of the bind parameter keys or values sent to execute().
This optional context therefore ensures that all statements are
passed as Python unicode objects.
"""
def __init__(self, *arg, **kw):
OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw)
self.statement = util.text_type(self.statement)
def _execute_scalar(self, stmt):
return super(OracleExecutionContext_cx_oracle_with_unicode, self).\
_execute_scalar(util.text_type(stmt))
class ReturningResultProxy(_result.FullyBufferedResultProxy):
"""Result proxy which stuffs the _returning clause + outparams
into the fetch."""
def __init__(self, context, returning_params):
self._returning_params = returning_params
super(ReturningResultProxy, self).__init__(context)
def _cursor_description(self):
returning = self.context.compiled.returning
return [
("ret_%d" % i, None)
for i, col in enumerate(returning)
]
def _buffer_rows(self):
return collections.deque(
[tuple(self._returning_params["ret_%d" % i]
for i, c in enumerate(self._returning_params))]
)
class OracleDialect_cx_oracle(OracleDialect):
execution_ctx_cls = OracleExecutionContext_cx_oracle
statement_compiler = OracleCompiler_cx_oracle
driver = "cx_oracle"
colspecs = colspecs = {
sqltypes.Numeric: _OracleNumeric,
# generic type, assume datetime.date is desired
sqltypes.Date: _OracleDate,
sqltypes.LargeBinary: _OracleBinary,
sqltypes.Boolean: oracle._OracleBoolean,
sqltypes.Interval: _OracleInterval,
oracle.INTERVAL: _OracleInterval,
sqltypes.Text: _OracleText,
sqltypes.String: _OracleString,
sqltypes.UnicodeText: _OracleUnicodeText,
sqltypes.CHAR: _OracleChar,
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
oracle.LONG: _OracleLong,
# this is only needed for OUT parameters.
# it would be nice if we could not use it otherwise.
sqltypes.Integer: _OracleInteger,
oracle.RAW: _OracleRaw,
sqltypes.Unicode: _OracleNVarChar,
sqltypes.NVARCHAR: _OracleNVarChar,
oracle.ROWID: _OracleRowid,
}
execute_sequence_format = list
def __init__(self,
auto_setinputsizes=True,
exclude_setinputsizes=("STRING", "UNICODE"),
auto_convert_lobs=True,
threaded=True,
allow_twophase=True,
coerce_to_decimal=True,
coerce_to_unicode=False,
arraysize=50, **kwargs):
OracleDialect.__init__(self, **kwargs)
self.threaded = threaded
self.arraysize = arraysize
self.allow_twophase = allow_twophase
self.supports_timestamp = self.dbapi is None or \
hasattr(self.dbapi, 'TIMESTAMP')
self.auto_setinputsizes = auto_setinputsizes
self.auto_convert_lobs = auto_convert_lobs
if hasattr(self.dbapi, 'version'):
self.cx_oracle_ver = tuple([int(x) for x in
self.dbapi.version.split('.')])
else:
self.cx_oracle_ver = (0, 0, 0)
def types(*names):
return set(
getattr(self.dbapi, name, None) for name in names
).difference([None])
self.exclude_setinputsizes = types(*(exclude_setinputsizes or ()))
self._cx_oracle_string_types = types("STRING", "UNICODE",
"NCLOB", "CLOB")
self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
self.coerce_to_unicode = (
self.cx_oracle_ver >= (5, 0) and
coerce_to_unicode
)
self.supports_native_decimal = (
self.cx_oracle_ver >= (5, 0) and
coerce_to_decimal
)
self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0)
if self.cx_oracle_ver is None:
# this occurs in tests with mock DBAPIs
self._cx_oracle_string_types = set()
self._cx_oracle_with_unicode = False
elif self.cx_oracle_ver >= (5,) and not \
hasattr(self.dbapi, 'UNICODE'):
# cx_Oracle WITH_UNICODE mode. *only* python
# unicode objects accepted for anything
self.supports_unicode_statements = True
self.supports_unicode_binds = True
self._cx_oracle_with_unicode = True
if util.py2k:
# There's really no reason to run with WITH_UNICODE under
# Python 2.x. Give the user a hint.
util.warn(
"cx_Oracle is compiled under Python 2.xx using the "
"WITH_UNICODE flag. Consider recompiling cx_Oracle "
"without this flag, which is in no way necessary for "
"full support of Unicode. Otherwise, all string-holding "
"bind parameters must be explicitly typed using "
"SQLAlchemy's String type or one of its subtypes,"
"or otherwise be passed as Python unicode. "
"Plain Python strings passed as bind parameters will be "
"silently corrupted by cx_Oracle."
)
self.execution_ctx_cls = \
OracleExecutionContext_cx_oracle_with_unicode
else:
self._cx_oracle_with_unicode = False
if self.cx_oracle_ver is None or \
not self.auto_convert_lobs or \
not hasattr(self.dbapi, 'CLOB'):
self.dbapi_type_map = {}
else:
# only use this for LOB objects. using it for strings, dates
# etc. leads to a little too much magic, reflection doesn't know
# if it should expect encoded strings or unicodes, etc.
self.dbapi_type_map = {
self.dbapi.CLOB: oracle.CLOB(),
self.dbapi.NCLOB: oracle.NCLOB(),
self.dbapi.BLOB: oracle.BLOB(),
self.dbapi.BINARY: oracle.RAW(),
}
@classmethod
def dbapi(cls):
import cx_Oracle
return cx_Oracle
def initialize(self, connection):
super(OracleDialect_cx_oracle, self).initialize(connection)
if self._is_oracle_8:
self.supports_unicode_binds = False
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
"""detect if the decimal separator character is not '.', as
is the case with European locale settings for NLS_LANG.
cx_oracle itself uses similar logic when it formats Python
Decimal objects to strings on the bind side (as of 5.0.3),
as Oracle sends/receives string numerics only in the
current locale.
"""
if self.cx_oracle_ver < (5,):
# no output type handlers before version 5
return
cx_Oracle = self.dbapi
conn = connection.connection
# override the output_type_handler that's
# on the cx_oracle connection with a plain
# one on the cursor
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
return cursor.var(
cx_Oracle.STRING,
255, arraysize=cursor.arraysize)
cursor = conn.cursor()
cursor.outputtypehandler = output_type_handler
cursor.execute("SELECT 0.1 FROM DUAL")
val = cursor.fetchone()[0]
cursor.close()
char = re.match(r"([\.,])", val).group(1)
if char != '.':
_detect_decimal = self._detect_decimal
self._detect_decimal = \
lambda value: _detect_decimal(value.replace(char, '.'))
self._to_decimal = \
lambda value: decimal.Decimal(value.replace(char, '.'))
def _detect_decimal(self, value):
if "." in value:
return decimal.Decimal(value)
else:
return int(value)
_to_decimal = decimal.Decimal
def on_connect(self):
if self.cx_oracle_ver < (5,):
# no output type handlers before version 5
return
cx_Oracle = self.dbapi
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
# convert all NUMBER with precision + positive scale to Decimal
# this almost allows "native decimal" mode.
if self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER and \
precision and scale > 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._to_decimal,
arraysize=cursor.arraysize)
# if NUMBER with zero precision and 0 or neg scale, this appears
# to indicate "ambiguous". Use a slower converter that will
# make a decision based on each value received - the type
# may change from row to row (!). This kills
# off "native decimal" mode, handlers still needed.
elif self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER \
and not precision and scale <= 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._detect_decimal,
arraysize=cursor.arraysize)
# allow all strings to come back natively as Unicode
elif self.coerce_to_unicode and \
defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(util.text_type, size, cursor.arraysize)
def on_connect(conn):
conn.outputtypehandler = output_type_handler
return on_connect
def create_connect_args(self, url):
dialect_opts = dict(url.query)
for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs',
'threaded', 'allow_twophase'):
if opt in dialect_opts:
util.coerce_kw_type(dialect_opts, opt, bool)
setattr(self, opt, dialect_opts[opt])
if url.database:
# if we have a database, then we have a remote host
port = url.port
if port:
port = int(port)
else:
port = 1521
dsn = self.dbapi.makedsn(url.host, port, url.database)
else:
# we have a local tnsname
dsn = url.host
opts = dict(
user=url.username,
password=url.password,
dsn=dsn,
threaded=self.threaded,
twophase=self.allow_twophase,
)
if util.py2k:
if self._cx_oracle_with_unicode:
for k, v in opts.items():
if isinstance(v, str):
opts[k] = unicode(v)
else:
for k, v in opts.items():
if isinstance(v, unicode):
opts[k] = str(v)
if 'mode' in url.query:
opts['mode'] = url.query['mode']
if isinstance(opts['mode'], util.string_types):
mode = opts['mode'].upper()
if mode == 'SYSDBA':
opts['mode'] = self.dbapi.SYSDBA
elif mode == 'SYSOPER':
opts['mode'] = self.dbapi.SYSOPER
else:
util.coerce_kw_type(opts, 'mode', int)
return ([], opts)
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.version.split('.')
)
def is_disconnect(self, e, connection, cursor):
error, = e.args
if isinstance(e, self.dbapi.InterfaceError):
return "not connected" in str(e)
elif hasattr(error, 'code'):
# ORA-00028: your session has been killed
# ORA-03114: not connected to ORACLE
# ORA-03113: end-of-file on communication channel
# ORA-03135: connection lost contact
# ORA-01033: ORACLE initialization or shutdown in progress
# ORA-02396: exceeded maximum idle time, please connect again
# TODO: Others ?
return error.code in (28, 3114, 3113, 3135, 1033, 2396)
else:
return False
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified."""
id = random.randint(0, 2 ** 128)
return (0x1234, "%032x" % id, "%032x" % 9)
def do_executemany(self, cursor, statement, parameters, context=None):
if isinstance(parameters, tuple):
parameters = list(parameters)
cursor.executemany(statement, parameters)
def do_begin_twophase(self, connection, xid):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
result = connection.connection.prepare()
connection.info['cx_oracle_prepared'] = result
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_commit(connection.connection)
else:
oci_prepared = connection.info['cx_oracle_prepared']
if oci_prepared:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
connection.info.pop('cx_oracle_prepared', None)
dialect = OracleDialect_cx_oracle
| mit | -6,139,773,499,118,628,000 | 38.065217 | 78 | 0.625354 | false | 4.324662 | false | false | false | 0.000026 |
mixdesign/radioice-track | workflow/web.py | 4 | 14685 | # encoding: utf-8
#
# Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-02-15
#
"""
A lightweight HTTP library with a requests-like interface.
"""
from __future__ import print_function
import urllib
import urllib2
import socket
import mimetypes
import string
import random
import json
import re
import unicodedata
import codecs
USER_AGENT = u'alfred-workflow-0.1'
# Valid characters for multipart form data boundaries
BOUNDARY_CHARS = string.digits + string.ascii_letters
# HTTP response codes
RESPONSES = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
def str_dict(dic):
"""Convert keys and values in ``dic`` into UTF-8-encoded :class:`str`
:param dic: :class:`dict` of Unicode strings
:returns: :class:`dict`
"""
dic2 = {}
for k, v in dic.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
dic2[k] = v
return dic2
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
"""Prevent redirections"""
def redirect_request(self, *args):
return None
class Response(object):
"""
Returned by :func:`request` / :func:`get` / :func:`post` functions.
A simplified version of the ``Response`` object in the ``requests`` library.
>>> r = request('http://www.google.com')
>>> r.status_code
200
>>> r.encoding
ISO-8859-1
>>> r.content # bytes
<html> ...
>>> r.text # unicode, decoded according to charset in HTTP header/meta tag
u'<html> ...'
>>> r.json() # content parsed as JSON
"""
def __init__(self, request):
"""Call `request` with :mod:`urllib2` and process results.
:param request: :class:`urllib2.Request` instance
"""
self.request = request
self.url = None
self.raw = None
self._encoding = None
self.error = None
self.status_code = None
self.reason = None
self.headers = {}
self._content = None
# Execute query
try:
self.raw = urllib2.urlopen(request)
except urllib2.HTTPError as err:
self.error = err
try:
self.url = err.geturl()
# sometimes (e.g. when authentication fails)
# urllib can't get a URL from an HTTPError
except AttributeError:
pass
self.status_code = err.code
else:
self.status_code = self.raw.getcode()
self.url = self.raw.geturl()
self.reason = RESPONSES.get(self.status_code)
# Parse additional info if request succeeded
if not self.error:
headers = self.raw.info()
self.transfer_encoding = headers.getencoding()
self.mimetype = headers.gettype()
for key in headers.keys():
self.headers[key.lower()] = headers.get(key)
def json(self):
"""Decode response contents as JSON.
:returns: object decoded from JSON
:rtype: :class:`list` / :class:`dict`
"""
return json.loads(self.content, self.encoding or 'utf-8')
@property
def encoding(self):
"""Text encoding of document or ``None``
:returns: :class:`str` or ``None``
"""
if not self._encoding:
self._encoding = self._get_encoding()
return self._encoding
@property
def content(self):
"""Raw content of response (i.e. bytes)
:returns: Body of HTTP response
:rtype: :class:`str`
"""
if not self._content:
self._content = self.raw.read()
return self._content
@property
def text(self):
"""Unicode-decoded content of response body.
If no encoding can be determined from HTTP headers or the content
itself, the encoded response body will be returned instead.
:returns: Body of HTTP response
:rtype: :class:`unicode` or :class:`str`
"""
if self.encoding:
return unicodedata.normalize('NFC', unicode(self.content,
self.encoding))
return self.content
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterate over response data.
.. versionadded:: 1.6
:param chunk_size: Number of bytes to read into memory
:type chunk_size: ``int``
:param decode_unicode: Decode to Unicode using detected encoding
:type decode_unicode: ``Boolean``
:returns: iterator
"""
def decode_stream(iterator, r):
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv # pragma: nocover
def generate():
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
chunks = generate()
if decode_unicode and self.encoding:
chunks = decode_stream(chunks, self)
return chunks
def raise_for_status(self):
"""Raise stored error if one occurred.
error will be instance of :class:`urllib2.HTTPError`
"""
if self.error is not None:
raise self.error
return
def _get_encoding(self):
"""Get encoding from HTTP headers or content.
:returns: encoding or `None`
:rtype: ``unicode`` or ``None``
"""
headers = self.raw.info()
encoding = None
if headers.getparam('charset'):
encoding = headers.getparam('charset')
# HTTP Content-Type header
for param in headers.getplist():
if param.startswith('charset='):
encoding = param[8:]
break
# Encoding declared in document should override HTTP headers
if self.mimetype == 'text/html': # sniff HTML headers
m = re.search("""<meta.+charset=["']{0,1}(.+?)["'].*>""",
self.content)
if m:
encoding = m.group(1)
elif ((self.mimetype.startswith('application/') or
self.mimetype.startswith('text/')) and
'xml' in self.mimetype):
m = re.search("""<?xml.+encoding=["'](.+?)["'][^>]*\?>""",
self.content)
if m:
encoding = m.group(1)
# Format defaults
if self.mimetype == 'application/json' and not encoding:
# The default encoding for JSON
encoding = 'utf-8'
elif self.mimetype == 'application/xml' and not encoding:
# The default for 'application/xml'
encoding = 'utf-8'
if encoding:
encoding = encoding.lower()
return encoding
def request(method, url, params=None, data=None, headers=None, cookies=None,
files=None, auth=None, timeout=60, allow_redirects=False):
"""Initiate an HTTP(S) request. Returns :class:`Response` object.
:param method: 'GET' or 'POST'
:type method: ``unicode``
:param url: URL to open
:type url: ``unicode``
:param params: mapping of URL parameters
:type params: :class:`dict`
:param data: mapping of form data ``{'field_name': 'value'}`` or
:class:`str`
:type data: :class:`dict` or :class:`str`
:param headers: HTTP headers
:type headers: :class:`dict`
:param cookies: cookies to send to server
:type cookies: :class:`dict`
:param files: files to upload (see below).
:type files: :class:`dict`
:param auth: username, password
:type auth: ``tuple``
:param timeout: connection timeout limit in seconds
:type timeout: ``int``
:param allow_redirects: follow redirections
:type allow_redirects: ``Boolean``
:returns: :class:`Response` object
The ``files`` argument is a dictionary::
{'fieldname' : { 'filename': 'blah.txt',
'content': '<binary data>',
'mimetype': 'text/plain'}
}
* ``fieldname`` is the name of the field in the HTML form.
* ``mimetype`` is optional. If not provided, :mod:`mimetypes` will
be used to guess the mimetype, or ``application/octet-stream``
will be used.
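    For example, an upload using this structure (an illustrative sketch; the
    URL and form field name are placeholders)::
        files = {'report': {'filename': 'report.txt',
                            'content': open('report.txt', 'rb').read(),
                            'mimetype': 'text/plain'}}
        r = request('POST', 'http://example.com/upload', files=files)
        r.raise_for_status()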
"""
socket.setdefaulttimeout(timeout)
# Default handlers
openers = []
if not allow_redirects:
openers.append(NoRedirectHandler())
if auth is not None: # Add authorisation handler
username, password = auth
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_manager = urllib2.HTTPBasicAuthHandler(password_manager)
openers.append(auth_manager)
# Install our custom chain of openers
opener = urllib2.build_opener(*openers)
urllib2.install_opener(opener)
if not headers:
headers = {}
if 'User-Agent' not in headers:
headers['User-Agent'] = USER_AGENT
if files:
if not data:
data = {}
new_headers, data = encode_multipart_formdata(data, files)
headers.update(new_headers)
elif data and isinstance(data, dict):
data = urllib.urlencode(str_dict(data))
# Make sure everything is encoded text
headers = str_dict(headers)
if isinstance(url, unicode):
url = url.encode('utf-8')
if params: # GET args (POST args are handled in encode_multipart_formdata)
url = url + '?' + urllib.urlencode(str_dict(params))
req = urllib2.Request(url, data, headers)
return Response(req)
def get(url, params=None, headers=None, cookies=None, auth=None,
timeout=60, allow_redirects=True):
"""Initiate a GET request. Arguments as for :func:`request`.
:returns: :class:`Response` instance
"""
return request('GET', url, params, headers=headers, cookies=cookies,
auth=auth, timeout=timeout, allow_redirects=allow_redirects)
def post(url, params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=60, allow_redirects=False):
"""Initiate a POST request. Arguments as for :func:`request`.
:returns: :class:`Response` instance
"""
return request('POST', url, params, data, headers, cookies, files, auth,
timeout, allow_redirects)
def encode_multipart_formdata(fields, files):
"""Encode form data (``fields``) and ``files`` for POST request.
:param fields: mapping of ``{name : value}`` pairs for normal form fields.
:type fields: :class:`dict`
:param files: dictionary of fieldnames/files elements for file data.
See below for details.
:type files: :class:`dict` of :class:`dicts`
:returns: ``(headers, body)`` ``headers`` is a :class:`dict` of HTTP headers
:rtype: 2-tuple ``(dict, str)``
The ``files`` argument is a dictionary::
{'fieldname' : { 'filename': 'blah.txt',
'content': '<binary data>',
'mimetype': 'text/plain'}
}
- ``fieldname`` is the name of the field in the HTML form.
- ``mimetype`` is optional. If not provided, :mod:`mimetypes` will be used to guess the mimetype, or ``application/octet-stream`` will be used.
"""
def get_content_type(filename):
"""Return or guess mimetype of ``filename``.
:param filename: filename of file
:type filename: unicode/string
:returns: mime-type, e.g. ``text/html``
:rtype: :class::class:`str`
"""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
boundary = '-----' + ''.join(random.choice(BOUNDARY_CHARS)
for i in range(30))
CRLF = '\r\n'
output = []
# Normal form fields
for (name, value) in fields.items():
if isinstance(name, unicode):
name = name.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
output.append('--' + boundary)
output.append('Content-Disposition: form-data; name="%s"' % name)
output.append('')
output.append(value)
# Files to upload
for name, d in files.items():
filename = d[u'filename']
content = d[u'content']
if u'mimetype' in d:
mimetype = d[u'mimetype']
else:
mimetype = get_content_type(filename)
if isinstance(name, unicode):
name = name.encode('utf-8')
if isinstance(filename, unicode):
filename = filename.encode('utf-8')
if isinstance(mimetype, unicode):
mimetype = mimetype.encode('utf-8')
output.append('--' + boundary)
output.append('Content-Disposition: form-data; '
'name="%s"; filename="%s"' % (name, filename))
output.append('Content-Type: %s' % mimetype)
output.append('')
output.append(content)
output.append('--' + boundary + '--')
output.append('')
body = CRLF.join(output)
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(body)),
}
return (headers, body)
| mit | 3,962,086,400,844,393,000 | 28.079208 | 147 | 0.578005 | false | 4.091669 | false | false | false | 0.00034 |
Sodki/ansible-modules-extras | cloud/vmware/vmware_vm_facts.py | 75 | 3068 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vm_facts
short_description: Return basic facts pertaining to a vSphere virtual machine guest
description:
- Return basic facts pertaining to a vSphere virtual machine guest
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather all registered virtual machines
local_action:
module: vmware_vm_facts
hostname: esxi_or_vcenter_ip_or_hostname
username: username
password: password
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
virtual_machines = get_all_objs(content, [vim.VirtualMachine])
_virtual_machines = {}
for vm in virtual_machines:
_ip_address = ""
summary = vm.summary
if summary.guest is not None:
_ip_address = summary.guest.ipAddress
if _ip_address is None:
_ip_address = ""
virtual_machine = {
summary.config.name: {
"guest_fullname": summary.config.guestFullName,
"power_state": summary.runtime.powerState,
"ip_address": _ip_address
}
}
_virtual_machines.update(virtual_machine)
return _virtual_machines
def main():
argument_spec = vmware_argument_spec()
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
try:
content = connect_to_api(module)
_virtual_machines = get_all_virtual_machines(content)
module.exit_json(changed=False, virtual_machines=_virtual_machines)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 4,183,156,755,517,082,600 | 29.376238 | 86 | 0.680248 | false | 3.741463 | false | false | false | 0.00163 |
cmin764/cloudbase-init | cloudbaseinit/plugins/windows/winrmlistener.py | 2 | 4519 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as oslo_logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base
from cloudbaseinit.utils.windows import security
from cloudbaseinit.utils.windows import winrmconfig
from cloudbaseinit.utils.windows import x509
opts = [
cfg.BoolOpt('winrm_enable_basic_auth', default=True,
help='Enables basic authentication for the WinRM '
'HTTPS listener'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = oslo_logging.getLogger(__name__)
class ConfigWinRMListenerPlugin(base.BasePlugin):
_cert_subject = "CN=Cloudbase-Init WinRM"
_winrm_service_name = "WinRM"
def _check_winrm_service(self, osutils):
if not osutils.check_service_exists(self._winrm_service_name):
LOG.warn("Cannot configure the WinRM listener as the service "
"is not available")
return False
start_mode = osutils.get_service_start_mode(self._winrm_service_name)
if start_mode in [osutils.SERVICE_START_MODE_MANUAL,
osutils.SERVICE_START_MODE_DISABLED]:
# TODO(alexpilotti) Set to "Delayed Start"
osutils.set_service_start_mode(
self._winrm_service_name,
osutils.SERVICE_START_MODE_AUTOMATIC)
service_status = osutils.get_service_status(self._winrm_service_name)
if service_status == osutils.SERVICE_STATUS_STOPPED:
osutils.start_service(self._winrm_service_name)
return True
def execute(self, service, shared_data):
osutils = osutils_factory.get_os_utils()
security_utils = security.WindowsSecurityUtils()
if not self._check_winrm_service(osutils):
return base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False
# On Windows Vista, 2008, 2008 R2 and 7, changing the configuration of
# the winrm service will fail with an "Access is denied" error if the
# User Account Control remote restrictions are enabled.
# The solution to this issue is to temporarily disable the User Account
# Control remote restrictions.
# https://support.microsoft.com/kb/951016
disable_uac_remote_restrictions = (osutils.check_os_version(6, 0) and
not osutils.check_os_version(6, 2)
and security_utils
.get_uac_remote_restrictions())
try:
if disable_uac_remote_restrictions:
LOG.debug("Disabling UAC remote restrictions")
security_utils.set_uac_remote_restrictions(enable=False)
winrm_config = winrmconfig.WinRMConfig()
winrm_config.set_auth_config(basic=CONF.winrm_enable_basic_auth)
cert_manager = x509.CryptoAPICertManager()
cert_thumbprint = cert_manager.create_self_signed_cert(
self._cert_subject)
protocol = winrmconfig.LISTENER_PROTOCOL_HTTPS
if winrm_config.get_listener(protocol=protocol):
winrm_config.delete_listener(protocol=protocol)
winrm_config.create_listener(cert_thumbprint=cert_thumbprint,
protocol=protocol)
listener_config = winrm_config.get_listener(protocol=protocol)
listener_port = listener_config.get("Port")
rule_name = "WinRM %s" % protocol
osutils.firewall_create_rule(rule_name, listener_port,
osutils.PROTOCOL_TCP)
finally:
if disable_uac_remote_restrictions:
LOG.debug("Enabling UAC remote restrictions")
security_utils.set_uac_remote_restrictions(enable=True)
return base.PLUGIN_EXECUTION_DONE, False
| apache-2.0 | -1,545,628,111,935,573,200 | 39.711712 | 79 | 0.641292 | false | 4.126941 | true | false | false | 0 |
silenius/amnesia | amnesia/order.py | 1 | 6741 | # -*- coding: utf-8 -*-
import json
from operator import attrgetter
from sqlalchemy import inspect
from sqlalchemy import orm
from .modules.content import Content
class Path:
def __init__(self, class_, prop):
self.class_ = class_
self.prop = prop
@property
def class_(self):
return self.mapper.entity
@class_.setter
def class_(self, value):
self.mapper = orm.class_mapper(value)
def __eq__(self, other):
return self.class_ == other.class_ and self.prop == other.prop
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return '%s.%s' % (self.class_.__name__, self.prop)
def to_dict(self):
return {
'identity': self.mapper.polymorphic_identity,
'prop': self.prop
}
class EntityOrder:
def __init__(self, src, prop, direction='asc', nulls=None, doc=None,
path=None):
insp = inspect(src)
self._mapper = insp.mapper
self.prop = prop
self.direction = direction
self.nulls = nulls
self.doc = doc
# If a path is required, the first one should be a polymorphic entity
self.path = path if path is not None else []
#######################################################################
# PROPERTIES ##########################################################
#######################################################################
@property
def direction(self):
return self._direction
@direction.setter
def direction(self, value):
self._direction = 'desc' if value.lower() == 'desc' else 'asc'
@property
def nulls(self):
return self._nulls
@nulls.setter
def nulls(self, value):
try:
value = value.lower()
if value in ('first', 'last'):
self._nulls = value
else:
self._nulls = None
except:
self._nulls = None
@property
def doc(self):
if self._doc:
return self._doc
doc = [self.prop.replace('_', ' ')]
return ' '.join(doc)
@doc.setter
def doc(self, value):
try:
value = value.strip()
if value:
self._doc = value
else:
raise ValueError
except:
self._doc = None
#########################################################################
def __eq__(self, other):
if self.class_ == other.class_ and self.prop == other.prop:
if self.path:
if self.path == other.path:
return True
return False
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def mapper(self):
return self._mapper
@property
def class_(self):
return self.mapper.class_
@property
def col(self):
return getattr(self.class_, self.prop)
@property
def identity(self):
return self.mapper.polymorphic_identity
def to_dict(self):
return {
'identity': self.mapper.polymorphic_identity,
'cls': self.mapper.entity.__name__,
'prop': self.prop,
'direction': self.direction,
'nulls': self.nulls,
'doc': self.doc,
'path': [p.to_dict() for p in self.path]
}
def to_json(self):
return json.dumps(self.to_dict())
JSON = to_json
def to_sql(self, direction=None, nulls=None):
""" Returns an SQL expression """
col = self.col
if not direction:
direction = self.direction
if not nulls:
nulls = self.nulls
if direction == 'desc':
col = col.desc()
return col.nullslast() if nulls == 'last' else col.nullsfirst()
@classmethod
def from_dict(cls, data, pm):
path = []
for p in data['path']:
mapper = pm.get(p['identity'])
if mapper:
mapper = mapper.class_
path.append(Path(mapper, p['prop']))
mapper = pm.get(data['identity'])
if mapper:
mapper = mapper.class_
return cls(
src=mapper,
prop=data['prop'],
direction=data['direction'],
nulls=data['nulls'],
doc='FIXME',
path=path
)
def has_path(self, cls):
for path in self.path:
if path.class_ == cls or path.class_ is None:
return True
return False
def polymorphic_entity(self, base):
# The sort is on a polymorphic entity which is used in an
        # inheritance scenario and which shares a common ancestor with
# pl_cfg.base class (Content).
# ex: Event.starts, File.file_size, ...
if self.mapper.polymorphic_identity and self.mapper.isa(base):
return self.mapper.entity
        # The sort is on a mapped class which is reachable through a
# polymorphic entity.
# ex: Country.name (Content -> Event -> Country)
if (self.path and self.path[0].mapper.polymorphic_identity and
self.path[0].mapper.isa(base)):
return self.path[0].mapper.entity
return None
def for_entity(entity, orders):
insp = inspect(entity)
# insp is an AliasedInsp instance
# entity is an AliasedClass.
# ex: entity = orm.with_polymorphic(Content, [Page, Event])
# insp.mapper -> <Mapper at 0x805aeb310; Content>
if insp.is_aliased_class:
if insp.with_polymorphic_mappers:
cls = map(attrgetter('class_'), insp.with_polymorphic_mappers)
cls = tuple(cls)
else:
cls = (insp.mapper.class_, )
return {
k: v for (k, v) in orders.items()
if v.class_ in cls
or any((v.has_path(c) for c in cls))
or v.class_ is None
}
# Entity is an object instance
# entity = Session.query(Page).get(id)
elif isinstance(insp, orm.state.InstanceState):
cls = list(map(attrgetter('class_'), insp.mapper.iterate_to_root()))
base = insp.mapper.class_
return {
k: v for (k, v) in orders.items()
if v.class_ in cls or v.has_path(base)
}
# Entity is a mapper (mapped class)
elif isinstance(insp, orm.Mapper):
cls = list(map(attrgetter('class_'), insp.iterate_to_root()))
base = insp.base_mapper
return {
k: v for (k, v) in orders.items()
if v.class_ in cls or v.has_path(base)
}
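if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch only (not part of the original module): build a sort
    # specification on a hypothetical ``Content.title`` column and render the
    # resulting ORDER BY expression.  The column name is an assumption made
    # purely for demonstration.
    example_order = EntityOrder(Content, 'title', direction='desc',
                                nulls='last')
    print(example_order.to_sql())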
| bsd-2-clause | 2,033,192,552,393,915,100 | 25.435294 | 77 | 0.512832 | false | 4.181762 | false | false | false | 0.000297 |
davidhubbard/v0lum3 | buildtools/checkdeps/proto_checker.py | 8 | 3914 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Checks protobuf files for illegal imports."""
import codecs
import os
import re
import results
from rules import Rule, MessageRule
class ProtoChecker(object):
EXTENSIONS = [
'.proto',
]
# The maximum number of non-import lines we can see before giving up.
_MAX_UNINTERESTING_LINES = 50
# The maximum line length, this is to be efficient in the case of very long
# lines (which can't be import).
_MAX_LINE_LENGTH = 128
# This regular expression will be used to extract filenames from import
# statements.
_EXTRACT_IMPORT_PATH = re.compile(
'[ \t]*[ \t]*import[ \t]+"(.*)"')
def __init__(self, verbose, resolve_dotdot=False, root_dir=''):
self._verbose = verbose
self._resolve_dotdot = resolve_dotdot
self._root_dir = root_dir
def IsFullPath(self, import_path):
"""Checks if the given path is a valid path starting from |_root_dir|."""
match = re.match('(.*)/([^/]*\.proto)', import_path)
if not match:
return False
return os.path.isdir(self._root_dir + "/" + match.group(1))
def CheckLine(self, rules, line, dependee_path, fail_on_temp_allow=False):
"""Checks the given line with the given rule set.
Returns a tuple (is_import, dependency_violation) where
is_import is True only if the line is an import
statement, and dependency_violation is an instance of
results.DependencyViolation if the line violates a rule, or None
if it does not.
"""
found_item = self._EXTRACT_IMPORT_PATH.match(line)
if not found_item:
return False, None # Not a match
import_path = found_item.group(1)
if '\\' in import_path:
return True, results.DependencyViolation(
import_path,
MessageRule('Import paths may not include backslashes.'),
rules)
if '/' not in import_path:
# Don't fail when no directory is specified. We may want to be more
# strict about this in the future.
if self._verbose:
print ' WARNING: import specified with no directory: ' + import_path
return True, None
if self._resolve_dotdot and '../' in import_path:
dependee_dir = os.path.dirname(dependee_path)
import_path = os.path.join(dependee_dir, import_path)
import_path = os.path.relpath(import_path, self._root_dir)
if not self.IsFullPath(import_path):
return True, None
rule = rules.RuleApplyingTo(import_path, dependee_path)
if (rule.allow == Rule.DISALLOW or
(fail_on_temp_allow and rule.allow == Rule.TEMP_ALLOW)):
return True, results.DependencyViolation(import_path, rule, rules)
return True, None
def CheckFile(self, rules, filepath):
if self._verbose:
print 'Checking: ' + filepath
dependee_status = results.DependeeStatus(filepath)
last_import = 0
with codecs.open(filepath, encoding='utf-8') as f:
for line_num, line in enumerate(f):
if line_num - last_import > self._MAX_UNINTERESTING_LINES:
break
line = line.strip()
is_import, violation = self.CheckLine(rules, line, filepath)
if is_import:
last_import = line_num
if violation:
dependee_status.AddViolation(violation)
return dependee_status
@staticmethod
def IsProtoFile(file_path):
"""Returns True iff the given path ends in one of the extensions
handled by this checker.
"""
return os.path.splitext(file_path)[1] in ProtoChecker.EXTENSIONS
def ShouldCheck(self, file_path):
"""Check if the new #include file path should be presubmit checked.
Args:
file_path: file path to be checked
Return:
bool: True if the file should be checked; False otherwise.
"""
return self.IsProtoFile(file_path)
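# Illustrative usage sketch (not part of the original checker): 'rules' stands
# in for the Rules object that the surrounding checkdeps machinery builds from
# DEPS files, so the names below are assumptions for demonstration only.
#
#   checker = ProtoChecker(verbose=False, resolve_dotdot=True, root_dir='/src')
#   if checker.ShouldCheck('components/foo/bar.proto'):
#     dependee_status = checker.CheckFile(rules, 'components/foo/bar.proto')
#     # dependee_status collects any results.DependencyViolation found above.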
| gpl-3.0 | -6,869,710,310,231,507,000 | 30.312 | 77 | 0.664282 | false | 3.745455 | false | false | false | 0.008942 |
CoderBotOrg/coderbotsrv | server/lib/cryptography/hazmat/bindings/commoncrypto/cf.py | 15 | 2934 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <CoreFoundation/CoreFoundation.h>
"""
TYPES = """
typedef bool Boolean;
typedef signed long OSStatus;
typedef unsigned char UInt8;
typedef uint32_t UInt32;
typedef const void * CFAllocatorRef;
const CFAllocatorRef kCFAllocatorDefault;
typedef ... *CFDataRef;
typedef signed long long CFIndex;
typedef ... *CFStringRef;
typedef ... *CFArrayRef;
typedef ... *CFBooleanRef;
typedef ... *CFErrorRef;
typedef ... *CFNumberRef;
typedef ... *CFTypeRef;
typedef ... *CFDictionaryRef;
typedef ... *CFMutableDictionaryRef;
typedef struct {
...;
} CFDictionaryKeyCallBacks;
typedef struct {
...;
} CFDictionaryValueCallBacks;
typedef struct {
...;
} CFRange;
typedef UInt32 CFStringEncoding;
enum {
kCFStringEncodingASCII = 0x0600
};
enum {
kCFNumberSInt8Type = 1,
kCFNumberSInt16Type = 2,
kCFNumberSInt32Type = 3,
kCFNumberSInt64Type = 4,
kCFNumberFloat32Type = 5,
kCFNumberFloat64Type = 6,
kCFNumberCharType = 7,
kCFNumberShortType = 8,
kCFNumberIntType = 9,
kCFNumberLongType = 10,
kCFNumberLongLongType = 11,
kCFNumberFloatType = 12,
kCFNumberDoubleType = 13,
kCFNumberCFIndexType = 14,
kCFNumberNSIntegerType = 15,
kCFNumberCGFloatType = 16,
kCFNumberMaxType = 16
};
typedef int CFNumberType;
const CFDictionaryKeyCallBacks kCFTypeDictionaryKeyCallBacks;
const CFDictionaryValueCallBacks kCFTypeDictionaryValueCallBacks;
const CFBooleanRef kCFBooleanTrue;
const CFBooleanRef kCFBooleanFalse;
"""
FUNCTIONS = """
CFDataRef CFDataCreate(CFAllocatorRef, const UInt8 *, CFIndex);
CFStringRef CFStringCreateWithCString(CFAllocatorRef, const char *,
CFStringEncoding);
CFDictionaryRef CFDictionaryCreate(CFAllocatorRef, const void **,
const void **, CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *);
CFMutableDictionaryRef CFDictionaryCreateMutable(
CFAllocatorRef,
CFIndex,
const CFDictionaryKeyCallBacks *,
const CFDictionaryValueCallBacks *
);
void CFDictionarySetValue(CFMutableDictionaryRef, const void *, const void *);
CFIndex CFArrayGetCount(CFArrayRef);
const void *CFArrayGetValueAtIndex(CFArrayRef, CFIndex);
CFIndex CFDataGetLength(CFDataRef);
void CFDataGetBytes(CFDataRef, CFRange, UInt8 *);
CFRange CFRangeMake(CFIndex, CFIndex);
void CFShow(CFTypeRef);
Boolean CFBooleanGetValue(CFBooleanRef);
CFNumberRef CFNumberCreate(CFAllocatorRef, CFNumberType, const void *);
void CFRelease(CFTypeRef);
CFTypeRef CFRetain(CFTypeRef);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| gpl-3.0 | 1,704,733,431,558,073,000 | 26.942857 | 79 | 0.730743 | false | 3.480427 | false | false | false | 0 |
mhabrnal/abrt | src/cli-ng/tests/test_filtering.py | 3 | 1855 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import logging
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
import clitests
import problem
from abrtcli.filtering import (filter_reported,
filter_not_reported,
filter_since,
filter_since_timestamp,
filter_until,
filter_until_timestamp)
class FilteringTestCase(clitests.TestCase):
'''
Test filtering functionality
'''
def test_filter_since(self):
pl = problem.list()
since = datetime.datetime(2015, 1, 1, 1, 1, 1)
res = filter_since(pl, since)
self.assertEqual(len(res), 3)
def test_filter_since_timestamp(self):
pl = problem.list()
since = datetime.datetime(2015, 1, 1, 1, 1, 1)
since_ts = since.strftime('%s')
res = filter_since_timestamp(pl, since_ts)
self.assertEqual(len(res), 3)
def test_filter_until(self):
pl = problem.list()
until = datetime.datetime(2015, 1, 1, 1, 1, 1)
res = filter_until(pl, until)
self.assertEqual(len(res), 2)
def test_filter_until_timestamp(self):
pl = problem.list()
until = datetime.datetime(2015, 1, 1, 1, 1, 1)
until_ts = until.strftime('%s')
res = filter_until_timestamp(pl, until_ts)
self.assertEqual(len(res), 2)
def test_filter_reported(self):
pl = problem.list()
res = filter_reported(pl)
self.assertEqual(len(res), 1)
def test_filter_not_reported(self):
pl = problem.list()
res = filter_not_reported(pl)
self.assertEqual(len(res), 4)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
unittest.main()
| gpl-2.0 | 2,650,738,813,625,702,400 | 27.106061 | 54 | 0.568194 | false | 3.80123 | true | false | false | 0 |
Proggie02/TestRepo | django/contrib/staticfiles/storage.py | 6 | 12152 | from __future__ import unicode_literals
import hashlib
import os
import posixpath
import re
try:
from urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
except ImportError: # Python 2
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for static files.
The defaults for ``location`` and ``base_url`` are
``STATIC_ROOT`` and ``STATIC_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATIC_ROOT
if base_url is None:
base_url = settings.STATIC_URL
check_settings(base_url)
super(StaticFilesStorage, self).__init__(location, base_url,
*args, **kwargs)
def path(self, name):
if not self.location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATIC_ROOT "
"setting to a filesystem path.")
return super(StaticFilesStorage, self).path(name)
class CachedFilesMixin(object):
default_template = """url("%s")"""
patterns = (
("*.css", (
r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
(r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
)),
)
def __init__(self, *args, **kwargs):
super(CachedFilesMixin, self).__init__(*args, **kwargs)
try:
self.cache = get_cache('staticfiles')
except InvalidCacheBackendError:
# Use the default backend
self.cache = default_cache
self._patterns = SortedDict()
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
        Returns a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
def hashed_name(self, name, content=None):
parsed_name = urlsplit(unquote(name))
clean_name = parsed_name.path.strip()
opened = False
if content is None:
if not self.exists(clean_name):
raise ValueError("The file '%s' could not be found with %r." %
(clean_name, self))
try:
content = self.open(clean_name)
except IOError:
# Handle directory paths and fragments
return name
opened = True
try:
file_hash = self.file_hash(clean_name, content)
finally:
if opened:
content.close()
path, filename = os.path.split(clean_name)
root, ext = os.path.splitext(filename)
if file_hash is not None:
file_hash = ".%s" % file_hash
hashed_name = os.path.join(path, "%s%s%s" %
(root, file_hash, ext))
unparsed_name = list(parsed_name)
unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in name and not unparsed_name[3]:
unparsed_name[2] += '?'
return urlunsplit(unparsed_name)
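    # Illustrative example (not part of the original module): with a content
    # hash of 'abc123def456', hashed_name('css/base.css') returns
    # 'css/base.abc123def456.css'; the '?#' font-face hack handled above keeps
    # its suffix, e.g. 'fonts/face.eot?#iefix' -> 'fonts/face.abc123def456.eot?#iefix'.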
def cache_key(self, name):
return 'staticfiles:%s' % hashlib.md5(force_bytes(name)).hexdigest()
def url(self, name, force=False):
"""
Returns the real URL in DEBUG mode.
"""
if settings.DEBUG and not force:
hashed_name, fragment = name, ''
else:
clean_name, fragment = urldefrag(name)
if urlsplit(clean_name).path.endswith('/'): # don't hash paths
hashed_name = name
else:
cache_key = self.cache_key(name)
hashed_name = self.cache.get(cache_key)
if hashed_name is None:
hashed_name = self.hashed_name(clean_name).replace('\\', '/')
# set the cache if there was a miss
# (e.g. if cache server goes down)
self.cache.set(cache_key, hashed_name)
final_url = super(CachedFilesMixin, self).url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
query_fragment = '?#' in name # [sic!]
if fragment or query_fragment:
urlparts = list(urlsplit(final_url))
if fragment and not urlparts[4]:
urlparts[4] = fragment
if query_fragment and not urlparts[3]:
urlparts[2] += '?'
final_url = urlunsplit(urlparts)
return unquote(final_url)
def url_converter(self, name, template=None):
"""
Returns the custom URL converter for the given file name.
"""
if template is None:
template = self.default_template
def converter(matchobj):
"""
Converts the matched URL depending on the parent level (`..`)
and returns the normalized and hashed URL using the url method
of the storage.
"""
matched, url = matchobj.groups()
# Completely ignore http(s) prefixed URLs,
# fragments and data-uri URLs
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return matched
name_parts = name.split(os.sep)
# Using posix normpath here to remove duplicates
url = posixpath.normpath(url)
url_parts = url.split('/')
parent_level, sub_level = url.count('..'), url.count('/')
if url.startswith('/'):
sub_level -= 1
url_parts = url_parts[1:]
if parent_level or not url.startswith('/'):
start, end = parent_level + 1, parent_level
else:
if sub_level:
if sub_level == 1:
parent_level -= 1
start, end = parent_level, 1
else:
start, end = 1, sub_level - 1
joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
hashed_url = self.url(unquote(joined_result), force=True)
file_name = hashed_url.split('/')[-1:]
relative_url = '/'.join(url.split('/')[:-1] + file_name)
# Return the hashed version to the file
return template % unquote(relative_url)
return converter
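    # Illustrative example (not part of the original module): while processing
    # 'css/styles.css', a reference like url(../img/logo.png) is rewritten to
    # url("../img/logo.abc123def456.png") -- the relative path is kept and the
    # file name is replaced by its hashed counterpart.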
def post_process(self, paths, dry_run=False, **options):
"""
Post process the given list of files (called from collectstatic).
Processing is actually two separate operations:
1. renaming files to include a hash of their content for cache-busting,
and copying those files to the target storage.
2. adjusting files which contain references to other files so they
refer to the cache-busting filenames.
If either of these are performed on a file, then that file is considered
post-processed.
"""
# don't even dare to process the files if we're in dry run mode
if dry_run:
return
# where to store the new paths
hashed_paths = {}
# build a list of adjustable files
matches = lambda path: matches_patterns(path, self._patterns.keys())
adjustable_paths = [path for path in paths if matches(path)]
# then sort the files by the directory level
path_level = lambda name: len(name.split(os.sep))
for name in sorted(paths.keys(), key=path_level, reverse=True):
# use the original, local file, not the copied-but-unprocessed
# file, which might be somewhere far away, like S3
storage, path = paths[name]
with storage.open(path) as original_file:
# generate the hash with the original content, even for
# adjustable files.
hashed_name = self.hashed_name(name, original_file)
# then get the original's file content..
if hasattr(original_file, 'seek'):
original_file.seek(0)
hashed_file_exists = self.exists(hashed_name)
processed = False
# ..to apply each replacement pattern to the content
if name in adjustable_paths:
content = original_file.read().decode(settings.FILE_CHARSET)
for patterns in self._patterns.values():
for pattern, template in patterns:
converter = self.url_converter(name, template)
content = pattern.sub(converter, content)
if hashed_file_exists:
self.delete(hashed_name)
# then save the processed result
content_file = ContentFile(force_bytes(content))
saved_name = self._save(hashed_name, content_file)
hashed_name = force_text(saved_name.replace('\\', '/'))
processed = True
else:
# or handle the case in which neither processing nor
# a change to the original file happened
if not hashed_file_exists:
processed = True
saved_name = self._save(hashed_name, original_file)
hashed_name = force_text(saved_name.replace('\\', '/'))
# and then set the cache accordingly
hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
yield name, hashed_name, processed
# Finally set the cache
self.cache.set_many(hashed_paths)
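# Illustrative usage sketch (not part of the original module): collectstatic
# drives post_process roughly like this, where 'paths' maps each relative name
# to a (storage, path) pair gathered by the staticfiles finders.
#
#   for name, hashed_name, processed in staticfiles_storage.post_process(paths):
#       if processed:
#           print("Post-processed %s as %s" % (name, hashed_name))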
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
"""
A static file system storage backend which also saves
hashed copies of the files it saves.
"""
pass
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
prefix = None
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is the actual app module
mod = import_module(app)
mod_path = os.path.dirname(mod.__file__)
location = os.path.join(mod_path, self.source_dir)
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
class ConfiguredStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()
staticfiles_storage = ConfiguredStorage()
| bsd-3-clause | -3,999,183,373,394,543,000 | 38.454545 | 85 | 0.561718 | false | 4.490761 | false | false | false | 0.000658 |
petemounce/ansible | lib/ansible/modules/identity/ipa/ipa_sudocmd.py | 71 | 6199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_sudocmd
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command
description:
- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
options:
sudocmd:
description:
- Sudo Command.
aliases: ['name']
required: true
description:
description:
- A description of this command.
required: false
state:
description: State to ensure
required: false
default: present
choices: ['present', 'absent']
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure sudo command exists
- ipa_sudocmd:
name: su
description: Allow to run su via sudo
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure sudo command does not exist
- ipa_sudocmd:
name: su
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudocmd:
description: Sudo command as return from IPA API
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.ipa import IPAClient
class SudoCmdIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
def sudocmd_find(self, name):
return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
def sudocmd_add(self, name, item):
return self._post_json(method='sudocmd_add', name=name, item=item)
def sudocmd_mod(self, name, item):
return self._post_json(method='sudocmd_mod', name=name, item=item)
def sudocmd_del(self, name):
return self._post_json(method='sudocmd_del', name=name)
def get_sudocmd_dict(description=None):
data = {}
if description is not None:
data['description'] = description
return data
def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
def ensure(module, client):
name = module.params['sudocmd']
state = module.params['state']
module_sudocmd = get_sudocmd_dict(description=module.params['description'])
ipa_sudocmd = client.sudocmd_find(name=name)
changed = False
if state == 'present':
if not ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_add(name=name, item=module_sudocmd)
else:
diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_sudocmd.get(key)
client.sudocmd_mod(name=name, item=data)
else:
if ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_del(name=name)
return changed, client.sudocmd_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
description=dict(type='str', required=False),
state=dict(type='str', required=False, default='present',
choices=['present', 'absent', 'enabled', 'disabled']),
sudocmd=dict(type='str', required=True, aliases=['name']),
ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
ipa_host=dict(type='str', required=False, default='ipa.example.com'),
ipa_port=dict(type='int', required=False, default=443),
ipa_user=dict(type='str', required=False, default='admin'),
ipa_pass=dict(type='str', required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
),
supports_check_mode=True,
)
client = SudoCmdIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, sudocmd = ensure(module, client)
module.exit_json(changed=changed, sudocmd=sudocmd)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 | 7,711,921,722,581,277,000 | 30.627551 | 103 | 0.633812 | false | 3.729844 | false | false | false | 0.001129 |
emersonsoftware/ansiblefork | lib/ansible/modules/cloud/openstack/os_port_facts.py | 6 | 7703 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: os_port_facts
short_description: Retrieve facts about ports within OpenStack.
version_added: "2.1"
author: "David Shrewsbury (@Shrews)"
description:
- Retrieve facts about ports from OpenStack.
notes:
- Facts are placed in the C(openstack_ports) variable.
requirements:
- "python >= 2.6"
- "shade"
options:
port:
description:
- Unique name or ID of a port.
required: false
default: null
filters:
description:
- A dictionary of meta data to use for further filtering. Elements
of this dictionary will be matched against the returned port
dictionaries. Matching is currently limited to strings within
the port dictionary, or strings within nested dictionaries.
required: false
default: null
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about all ports
- os_port_facts:
cloud: mycloud
# Gather facts about a single port
- os_port_facts:
cloud: mycloud
port: 6140317d-e676-31e1-8a4a-b1913814a471
# Gather facts about all ports that have device_id set to a specific value
# and with a status of ACTIVE.
- os_port_facts:
cloud: mycloud
filters:
device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
status: ACTIVE
'''
RETURN = '''
openstack_ports:
description: List of port dictionaries. A subset of the dictionary keys
listed below may be returned, depending on your cloud provider.
returned: always, but can be null
type: complex
contains:
admin_state_up:
description: The administrative state of the router, which is
up (true) or down (false).
returned: success
type: boolean
sample: true
allowed_address_pairs:
description: A set of zero or more allowed address pairs. An
address pair consists of an IP address and MAC address.
returned: success
type: list
sample: []
"binding:host_id":
description: The UUID of the host where the port is allocated.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
"binding:profile":
description: A dictionary the enables the application running on
the host to pass and receive VIF port-specific
information to the plug-in.
returned: success
type: dict
sample: {}
"binding:vif_details":
description: A dictionary that enables the application to pass
information about functions that the Networking API
provides.
returned: success
type: dict
sample: {"port_filter": true}
"binding:vif_type":
description: The VIF type for the port.
returned: success
type: dict
sample: "ovs"
"binding:vnic_type":
description: The virtual network interface card (vNIC) type that is
bound to the neutron port.
returned: success
type: string
sample: "normal"
device_id:
description: The UUID of the device that uses this port.
returned: success
type: string
sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
device_owner:
description: The UUID of the entity that uses this port.
returned: success
type: string
sample: "network:router_interface"
dns_assignment:
description: DNS assignment information.
returned: success
type: list
dns_name:
description: DNS name
returned: success
type: string
sample: ""
extra_dhcp_opts:
description: A set of zero or more extra DHCP option pairs.
An option pair consists of an option value and name.
returned: success
type: list
sample: []
fixed_ips:
description: The IP addresses for the port. Includes the IP address
and UUID of the subnet.
returned: success
type: list
id:
description: The UUID of the port.
returned: success
type: string
sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
ip_address:
description: The IP address.
returned: success
type: string
sample: "127.0.0.1"
mac_address:
description: The MAC address.
returned: success
type: string
sample: "00:00:5E:00:53:42"
name:
description: The port name.
returned: success
type: string
sample: "port_name"
network_id:
description: The UUID of the attached network.
returned: success
type: string
sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
port_security_enabled:
description: The port security status. The status is enabled (true) or disabled (false).
returned: success
type: boolean
sample: false
security_groups:
description: The UUIDs of any attached security groups.
returned: success
type: list
status:
description: The port status.
returned: success
type: string
sample: "ACTIVE"
tenant_id:
description: The UUID of the tenant who owns the network.
returned: success
type: string
sample: "51fce036d7984ba6af4f6c849f65ef00"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
port=dict(required=False),
filters=dict(type='dict', required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
port = module.params.pop('port')
filters = module.params.pop('filters')
try:
cloud = shade.openstack_cloud(**module.params)
ports = cloud.search_ports(port, filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_ports=ports))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,851,063,494,657,022,000 | 32.637555 | 100 | 0.592886 | false | 4.478488 | false | false | false | 0.000779 |
daren-thomas/rps-sample-scripts | RoomNameUpper.py | 1 | 1212 | #RoomNameUpper
#dwane@dsdraughting.com
#2014
#
#import libraries and reference the RevitAPI and RevitAPIUI
import clr
import math
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
# to access all the Name-spaces in the RevitAPI & UI, we import them all using *
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
#set the active Revit application and document
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
#selection = list(__revit__.ActiveUIDocument.Selection.Elements)
#define a transaction variable and describe the transaction
t = Transaction(doc, 'rename selected room names to upper case')
#start a transaction in the Revit database
t.Start()
#perform some action here...
for rooms in uidoc.Selection.Elements:
currentRoomName = rooms.get_Parameter('Name').AsString()
print 'Old Name = ' + currentRoomName
newRoomName = currentRoomName.upper()
print 'New Name = ' + newRoomName
rooms.get_Parameter(BuiltInParameter.ROOM_NAME).Set(newRoomName)
#commit the transaction to the Revit database
t.Commit()
#close the script window
__window__.Close()
| mit | 5,352,328,091,143,414,000 | 29.076923 | 80 | 0.74835 | false | 3.197889 | false | false | false | 0.019802 |
miing/mci_migo | identityprovider/migrations/0007_auto__add_field_invalidatedemailaddress_date_invalidated__add_field_in.py | 1 | 19118 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from identityprovider.models import InvalidatedEmailAddress
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'InvalidatedEmailAddress.date_invalidated'
db.add_column(u'invalidated_emailaddress', 'date_invalidated',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.utcnow, null=True, blank=True),
keep_default=False)
# Adding field 'InvalidatedEmailAddress.account_notified'
db.add_column(u'invalidated_emailaddress', 'account_notified',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
        # Existing invalidated emails should not trigger notifications, so mark
        # them as already notified.
InvalidatedEmailAddress.objects.all().update(account_notified=True)
def backwards(self, orm):
# Deleting field 'InvalidatedEmailAddress.date_invalidated'
db.delete_column(u'invalidated_emailaddress', 'date_invalidated')
# Deleting field 'InvalidatedEmailAddress.account_notified'
db.delete_column(u'invalidated_emailaddress', 'account_notified')
models = {
'identityprovider.account': {
'Meta': {'object_name': 'Account', 'db_table': "u'account'"},
'creation_rationale': ('django.db.models.fields.IntegerField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'date_status_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'displayname': ('identityprovider.models.account.DisplaynameField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_openid_identifier': ('django.db.models.fields.TextField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'openid_identifier': ('django.db.models.fields.TextField', [], {'default': "u'EeE8MYB'", 'unique': 'True'}),
'preferredlanguage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'status_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twofactor_attempts': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True'}),
'twofactor_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'warn_about_backup_device': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'identityprovider.accountpassword': {
'Meta': {'object_name': 'AccountPassword', 'db_table': "u'accountpassword'"},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['identityprovider.Account']", 'unique': 'True', 'db_column': "'account'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('identityprovider.models.account.PasswordField', [], {})
},
'identityprovider.apiuser': {
'Meta': {'object_name': 'APIUser', 'db_table': "'api_user'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'identityprovider.authenticationdevice': {
'Meta': {'ordering': "('id',)", 'object_name': 'AuthenticationDevice'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'devices'", 'to': "orm['identityprovider.Account']"}),
'counter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'device_type': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.TextField', [], {})
},
'identityprovider.authtoken': {
'Meta': {'object_name': 'AuthToken', 'db_table': "u'authtoken'"},
'date_consumed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'db_index': 'True', 'blank': 'True'}),
'displayname': ('identityprovider.models.account.DisplaynameField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('identityprovider.models.account.PasswordField', [], {'null': 'True', 'blank': 'True'}),
'redirection_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'requester'", 'blank': 'True'}),
'requester_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'token_type': ('django.db.models.fields.IntegerField', [], {})
},
'identityprovider.emailaddress': {
'Meta': {'object_name': 'EmailAddress', 'db_table': "u'emailaddress'"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'account'", 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
'email': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lp_person': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'person'", 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {})
},
'identityprovider.invalidatedemailaddress': {
'Meta': {'object_name': 'InvalidatedEmailAddress', 'db_table': "u'invalidated_emailaddress'"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'null': 'True', 'db_column': "'account'", 'blank': 'True'}),
'account_notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'date_invalidated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'identityprovider.lpopenididentifier': {
'Meta': {'object_name': 'LPOpenIdIdentifier', 'db_table': "u'lp_openididentifier'"},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.date.today'}),
'identifier': ('django.db.models.fields.TextField', [], {'unique': 'True', 'primary_key': 'True'}),
'lp_account': ('django.db.models.fields.IntegerField', [], {'db_column': "'account'", 'db_index': 'True'})
},
'identityprovider.openidassociation': {
'Meta': {'unique_together': "(('server_url', 'handle'),)", 'object_name': 'OpenIDAssociation', 'db_table': "u'openidassociation'"},
'assoc_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.TextField', [], {}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '2047'})
},
'identityprovider.openidauthorization': {
'Meta': {'object_name': 'OpenIDAuthorization', 'db_table': "u'openidauthorization'"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'db_column': "'account'"}),
'client_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trust_root': ('django.db.models.fields.TextField', [], {})
},
'identityprovider.openidnonce': {
'Meta': {'unique_together': "(('server_url', 'timestamp', 'salt'),)", 'object_name': 'OpenIDNonce', 'db_table': "'openidnonce'"},
'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '2047', 'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {})
},
'identityprovider.openidrpconfig': {
'Meta': {'object_name': 'OpenIDRPConfig', 'db_table': "'ssoopenidrpconfig'"},
'allow_unverified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allowed_sreg': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'auto_authorize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_query_any_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'creation_rationale': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
'description': ('django.db.models.fields.TextField', [], {}),
'displayname': ('django.db.models.fields.TextField', [], {}),
'flag_twofactor': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'ga_snippet': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prefer_canonical_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'require_two_factor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'trust_root': ('django.db.models.fields.TextField', [], {'unique': 'True'})
},
'identityprovider.openidrpsummary': {
'Meta': {'unique_together': "(('account', 'trust_root', 'openid_identifier'),)", 'object_name': 'OpenIDRPSummary', 'db_table': "u'openidrpsummary'"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Account']", 'db_column': "'account'"}),
'approved_data': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
'date_last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'openid_identifier': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'total_logins': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'trust_root': ('django.db.models.fields.TextField', [], {'db_index': 'True'})
},
'identityprovider.person': {
'Meta': {'object_name': 'Person', 'db_table': "u'lp_person'"},
'addressline1': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'addressline2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'country'", 'blank': 'True'}),
'creation_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'creation_rationale': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'datecreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'defaultmembershipperiod': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'defaultrenewalperiod': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'displayname': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fti': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'hide_email_addresses': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'homepage_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'icon'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'language'", 'blank': 'True'}),
'logo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'logo'", 'blank': 'True'}),
'lp_account': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_column': "'account'"}),
'mail_resumption_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mailing_list_auto_subscribe_policy': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'mailing_list_receive_duplicates': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'merged': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'merged'", 'blank': 'True'}),
'mugshot': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'mugshot'", 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'unique': 'True', 'null': 'True'}),
'organization': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'personal_standing': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'personal_standing_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'province': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'registrant': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'registrant'", 'blank': 'True'}),
'renewal_policy': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True'}),
'subscriptionpolicy': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'teamdescription': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'teamowner': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'teamowner'", 'blank': 'True'}),
'verbose_bugnotifications': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'identityprovider.personlocation': {
'Meta': {'object_name': 'PersonLocation', 'db_table': "u'lp_personlocation'"},
'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_column': "'last_modified_by'"}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['identityprovider.Person']", 'unique': 'True', 'null': 'True', 'db_column': "'person'"}),
'time_zone': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'})
},
'identityprovider.teamparticipation': {
'Meta': {'unique_together': "(('team', 'person'),)", 'object_name': 'TeamParticipation', 'db_table': "u'lp_teamparticipation'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identityprovider.Person']", 'null': 'True', 'db_column': "'person'"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_participations'", 'null': 'True', 'db_column': "'team'", 'to': "orm['identityprovider.Person']"})
}
}
complete_apps = ['identityprovider'] | agpl-3.0 | -6,829,572,852,611,019,000 | 84.352679 | 192 | 0.571242 | false | 3.748627 | false | false | false | 0.007375 |
vaal-/il2_stats | src/stats/migrations/0010_jsonb_step_1.py | 2 | 5319 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-15 18:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
import stats.models
def json_to_jsonb(apps, schema_editor):
Mission = apps.get_model('stats', 'Mission')
Player = apps.get_model('stats', 'Player')
PlayerMission = apps.get_model('stats', 'PlayerMission')
PlayerAircraft = apps.get_model('stats', 'PlayerAircraft')
Sortie = apps.get_model('stats', 'Sortie')
LogEntry = apps.get_model('stats', 'LogEntry')
for m in Mission.objects.all():
m.score_dict_new = m.score_dict
m.save()
for m in Player.objects.all():
m.sorties_cls_new = m.sorties_cls
m.ammo_new = m.ammo
m.killboard_pvp_new = m.killboard_pvp
m.killboard_pve_new = m.killboard_pve
m.save()
for m in PlayerMission.objects.all():
m.ammo_new = m.ammo
m.killboard_pvp_new = m.killboard_pvp
m.killboard_pve_new = m.killboard_pve
m.save()
for m in PlayerAircraft.objects.all():
m.ammo_new = m.ammo
m.killboard_pvp_new = m.killboard_pvp
m.killboard_pve_new = m.killboard_pve
m.save()
for m in Sortie.objects.all():
m.ammo_new = m.ammo
m.killboard_pvp_new = m.killboard_pvp
m.killboard_pve_new = m.killboard_pve
m.score_dict_new = m.score_dict
m.bonus_new = m.bonus
m.debug_new = m.debug
m.save()
for m in LogEntry.objects.all():
m.extra_data_new = m.extra_data
m.save()
class Migration(migrations.Migration):
dependencies = [
('stats', '0009_french'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='extra_data_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='mission',
name='score_dict_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='player',
name='ammo_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=stats.models.default_ammo),
),
migrations.AddField(
model_name='player',
name='killboard_pve_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='player',
name='killboard_pvp_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='player',
name='sorties_cls_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=stats.models.default_sorties_cls),
),
migrations.AddField(
model_name='playeraircraft',
name='ammo_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=stats.models.default_ammo),
),
migrations.AddField(
model_name='playeraircraft',
name='killboard_pve_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='playeraircraft',
name='killboard_pvp_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='playermission',
name='ammo_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=stats.models.default_ammo),
),
migrations.AddField(
model_name='playermission',
name='killboard_pve_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='playermission',
name='killboard_pvp_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='sortie',
name='ammo_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=stats.models.default_ammo),
),
migrations.AddField(
model_name='sortie',
name='bonus_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='sortie',
name='debug_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='sortie',
name='killboard_pve_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='sortie',
name='killboard_pvp_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AddField(
model_name='sortie',
name='score_dict_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.RunPython(json_to_jsonb),
]
| mit | 1,530,059,953,827,233,800 | 33.316129 | 107 | 0.589397 | false | 3.709205 | false | false | false | 0.00094 |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/django/templatetags/tz.py | 251 | 5574 | from datetime import datetime, tzinfo
from django.template import Library, Node, TemplateSyntaxError
from django.utils import six, timezone
try:
import pytz
except ImportError:
pytz = None
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
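# Illustrative template usage (not part of the original module); after
# {% load tz %} these filters can be applied to datetime values:
#
#   {{ value|localtime }}
#   {{ value|utc }}
#   {{ value|timezone:"Europe/Paris" }}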
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If is it a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
| mit | -2,625,648,709,518,907,400 | 27.294416 | 100 | 0.627198 | false | 4.159701 | false | false | false | 0.000179 |
vied12/superdesk | server/macros/currency.py | 1 | 1096 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import re
import requests
USD_TO_AUD = 1.27 # backup
def get_rate():
"""Get USD to AUD rate."""
try:
r = requests.get('http://rate-exchange.appspot.com/currency?from=USD&to=AUD', timeout=5)
return float(r.json()['rate'])
except Exception:
return USD_TO_AUD
def usd_to_aud(item, **kwargs):
"""Convert USD to AUD."""
rate = get_rate()
if os.environ.get('BEHAVE_TESTING'):
rate = USD_TO_AUD
def convert(match):
usd = float(match.group(1))
aud = rate * usd
return '$%d' % aud
item['body_html'] = re.sub('\$([0-9]+)', convert, item['body_html'])
return item
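# Illustrative example (not part of the original macro): with the fallback rate
# of 1.27, an item whose body_html contains "$100" comes back containing "$127";
# cents are dropped because the converted amount is formatted with '%d'.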
name = 'usd_to_aud'
label = 'Convert USD to AUD'
shortcut = 'c'
callback = usd_to_aud
desks = ['SPORTS DESK', 'POLITICS']
| agpl-3.0 | 1,969,786,226,495,163,600 | 22.319149 | 96 | 0.625 | false | 3.061453 | false | false | false | 0.002737 |
icsi-berkeley/framework_code | src/main/nluas/app/core_solver.py | 2 | 7749 | """
Simple solver "core". Contains capabilities for unpacking
a JSON n-tuple, as well as routing this n-tuple based
on the predicate_type (command, query, assertion, etc.).
Other general capabilities can be added. The design
is general enough that the same "unpacking" and "routing"
method can be used, as long as a new method is written for a given
predicate_type.
"Route_action" can be called by command/query/assertion methods,
to route each parameter to the task-specific method. E.g., "solve_move",
or "solve_push_move", etc.
Author: seantrott <seantrott@icsi.berkeley.edu>
------
See LICENSE.txt for licensing information.
------
"""
from nluas.ntuple_decoder import *
from nluas.core_agent import *
import sys, traceback
import pprint
import os
path = os.path.dirname(os.path.realpath(__file__))
def check_complexity(n):
s = int(n)
if s not in [1, 2, 3]:
raise argparse.ArgumentTypeError("{} is an invalid entry for the complexity level. Should be 1, 2, or 3.".format(n))
return s
class CoreProblemSolver(CoreAgent):
def __init__(self, args):
self.__path__ = os.getcwd() + "/src/main/nluas/"
self.ntuple = None
self.decoder = NtupleDecoder()
CoreAgent.__init__(self, args)
self.world = []
self.solver_parser = self.setup_solver_parser()
args = self.solver_parser.parse_args(self.unknown)
self.complexity = args.complexity
self.ui_address = "{}_{}".format(self.federation, "AgentUI")
self.transport.subscribe(self.ui_address, self.callback)
self._incapable = "I cannot do that yet."
self.history = list()
self.p_features = None
self.eventFeatures=None
self.parameter_templates = OrderedDict()
def setup_solver_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--complexity", default=1, type=check_complexity, help="indicate level of complexity: 1, 2, or 3.")
return parser
def callback(self, ntuple):
if self.is_quit(ntuple):
return self.close()
self.solve(ntuple)
def initialize_templates(self):
""" Initializes templates from path, set above. """
self.parameter_templates = self.read_templates(self.__path__+"parameter_templates.json")
def request_clarification(self, ntuple, message="This ntuple requires clarification."):
request = {'ntuple': ntuple, 'message': message, 'type': 'clarification', 'tag': self.address}
self.transport.send(self.ui_address, request)
def identification_failure(self, message):
request = {'type': 'id_failure', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def respond_to_query(self, message):
request = {'type': 'response', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def return_error_descriptor(self, message):
request = {'type': 'error_descriptor', 'message': message, 'tag': self.address}
self.transport.send(self.ui_address, request)
def solve(self, ntuple):
if self.check_for_clarification(ntuple):
self.request_clarification(ntuple=ntuple)
else:
self.ntuple = ntuple
predicate_type = ntuple['predicate_type']
try:
dispatch = getattr(self, "solve_%s" %predicate_type)
dispatch(ntuple)
self.broadcast()
self.p_features = None # Testing, took it out from route_action
except AttributeError as e:
traceback.print_exc()
message = "I cannot solve a(n) {}.".format(predicate_type)
self.identification_failure(message)
def broadcast(self):
""" Here, does nothing. Later, an AgentSolver will broadcast information back to BossSolver. """
pass
def update_world(self, discovered=[]):
for item in discovered:
self.world.append(item)
def solve_command(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "command")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_query(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "query")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_assertion(self, ntuple):
self.route_event(ntuple['eventDescriptor'], "assertion")
if self.verbose:
self.decoder.pprint_ntuple(ntuple)
def solve_conditional_command(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def solve_conditional_assertion(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def solve_conditional_query(self, ntuple):
""" Takes in conditionalED. (API changed 5/26/16, ST) """
print("Function is deprecated!")
print(ntuple.keys())
def route_event(self, eventDescription, predicate):
if "complexKind" in eventDescription and eventDescription['complexKind'] == "conditional":
dispatch = getattr(self, "solve_conditional_{}".format(predicate))
return dispatch(eventDescription)
features = eventDescription['e_features']
if features:
# Set eventFeatures
self.eventFeatures = features['eventFeatures']
parameters = eventDescription['eventProcess']
return_value = self.route_action(parameters, predicate)
self.eventFeatures = None
if return_value:
if predicate == "query":
self.respond_to_query(return_value)
elif predicate == "command":
self.return_error_descriptor(return_value)
return return_value
def route_action(self, parameters, predicate):
if "complexKind" in parameters and parameters['complexKind'] == "serial":
return self.solve_serial(parameters, predicate)
elif "complexKind" in parameters and parameters['complexKind'] == "causal":
return self.solve_causal(parameters, predicate)
else:
template = parameters['template']
action = parameters['actionary']
try:
if parameters['p_features']:
self.p_features = parameters['p_features']['processFeatures']
dispatch = getattr(self, "{}_{}".format(predicate, action))
return_value = self.route_dispatch(dispatch, parameters)
self.history.insert(0, (parameters, True))
self.p_features = None
return return_value
except AttributeError as e:
message = "I cannot solve the '{}_{}' action".format(predicate,action)
self.history.insert(0, (parameters, False))
self.identification_failure(message)
def route_dispatch(self, dispatch_function, parameters):
""" Simply runs dispatch_function on PARAMETERS. """
return dispatch_function(parameters)
def check_for_clarification(self, ntuple):
""" Will need to be replaced by a process that checks whether ntuple needs clarification.
Requires some sort of context/world model. """
#return random.choice([True, False])
return False
def solve_serial(self, parameters, predicate):
self.route_action(parameters['process1'], predicate)
self.route_action(parameters['process2'], predicate)
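# --- Illustrative sketch (not part of the original module) ---
# route_action() dispatches to methods named "<predicate>_<actionary>", so a
# task-specific solver is normally a subclass supplying those handlers. The
# class and method bodies below are hypothetical examples.
class ExampleMoveSolver(CoreProblemSolver):
    def command_move(self, parameters):
        # A real agent would issue motion commands here.
        print("Moving with parameters: {}".format(parameters))
    def query_move(self, parameters):
        # Strings returned from a query handler are sent back via respond_to_query().
        return "I would move with parameters: {}".format(parameters)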
if __name__ == '__main__':
ps = CoreProblemSolver(sys.argv[1:])
| apache-2.0 | -3,054,324,586,030,613,000 | 38.335025 | 133 | 0.630017 | false | 4.108696 | false | false | false | 0.003226 |
israellevin/plugin.video.walla | addon.py | 1 | 2152 | #!/usr/bin/python
# coding=utf-8
import sys
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
import xbmcplugin
xbmcplugin.setContent(addon_handle, 'episodes')
import urlparse
args = urlparse.parse_qs(sys.argv[2][1:])
mode = args.get('mode', None)
from urllib import FancyURLopener, urlencode
class URLOpener(FancyURLopener):
version = 'Mozilla/5.0 (X11; Linux i686; rv:31.0) Gecko/20100101 Firefox/31.0 Iceweasel/31.0'
urlopen = URLOpener().open
urlmake = lambda query: base_url + '?' + urlencode(query)
rooturl = 'http://nick.walla.co.il'
def getpage(url):
if url.startswith('/'): url = rooturl + url
elif not url.startswith('http://'): url = rooturl + '/' + url
resets = 0
for tries in range(5):
try:
page = urlopen(url).read()
break
except IOError:
page = u''
if isinstance(page, str): page = page.decode('windows-1255', 'replace')
page = page.encode('utf-8')
return page
import re
vidregexp = re.compile(
'class="vitem.*?"',
re.DOTALL
)
nextregexp = re.compile(
'<a class="p_r" style="" href="(.+?)"'
)
def vidsfromseason(url):
page = getpage(url)
vids = vidregexp.findall(page)
for nexturl in nextregexp.findall(page):
vids += vidregexp.findall(getpage(nexturl))
return vids
def vidsfromshow(showurl):
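    # The Hebrew phrase in the pattern below means "full episodes"; season
    # pages are discovered by following links labelled with it.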
return [vidsfromseason(url) for url in re.findall(
'href="([^"]*)"[^>]*>[^<]*פרקים מלאים',
getpage(showurl)
)]
import xbmcgui
if mode is None:
for show in re.findall(
'<a href="([^"]+)" class="item right w3" style=".*?">([^<]+)</a>',
getpage('/')
):
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=urlmake({'mode': 'show', 'showurl': show[0]}),
listitem=xbmcgui.ListItem(show[1]),
isFolder=True
)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'show':
print(vidsfromshow(args['showurl'][0]))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url='/',
listitem=xbmcgui.ListItem('Video')
)
xbmcplugin.endOfDirectory(addon_handle)
| mit | 5,933,738,009,999,845,000 | 26.113924 | 97 | 0.610177 | false | 3.201794 | false | false | false | 0.007937 |
the-blue-alliance/the-blue-alliance | helpers/validation_helper.py | 4 | 5159 | from consts.district_type import DistrictType
from models.district import District
from models.event import Event
from models.match import Match
from models.team import Team
import tba_config
class ValidationHelper(object):
"""
A collection of methods to validate model ids and return standard
error messages if they are invalid.
"""
@classmethod
def validate(cls, validators):
"""
Takes a list of tuples that defines a call to a validator
        (i.e. team_id_validator) and its corresponding value to validate.
Returns a dictionary of error messages if invalid.
Example: ValidationHelper.validate([('team_id_validator', 'frc101')])
"""
error_dict = { "Errors": list() }
valid = True
for v in validators:
results = getattr(ValidationHelper, v[0])(v[1])
if results:
error_dict["Errors"].append(results)
valid = False
if valid is False:
return error_dict
@classmethod
def validate_request(cls, handler):
kwargs = handler.request.route_kwargs
error_dict = {'Errors': []}
valid = True
team_future = None
event_future = None
match_future = None
district_future = None
# Check key formats
if 'team_key' in kwargs:
team_key = kwargs['team_key']
results = cls.team_id_validator(team_key)
if results:
error_dict['Errors'].append(results)
valid = False
else:
team_future = Team.get_by_id_async(team_key)
if 'event_key' in kwargs:
event_key = kwargs['event_key']
results = cls.event_id_validator(event_key)
if results:
error_dict['Errors'].append(results)
valid = False
else:
event_future = Event.get_by_id_async(event_key)
if 'match_key' in kwargs:
match_key = kwargs['match_key']
results = cls.match_id_validator(match_key)
if results:
error_dict['Errors'].append(results)
valid = False
else:
match_future = Match.get_by_id_async(match_key)
if 'district_key' in kwargs:
district_key = kwargs['district_key']
results = cls.district_id_validator(district_key)
if results:
error_dict['Errors'].append(results)
valid = False
else:
district_future = District.get_by_id_async(district_key)
if 'year' in kwargs:
year = int(kwargs['year'])
if year > tba_config.MAX_YEAR or year < tba_config.MIN_YEAR:
error_dict['Errors'].append({'year': 'Invalid year: {}. Must be between {} and {} inclusive.'.format(year, tba_config.MIN_YEAR, tba_config.MAX_YEAR)})
valid = False
# Check if keys exist
if team_future and team_future.get_result() is None:
error_dict['Errors'].append({'team_id': 'team id {} does not exist'.format(team_key)})
valid = False
if event_future and event_future.get_result() is None:
error_dict['Errors'].append({'event_id': 'event id {} does not exist'.format(event_key)})
valid = False
if match_future and match_future.get_result() is None:
error_dict['Errors'].append({'match_id': 'match id {} does not exist'.format(match_key)})
valid = False
if district_future and district_future.get_result() is None:
error_dict['Errors'].append({'district_id': 'district id {} does not exist'.format(district_key)})
valid = False
if not valid:
return error_dict
@classmethod
def is_valid_model_key(cls, key):
return (Team.validate_key_name(key) or
Event.validate_key_name(key) or
Match.validate_key_name(key) or
District.validate_key_name(key))
@classmethod
def team_id_validator(cls, value):
error_message = "{} is not a valid team id".format(value)
team_key_error = { "team_id": error_message}
if Team.validate_key_name(value) is False:
return team_key_error
@classmethod
def event_id_validator(cls, value):
error_message = "{} is not a valid event id".format(value)
event_key_error = { "event_id": error_message}
if Event.validate_key_name(value) is False:
return event_key_error
@classmethod
def match_id_validator(cls, value):
error_message = "{} is not a valid match id".format(value)
match_key_error = { "match_id": error_message}
if Match.validate_key_name(value) is False:
return match_key_error
@classmethod
def district_id_validator(cls, value):
error_message = "{} is not a valid district abbreviation".format(value)
district_key_error = {"district_abbrev": error_message}
if District.validate_key_name(value) is False:
return district_key_error
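# --- Illustrative note (not part of the original helper) ---
# A typical call site looks roughly like this (hypothetical handler code):
#     errors = ValidationHelper.validate([('team_id_validator', 'frc254')])
#     if errors:      # None is returned when every validator passes
#         self.abort(404, json.dumps(errors))
# Each tuple pairs a validator method name with the value to check, mirroring
# the docstring example on validate() above.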
| mit | -4,771,333,836,746,863,000 | 37.789474 | 166 | 0.581314 | false | 4.094444 | false | false | false | 0.00252 |
TEAM-Gummy/platform_external_chromium_org | chrome/test/pyautolib/timer_queue.py | 80 | 2561 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import threading
import time
class TimerQueue(threading.Thread):
"""Executes timers at a given interval.
This class provides the ability to run methods at a given interval. All
methods are fired synchronously. Only one method is running at a time.
Example of using TimerQueue:
def _fooPrinter(word):
print('foo : %s' % word)
timers = TimerQueue()
    timers.AddTimer(self._fooPrinter, 15, args=('hello',))
timers.start()
>> hello will be printed after 15 seconds
Note: TimerQueue is a subclass of threading.Thread, call start() to activate;
do not call run() directly.
"""
def __init__(self):
"""Initializes a TimerQueue object."""
threading.Thread.__init__(self, name='timer_thread')
self.timer_queue_lock = threading.Lock()
self.terminate = False
self.wait_time = 1
self.timers = []
def AddTimer(self, method, interval, args=()):
"""Adds a timer to the queue.
Args:
method: the method to be called at the given interval
interval: delay between method runs, in seconds
args: arguments to be passed to the method
"""
self.timer_queue_lock.acquire()
next_time = time.time() + interval
self.timers.append({'method': method, 'interval': interval,
'next time': next_time, 'args': copy.copy(args)})
self.timer_queue_lock.release()
def SetResolution(self, resolution):
"""Sets the timer check frequency, in seconds."""
self.wait_time = resolution
def RemoveTimer(self, method):
"""Removes a timer from the queue.
Args:
method: the timer containing the given method to be removed
"""
self.timer_queue_lock.acquire()
for timer in self.timers:
if timer['method'] == method:
self.timers.remove(timer)
break
self.timer_queue_lock.release()
def Stop(self):
"""Stops the timer."""
self.terminate = True
def run(self):
"""Primary run loop for the timer."""
while True:
now = time.time()
self.timer_queue_lock.acquire()
for timer in self.timers:
if timer['next time'] <= now:
# Use * to break the list into separate arguments
timer['method'](*timer['args'])
timer['next time'] += timer['interval']
self.timer_queue_lock.release()
if self.terminate:
return
time.sleep(self.wait_time)
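# --- Illustrative usage sketch (not part of the original module) ---
# Fires a hypothetical callback once per second for roughly three seconds and
# then shuts the queue down; mirrors the example in the class docstring.
if __name__ == '__main__':
  def _Tick(label):
    print('tick: %s' % label)
  timers = TimerQueue()
  timers.AddTimer(_Tick, 1, args=('demo',))
  timers.start()
  time.sleep(3)
  timers.Stop()
  timers.join()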
| bsd-3-clause | 2,451,543,385,618,818,000 | 29.129412 | 79 | 0.646232 | false | 4.014107 | false | false | false | 0.007029 |
deployed/django | django/utils/translation/trans_real.py | 5 | 27492 | """Translation helper functions."""
from __future__ import unicode_literals
from collections import OrderedDict
import locale
import os
import re
import sys
import gettext as gettext_module
from threading import local
import warnings
from django.apps import apps
from django.dispatch import receiver
from django.test.signals import setting_changed
from django.utils.encoding import force_str, force_text
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six, lru_cache
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning, trim_whitespace
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*$', re.IGNORECASE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
# some browsers use deprecated locales. refs #18419
_BROWSERS_DEPRECATED_LOCALES = {
'zh-cn': 'zh-hans',
'zh-tw': 'zh-hant',
}
_DJANGO_DEPRECATED_LOCALES = _BROWSERS_DEPRECATED_LOCALES
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] == 'LANGUAGES':
global _accepted
_accepted = {}
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower() + '_' + language[p + 1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
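# Illustrative behavior of the two helpers above (informal examples, not part
# of the original module):
#   to_locale('en-us')    -> 'en_US'
#   to_locale('sr-latn')  -> 'sr_Latn'
#   to_language('en_US')  -> 'en-us'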
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for app_config in reversed(list(apps.get_app_configs())):
apppath = os.path.join(app_config.path, 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
Fetches the translation object for a given tuple of application name and
language and installs it as the current translation object for the current
thread.
"""
if language in _DJANGO_DEPRECATED_LOCALES:
msg = ("The use of the language code '%s' is deprecated. "
"Please use the '%s' translation instead.")
warnings.warn(msg % (language, _DJANGO_DEPRECATED_LOCALES[language]),
PendingDeprecationWarning, stacklevel=2)
_active.value = translation(language)
def deactivate():
"""
Deinstalls the currently active translation object so that further _ calls
will resolve against the default translation object, again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
@lru_cache.lru_cache(maxsize=None)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if not language_code_re.search(lang_code):
return False
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
def get_supported_language_variant(lang_code, supported=None, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = OrderedDict(settings.LANGUAGES)
if lang_code:
# some browsers use deprecated language codes -- #18419
replacement = _BROWSERS_DEPRECATED_LOCALES.get(lang_code)
if lang_code not in supported and replacement in supported:
return replacement
# if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
generic_lang_code = lang_code.split('-')[0]
variants = (lang_code, lang_code.lower(), generic_lang_code,
generic_lang_code.lower())
for code in variants:
if code in supported and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported:
if supported_code.startswith((generic_lang_code + '-',
generic_lang_code.lower() + '-')):
return supported_code
raise LookupError(lang_code)
def get_language_from_path(path, supported=None, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the currently checked is not found.
"""
if supported is None:
from django.conf import settings
supported = OrderedDict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, supported, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from django.conf import settings
supported = OrderedDict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
# for backwards compatibility django_language is also checked (remove in 1.8)
lang_code = request.session.get('_language', request.session.get('django_language'))
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code, supported)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
try:
accept_lang = get_supported_language_variant(accept_lang, supported)
except LookupError:
continue
else:
_accepted[normalized] = accept_lang
return accept_lang
try:
return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
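# For example (informal, not part of the original module):
#   blankout('{% if foo %}', 'B')  -> 'BB BB BBB BB'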
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.conf import settings
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO()
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
else:
out.write(' ngettext(%r, %r, count) ' % (
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (
message_context,
join_tokens(singular, trimmed)))
else:
out.write(' gettext(%r) ' % join_tokens(singular,
trimmed))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return force_str(out.getvalue())
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
Any format errors in lang_string results in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return []
if priority:
try:
priority = float(priority)
except ValueError:
return []
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
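# Illustrative behavior (informal, not part of the original module):
#   parse_accept_lang_header('da, en-gb;q=0.8, en;q=0.7')
#       -> [('da', 1.0), ('en-gb', 0.8), ('en', 0.7)]
# A malformed header simply yields [].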
| bsd-3-clause | -1,063,632,945,561,637,400 | 35.60719 | 143 | 0.57002 | false | 4.307741 | false | false | false | 0.001273 |
danasp/Ticketraker_light | ticketracker_light/models.py | 1 | 4135 | # -*- coding: utf-8 -*-
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.contrib.auth.models import User
from time import time
from django.utils import timezone
from django.utils.safestring import mark_safe
def upload_file_name(instance, filename):
#return '%s/%s' % (instance.ticket.id, filename)
return 'ticket/%s/%s_%s' % (instance.ticket.id, str(time()).replace('.','_'), filename)
class Ticket(models.Model):
title = models.CharField(verbose_name=u'*Заголовок', max_length=30)
body = models.TextField(verbose_name=u'*Описание проблемы')
open_date = models.DateTimeField(verbose_name=u'Дата открытия', auto_now_add=True)
close_date = models.DateTimeField(verbose_name=u'Дата закрытия', null=True, blank=True)
author = models.ForeignKey(User, verbose_name=u'Сообщил')
author_email = models.EmailField(verbose_name='*e-mail')
responsible_person = models.ForeignKey(User, related_name='responsible_person', null=True, blank=True, verbose_name=u'Ответственный')
STATUS_CHOICE = (
('o', 'Открыт'),
('c', 'Закрыт'),
('m', 'Под наблюдением'),
)
    '''By default, all tickets have no responsible_person_id'''
status = models.CharField(verbose_name=u'Статус', max_length = 1,
choices = STATUS_CHOICE,
default ='o')
classification = models.BooleanField('Общая проблема', default=False)
depend_from = models.PositiveIntegerField(null=True, blank=True)
def __unicode__(self):
return self.title
def is_ticket_open(self):
return self.status == 'o'
def does_close_date(self):
if self.close_date == None:
return u'Тикет еще на закрыт'
else:
return self.close_date
is_ticket_open.short_description = u'Тикет открыт?'
is_ticket_open.boolean = True
does_close_date.short_description = u"Дата закрытия"
class TicketComment(models.Model):
author = models.ForeignKey(User)
ticket = models.ForeignKey(Ticket)
body = models.TextField()
pub_date = models.DateTimeField(auto_now_add=True)
class TicketFile(models.Model):
ticket = models.ForeignKey(Ticket)
upload_file = models.FileField(upload_to=upload_file_name, blank=True, verbose_name=u'Приложить файл')
'''
When a ticket is closed, set its close date to the current time.
'''
@receiver(signals.pre_save, sender=Ticket)
def modify_close_date(sender, instance, **kwargs):
if (instance.close_date == None or instance.close_date < timezone.now()) and instance.status == 'c':
instance.close_date = timezone.now()
# signals.pre_save.connect(modify_close_date, sender=Ticket)
'''
When the status of a general (parent) ticket changes, the status of its dependent tickets must change as well.
If a dependent ticket has no responsible_person, that field is set to whoever changed the ticket's state.
'''
@receiver(signals.post_save, sender=Ticket)
def chose_dependent_ticket_status(sender, instance, **kwargs):
if instance.classification:
dependent_tickets = Ticket.objects.filter(depend_from=instance.id)
for tt in dependent_tickets:
if tt.responsible_person_id == None:
tt.responsible_person_id = instance.responsible_person_id
tt.status = instance.status
tt.save()
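# --- Illustrative note (not part of the original module) ---
# With the post_save hook above, closing a "general" ticket cascades to its
# dependents, roughly (hypothetical objects; needs a configured project/DB):
#     parent = Ticket.objects.get(pk=1)   # classification=True
#     parent.status = 'c'
#     parent.save()                       # dependent tickets get status 'c' too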
'''
If no file was attached, remove the empty row from the DB.
'''
@receiver(signals.post_save, sender=TicketFile)
def delete_blank_file_field(sender, instance, **kwargs):
if (instance.upload_file == '' or instance.upload_file == None):
instance.delete()
| gpl-2.0 | -2,561,041,861,352,926,000 | 36.373737 | 137 | 0.676669 | false | 2.758389 | false | false | false | 0.011625 |
0jpq0/kbengine | kbe/src/lib/python/Lib/email/parser.py | 86 | 5045 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: email-sig@python.org
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser',
'FeedParser', 'BytesFeedParser']
from io import StringIO, TextIOWrapper
from email.feedparser import FeedParser, BytesFeedParser
from email._policybase import compat32
class Parser:
def __init__(self, _class=None, *, policy=compat32):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
The policy keyword specifies a policy object that controls a number of
aspects of the parser's operation. The default policy maintains
backward compatibility.
"""
self._class = _class
self.policy = policy
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
feedparser = FeedParser(self._class, policy=self.policy)
if headersonly:
feedparser._set_headersonly()
while True:
data = fp.read(8192)
if not data:
break
feedparser.feed(data)
return feedparser.close()
def parsestr(self, text, headersonly=False):
"""Create a message structure from a string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
return self.parse(StringIO(text), headersonly=headersonly)
class HeaderParser(Parser):
def parse(self, fp, headersonly=True):
return Parser.parse(self, fp, True)
def parsestr(self, text, headersonly=True):
return Parser.parsestr(self, text, True)
class BytesParser:
def __init__(self, *args, **kw):
"""Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
self.parser = Parser(*args, **kw)
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a binary file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
try:
return self.parser.parse(fp, headersonly)
finally:
fp.detach()
def parsebytes(self, text, headersonly=False):
"""Create a message structure from a byte string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
text = text.decode('ASCII', errors='surrogateescape')
return self.parser.parsestr(text, headersonly)
class BytesHeaderParser(BytesParser):
def parse(self, fp, headersonly=True):
return BytesParser.parse(self, fp, headersonly=True)
def parsebytes(self, text, headersonly=True):
return BytesParser.parsebytes(self, text, headersonly=True)
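# --- Illustrative usage sketch (not part of the original module) ---
# Parses a small, made-up RFC 2822 message from a string and from bytes.
if __name__ == '__main__':
    _text = 'Subject: greetings\nFrom: alice@example.com\n\nHello there.\n'
    _msg = Parser().parsestr(_text)
    print(_msg['Subject'])        # -> greetings
    print(_msg.get_payload())     # -> Hello there.
    _hdrs = BytesHeaderParser().parsebytes(_text.encode('ascii'))
    print(_hdrs['From'])          # -> alice@example.com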
| lgpl-3.0 | -3,946,299,496,832,863,000 | 36.37037 | 78 | 0.671358 | false | 4.476486 | false | false | false | 0.000595 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Werkzeug-0.10.4/werkzeug/urls.py | 148 | 36596 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
urlparse, whose main purpose were to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
from werkzeug._compat import text_type, PY2, to_unicode, \
to_native, implements_to_string, try_coerce_native, \
normalize_string_tuple, make_literal_wrapper, \
fix_tuple_repr
from werkzeug._internal import _encode_idna, _decode_idna
from werkzeug.datastructures import MultiDict, iter_multi_items
from collections import namedtuple
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$')
# Characters that are safe in any part of an URL.
_always_safe = (b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+')
_hexdigits = '0123456789ABCDEFabcdef'
_hextobyte = dict(
((a + b).encode(), int(a + b, 16))
for a in _hexdigits for b in _hexdigits
)
_URLTuple = fix_tuple_repr(namedtuple('_URLTuple',
['scheme', 'netloc', 'path', 'query', 'fragment']))
class BaseURL(_URLTuple):
'''Superclass of :py:class:`URL` and :py:class:`BytesURL`.'''
__slots__ = ()
def replace(self, **kwargs):
"""Return an URL with the same values, except for those parameters
given new values by whichever keyword arguments are specified."""
return self._replace(**kwargs)
@property
def host(self):
"""The host part of the URL if available, otherwise `None`. The
host is either the hostname or the IP address mentioned in the
URL. It will not contain the port.
"""
return self._split_host()[0]
@property
def ascii_host(self):
"""Works exactly like :attr:`host` but will return a result that
is restricted to ASCII. If it finds a netloc that is not ASCII
it will attempt to idna decode it. This is useful for socket
operations when the URL might include internationalized characters.
"""
rv = self.host
if rv is not None and isinstance(rv, text_type):
rv = _encode_idna(rv)
return to_native(rv, 'ascii', 'ignore')
@property
def port(self):
"""The port in the URL as an integer if it was present, `None`
otherwise. This does not fill in default ports.
"""
try:
rv = int(to_native(self._split_host()[1]))
if 0 <= rv <= 65535:
return rv
except (ValueError, TypeError):
pass
@property
def auth(self):
"""The authentication part in the URL if available, `None`
otherwise.
"""
return self._split_netloc()[0]
@property
def username(self):
"""The username if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[0]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_username(self):
"""The username if it was part of the URL, `None` otherwise.
Unlike :attr:`username` this one is not being decoded.
"""
return self._split_auth()[0]
@property
def password(self):
"""The password if it was part of the URL, `None` otherwise.
This undergoes URL decoding and will always be a unicode string.
"""
rv = self._split_auth()[1]
if rv is not None:
return _url_unquote_legacy(rv)
@property
def raw_password(self):
"""The password if it was part of the URL, `None` otherwise.
Unlike :attr:`password` this one is not being decoded.
"""
return self._split_auth()[1]
def decode_query(self, *args, **kwargs):
"""Decodes the query part of the URL. Ths is a shortcut for
calling :func:`url_decode` on the query argument. The arguments and
keyword arguments are forwarded to :func:`url_decode` unchanged.
"""
return url_decode(self.query, *args, **kwargs)
def join(self, *args, **kwargs):
"""Joins this URL with another one. This is just a convenience
function for calling into :meth:`url_join` and then parsing the
return value again.
"""
return url_parse(url_join(self, *args, **kwargs))
def to_url(self):
"""Returns a URL string or bytes depending on the type of the
information stored. This is just a convenience function
for calling :meth:`url_unparse` for this URL.
"""
return url_unparse(self)
def decode_netloc(self):
"""Decodes the netloc part into a string."""
rv = _decode_idna(self.host or '')
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
_url_unquote_legacy(self.raw_username or '', '/:%@'),
_url_unquote_legacy(self.raw_password or '', '/:%@'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return rv
def to_uri_tuple(self):
"""Returns a :class:`BytesURL` tuple that holds a URI. This will
encode all the information in the URL properly to ASCII using the
rules a web browser would follow.
It's usually more interesting to directly call :meth:`iri_to_uri` which
will return a string.
"""
return url_parse(iri_to_uri(self).encode('ascii'))
def to_iri_tuple(self):
"""Returns a :class:`URL` tuple that holds a IRI. This will try
to decode as much information as possible in the URL without
losing information similar to how a web browser does it for the
URL bar.
It's usually more interesting to directly call :meth:`uri_to_iri` which
will return a string.
"""
return url_parse(uri_to_iri(self))
def get_file_location(self, pathformat=None):
"""Returns a tuple with the location of the file in the form
``(server, location)``. If the netloc is empty in the URL or
points to localhost, it's represented as ``None``.
The `pathformat` by default is autodetection but needs to be set
when working with URLs of a specific system. The supported values
are ``'windows'`` when working with Windows or DOS paths and
``'posix'`` when working with posix paths.
        If the URL does not point to a local file, the server and location
are both represented as ``None``.
:param pathformat: The expected format of the path component.
Currently ``'windows'`` and ``'posix'`` are
supported. Defaults to ``None`` which is
autodetect.
"""
if self.scheme != 'file':
return None, None
path = url_unquote(self.path)
host = self.netloc or None
if pathformat is None:
if os.name == 'nt':
pathformat = 'windows'
else:
pathformat = 'posix'
if pathformat == 'windows':
if path[:1] == '/' and path[1:2].isalpha() and path[2:3] in '|:':
path = path[1:2] + ':' + path[3:]
windows_share = path[:3] in ('\\' * 3, '/' * 3)
import ntpath
path = ntpath.normpath(path)
# Windows shared drives are represented as ``\\host\\directory``.
# That results in a URL like ``file://///host/directory``, and a
# path like ``///host/directory``. We need to special-case this
# because the path contains the hostname.
if windows_share and host is None:
parts = path.lstrip('\\').split('\\', 1)
if len(parts) == 2:
host, path = parts
else:
host = parts[0]
path = ''
elif pathformat == 'posix':
import posixpath
path = posixpath.normpath(path)
else:
raise TypeError('Invalid path format %s' % repr(pathformat))
if host in ('127.0.0.1', '::1', 'localhost'):
host = None
return host, path
def _split_netloc(self):
if self._at in self.netloc:
return self.netloc.split(self._at, 1)
return None, self.netloc
def _split_auth(self):
auth = self._split_netloc()[0]
if not auth:
return None, None
if self._colon not in auth:
return auth, None
return auth.split(self._colon, 1)
def _split_host(self):
rv = self._split_netloc()[1]
if not rv:
return None, None
if not rv.startswith(self._lbracket):
if self._colon in rv:
return rv.split(self._colon, 1)
return rv, None
idx = rv.find(self._rbracket)
if idx < 0:
return rv, None
host = rv[1:idx]
rest = rv[idx + 1:]
if rest.startswith(self._colon):
return host, rest[1:]
return host, None
@implements_to_string
class URL(BaseURL):
"""Represents a parsed URL. This behaves like a regular tuple but
also has some extra attributes that give further insight into the
URL.
"""
__slots__ = ()
_at = '@'
_colon = ':'
_lbracket = '['
_rbracket = ']'
def __str__(self):
return self.to_url()
def encode_netloc(self):
"""Encodes the netloc part to an ASCII safe URL as bytes."""
rv = self.ascii_host or ''
if ':' in rv:
rv = '[%s]' % rv
port = self.port
if port is not None:
rv = '%s:%d' % (rv, port)
auth = ':'.join(filter(None, [
url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
]))
if auth:
rv = '%s@%s' % (auth, rv)
return to_native(rv)
def encode(self, charset='utf-8', errors='replace'):
"""Encodes the URL to a tuple made out of bytes. The charset is
only being used for the path, query and fragment.
"""
return BytesURL(
self.scheme.encode('ascii'),
self.encode_netloc(),
self.path.encode(charset, errors),
self.query.encode(charset, errors),
self.fragment.encode(charset, errors)
)
class BytesURL(BaseURL):
"""Represents a parsed URL in bytes."""
__slots__ = ()
_at = b'@'
_colon = b':'
_lbracket = b'['
_rbracket = b']'
def __str__(self):
return self.to_url().decode('utf-8', 'replace')
def encode_netloc(self):
"""Returns the netloc unchanged as bytes."""
return self.netloc
def decode(self, charset='utf-8', errors='replace'):
"""Decodes the URL to a tuple made out of strings. The charset is
only being used for the path, query and fragment.
"""
return URL(
self.scheme.decode('ascii'),
self.decode_netloc(),
self.path.decode(charset, errors),
self.query.decode(charset, errors),
self.fragment.decode(charset, errors)
)
def _unquote_to_bytes(string, unsafe=''):
if isinstance(string, text_type):
string = string.encode('utf-8')
if isinstance(unsafe, text_type):
unsafe = unsafe.encode('utf-8')
unsafe = frozenset(bytearray(unsafe))
bits = iter(string.split(b'%'))
result = bytearray(next(bits, b''))
for item in bits:
try:
char = _hextobyte[item[:2]]
if char in unsafe:
raise KeyError()
result.append(char)
result.extend(item[2:])
except KeyError:
result.extend(b'%')
result.extend(item)
return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
iterable = iter_multi_items(obj)
if sort:
iterable = sorted(iterable, key=key)
for key, value in iterable:
if value is None:
continue
if not isinstance(key, bytes):
key = text_type(key).encode(charset)
if not isinstance(value, bytes):
value = text_type(value).encode(charset)
yield url_quote_plus(key) + '=' + url_quote_plus(value)
def _url_unquote_legacy(value, unsafe=''):
try:
return url_unquote(value, charset='utf-8',
errors='strict', unsafe=unsafe)
except UnicodeError:
return url_unquote(value, charset='latin1', unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
"""Parses a URL from a string into a :class:`URL` tuple. If the URL
is lacking a scheme it can be provided as second argument. Otherwise,
it is ignored. Optionally fragments can be stripped from the URL
by setting `allow_fragments` to `False`.
The inverse of this function is :func:`url_unparse`.
:param url: the URL to parse.
:param scheme: the default schema to use if the URL is schemaless.
:param allow_fragments: if set to `False` a fragment will be removed
from the URL.
"""
s = make_literal_wrapper(url)
is_text_based = isinstance(url, text_type)
if scheme is None:
scheme = s('')
netloc = query = fragment = s('')
i = url.find(s(':'))
if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
# make sure "iri" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i + 1:]
if not rest or any(c not in s('0123456789') for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == s('//'):
delim = len(url)
for c in s('/?#'):
wdelim = url.find(c, 2)
if wdelim >= 0:
delim = min(delim, wdelim)
netloc, url = url[2:delim], url[delim:]
if (s('[') in netloc and s(']') not in netloc) or \
(s(']') in netloc and s('[') not in netloc):
raise ValueError('Invalid IPv6 URL')
if allow_fragments and s('#') in url:
url, fragment = url.split(s('#'), 1)
if s('?') in url:
url, query = url.split(s('?'), 1)
result_type = is_text_based and URL or BytesURL
return result_type(scheme, netloc, url, query, fragment)
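# Illustrative behavior (informal, not part of the original module):
#   url_parse('http://user:pw@example.com:8080/p?q=1#frag')
#       -> URL(scheme='http', netloc='user:pw@example.com:8080',
#              path='/p', query='q=1', fragment='frag')
# Bytes input yields a BytesURL with the same fields as bytes.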
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
"""URL encode a single string with a given encoding.
:param s: the string to quote.
:param charset: the charset to be used.
:param safe: an optional sequence of safe characters.
:param unsafe: an optional sequence of unsafe characters.
.. versionadded:: 0.9.2
The `unsafe` parameter was added.
"""
if not isinstance(string, (text_type, bytes, bytearray)):
string = text_type(string)
if isinstance(string, text_type):
string = string.encode(charset, errors)
if isinstance(safe, text_type):
safe = safe.encode(charset, errors)
if isinstance(unsafe, text_type):
unsafe = unsafe.encode(charset, errors)
safe = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe))
rv = bytearray()
for char in bytearray(string):
if char in safe:
rv.append(char)
else:
rv.extend(('%%%02X' % char).encode('ascii'))
return to_native(bytes(rv))
def url_quote_plus(string, charset='utf-8', errors='strict', safe=''):
"""URL encode a single string with the given encoding and convert
whitespace to "+".
    :param string: The string to quote.
:param charset: The charset to be used.
:param safe: An optional sequence of safe characters.
"""
return url_quote(string, charset, errors, safe + ' ', '+').replace(' ', '+')
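# Minimal sketch of the two quoting helpers above (sample strings and helper
# name are illustrative only); note how url_quote_plus() turns spaces into "+".
def _url_quote_example():  # pragma: no cover - documentation sketch
    assert url_quote(u'hello world & more') == 'hello%20world%20%26%20more'
    assert url_quote_plus(u'hello world & more') == 'hello+world+%26+more'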
def url_unparse(components):
"""The reverse operation to :meth:`url_parse`. This accepts arbitrary
    tuples as well as :class:`URL` tuples and returns a URL as a string.
:param components: the parsed URL as tuple which should be converted
into a URL string.
"""
scheme, netloc, path, query, fragment = \
normalize_string_tuple(components)
s = make_literal_wrapper(scheme)
url = s('')
# We generally treat file:///x and file:/x the same which is also
# what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differentiate between
# empty and missing netloc.
if netloc or (scheme and path.startswith(s('/'))):
if path and path[:1] != s('/'):
path = s('/') + path
url = s('//') + (netloc or s('')) + path
elif path:
url += path
if scheme:
url = scheme + s(':') + url
if query:
url = url + s('?') + query
if fragment:
url = url + s('#') + fragment
return url
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
"""URL decode a single string with a given encoding. If the charset
is set to `None` no unicode decoding is performed and raw bytes
are returned.
    :param string: the string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: the error handling for the charset decoding.
"""
rv = _unquote_to_bytes(string, unsafe)
if charset is not None:
rv = rv.decode(charset, errors)
return rv
def url_unquote_plus(s, charset='utf-8', errors='replace'):
"""URL decode a single string with the given `charset` and decode "+" to
whitespace.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
:param s: The string to unquote.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param errors: The error handling for the `charset` decoding.
"""
if isinstance(s, text_type):
s = s.replace(u'+', u' ')
else:
s = s.replace(b'+', b' ')
return url_unquote(s, charset, errors)
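# Small round-trip sketch for the unquoting helpers above; the sample values
# and helper name are illustrative only.
def _url_unquote_example():  # pragma: no cover - documentation sketch
    assert url_unquote('f%C3%BC%C3%9Fe') == u'f\xfc\xdfe'
    assert url_unquote_plus('a+b%26c') == u'a b&c'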
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
# First step is to switch to unicode processing and to convert
# backslashes (which are invalid in URLs anyways) to slashes. This is
# consistent with what Chrome does.
s = to_unicode(s, charset, 'replace').replace('\\', '/')
# For the specific case that we look like a malformed windows URL
# we want to fix this up manually:
if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
s = 'file:///' + s[7:]
url = url_parse(s)
path = url_quote(url.path, charset, safe='/%+$!*\'(),')
qs = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),')
anchor = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),')
return to_native(url_unparse((url.scheme, url.encode_netloc(),
path, qs, anchor)))
def uri_to_iri(uri, charset='utf-8', errors='replace'):
r"""
    Converts a URI in a given charset to an IRI.
Examples for URI versus IRI:
>>> uri_to_iri(b'http://xn--n3h.net/')
u'http://\u2603.net/'
>>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'
Query strings are left unchanged:
>>> uri_to_iri('/?foo=24&x=%26%2f')
u'/?foo=24&x=%26%2f'
.. versionadded:: 0.6
:param uri: The URI to convert.
:param charset: The charset of the URI.
:param errors: The error handling on decode.
"""
if isinstance(uri, tuple):
uri = url_unparse(uri)
uri = url_parse(to_unicode(uri, charset))
path = url_unquote(uri.path, charset, errors, '%/;?')
query = url_unquote(uri.query, charset, errors, '%;/?:@&=+,$')
fragment = url_unquote(uri.fragment, charset, errors, '%;/?:@&=+,$')
return url_unparse((uri.scheme, uri.decode_netloc(),
path, query, fragment))
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
r"""
Converts any unicode based IRI to an acceptable ASCII URI. Werkzeug always
uses utf-8 URLs internally because this is what browsers and HTTP do as
well. In some places where it accepts an URL it also accepts a unicode IRI
and converts it into a URI.
Examples for IRI versus URI:
>>> iri_to_uri(u'http://☃.net/')
'http://xn--n3h.net/'
>>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'
There is a general problem with IRI and URI conversion with some
protocols that appear in the wild that are in violation of the URI
specification. In places where Werkzeug goes through a forced IRI to
URI conversion it will set the `safe_conversion` flag which will
not perform a conversion if the end result is already ASCII. This
can mean that the return value is not an entirely correct URI but
it will not destroy such invalid URLs in the process.
As an example consider the following two IRIs::
magnet:?xt=uri:whatever
itms-services://?action=download-manifest
The internal representation after parsing of those URLs is the same
and there is no way to reconstruct the original one. If safe
conversion is enabled however this function becomes a noop for both of
those strings as they both can be considered URIs.
.. versionadded:: 0.6
.. versionchanged:: 0.9.6
The `safe_conversion` parameter was added.
:param iri: The IRI to convert.
:param charset: The charset for the URI.
:param safe_conversion: indicates if a safe conversion should take place.
For more information see the explanation above.
"""
if isinstance(iri, tuple):
iri = url_unparse(iri)
if safe_conversion:
try:
native_iri = to_native(iri)
ascii_iri = to_native(iri).encode('ascii')
if ascii_iri.split() == [ascii_iri]:
return native_iri
except UnicodeError:
pass
iri = url_parse(to_unicode(iri, charset, errors))
netloc = iri.encode_netloc()
path = url_quote(iri.path, charset, errors, '/:~+%')
query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=')
fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/')
return to_native(url_unparse((iri.scheme, netloc,
path, query, fragment)))
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
errors='replace', separator='&', cls=None):
"""
Parse a querystring and return it as :class:`MultiDict`. There is a
difference in key decoding on different Python versions. On Python 3
keys will always be fully decoded whereas on Python 2, keys will
remain bytestrings if they fit into ASCII. On 2.x keys can be forced
to be unicode by setting `decode_keys` to `True`.
If the charset is set to `None` no unicode decoding will happen and
raw bytes will be returned.
Per default a missing value for a key will default to an empty key. If
you don't want that behavior you can set `include_empty` to `False`.
Per default encoding errors are ignored. If you want a different behavior
you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
In previous versions ";" and "&" could be used for url decoding.
This changed in 0.5 where only "&" is supported. If you want to
use ";" instead a different `separator` can be provided.
The `cls` parameter was added.
:param s: a string with the query string to decode.
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`
then keys will be unicode in all cases. Otherwise,
they remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
"""
if cls is None:
cls = MultiDict
if isinstance(s, text_type) and not isinstance(separator, text_type):
separator = separator.decode(charset or 'ascii')
elif isinstance(s, bytes) and not isinstance(separator, bytes):
separator = separator.encode(charset or 'ascii')
return cls(_url_decode_impl(s.split(separator), charset, decode_keys,
include_empty, errors))
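# Sketch of a typical url_decode() call (sample query string and helper name
# only); this assumes the default MultiDict result type with its usual
# getlist() accessor.
def _url_decode_example():  # pragma: no cover - documentation sketch
    d = url_decode('a=1&a=2&b=x+y')
    assert d.getlist('a') == [u'1', u'2']
    assert d['b'] == u'x y'
    return d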
def url_decode_stream(stream, charset='utf-8', decode_keys=False,
include_empty=True, errors='replace', separator='&',
cls=None, limit=None, return_iterator=False):
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
:func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
directly fed to the `cls` so you can consume the data while it's
parsed.
.. versionadded:: 0.8
:param stream: a stream with the encoded querystring
:param charset: the charset of the query string. If set to `None`
no unicode decoding will take place.
:param decode_keys: Used on Python 2.x to control whether keys should
be forced to be unicode objects. If set to `True`,
keys will be unicode in all cases. Otherwise, they
remain `str` if they fit into ASCII.
:param include_empty: Set to `False` if you don't want empty values to
appear in the dict.
:param errors: the decoding error behavior.
:param separator: the pair separator to be used, defaults to ``&``
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param limit: the content length of the URL data. Not necessary if
a limited stream is provided.
:param return_iterator: if set to `True` the `cls` argument is ignored
and an iterator over all decoded pairs is
returned
"""
from werkzeug.wsgi import make_chunk_iter
if return_iterator:
cls = lambda x: x
elif cls is None:
cls = MultiDict
pair_iter = make_chunk_iter(stream, separator, limit)
return cls(_url_decode_impl(pair_iter, charset, decode_keys,
include_empty, errors))
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
for pair in pair_iter:
if not pair:
continue
s = make_literal_wrapper(pair)
equal = s('=')
if equal in pair:
key, value = pair.split(equal, 1)
else:
if not include_empty:
continue
key = pair
value = s('')
key = url_unquote_plus(key, charset, errors)
if charset is not None and PY2 and not decode_keys:
key = try_coerce_native(key)
yield key, url_unquote_plus(value, charset, errors)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
separator=b'&'):
"""URL encode a dict/`MultiDict`. If a value is `None` it will not appear
in the result string. Per default only values are encoded into the target
charset strings. If `encode_keys` is set to ``True`` unicode keys are
supported too.
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm.
.. versionadded:: 0.5
`sort`, `key`, and `separator` were added.
:param obj: the object to encode into a query string.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
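# Sketch of url_encode() (sample mapping only); with sort=True the pair order
# is deterministic, which keeps the expected string stable.
def _url_encode_example():  # pragma: no cover - documentation sketch
    assert url_encode({'a': 1, 'b': 'x y'}, sort=True) == 'a=1&b=x+y'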
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
sort=False, key=None, separator=b'&'):
"""Like :meth:`url_encode` but writes the results to a stream
object. If the stream is `None` a generator over all encoded
pairs is returned.
.. versionadded:: 0.8
:param obj: the object to encode into a query string.
:param stream: a stream to write the encoded object into or `None` if
an iterator over the encoded pairs should be returned. In
that case the separator argument is ignored.
:param charset: the charset of the query string.
:param encode_keys: set to `True` if you have unicode keys. (Ignored on
Python 3.x)
:param sort: set to `True` if you want parameters to be sorted by `key`.
:param separator: the separator to be used for the pairs.
:param key: an optional function to be used for sorting. For more details
check out the :func:`sorted` documentation.
"""
separator = to_native(separator, 'ascii')
gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
if stream is None:
return gen
for idx, chunk in enumerate(gen):
if idx:
stream.write(separator)
stream.write(chunk)
def url_join(base, url, allow_fragments=True):
"""Join a base URL and a possibly relative URL to form an absolute
interpretation of the latter.
:param base: the base URL for the join operation.
:param url: the URL to join.
:param allow_fragments: indicates whether fragments should be allowed.
"""
if isinstance(base, tuple):
base = url_unparse(base)
if isinstance(url, tuple):
url = url_unparse(url)
base, url = normalize_string_tuple((base, url))
s = make_literal_wrapper(base)
if not base:
return url
if not url:
return base
bscheme, bnetloc, bpath, bquery, bfragment = \
url_parse(base, allow_fragments=allow_fragments)
scheme, netloc, path, query, fragment = \
url_parse(url, bscheme, allow_fragments)
if scheme != bscheme:
return url
if netloc:
return url_unparse((scheme, netloc, path, query, fragment))
netloc = bnetloc
if path[:1] == s('/'):
segments = path.split(s('/'))
elif not path:
segments = bpath.split(s('/'))
if not query:
query = bquery
else:
segments = bpath.split(s('/'))[:-1] + path.split(s('/'))
# If the rightmost part is "./" we want to keep the slash but
# remove the dot.
if segments[-1] == s('.'):
segments[-1] = s('')
# Resolve ".." and "."
segments = [segment for segment in segments if segment != s('.')]
while 1:
i = 1
n = len(segments) - 1
while i < n:
if segments[i] == s('..') and \
segments[i - 1] not in (s(''), s('..')):
del segments[i - 1:i + 1]
break
i += 1
else:
break
# Remove trailing ".." if the URL is absolute
unwanted_marker = [s(''), s('..')]
while segments[:2] == unwanted_marker:
del segments[1]
path = s('/').join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
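# Sketch of relative-reference resolution with url_join(); the URLs are
# sample values only.
def _url_join_example():  # pragma: no cover - documentation sketch
    assert url_join('http://example.com/a/b', 'c/d') == 'http://example.com/a/c/d'
    assert url_join('http://example.com/a/b', '/c') == 'http://example.com/c'
    assert url_join('http://example.com/a/b', '../c') == 'http://example.com/c'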
class Href(object):
"""Implements a callable that constructs URLs with the given base. The
function can be called with any number of positional and keyword
    arguments which are then used to assemble the URL. Works with URLs
and posix paths.
Positional arguments are appended as individual segments to
the path of the URL:
>>> href = Href('/foo')
>>> href('bar', 23)
'/foo/bar/23'
>>> href('foo', bar=23)
'/foo/foo?bar=23'
If any of the arguments (positional or keyword) evaluates to `None` it
will be skipped. If no keyword arguments are given the last argument
can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
otherwise the keyword arguments are used for the query parameters, cutting
off the first trailing underscore of the parameter name:
>>> href(is_=42)
'/foo?is=42'
>>> href({'foo': 'bar'})
'/foo?foo=bar'
    Combining both methods is not allowed:
>>> href({'foo': 'bar'}, bar=42)
Traceback (most recent call last):
...
TypeError: keyword arguments and query-dicts can't be combined
Accessing attributes on the href object creates a new href object with
the attribute name as prefix:
>>> bar_href = href.bar
>>> bar_href("blub")
'/foo/bar/blub'
If `sort` is set to `True` the items are sorted by `key` or the default
sorting algorithm:
>>> href = Href("/", sort=True)
>>> href(a=1, b=2, c=3)
'/?a=1&b=2&c=3'
.. versionadded:: 0.5
`sort` and `key` were added.
"""
def __init__(self, base='./', charset='utf-8', sort=False, key=None):
if not base:
base = './'
self.base = base
self.charset = charset
self.sort = sort
self.key = key
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
base = self.base
if base[-1:] != '/':
base += '/'
return Href(url_join(base, name), self.charset, self.sort, self.key)
def __call__(self, *path, **query):
if path and isinstance(path[-1], dict):
if query:
raise TypeError('keyword arguments and query-dicts '
'can\'t be combined')
query, path = path[-1], path[:-1]
elif query:
query = dict([(k.endswith('_') and k[:-1] or k, v)
for k, v in query.items()])
path = '/'.join([to_unicode(url_quote(x, self.charset), 'ascii')
for x in path if x is not None]).lstrip('/')
rv = self.base
if path:
if not rv.endswith('/'):
rv += '/'
rv = url_join(rv, './' + path)
if query:
rv += '?' + to_unicode(url_encode(query, self.charset, sort=self.sort,
key=self.key), 'ascii')
return to_native(rv)
| mit | 2,255,066,238,181,139,000 | 35.772864 | 83 | 0.591735 | false | 3.986598 | false | false | false | 0.000191 |
lr292358/connectomics | auc.py | 10 | 1715 |
def tied_rank(x):
"""
Computes the tied rank of elements in x.
This function computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
            The tied rank of each element in x
"""
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
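# Illustrative check (sample values only, helper name is not part of the
# original file): tied values share the average of the ranks they would
# occupy, e.g. the two 2's below both get rank 2.5.
def _tied_rank_example():
    assert tied_rank([1, 2, 2, 3]) == [1.0, 2.5, 2.5, 4.0]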
def auc(actual, posterior):
"""
    Computes the area under the receiver operating characteristic curve (AUC)
This function computes the AUC error metric for binary classification.
Parameters
----------
actual : list of binary numbers, numpy array
The ground truth value
posterior : same type as actual
Defines a ranking on the binary numbers, from most likely to
be positive to least likely to be positive.
Returns
-------
score : double
            The area under the ROC curve (AUC) for the given actual and posterior
"""
r = tied_rank(posterior)
num_positive = len([0 for x in actual if x==1])
num_negative = len(actual)-num_positive
sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1])
auc = ((sum_positive - num_positive*(num_positive+1)/2.0) /
(num_negative*num_positive))
return auc | bsd-2-clause | 7,420,208,220,182,692,000 | 28.084746 | 76 | 0.567347 | false | 3.688172 | false | false | false | 0.004665 |
soldag/home-assistant | tests/components/google_assistant/test_helpers.py | 5 | 8208 | """Test Google Assistant helpers."""
from datetime import timedelta
import pytest
from homeassistant.components.google_assistant import helpers
from homeassistant.components.google_assistant.const import ( # noqa: F401
EVENT_COMMAND_RECEIVED,
NOT_EXPOSE_LOCAL,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from . import MockConfig
from tests.async_mock import Mock, call, patch
from tests.common import (
async_capture_events,
async_fire_time_changed,
async_mock_service,
)
async def test_google_entity_sync_serialize_with_local_sdk(hass):
"""Test sync serialize attributes of a GoogleEntity."""
hass.states.async_set("light.ceiling_lights", "off")
hass.config.api = Mock(port=1234, use_ssl=True)
await async_process_ha_core_config(
hass,
{"external_url": "https://hostname:1234"},
)
hass.http = Mock(server_port=1234)
config = MockConfig(
hass=hass,
local_sdk_webhook_id="mock-webhook-id",
local_sdk_user_id="mock-user-id",
)
entity = helpers.GoogleEntity(hass, config, hass.states.get("light.ceiling_lights"))
serialized = await entity.sync_serialize(None)
assert "otherDeviceIds" not in serialized
assert "customData" not in serialized
config.async_enable_local_sdk()
with patch("homeassistant.helpers.instance_id.async_get", return_value="abcdef"):
serialized = await entity.sync_serialize(None)
assert serialized["otherDeviceIds"] == [{"deviceId": "light.ceiling_lights"}]
assert serialized["customData"] == {
"httpPort": 1234,
"httpSSL": True,
"proxyDeviceId": None,
"webhookId": "mock-webhook-id",
"baseUrl": "https://hostname:1234",
"uuid": "abcdef",
}
for device_type in NOT_EXPOSE_LOCAL:
with patch(
"homeassistant.components.google_assistant.helpers.get_google_type",
return_value=device_type,
):
serialized = await entity.sync_serialize(None)
assert "otherDeviceIds" not in serialized
assert "customData" not in serialized
async def test_config_local_sdk(hass, hass_client):
"""Test the local SDK."""
command_events = async_capture_events(hass, EVENT_COMMAND_RECEIVED)
turn_on_calls = async_mock_service(hass, "light", "turn_on")
hass.states.async_set("light.ceiling_lights", "off")
assert await async_setup_component(hass, "webhook", {})
config = MockConfig(
hass=hass,
local_sdk_webhook_id="mock-webhook-id",
local_sdk_user_id="mock-user-id",
)
client = await hass_client()
config.async_enable_local_sdk()
resp = await client.post(
"/api/webhook/mock-webhook-id",
json={
"inputs": [
{
"context": {"locale_country": "US", "locale_language": "en"},
"intent": "action.devices.EXECUTE",
"payload": {
"commands": [
{
"devices": [{"id": "light.ceiling_lights"}],
"execution": [
{
"command": "action.devices.commands.OnOff",
"params": {"on": True},
}
],
}
],
"structureData": {},
},
}
],
"requestId": "mock-req-id",
},
)
assert resp.status == 200
result = await resp.json()
assert result["requestId"] == "mock-req-id"
assert len(command_events) == 1
assert command_events[0].context.user_id == config.local_sdk_user_id
assert len(turn_on_calls) == 1
assert turn_on_calls[0].context is command_events[0].context
config.async_disable_local_sdk()
# Webhook is no longer active
resp = await client.post("/api/webhook/mock-webhook-id")
assert resp.status == 200
assert await resp.read() == b""
async def test_config_local_sdk_if_disabled(hass, hass_client):
    """Test the local SDK when the config entry is disabled."""
assert await async_setup_component(hass, "webhook", {})
config = MockConfig(
hass=hass,
local_sdk_webhook_id="mock-webhook-id",
local_sdk_user_id="mock-user-id",
enabled=False,
)
client = await hass_client()
config.async_enable_local_sdk()
resp = await client.post(
"/api/webhook/mock-webhook-id", json={"requestId": "mock-req-id"}
)
assert resp.status == 200
result = await resp.json()
assert result == {
"payload": {"errorCode": "deviceTurnedOff"},
"requestId": "mock-req-id",
}
config.async_disable_local_sdk()
# Webhook is no longer active
resp = await client.post("/api/webhook/mock-webhook-id")
assert resp.status == 200
assert await resp.read() == b""
async def test_agent_user_id_storage(hass, hass_storage):
    """Test storing and loading agent user ids."""
hass_storage["google_assistant"] = {
"version": 1,
"key": "google_assistant",
"data": {"agent_user_ids": {"agent_1": {}}},
}
store = helpers.GoogleConfigStore(hass)
await store.async_load()
assert hass_storage["google_assistant"] == {
"version": 1,
"key": "google_assistant",
"data": {"agent_user_ids": {"agent_1": {}}},
}
async def _check_after_delay(data):
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=2))
await hass.async_block_till_done()
assert hass_storage["google_assistant"] == {
"version": 1,
"key": "google_assistant",
"data": data,
}
store.add_agent_user_id("agent_2")
await _check_after_delay({"agent_user_ids": {"agent_1": {}, "agent_2": {}}})
store.pop_agent_user_id("agent_1")
await _check_after_delay({"agent_user_ids": {"agent_2": {}}})
async def test_agent_user_id_connect():
"""Test the connection and disconnection of users."""
config = MockConfig()
store = config._store
await config.async_connect_agent_user("agent_2")
assert store.add_agent_user_id.call_args == call("agent_2")
await config.async_connect_agent_user("agent_1")
assert store.add_agent_user_id.call_args == call("agent_1")
await config.async_disconnect_agent_user("agent_2")
assert store.pop_agent_user_id.call_args == call("agent_2")
await config.async_disconnect_agent_user("agent_1")
assert store.pop_agent_user_id.call_args == call("agent_1")
@pytest.mark.parametrize("agents", [{}, {"1"}, {"1", "2"}])
async def test_report_state_all(agents):
    """Test reporting state to all registered agents."""
config = MockConfig(agent_user_ids=agents)
data = {}
with patch.object(config, "async_report_state") as mock:
await config.async_report_state_all(data)
assert sorted(mock.mock_calls) == sorted(
[call(data, agent) for agent in agents]
)
@pytest.mark.parametrize(
"agents, result",
[({}, 204), ({"1": 200}, 200), ({"1": 200, "2": 300}, 300)],
)
async def test_sync_entities_all(agents, result):
    """Test syncing entities to all registered agents."""
config = MockConfig(agent_user_ids=set(agents.keys()))
with patch.object(
config,
"async_sync_entities",
side_effect=lambda agent_user_id: agents[agent_user_id],
) as mock:
res = await config.async_sync_entities_all()
assert sorted(mock.mock_calls) == sorted([call(agent) for agent in agents])
assert res == result
def test_supported_features_string(caplog):
"""Test bad supported features."""
entity = helpers.GoogleEntity(
None, None, State("test.entity_id", "on", {"supported_features": "invalid"})
)
assert entity.is_supported() is False
    assert (
        "Entity test.entity_id contains invalid supported_features value invalid"
        in caplog.text
    )
| apache-2.0 | -3,576,065,125,852,023,000 | 31.314961 | 88 | 0.59174 | false | 3.812355 | true | false | false | 0.001218 |
daskos/mentos | mentos/utils.py | 2 | 4957 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from binascii import a2b_base64, b2a_base64
from contextlib import contextmanager
from multiprocessing.pool import ThreadPool
from mentos.exceptions import (DetectorClosed, NoLeadingMaster,
NoRedirectException)
from tornado import gen, ioloop
from tornado.escape import json_decode, json_encode
from zoonado import Zoonado
log = logging.getLogger(__name__)
decode = json_decode
encode = json_encode
def encode_data(data):
return b2a_base64(data).strip().decode('ascii')
def decode_data(data):
return a2b_base64(data)
_workers = ThreadPool(10)
def run_background(func, callback, args=(), kwds={}):
def _callback(result):
ioloop.IOLoop.instance().add_callback(lambda: callback(result))
_workers.apply_async(func, args, kwds, _callback)
@contextmanager
def log_errors(pdb=False): # pragma: no cover
try:
yield
except (gen.Return):
raise
except Exception as e:
log.exception(e)
if pdb:
import pdb
pdb.set_trace()
raise
POSTFIX = {
'ns': 1e-9,
'us': 1e-6,
'ms': 1e-3,
'secs': 1,
'mins': 60,
'hrs': 60 * 60,
'days': 24 * 60 * 60,
'weeks': 7 * 24 * 60 * 60
}
def parse_duration(s):
s = s.strip()
unit = None
postfix = None
for postfix, unit in POSTFIX.items():
if s.endswith(postfix):
try:
return float(s[:-len(postfix)]) * unit
except ValueError: # pragma: no cover
continue
raise Exception('Unknown duration `{}`; supported units are {}'.format(
s, ','.join('`{}`'.format(n) for n in POSTFIX)))
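# Illustrative sketch (sample strings and helper name only): parse_duration()
# converts a human-readable duration into seconds.
def _parse_duration_example():  # pragma: no cover - documentation sketch
    assert parse_duration('90secs') == 90
    assert parse_duration('2hrs') == 2 * 60 * 60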
class MasterInfo(object):
detector = None
def __init__(self, uri):
self.uri = uri
self.seq = None
self.info = {'address': {}}
self.closing = False
if 'zk://' in uri:
log.warn('Using Zookeeper for Discovery')
self.quorum = ','.join([zoo[zoo.index('://') + 3:]
for zoo in self.uri.split(',')])
self.detector = Zoonado(self.quorum, session_timeout=6000)
ioloop.IOLoop.current().add_callback(self.detector.start)
self.current_location = None
def redirected_uri(self, uri):
if not self.detector:
self.uri = uri
else:
raise NoRedirectException(
'Using Zookeeper, cannot set a redirect url')
@gen.coroutine
def get_endpoint(self, path=''):
if self.closing:
            raise DetectorClosed('Detector is closed')
if self.detector:
children = yield self.detector.get_children('/mesos')
children = [child for child in children if child != 'log_replicas']
if not children: # pragma: no cover
log.error('No leading Master found in zookeeper')
raise NoLeadingMaster('No leading Master found in zookeeper')
self.seq = min(children)
data = yield self.detector.get_data('/mesos/' + self.seq)
self.info = decode(data)
else:
host_port = self.uri.split(':')
log.debug(host_port)
if len(host_port) == 2:
self.info['address']['hostname'] = host_port[0]
self.info['address']['port'] = int(host_port[1])
else:
self.info['address']['hostname'] = host_port[0]
self.info['address']['port'] = 5050
log.debug('Found new Master, info={info}'.format(info=self.info))
if 'hostname' in self.info['address']:
host = self.info['address']['hostname']
elif 'ip' in self.info['address']: # pragma: no cover
host = self.info['address']['ip']
port = self.info['address']['port']
self.current_location = '{host}:{port}'.format(host=host, port=port)
raise gen.Return('http://{current_location}{path}'.format(
current_location=self.current_location, path=path))
def close(self):
if self.closing:
return
self.closing = True
def on_complete(self):
log.debug('Closed detector')
run_background(self.detector.close, on_complete)
def drain(iterable):
'''Helper method that empties an iterable as it is iterated over.
Works for: dict, collections.deque, list, set
'''
if getattr(iterable, 'popleft', False):
def next_item(coll):
return coll.popleft()
elif getattr(iterable, 'popitem', False):
def next_item(coll):
return coll.popitem()
else:
def next_item(coll):
return coll.pop()
    while True:
        try:
            yield next_item(iterable)
        except (IndexError, KeyError):
            # The collection is exhausted; returning ends the generator
            # cleanly (raising StopIteration inside a generator is an error
            # under PEP 479 / Python 3.7+).
            return
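# Illustrative sketch (sample collection and helper name only): drain()
# consumes the collection as it iterates, leaving it empty afterwards.
def _drain_example():  # pragma: no cover - documentation sketch
    import collections
    queue = collections.deque([1, 2, 3])
    assert list(drain(queue)) == [1, 2, 3]
    assert len(queue) == 0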
| apache-2.0 | -7,923,254,968,304,999,000 | 27.819767 | 79 | 0.571313 | false | 3.96243 | false | false | false | 0 |
EdLogan18/logan-repository | plugin.video.SportsDevil/lib/utils/pyDes.py | 54 | 32294 | #############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version: 2.0.0
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
            all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
            to use during all encrypt/decrypt operations done with this instance.
I recommend to use PAD_PKCS5 padding, as then you never need to worry about any
padding issues, as the padding can be removed unambiguously upon decrypting
data that was encrypted using PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only when using padmode of PAD_NORMAL. For
encryption, adds this characters to the end of the data block when
data is not a multiple of 8 bytes. For decryption, will remove the
trailing characters that match this pad character from the last 8
bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
or PAD_PKCS5). Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without and arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather as a handy, portable solution for small-scale use.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5: is a method that will unambiguously remove all padding
# characters after decryption, when originally encrypted with
# this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
            raise ValueError("Invalid Initial Value (IV), must be exactly " + str(self.block_size) + " bytes long")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
            raise ValueError("Invalid Initial Value (IV), must be exactly " + str(self.block_size) + " bytes long")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
    """DES encryption/decryption class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
               during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
PAD_PKCS5) to use during all encrypt/decrpt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
# number left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23,13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
    def __String_to_BitList(self, data):
        """Turn the string data into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
    def __BitList_to_String(self, data):
        """Turn the list of bits back into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
            # Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
        # Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure that the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : Bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
    """Triple DES encryption/decryption class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
               during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
               PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure that the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
        data : bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
data = bytes.fromhex('').join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode)
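# Illustrative sketch (sample key, IV and plaintext only, helper name is not
# part of the original module): DES-EDE2 (16 byte key) in CBC mode with PKCS5
# padding round-trips arbitrary-length byte strings.
def _triple_des_example():  # pragma: no cover - documentation sketch
    k = triple_des(b"0123456789abcdef", CBC, b"\0" * 8, pad=None, padmode=PAD_PKCS5)
    ciphertext = k.encrypt(b"Secret message")
    assert k.decrypt(ciphertext) == b"Secret message"
    return ciphertext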
| gpl-2.0 | -5,865,539,242,076,980,000 | 36.85932 | 164 | 0.525887 | false | 3.499946 | false | false | false | 0.002694 |
jiachenning/odoo | addons/stock_landed_costs/__openerp__.py | 220 | 1914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'WMS Landed Costs',
'version': '1.1',
'author': 'OpenERP SA',
'summary': 'Landed Costs',
'description': """
Landed Costs Management
=======================
This module allows you to easily add extra costs on pickings and decide the split of these costs among their stock moves in order to take them into account in your stock valuation.
""",
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['stock_account'],
'category': 'Warehouse Management',
'sequence': 16,
'demo': [
],
'data': [
'security/ir.model.access.csv',
'stock_landed_costs_sequence.xml',
'product_view.xml',
'stock_landed_costs_view.xml',
'stock_landed_costs_data.xml',
],
'test': [
'test/stock_landed_costs.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,648,034,896,617,505,000 | 35.807692 | 180 | 0.598746 | false | 4.029474 | false | false | false | 0.000522 |
whn09/tensorflow | tensorflow/python/ops/string_ops.py | 63 | 5194 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors.
See the @{$python/string_ops} guide.
@@string_to_hash_bucket_fast
@@string_to_hash_bucket_strong
@@string_to_hash_bucket
@@reduce_join
@@string_join
@@string_split
@@substr
@@as_string
@@encode_base64
@@decode_base64
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import deprecation
# pylint: enable=wildcard-import
def string_split(source, delimiter=" "): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `delimiter` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
delimiter: `0-D` string `Tensor`, the delimiter string. An empty delimiter
splits each element of `source` into individual bytes; a multi-byte delimiter
is treated as a set of single-byte delimiters, as described above.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
# pylint: disable=protected-access
indices, values, shape = gen_string_ops._string_split(
source, delimiter=delimiter)
# pylint: enable=protected-access
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
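# Illustrative call (values are hypothetical and not part of the original
# module; shapes follow the docstring above and assume TF 1.x graph-mode
# evaluation):
#
#   st = string_split(constant_op.constant(["hello world", "a b c"]))
#   # evaluated values:  st.values      -> [b'hello', b'world', b'a', b'b', b'c']
#   #                    st.indices     -> [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
#   #                    st.dense_shape -> [2, 3]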
def _reduce_join_reduction_dims(x, axis, reduction_indices):
"""Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
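# Illustration (not part of the original module): for a Tensor whose static
# rank is known to be 3 and with axis=None, the fast path above returns
#
#   _reduce_join_reduction_dims(x, None, None)  # -> constant [2, 1, 0]
#
# i.e. every dimension in reverse order, matching the docstring.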
def reduce_join(inputs, axis=None,
keep_dims=False,
separator="",
name=None,
reduction_indices=None):
reduction_indices = _reduce_join_reduction_dims(
inputs, axis, reduction_indices)
return gen_string_ops.reduce_join(
inputs=inputs,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator,
name=name)
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
| apache-2.0 | -7,907,905,795,752,892,000 | 34.094595 | 80 | 0.702926 | false | 3.810712 | false | false | false | 0.003851 |
mindriot101/bokeh | sphinx/source/docs/user_guide/examples/extensions_putting_together_ts.py | 9 | 2258 | from bokeh.core.properties import String, Instance
from bokeh.models import LayoutDOM, Slider
CODE ="""
import {div, empty} from "core/dom"
import * as p from "core/properties"
import {LayoutDOM, LayoutDOMView} from "models/layouts/layout_dom"
export class CustomView extends LayoutDOMView {
initialize(options) {
super.initialize(options)
this.render()
// Set BokehJS listener so that when the Bokeh slider has a change
// event, we can process the new data
this.connect(this.model.slider.change, () => this.render())
}
render() {
// BokehjS Views create <div> elements by default, accessible as
// ``this.el``. Many Bokeh views ignore this default <div>, and instead
// do things like draw to the HTML canvas. In this case though, we change
// the contents of the <div>, based on the current slider value.
empty(this.el)
this.el.appendChild(div({
style: {
'padding': '2px',
'color': '#b88d8e',
'background-color': '#2a3153',
},
}, `${this.model.text}: ${this.model.slider.value}`))
}
}
export class Custom extends LayoutDOM {
// If there is an associated view, this is typically boilerplate.
default_view = CustomView
// The ``type`` class attribute should generally match exactly the name
// of the corresponding Python class.
type = "Custom"
}
// The define() block adds corresponding "properties" to the JS model. These
// should normally line up 1-1 with the Python model class. Most property
// types have counterparts, e.g. bokeh.core.properties.String will be
// ``p.String`` in the JS implementation. Any time the JS type system is not
// yet as complete, you can use ``p.Any`` as a "wildcard" property type.
Custom.define({
text: [ p.String ],
slider: [ p.Any ],
})
"""
from bokeh.util.compiler import TypeScript
class Custom(LayoutDOM):
__implementation__ = TypeScript(CODE)
text = String(default="Custom text")
slider = Instance(Slider)
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import Slider
slider = Slider(start=0, end=10, step=0.1, value=0, title="value")
custom = Custom(text="Special Slider Display", slider=slider)
layout = column(slider, custom)
show(layout)
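# Usage note (not part of the upstream example): running this script directly,
# e.g. ``python extensions_putting_together_ts.py``, lets Bokeh compile the
# inline TypeScript implementation via bokeh.util.compiler and opens the
# slider plus the custom read-out in a browser through ``show(layout)``.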
| bsd-3-clause | -5,654,737,549,680,749,000 | 27.582278 | 77 | 0.690434 | false | 3.647819 | false | false | false | 0.0031 |
depay/docker-registry | tests/test_all_installed_drivers.py | 35 | 1026 | # -*- coding: utf-8 -*-
from docker_registry.core import driver as driveengine
from docker_registry import testing
# Mock any boto
from docker_registry.testing import mock_boto # noqa
# Mock our s3 - xxx this smells like byte-range support is questionable...
from . import mock_s3 # noqa
def getinit(name):
def init(self):
self.scheme = name
self.path = ''
self.config = testing.Config({})
return init
for name in driveengine.available():
# The globals() shenanigan is required so that the test tool finds the tests.
# The dynamic type declaration is required because a separate test class has
# to be generated for every available driver.
globals()['TestQuery%s' % name] = type('TestQuery%s' % name,
(testing.Query,),
dict(__init__=getinit(name)))
globals()['TestDriver%s' % name] = type('TestDriver%s' % name,
(testing.Driver,),
dict(__init__=getinit(name)))
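# Illustration (not part of the original test module): if
# driveengine.available() returned a driver named "file" (hypothetical), the
# loop above would define TestQueryfile and TestDriverfile, each initialised
# with scheme='file', an empty path and an empty testing.Config, so the
# generic Query/Driver test cases run once per installed driver.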
| apache-2.0 | -3,028,797,366,343,124,000 | 35.642857 | 77 | 0.563353 | false | 4.310924 | true | false | false | 0.000975 |
fabfurnari/totalopenstation | docs/conf.py | 2 | 6452 |
# -*- coding: utf-8 -*-
#
# Total Open Station documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 1 21:33:14 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Total Open Station'
copyright = u'2008-2011, Stefano Costa, Luca Bianconi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "tops.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'TotalOpenStationdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'TotalOpenStation.tex', u'Total Open Station Documentation',
u'Stefano Costa, Luca Bianconi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-3.0 | -5,345,124,991,252,055,000 | 32.087179 | 80 | 0.714352 | false | 3.720877 | true | false | false | 0.0062 |
log2timeline/l2tdevtools | l2tdevtools/dependency_writers/appveyor_scripts.py | 2 | 2279 | # -*- coding: utf-8 -*-
"""Writer for AppVeyor script files."""
import io
import os
from l2tdevtools.dependency_writers import interface
class AppVeyorInstallPS1ScriptWriter(interface.DependencyFileWriter):
"""AppVeyor install.ps1 script file writer."""
_TEMPLATE_FILE = os.path.join(
'data', 'templates', 'appveyor_scripts', 'install.ps1')
PATH = os.path.join('config', 'appveyor', 'install.ps1')
def Write(self):
"""Writes an install.ps1 file."""
dependencies = self._dependency_helper.GetL2TBinaries()
test_dependencies = self._dependency_helper.GetL2TBinaries(
test_dependencies=True)
dependencies.extend(test_dependencies)
template_mappings = {
'dependencies': ' '.join(sorted(set(dependencies)))
}
template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)
file_content = self._GenerateFromTemplate(template_file, template_mappings)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
class AppVeyorInstallSHScriptWriter(interface.DependencyFileWriter):
"""AppVeyor install.sh script file writer."""
_TEMPLATE_FILE = os.path.join(
'data', 'templates', 'appveyor_scripts', 'install.sh')
PATH = os.path.join('config', 'appveyor', 'install.sh')
def Write(self):
"""Writes an install.sh file."""
template_mappings = {}
template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)
file_content = self._GenerateFromTemplate(template_file, template_mappings)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
class AppVeyorRuntestsSHScriptWriter(interface.DependencyFileWriter):
"""AppVeyor runtests.sh script file writer."""
_TEMPLATE_FILE = os.path.join(
'data', 'templates', 'appveyor_scripts', 'runtests.sh')
PATH = os.path.join('config', 'appveyor', 'runtests.sh')
def Write(self):
"""Writes an runtests.sh file."""
template_mappings = {}
template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)
file_content = self._GenerateFromTemplate(template_file, template_mappings)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
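# Minimal usage sketch (not part of the original module). The constructor
# arguments come from interface.DependencyFileWriter and are not shown in this
# excerpt, so they are only indicated schematically:
#
#   writer = AppVeyorInstallPS1ScriptWriter(...)  # project/dependency helpers
#   writer.Write()  # renders data/templates/appveyor_scripts/install.ps1
#                   # into config/appveyor/install.ps1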
| apache-2.0 | -543,495,459,685,660,860 | 31.557143 | 79 | 0.696797 | false | 3.427068 | true | false | false | 0.006582 |