code | repo_name | path | language | license | size |
---|---|---|---|---|---|
import os
def test_output_file_get_filename_display_returns_correct_string_without_file(output_file, db):
assert output_file.get_filename_display() == ''
def test_output_file_get_filename_display_returns_correct_string(output_file_with_file, output_file_filename, db):
output_file = output_file_with_file
assert output_file.get_filename_display() is not None
assert '/' not in output_file.get_filename_display()
assert output_file.file.name.split('/')[-1] == output_file.get_filename_display()
assert output_file_filename == output_file.get_filename_display()
def test_output_file_get_absolute_url_returns_excerpt_detail_with_no_file(output_file, db):
reverse_url = '/exports/{}/'.format(output_file.export.extraction_order.excerpt.id)
assert output_file.get_file_media_url_or_status_page() == reverse_url
def test_output_file_get_absolute_url_returns_file_download_url_with_file(output_file_with_file, db):
assert output_file_with_file.get_file_media_url_or_status_page() == output_file_with_file.file.url
def test_output_file_delete_removes_file_as_well(output_file_with_file, db):
file_path = output_file_with_file.file.path
file_directory = os.path.dirname(file_path)
assert output_file_with_file.file
assert os.path.exists(file_path)
assert os.path.exists(file_directory)
output_file_with_file.delete()
assert not os.path.exists(file_path)
assert not os.path.exists(file_directory)
| geometalab/osmaxx | tests/excerptexport/models/test_output_file.py | Python | mit | 1,468 |
'''
Created on Oct 10, 2012
@author: Gary
'''
from housemonitor.lib.base import Base
from xbee.zigbee import ZigBee
from housemonitor.lib.constants import Constants
import abc
import time
class XBeeCommunications( Base, object ):
    '''
    Abstract base class for communicating with an XBee/ZigBee radio over a serial port.
    '''
__metaclass__ = abc.ABCMeta
serial_id = None
connected = False
zigbee = None
delay = 7 # seconds
''' How long to wait before attempting to connect to the serial port on failure. '''
def __init__( self ):
super( XBeeCommunications, self ).__init__()
@property
def logger_name( self ):
return Constants.LogKeys.inputsZigBee
@abc.abstractmethod
def setup( self ): # pragma: no cover
"""
setup - a virtual function
"""
pass
def read( self ):
"""
        read - reads one frame of data from the XBee
"""
packet = self.zigbee.wait_read_frame()
return packet
def connect( self ):
"""
        The main method for connecting to the XBee radio
"""
while True:
try:
self.logger.debug( "Attempting connection to XBee" )
self.serial_id = self.setup()
self.connected = True
except IOError as ex:
self.logger.exception( ex )
time.sleep( self.delay )
if self.connected:
break
self.zigbee = ZigBee( self.serial_id )
self.logger.debug( "Successfully connected to XBee" )
| gary-pickens/HouseMonitor | housemonitor/inputs/zigbeeinput/xbeecommunications.py | Python | mit | 1,537 |
#!/usr/bin/env python
# check_snmp_time2.py - Check the time of a remote SNMP device against the local system time
# Copyright (C) 2016 Retakfual
# 2016-2019 rsmuc <rsmuc@sec-dev.de>
# This file is part of "Health Monitoring Plugins".
# "Health Monitoring Plugins" is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# "Health Monitoring Plugins" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with "Health Monitoring Plugins". If not, see <https://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
from pynag.Plugins import ok
import health_monitoring_plugins.timesource
if __name__ == '__main__':
# pylint: disable=C0103
helper = health_monitoring_plugins.SnmpHelper()
helper.parser.add_option('-o', '--tzoffset', dest='tzoffset',
default=0, type='int',
help='the local systems utc offset to the servers utc, in minutes ('
'use only if your remote device is in a different timezone)')
helper.parser.add_option('-l', '--localtime', dest='time_flag',
default=False, action="store_true",
help='force to use local time (only recommended if you have a non '
'Windows OS remote device, that returns localtime and not utc)')
helper.parse_arguments()
sess = health_monitoring_plugins.SnmpSession(**helper.get_snmp_args())
# The default return value should be always OK
helper.status(ok)
timesource = health_monitoring_plugins.timesource.Timesource(sess)
timesource.check_time(helper, sess)
helper.check_all_metrics()
helper.exit()
| rsmuc/health_monitoring_plugins | health_monitoring_plugins/check_snmp_time2/check_snmp_time2.py | Python | gpl-2.0 | 2,141 |
import pytest
import numpy as np
import six
import pescador
import test_utils as T
@pytest.mark.parametrize('copy', [False, True])
@pytest.mark.parametrize('timeout', [None, 0.5, 2, 5])
def test_zmq(copy, timeout):
stream = pescador.Streamer(T.finite_generator, 200, size=3, lag=0.001)
reference = list(stream)
zmq_stream = pescador.ZMQStreamer(stream, copy=copy, timeout=timeout)
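    # Draining the ZMQ stream repeatedly must reproduce the reference batches.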
for _ in range(3):
query = list(zmq_stream)
assert len(reference) == len(query)
for b1, b2 in zip(reference, query):
T._eq_batch(b1, b2)
def test_zmq_align():
stream = pescador.Streamer(T.finite_generator, 200, size=3, lag=0.001)
reference = list(stream)
zmq_stream = pescador.ZMQStreamer(stream)
if six.PY2:
with pytest.warns(RuntimeWarning, match='array alignment'):
query = list(zmq_stream)
else:
query = list(zmq_stream)
assert len(reference) == len(query)
for b1, b2 in zip(reference, query):
T._eq_batch(b1, b2)
if six.PY2:
continue
for key in b2:
assert b2[key].flags['ALIGNED']
@pytest.mark.xfail(raises=pescador.PescadorError)
def test_zmq_bad_type():
def __bad_generator():
for _ in range(100):
yield dict(X=list(range(100)))
stream = pescador.Streamer(__bad_generator)
zs = pescador.ZMQStreamer(stream)
for item in zs:
pass
def test_zmq_early_stop():
stream = pescador.Streamer(T.finite_generator, 200, size=3, lag=0.001)
zmq_stream = pescador.ZMQStreamer(stream)
# Only sample five batches
assert len([x for x in zip(zmq_stream, range(5))]) == 5
def test_zmq_buffer():
n_samples = 50
stream = pescador.Streamer(T.md_generator, dimension=2, n=n_samples,
size=64, items=['X', 'Y'])
buff_size = 10
buff_stream = pescador.Streamer(pescador.buffer_stream, stream, buff_size)
zmq_stream = pescador.ZMQStreamer(buff_stream)
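    # 50 samples buffered in groups of 10 should yield 5 batches.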
outputs = [x for x in zmq_stream]
    assert len(outputs) == n_samples // buff_size
| bmcfee/pescador | tests/test_zmq_stream.py | Python | isc | 2,096 |
# !/usr/bin/env python
# coding: utf-8
# filter() applies the passed-in function to each element in turn, then keeps or discards the element depending on whether the return value is True or False
def is_odd(n):
return n % 2 == 1
l = [1, 2, 4, 5, 6, 8, 19]
print filter(is_odd, l)
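# prints [1, 5, 19] -- in Python 2, filter() returns a list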
def not_empty(s):
return s and s.strip()
l = ['A', ' ', None, 'C', ' ']
print filter(not_empty, l)
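# prints ['A', 'C'] -- None and whitespace-only strings are falsy after strip()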
| snownothing/Python | liaoxuefeng.com/018-Filter.py | Python | mit | 376 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_linkprop
short_description: Manage link properties on Solaris/illumos systems.
description:
- Set / reset link properties on Solaris/illumos systems.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
link:
description:
- Link interface name.
required: true
aliases: [ "nic", "interface" ]
property:
description:
- Specifies the name of the property we want to manage.
required: true
aliases: [ "name" ]
value:
description:
- Specifies the value we want to set for the link property.
required: false
temporary:
description:
      - Specifies that link property configuration is temporary. Temporary
link property configuration does not persist across reboots.
required: false
type: bool
default: false
state:
description:
- Set or reset the property value.
required: false
default: "present"
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
- name: Set 'maxbw' to 100M on e1000g1
dladm_linkprop: name=e1000g1 property=maxbw value=100M state=present
- name: Set 'mtu' to 9000 on e1000g1
dladm_linkprop: name=e1000g1 property=mtu value=9000
- name: Reset 'mtu' property on e1000g1
dladm_linkprop: name=e1000g1 property=mtu state=reset
'''
RETURN = '''
property:
description: property name
returned: always
type: string
sample: mtu
state:
description: state of the target
returned: always
type: string
sample: present
temporary:
description: specifies if operation will persist across reboots
returned: always
type: boolean
sample: True
link:
description: link name
returned: always
type: string
    sample: e1000g0
value:
description: property value
returned: always
type: string
sample: 9000
'''
from ansible.module_utils.basic import AnsibleModule
class LinkProp(object):
def __init__(self, module):
self.module = module
self.link = module.params['link']
self.property = module.params['property']
self.value = module.params['value']
self.temporary = module.params['temporary']
self.state = module.params['state']
self.dladm_bin = self.module.get_bin_path('dladm', True)
def property_exists(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
self.module.fail_json(msg='Unknown property "%s" on link %s' %
(self.property, self.link),
property=self.property,
link=self.link)
def property_is_modified(self):
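        # Note: returns True when the current value equals the default, i.e. there is nothing to reset.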
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('value,default')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
(value, default) = out.split(':')
if rc == 0 and value == default:
return True
else:
return False
def property_is_readonly(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('perm')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and out == 'r-':
return True
else:
return False
def property_is_set(self):
cmd = [self.dladm_bin]
cmd.append('show-linkprop')
cmd.append('-c')
cmd.append('-o')
cmd.append('value')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and self.value == out:
return True
else:
return False
def set_property(self):
cmd = [self.dladm_bin]
cmd.append('set-linkprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property + '=' + self.value)
cmd.append(self.link)
return self.module.run_command(cmd)
def reset_property(self):
cmd = [self.dladm_bin]
cmd.append('reset-linkprop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.link)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
            link=dict(required=True, type='str', aliases=['nic', 'interface']),
property=dict(required=True, type='str', aliases=['name']),
value=dict(required=False, type='str'),
temporary=dict(default=False, type='bool'),
state=dict(
default='present', choices=['absent', 'present', 'reset']),
),
required_if=[
['state', 'present', ['value']],
],
supports_check_mode=True
)
linkprop = LinkProp(module)
rc = None
out = ''
err = ''
result = {}
result['property'] = linkprop.property
result['link'] = linkprop.link
result['state'] = linkprop.state
if linkprop.value:
result['value'] = linkprop.value
if linkprop.state == 'absent' or linkprop.state == 'reset':
if linkprop.property_exists():
if not linkprop.property_is_modified():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = linkprop.reset_property()
if rc != 0:
module.fail_json(property=linkprop.property,
link=linkprop.link,
msg=err,
rc=rc)
elif linkprop.state == 'present':
if linkprop.property_exists():
if not linkprop.property_is_readonly():
if not linkprop.property_is_set():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = linkprop.set_property()
if rc != 0:
module.fail_json(property=linkprop.property,
link=linkprop.link,
msg=err,
rc=rc)
else:
module.fail_json(msg='Property "%s" is read-only!' % (linkprop.property),
property=linkprop.property,
link=linkprop.link)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/network/illumos/dladm_linkprop.py | Python | gpl-3.0 | 7,835 |
# Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`path_store` --- Path record storage and selection for path servers
========================================================================
"""
# Stdlib
import copy
import heapq
import logging
import math
from collections import defaultdict, deque
# External
import yaml
# SCION
from lib.packet.pcb import PathSegment
from lib.packet.scion_addr import ISD_AS
from lib.util import SCIONTime, load_yaml_file
from typing import cast, Dict, List, Optional, Tuple
class PathPolicy(object):
"""Stores a path policy."""
def __init__(self): # pragma: no cover
self.best_set_size = 5
self.candidates_set_size = 20
self.history_limit = 0
self.update_after_number = 0
self.update_after_time = 0
self.unwanted_ases = []
self.property_ranges = {}
self.property_weights = {}
def get_path_policy_dict(self): # pragma: no cover
return {
'best_set_size': self.best_set_size,
'candidates_set_size': self.candidates_set_size,
'history_limit': self.history_limit,
'update_after_number': self.update_after_number,
'update_after_time': self.update_after_time,
'unwanted_ases': self.unwanted_ases,
'property_ranges': self.property_ranges,
'property_weights': self.property_weights
}
def check_filters(self, pcb):
"""
Runs some checks, including: unwanted ASes and min/max property values.
:param pcb: beacon to analyze.
:type pcb: :class:`PathSegment`
:returns:
True if any unwanted AS is present or a range is not respected.
:rtype: bool
"""
assert isinstance(pcb, PathSegment)
isd_as = self._check_unwanted_ases(pcb)
if isd_as:
logging.warning("PathStore: pcb discarded, unwanted AS(%s): %s",
isd_as, pcb.short_desc())
return False
reasons = self._check_property_ranges(pcb)
if reasons:
logging.info("PathStore: pcb discarded(%s): %s",
", ".join(reasons), pcb.short_desc())
return False
ia = self._check_remote_ifid(pcb)
if ia:
logging.error("PathStore: pcb discarded, remote IFID of %s unknown",
ia)
return False
return True
def _check_unwanted_ases(self, pcb): # pragma: no cover
"""
Checks whether any of the ASes in the path belong to the black list.
:param pcb: beacon to analyze.
:type pcb: :class:`PathSegment`
"""
for asm in pcb.iter_asms():
isd_as = asm.isd_as()
if isd_as in self.unwanted_ases:
return isd_as
def _check_range(self, reasons, name, actual): # CHANGE: Moved nested function out of
# method _check_property_ranges
range_ = self.property_ranges[name]
if not range_:
return
if (actual < range_[0] or actual > range_[1]):
reasons.append("%s: %d <= %d <= %d" % (
name, range_[0], actual, range_[1]))
def _check_property_ranges(self, pcb):
"""
Checks whether any of the path properties has a value outside the
predefined min-max range.
:param pcb: beacon to analyze.
:type pcb: :class:`PathSegment`
"""
reasons = []
self._check_range(reasons, "PeerLinks", pcb.get_n_peer_links()) # CHANGE: Adapted
self._check_range(reasons, "HopsLength", pcb.get_n_hops()) # calls to nested
self._check_range(reasons, "DelayTime", # function.
int(SCIONTime.get_time()) - pcb.get_timestamp())
self._check_range(reasons, "GuaranteedBandwidth", 10)
self._check_range(reasons, "AvailableBandwidth", 10)
self._check_range(reasons, "TotalBandwidth", 10)
return reasons
def _check_remote_ifid(self, pcb):
"""
        Checks whether any PCB markings have unset remote IFID values for
up/downstream ASes. This can happen during normal startup depending
on the timing of PCB propagation vs IFID keep-alives, but should
not happen once the infrastructure is settled.
Remote IFID is only allowed to be 0 if the corresponding ISD-AS is
0-0.
"""
for asm in pcb.iter_asms():
for pcbm in asm.iter_pcbms():
if pcbm.inIA().int() and not pcbm.p.inIF:
return pcbm.inIA()
if pcbm.outIA().int() and not pcbm.p.outIF:
return pcbm.outIA()
return None
@staticmethod # CHANGE: Turned classmethod into staticmethod
def from_file(policy_file): # pragma: no cover
"""
Create a PathPolicy instance from the file.
:param str policy_file: path to the path policy file
"""
return PathPolicy.from_dict(load_yaml_file(policy_file))
@staticmethod # CHANGE: Turned classmethod into staticmethod
def from_dict(policy_dict): # pragma: no cover
"""
Create a PathPolicy instance from the dictionary.
:param dict policy_dict: dictionary representation of path policy
"""
path_policy = PathPolicy()
path_policy.parse_dict(policy_dict)
return path_policy
def parse_dict(self, path_policy):
"""
Parses the policies from the dictionary.
:param dict path_policy: path policy.
"""
self.best_set_size = cast(int, path_policy['BestSetSize']) # CHANGE: added casts
self.candidates_set_size = cast(int, path_policy['CandidatesSetSize'])
self.history_limit = cast(int, path_policy['HistoryLimit'])
self.update_after_number = cast(int, path_policy['UpdateAfterNumber'])
self.update_after_time = cast(int, path_policy['UpdateAfterTime'])
unwanted_ases = cast(str, path_policy['UnwantedASes']).split(',')
for unwanted in unwanted_ases:
self.unwanted_ases.append(ISD_AS(unwanted))
property_ranges = cast(Dict[str, str], path_policy['PropertyRanges'])
for key in property_ranges:
property_range = property_ranges[key].split('-')
property_range_temp = int(property_range[0]), int(property_range[1]) # CHANGE: Added local variable b/c variable changed type.
self.property_ranges[key] = property_range_temp
self.property_weights = cast(Dict[str, int], path_policy['PropertyWeights'])
def __str__(self):
path_policy_dict = self.get_path_policy_dict()
path_policy_str = yaml.dump(path_policy_dict)
return path_policy_str
class PathStoreRecord(object):
"""
    Path record that gets stored in the PathStore.
:cvar int DEFAULT_OFFSET:
the amount of time subtracted from the current time when the path's
initial last sent time is set.
:ivar pcb: the PCB representing the record.
:vartype pcb: :class:`lib.packet.pcb.PathSegment`
:ivar bytes id: the path segment identifier stored in the record's PCB.
:ivar float fidelity: the fidelity of the path record.
:ivar float peer_links:
the normalized number of peer links in the path segment.
:ivar float hops_length: the normalized length of the path segment.
:ivar float disjointness:
the normalized disjointness of the path segment compared to the other
paths in the PathStore.
:ivar int last_sent_time:
the Unix time at which the path segment was last sent.
:ivar int last_seen_time:
the Unix time at which the path segment was last seen.
:ivar float delay_time:
the normalized time in seconds between the PCB's creation and the time
it was last seen by the path server.
:ivar int expiration_time: the Unix time at which the path segment expires.
:ivar int guaranteed_bandwidth: the path segment's guaranteed bandwidth.
:ivar int available_bandwidth: the path segment's available bandwidth.
:ivar int total_bandwidth: the path segment's total bandwidth.
"""
DEFAULT_OFFSET = 3600 * 24 * 7 # 1 week
def __init__(self, pcb):
"""
:param pcb: beacon to analyze.
:type pcb: :class:`PathSegment`
"""
assert isinstance(pcb, PathSegment)
self.id = pcb.get_hops_hash(hex=True)
self.peer_links = pcb.get_n_peer_links()
self.hops_length = pcb.get_n_hops()
self.fidelity = 0
self.disjointness = 0
self.last_sent_time = int(SCIONTime.get_time()) - self.DEFAULT_OFFSET
self.guaranteed_bandwidth = 0
self.available_bandwidth = 0
self.total_bandwidth = 0
self.last_seen_time = -1 # CHANGE: Added attribute assignment to constructor
self.update(pcb)
def update(self, pcb):
"""
Update a candidate entry from a recent PCB.
"""
assert self.id == pcb.get_hops_hash(hex=True)
now = int(SCIONTime.get_time())
self.pcb = copy.deepcopy(pcb)
self.delay_time = now - pcb.get_timestamp()
self.last_seen_time = now
self.expiration_time = pcb.get_expiration_time()
def sending(self): # pragma: no cover
"""
Update last_sent_time to now.
"""
self.last_sent_time = int(SCIONTime.get_time())
def update_fidelity(self, path_policy):
"""
Computes a path fidelity based on all path properties and considering
the corresponding weights, which are stored in the path policy.
:param dict path_policy: path policy.
"""
self.fidelity = 0
now = SCIONTime.get_time()
self.fidelity += (path_policy.property_weights['PeerLinks'] *
self.peer_links)
self.fidelity += (path_policy.property_weights['HopsLength'] /
self.hops_length)
self.fidelity += (path_policy.property_weights['Disjointness'] *
self.disjointness)
if now != 0:
self.fidelity += (path_policy.property_weights['LastSentTime'] *
(now - self.last_sent_time) / now)
self.fidelity += (path_policy.property_weights['LastSeenTime'] *
self.last_seen_time / now)
self.fidelity += (path_policy.property_weights['DelayTime'] /
self.delay_time)
self.fidelity += (path_policy.property_weights['ExpirationTime'] *
(self.expiration_time - now) / self.expiration_time)
self.fidelity += (path_policy.property_weights['GuaranteedBandwidth'] *
self.guaranteed_bandwidth)
self.fidelity += (path_policy.property_weights['AvailableBandwidth'] *
self.available_bandwidth)
self.fidelity += (path_policy.property_weights['TotalBandwidth'] *
self.total_bandwidth)
def __eq__(self, other): # pragma: no cover
if type(other) is not type(self):
return False
return self.id == other.id
def __str__(self):
return "PathStoreRecord: ID: %s Fidelity: %s" % (
self.id, self.fidelity)
class PathStore(object):
"""Path Store class."""
def __init__(self, path_policy): # pragma: no cover
"""
:param dict path_policy: path policy.
"""
self.path_policy = path_policy
self.candidates = []
self.best_paths_history = deque(self.path_policy.history_limit) # CHANGE: changed kw arg to normal arg
self.disjointness = defaultdict(lambda : float()) # CHANGE: Added lambda
self.last_dj_update = 0
def add_segment(self, pcb):
"""
Possibly add a new path to the candidates list.
Attempt to add a path (which is an instance of PathSegment) to the set
of candidate paths. If successfully added, the candidate path is stored
in the PathStore as a PathStoreRecord.
Before adding the path, the candidate PathSegment is first checked
against the PathStore's filter criteria, listed in PathPolicy. If the
path's properties do not meet the filter criteria, the path is not
added and the set of candidate paths remains unchanged.
If the path passes the filter checks but is already in the candidate
set (as determined by its identifier), then the path is not added to
the candidate set. Instead, the delay and arrival times are updated in
the existing record.
If the path passes the filter checks and is not already in the
candidate set, it is added to the list of candidate paths. If upon
adding the path, the candidate path set is too large (i.e., larger than
candidates_set_size), the lowest-fidelity path is removed.
:param pcb: The PCB representing the potential path.
:type pcb: PathSegment
"""
assert isinstance(pcb, PathSegment)
pcb_hash = pcb.get_hops_hash(hex=True)
if not self.path_policy.check_filters(pcb):
return
for candidate in self.candidates:
if candidate.id == pcb_hash:
candidate.update(pcb)
return
record = PathStoreRecord(pcb)
self.candidates.append(record)
self._trim_candidates()
def _trim_candidates(self):
"""
        Trims the candidate set if necessary.
"""
if len(self.candidates) > self.path_policy.candidates_set_size:
self._remove_expired_segments()
if len(self.candidates) > self.path_policy.candidates_set_size:
self._update_all_fidelity()
                self.candidates = sorted(self.candidates, key=lambda x: x.fidelity,
                                         reverse=True)[:-1]
def _update_disjointness_db(self):
"""
Update the disjointness database.
Based on the current time, update the disjointness database keeping
track of each path, AS, and interface previously sent.
"""
now = SCIONTime.get_time()
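        # Exponentially decay every entry by the time elapsed since the last update.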
for k, v in self.disjointness.items():
self.disjointness[k] = v * math.exp(self.last_dj_update - now)
self.last_dj_update = now
def _update_all_disjointness(self):
"""
Update the disjointness of all path candidates.
The disjointness of a candidate path is measured with respect to
previously sent paths and is calculated as follows:
Each time a path is sent, its ASes and AS-interface pairs are added to
        the disjointness database. The exact path itself is also added to a list of
previously sent paths.
The disjointness is then calculated as the inverse of the sum of the
following: the entire path, each AS on the path, and each AS-interface
pair on the path.
The disjointness is normalized by the highest-scoring path's
disjointness.
"""
self._update_disjointness_db()
max_disjointness = 0.0
test_asm = None
test_isdas = None
test_index = None
for candidate in self.candidates:
path_disjointness = self.disjointness[candidate.id]
as_disjointness = 0.0
if_disjointness = 0.0
for asm in candidate.pcb.iter_asms():
test_asm = asm
test_isdas = asm.isd_as()
test_index = asm.isd_as()[1]
as_disjointness += self.disjointness[asm.isd_as()[1]]
if_disjointness += self.disjointness[
asm.pcbm(0).hof().egress_if]
candidate.disjointness = (path_disjointness + as_disjointness +
if_disjointness)
if candidate.disjointness > max_disjointness:
max_disjointness = candidate.disjointness
if max_disjointness > 0.0:
for candidate in self.candidates:
candidate.disjointness /= max_disjointness
def _update_all_delay_time(self):
"""Update the delay time property of all path candidates."""
max_delay_time = 0
for candidate in self.candidates:
candidate.delay_time = (candidate.last_seen_time -
candidate.pcb.get_timestamp() + 1)
if candidate.delay_time > max_delay_time:
max_delay_time = candidate.delay_time
for candidate in self.candidates:
candidate.delay_time /= max_delay_time
def _update_all_fidelity(self):
"""Update the fidelity of all path candidates."""
self._update_disjointness_db()
self._update_all_disjointness()
self._update_all_delay_time()
for candidate in self.candidates:
candidate.update_fidelity(self.path_policy)
def get_best_segments(self, k=None, sending=True):
"""
Return the k best paths from the temporary buffer.
Select the k best paths from the set of candidate paths. At the time of
selection, the PathStore computes the fidelity of all candidate path
segments and returns the k paths with the highest fidelity.
When computing the fidelity, only the path properties that vary in time
need to be recomputed: the freshness, delay, and disjointness. The
length and number of peering links is constant.
:param int k: default best set size.
"""
if k is None:
k = self.path_policy.best_set_size
self._remove_expired_segments()
self._update_all_fidelity()
best_candidates = heapq.nlargest(k, self.candidates,
lambda y: y.fidelity) # CHANGE: Turned kw arg into regular arg.
if sending:
for candidate in best_candidates:
candidate.sending()
return [x.pcb for x in best_candidates]
def get_latest_history_snapshot(self, k=None):
"""
Return the latest k best paths from the history.
:param int k: default best set size.
"""
if k is None:
k = self.path_policy.best_set_size
best_paths = []
if self.best_paths_history:
for candidate in self.best_paths_history[0][:k]:
best_paths.append(candidate.pcb)
return best_paths
def _remove_expired_segments(self):
"""Remove candidates if their expiration_time is up."""
rec_ids = []
now = SCIONTime.get_time()
for candidate in self.candidates:
if candidate.expiration_time <= now:
rec_ids.append(candidate.id)
self.remove_segments(rec_ids)
def remove_segments(self, rec_ids):
"""
Remove segments in 'rec_ids' from the candidates.
:param list rec_ids: list of record IDs to remove.
"""
self.candidates[:] = [c for c in self.candidates if c.id not in rec_ids]
if self.candidates:
self._update_all_fidelity()
self.candidates = sorted(self.candidates, key=lambda x: x.fidelity,
reverse=True)
def get_segment(self, rec_id):
"""
Return the segment for the corresponding record ID or None.
:param str rec_id: ID of the segment to return.
"""
for record in self.candidates:
if record.id == rec_id:
return record.pcb
return None
def __str__(self):
"""
Return a string with the path store data.
"""
ret = ["PathStore:"]
for candidate in self.candidates:
ret.append(" %s" % candidate)
return "\n".join(ret)
| caterinaurban/Typpete | typpete/tests/scion/lib/path_store.py | Python | mpl-2.0 | 20,490 |
import pytest
from kvpio import api_base
def pytest_addoption(parser):
parser.addoption(
'--local',
action='store_true',
default=False,
help='use localhost for testing rather than api.kvp.io'
)
def pytest_generate_tests(metafunc):
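    # Parametrize the api_base fixture so the --local flag points tests at a local server.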
if 'api_base' in metafunc.fixturenames:
local = metafunc.config.option.local
localhost = 'http://localhost:8000'
metafunc.parametrize('api_base', [localhost if local else api_base])
@pytest.fixture
def valid_api_key():
return 'steelhive'
@pytest.fixture
def invalid_api_key():
return 'foobar'
@pytest.fixture(params=[
# tuple of
# (key, value,
# access key, expected value)
('foo', 'bar',
'foo', 'bar'),
('foo', {'bar': {'baz': 123}},
'foo/bar/baz', 123),
('foo', [1, {'10': 'Ten', '20': [46], '2': 'two'}, 3],
'foo/1', 1),
('foo', [1, {'10': 'Ten', '20': [46], '2': 'two'}, 3],
'foo/2/10', 'Ten'),
('foo', [1, {'10': 'Ten', '20': [46], '2': 'two'}, 3],
'foo/2/20/1', 46)
])
def bucket_data(request):
return request.param
@pytest.fixture(params=[
# tuple of
# (key, value,
# bucket key, bucket value,
# resulting template)
('far', 'Test template: {{ foo }} should equal bar',
'foo', 'bar',
'Test template: bar should equal bar'),
('far', 'Test template: {{ foo.bar.baz }} should equal 123',
'foo', {'bar': {'baz': 123}},
'Test template: 123 should equal 123'),
('far', 'What is {{ foo[1]["20"][0] }} and {{ foo[1]["2"] }} about anyway...',
'foo', [1, {'10': 'Ten', '20': [46], '2': 'two'}, 3],
'What is 46 and two about anyway...')
])
def template_data(request):
return request.param
| kvpio/kvp.io-python | test/conftest.py | Python | mit | 1,812 |
# Generated by Django 2.1.3 on 2018-11-23 06:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('drf_user', '0004_auto_20181015_1551'),
]
operations = [
migrations.RemoveField(
model_name='authtransaction',
name='date_created',
),
migrations.RemoveField(
model_name='authtransaction',
name='user',
),
migrations.AddField(
model_name='authtransaction',
name='create_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, verbose_name='Create Date/Time'),
preserve_default=False,
),
migrations.AddField(
model_name='authtransaction',
name='created_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='authtransaction',
name='update_date',
field=models.DateTimeField(auto_now=True, verbose_name='Date/Time Modified'),
),
]
| iamhssingh/django-userprofile | drf_user/migrations/0005_auto_20181123_0657.py | Python | gpl-3.0 | 1,309 |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
def get_storage(import_path):
"""
Imports the message storage class described by import_path, where
import_path is the full Python path to the class.
"""
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a Python path." % import_path)
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, classname))
# Callable with the same interface as the storage classes i.e. accepts a
# 'request' object. It is wrapped in a lambda to stop 'settings' being used at
# the module level
default_storage = lambda request: get_storage(settings.MESSAGE_STORAGE)(request)
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/contrib/messages/storage/__init__.py | Python | bsd-3-clause | 1,185 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import pytz
from openerp import SUPERUSER_ID
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_default_warehouse(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def _get_shipped(self, cr, uid, ids, name, args, context=None):
res = {}
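        # An order counts as delivered once every procurement in its group is done or cancelled.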
for sale in self.browse(cr, uid, ids, context=context):
group = sale.procurement_group_id
if group:
res[sale.id] = all([proc.state in ['cancel', 'done'] for proc in group.procurement_ids])
else:
res[sale.id] = False
return res
def _get_orders(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.procurement_id and move.procurement_id.sale_line_id:
res.add(move.procurement_id.sale_line_id.order_id.id)
return list(res)
def _get_orders_procurements(self, cr, uid, ids, context=None):
res = set()
for proc in self.pool.get('procurement.order').browse(cr, uid, ids, context=context):
            if proc.state == 'done' and proc.sale_line_id:
res.add(proc.sale_line_id.order_id.id)
return list(res)
def _get_picking_ids(self, cr, uid, ids, name, args, context=None):
res = {}
for sale in self.browse(cr, uid, ids, context=context):
if not sale.procurement_group_id:
res[sale.id] = []
continue
res[sale.id] = self.pool.get('stock.picking').search(cr, uid, [('group_id', '=', sale.procurement_group_id.id)], context=context)
return res
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
vals = super(sale_order, self)._prepare_order_line_procurement(cr, uid, order, line, group_id=group_id, context=context)
location_id = order.partner_shipping_id.property_stock_customer.id
vals['location_id'] = location_id
routes = line.route_id and [(4, line.route_id.id)] or []
vals['route_ids'] = routes
vals['warehouse_id'] = order.warehouse_id and order.warehouse_id.id or False
vals['partner_dest_id'] = order.partner_shipping_id.id
return vals
_columns = {
'incoterm': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'picking_policy': fields.selection([('direct', 'Deliver each product when available'), ('one', 'Deliver all products at once')],
'Shipping Policy', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""Pick 'Deliver each product when available' if you allow partial delivery."""),
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'shipped': fields.function(_get_shipped, string='Delivered', type='boolean', store={
'procurement.order': (_get_orders_procurements, ['state'], 10)
}),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking associated to this sale'),
}
_defaults = {
'warehouse_id': _get_default_warehouse,
'picking_policy': 'direct',
'order_policy': 'manual',
}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
val = {}
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
if warehouse.company_id:
val['company_id'] = warehouse.company_id.id
return {'value': val}
def action_view_delivery(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing delivery orders
of given sales order ids. It can either be a in a list or in a form
view, if there is only one delivery order to show.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree_all')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of delivery orders to display
pick_ids = []
for so in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in so.picking_ids]
#choose the view_mode accordingly
if len(pick_ids) > 1:
result['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = pick_ids and pick_ids[0] or False
return result
def action_invoice_create(self, cr, uid, ids, grouped=False, states=['confirmed', 'done', 'exception'], date_invoice = False, context=None):
move_obj = self.pool.get("stock.move")
res = super(sale_order,self).action_invoice_create(cr, uid, ids, grouped=grouped, states=states, date_invoice = date_invoice, context=context)
for order in self.browse(cr, uid, ids, context=context):
if order.order_policy == 'picking':
for picking in order.picking_ids:
move_obj.write(cr, uid, [x.id for x in picking.move_lines], {'invoice_state': 'invoiced'}, context=context)
return res
def action_wait(self, cr, uid, ids, context=None):
res = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids):
noprod = self.test_no_product(cr, uid, o, context)
if noprod and o.order_policy=='picking':
self.write(cr, uid, [o.id], {'order_policy': 'manual'}, context=context)
return res
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = super(sale_order, self)._get_date_planned(cr, uid, order, line, start_date, context=context)
date_planned = (date_planned - timedelta(days=order.company_id.security_lead)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
        res = super(sale_order, self)._prepare_procurement_group(cr, uid, order, context=context)
res.update({'move_type': order.picking_policy})
return res
def action_ship_end(self, cr, uid, ids, context=None):
super(sale_order, self).action_ship_end(cr, uid, ids, context=context)
for order in self.browse(cr, uid, ids, context=context):
val = {'shipped': True}
if order.state == 'shipping_except':
val['state'] = 'progress'
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
res = self.write(cr, uid, [order.id], val)
return True
def has_stockable_products(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
class product_product(osv.osv):
_inherit = 'product.product'
def need_procurement(self, cr, uid, ids, context=None):
#when sale/product is installed alone, there is no need to create procurements, but with sale_stock
#we must create a procurement for each product that is not a service.
for product in self.browse(cr, uid, ids, context=context):
if product.type != 'service':
return True
return super(product_product, self).need_procurement(cr, uid, ids, context=context)
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def _number_packages(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
try:
res[line.id] = int((line.product_uom_qty+line.product_packaging.qty-0.0001) / line.product_packaging.qty)
            except Exception:
res[line.id] = 1
return res
_columns = {
'product_packaging': fields.many2one('product.packaging', 'Packaging'),
'number_packages': fields.function(_number_packages, type='integer', string='Number Packages'),
'route_id': fields.many2one('stock.location.route', 'Route', domain=[('sale_selectable', '=', True)]),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
}
_defaults = {
'product_packaging': False,
}
def product_packaging_change(self, cr, uid, ids, pricelist, product, qty=0, uom=False,
partner_id=False, packaging=False, flag=False, context=None):
if not product:
return {'value': {'product_packaging': False}}
product_obj = self.pool.get('product.product')
product_uom_obj = self.pool.get('product.uom')
pack_obj = self.pool.get('product.packaging')
warning = {}
result = {}
warning_msgs = ''
if flag:
res = self.product_id_change(cr, uid, ids, pricelist=pricelist,
product=product, qty=qty, uom=uom, partner_id=partner_id,
packaging=packaging, flag=False, context=context)
warning_msgs = res.get('warning') and res['warning'].get('message', '') or ''
products = product_obj.browse(cr, uid, product, context=context)
if not products.packaging_ids:
packaging = result['product_packaging'] = False
if packaging:
default_uom = products.uom_id and products.uom_id.id
pack = pack_obj.browse(cr, uid, packaging, context=context)
q = product_uom_obj._compute_qty(cr, uid, uom, pack.qty, default_uom)
# qty = qty - qty % q + q
if qty and (q and not (qty % q) == 0):
ean = pack.ean or _('(n/a)')
qty_pack = pack.qty
type_ul = pack.ul
if not warning_msgs:
warn_msg = _("You selected a quantity of %d Units.\n"
"But it's not compatible with the selected packaging.\n"
"Here is a proposition of quantities according to the packaging:\n"
"EAN: %s Quantity: %s Type of ul: %s") % \
(qty, ean, qty_pack, type_ul.name)
warning_msgs += _("Picking Information ! : ") + warn_msg + "\n\n"
warning = {
'title': _('Configuration Error!'),
'message': warning_msgs
}
result['product_uom_qty'] = qty
return {'value': result, 'warning': warning}
def _check_routing(self, cr, uid, ids, product, warehouse_id, context=None):
""" Verify the route of the product based on the warehouse
        return True if the product availability in stock does not need to be verified
"""
is_available = False
if warehouse_id:
warehouse = self.pool['stock.warehouse'].browse(cr, uid, warehouse_id, context=context)
for product_route in product.route_ids:
if warehouse.mto_pull_id and warehouse.mto_pull_id.route_id and warehouse.mto_pull_id.route_id.id == product_route.id:
is_available = True
break
else:
try:
mto_route_id = self.pool['stock.warehouse']._get_mto_route(cr, uid, context=context)
except osv.except_osv:
# if route MTO not found in ir_model_data, we treat the product as in MTS
mto_route_id = False
if mto_route_id:
for product_route in product.route_ids:
if product_route.id == mto_route_id:
is_available = True
break
return is_available
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None):
context = context or {}
product_uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
warning = {}
#UoM False due to hack which makes sure uom changes price, ... in product_id_change
res = self.product_id_change(cr, uid, ids, pricelist, product, qty=qty,
uom=False, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
if not product:
res['value'].update({'product_packaging': False})
return res
# set product uom in context to get virtual stock in current uom
if 'product_uom' in res.get('value', {}):
# use the uom changed by super call
context = dict(context, uom=res['value']['product_uom'])
elif uom:
# fallback on selected
context = dict(context, uom=uom)
#update of result obtained in super function
product_obj = product_obj.browse(cr, uid, product, context=context)
res['value'].update({'product_tmpl_id': product_obj.product_tmpl_id.id, 'delay': (product_obj.sale_delay or 0.0)})
# Calling product_packaging_change function after updating UoM
res_packing = self.product_packaging_change(cr, uid, ids, pricelist, product, qty, uom, partner_id, packaging, context=context)
res['value'].update(res_packing.get('value', {}))
warning_msgs = res_packing.get('warning') and res_packing['warning']['message'] or ''
if product_obj.type == 'product':
            #determine if the product needs further check for stock availability
is_available = self._check_routing(cr, uid, ids, product_obj, warehouse_id, context=context)
#check if product is available, and if not: raise a warning, but do this only for products that aren't processed in MTO
if not is_available:
uom_record = False
if uom:
uom_record = product_uom_obj.browse(cr, uid, uom, context=context)
if product_obj.uom_id.category_id.id != uom_record.category_id.id:
uom_record = False
if not uom_record:
uom_record = product_obj.uom_id
compare_qty = float_compare(product_obj.virtual_available, qty, precision_rounding=uom_record.rounding)
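                # compare_qty == -1 means the planned quantity exceeds the forecast (virtual) stock.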
if compare_qty == -1:
warn_msg = _('You plan to sell %.2f %s but you only have %.2f %s available !\nThe real stock is %.2f %s. (without reservations)') % \
(qty, uom_record.name,
max(0,product_obj.virtual_available), uom_record.name,
max(0,product_obj.qty_available), uom_record.name)
warning_msgs += _("Not enough stock ! : ") + warn_msg + "\n\n"
#update of warning messages
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
res.update({'warning': warning})
return res
def button_cancel(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for procurement in lines.mapped('procurement_ids'):
for move in procurement.move_ids:
if move.state == 'done' and not move.scrapped:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sale order line which is linked to a stock move already done.'))
return super(sale_order_line, self).button_cancel(cr, uid, ids, context=context)
class stock_move(osv.osv):
_inherit = 'stock.move'
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if context.get('inv_type') in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
self.pool.get('sale.order.line').write(cr, uid, [sale_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('sale.order').write(cr, uid, [sale_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
sale_line_ids = sale_line_obj.search(cr, uid, [('order_id', '=', move.procurement_id.sale_line_id.order_id.id), ('invoiced', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if sale_line_ids:
created_lines = sale_line_obj.invoice_line_create(cr, uid, sale_line_ids, context=context)
invoice_line_obj.write(cr, uid, created_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if context.get('inv_type') in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.order_policy == 'picking':
sale_order = move.procurement_id.sale_line_id.order_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
elif move.picking_id.sale_id and context.get('inv_type') in ('out_invoice', 'out_refund'):
# In case of extra move, it is better to use the same data as the original moves
sale_order = move.picking_id.sale_id
return sale_order.partner_invoice_id, sale_order.user_id.id, sale_order.pricelist_id.currency_id.id
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if inv_type in ('out_invoice', 'out_refund') and move.procurement_id and move.procurement_id.sale_line_id:
sale_line = move.procurement_id.sale_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in sale_line.tax_id])]
res['account_analytic_id'] = sale_line.order_id.project_id and sale_line.order_id.project_id.id or False
res['discount'] = sale_line.discount
if move.product_id.id != sale_line.product_id.id:
res['price_unit'] = self.pool['product.pricelist'].price_get(
cr, uid, [sale_line.order_id.pricelist_id.id],
move.product_id.id, move.product_uom_qty or 1.0,
sale_line.order_id.partner_id, context=context)[sale_line.order_id.pricelist_id.id]
else:
res['price_unit'] = sale_line.price_unit
uos_coeff = move.product_uom_qty and move.product_uos_qty / move.product_uom_qty or 1.0
res['price_unit'] = res['price_unit'] / uos_coeff
return res
def _get_moves_taxes(self, cr, uid, moves, inv_type, context=None):
is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, inv_type, context=context)
if inv_type == 'out_invoice':
for move in moves:
if move.procurement_id and move.procurement_id.sale_line_id:
is_extra_move[move.id] = False
extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.procurement_id.sale_line_id.tax_id])]
elif move.picking_id.sale_id and move.product_id.product_tmpl_id.taxes_id:
fp = move.picking_id.sale_id.fiscal_position
res = self.pool.get("account.invoice.line").product_id_change(cr, uid, [], move.product_id.id, None, partner_id=move.picking_id.partner_id.id, fposition_id=(fp and fp.id), context=context)
extra_move_tax[0, move.product_id] = [(6, 0, res['value']['invoice_line_tax_id'])]
return (is_extra_move, extra_move_tax)
def _get_taxes(self, cr, uid, move, context=None):
if move.procurement_id.sale_line_id.tax_id:
return [tax.id for tax in move.procurement_id.sale_line_id.tax_id]
return super(stock_move, self)._get_taxes(cr, uid, move, context=context)
class stock_location_route(osv.osv):
_inherit = "stock.location.route"
_columns = {
'sale_selectable': fields.boolean("Selectable on Sales Order Line")
}
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Inherit the original function of the 'stock' module
We select the partner of the sales order as the partner of the customer invoice
"""
if picking.sale_id:
saleorder_ids = self.pool['sale.order'].search(cr, uid, [('procurement_group_id' ,'=', picking.group_id.id)], context=context)
saleorders = self.pool['sale.order'].browse(cr, uid, saleorder_ids, context=context)
if saleorders and saleorders[0] and saleorders[0].order_policy == 'picking':
saleorder = saleorders[0]
return saleorder.partner_invoice_id.id
return super(stock_picking, self)._get_partner_to_invoice(cr, uid, picking, context=context)
def _get_sale_id(self, cr, uid, ids, name, args, context=None):
sale_obj = self.pool.get("sale.order")
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
if picking.group_id:
sale_ids = sale_obj.search(cr, uid, [('procurement_group_id', '=', picking.group_id.id)], context=context)
if sale_ids:
res[picking.id] = sale_ids[0]
return res
_columns = {
'sale_id': fields.function(_get_sale_id, type="many2one", relation="sale.order", string="Sale Order"),
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
sale_obj = self.pool.get('sale.order')
sale_line_obj = self.pool.get('sale.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
sale = move.picking_id.sale_id
if sale:
inv_vals.update({
'fiscal_position': sale.fiscal_position.id,
'payment_term': sale.payment_term.id,
'user_id': sale.user_id.id,
'section_id': sale.section_id.id,
'name': sale.client_order_ref or '',
'comment': sale.note,
})
return inv_vals
| oliverhr/odoo | addons/sale_stock/sale_stock.py | Python | agpl-3.0 | 26,609 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class CronTrigger(resource.Resource):
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
NAME, PATTERN, WORKFLOW, FIRST_TIME, COUNT
) = (
'name', 'pattern', 'workflow', 'first_time', 'count'
)
_WORKFLOW_KEYS = (
WORKFLOW_NAME, WORKFLOW_INPUT
) = (
'name', 'input'
)
ATTRIBUTES = (
NEXT_EXECUTION_TIME, REMAINING_EXECUTIONS
) = (
'next_execution_time', 'remaining_executions'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the cron trigger.')
),
PATTERN: properties.Schema(
properties.Schema.STRING,
_('Cron expression.'),
constraints=[
constraints.CustomConstraint(
'cron_expression')
]
),
WORKFLOW: properties.Schema(
properties.Schema.MAP,
_('Workflow to execute.'),
required=True,
schema={
WORKFLOW_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the workflow.')
),
WORKFLOW_INPUT: properties.Schema(
properties.Schema.MAP,
_('Input values for the workflow.')
)
}
),
FIRST_TIME: properties.Schema(
properties.Schema.STRING,
_('Time of the first execution in format "YYYY-MM-DD HH:MM".')
),
COUNT: properties.Schema(
properties.Schema.INTEGER,
_('Remaining executions.')
)
}
attributes_schema = {
NEXT_EXECUTION_TIME: attributes.Schema(
_('Time of the next execution in format "YYYY-MM-DD HH:MM:SS".'),
type=attributes.Schema.STRING
),
REMAINING_EXECUTIONS: attributes.Schema(
_('Number of remaining executions.'),
type=attributes.Schema.INTEGER
)
}
default_client_name = 'mistral'
entity = 'cron_triggers'
def _cron_trigger_name(self):
return self.properties.get(self.NAME) or self.physical_resource_name()
def handle_create(self):
workflow = self.properties.get(self.WORKFLOW)
args = {
'name': self._cron_trigger_name(),
'pattern': self.properties.get(self.PATTERN),
'workflow_name': workflow.get(self.WORKFLOW_NAME),
'workflow_input': workflow.get(self.WORKFLOW_INPUT),
'first_time': self.properties.get(self.FIRST_TIME),
'count': self.properties.get(self.COUNT)
}
cron_trigger = self.client().cron_triggers.create(**args)
self.resource_id_set(cron_trigger.name)
def _resolve_attribute(self, name):
trigger = self.client().cron_triggers.get(self.resource_id)
if name == self.NEXT_EXECUTION_TIME:
return trigger.next_execution_time
elif name == self.REMAINING_EXECUTIONS:
return trigger.remaining_executions
# TODO(tlashchova): remove this method when mistralclient>1.0.0 is used.
    def _show_resource(self):
        cron_trigger = self.client().cron_triggers.get(self.resource_id)
        if hasattr(cron_trigger, 'to_dict'):
            return super(CronTrigger, self)._show_resource()
        return cron_trigger._data
def resource_mapping():
return {
'OS::Mistral::CronTrigger': CronTrigger,
}
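# Illustrative usage (not part of the original module): in a HOT template this
# resource would typically be declared as in the sketch below, where the name,
# pattern and workflow values are placeholders.
#
#   cron_trigger:
#     type: OS::Mistral::CronTrigger
#     properties:
#       name: my_trigger
#       pattern: "* * * * *"
#       workflow: { name: my_workflow, input: {} }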
| pratikmallya/heat | heat/engine/resources/openstack/mistral/cron_trigger.py | Python | apache-2.0 | 4,295 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Ivo Nunes/Vasco Nunes
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from birdieapp.constants import BIRDIE_SHARE_PATH, BIRDIE_CACHE_PATH
from birdieapp.signalobject import SignalObject
from gi.repository import Gtk, Gdk
import gettext
import os.path
from gettext import gettext as _
class UserBox(Gtk.EventBox, SignalObject):
__gtype_name__ = "UserBox"
def __init__(self):
super(UserBox, self).__init__()
super(UserBox, self).init_signals()
self.following = False
self.follower = False
self.blocked = False
self.screen_name = None
# tweet box - main container
self.user_box = Gtk.Box(
orientation=Gtk.Orientation.VERTICAL, margin=0)
# avatar image
self.avatar_img = Gtk.Image()
self.avatar_img.set_from_file(BIRDIE_SHARE_PATH + "/default.png")
self.avatar_img.set_halign(Gtk.Align.CENTER)
self.avatar_img.set_valign(Gtk.Align.START)
# verified account image
self.verified_img = Gtk.Image()
self.verified_img.set_from_icon_name("twitter-verified",
Gtk.IconSize.MENU)
self.spacer = Gtk.Label("")
# name
self.name_label = Gtk.Label()
self.user_name_label = Gtk.Label()
self.local_label = Gtk.Label()
self.user_box.pack_start(self.spacer, False, False, 0)
self.user_box.pack_start(self.avatar_img, False, False, 0)
self.user_box.pack_start(self.verified_img, False, False, 0)
self.user_box.pack_start(self.name_label, False, False, 0)
self.user_box.pack_start(self.user_name_label, False, False, 0)
self.user_box.pack_start(self.local_label, False, False, 0)
# details
self.details_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL,
margin=12)
self.tweets_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
margin=8)
self.tweets_box.set_halign(Gtk.Align.CENTER)
self.tweets_label = Gtk.Label()
self.tweets2_label = Gtk.Label()
txt = "<span color='#000000' font_weight='bold' size='small'>"
txt += _("TWEETS") + "</span>"
self.tweets2_label.set_markup(txt)
self.tweets_box.pack_start(self.tweets_label, False, False, 0)
self.tweets_box.pack_start(self.tweets2_label, False, False, 0)
self.details_box.pack_start(self.tweets_box, False, False, 0)
self.follow_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
margin=8)
self.follow_box.set_halign(Gtk.Align.CENTER)
self.follow_label = Gtk.Label()
self.follow2_label = Gtk.Label()
txt = "<span color='#000000' font_weight='bold' size='small'>"
txt += _("FOLLOWING") + "</span>"
self.follow2_label.set_markup(txt)
self.follow_box.pack_start(self.follow_label, False, False, 0)
self.follow_box.pack_start(self.follow2_label, False, False, 0)
self.details_box.pack_start(self.follow_box, False, False, 0)
self.followers_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
margin=8)
self.followers_box.set_halign(Gtk.Align.CENTER)
self.followers_label = Gtk.Label()
self.followers2_label = Gtk.Label()
txt = "<span color='#000000' font_weight='bold' size='small'>"
txt += _("FOLLOWERS") + "</span>"
self.followers2_label.set_markup(txt)
self.followers_box.pack_start(self.followers_label, False, False, 0)
self.followers_box.pack_start(self.followers2_label, False, False, 0)
self.details_box.pack_start(self.followers_box, False, False, 0)
self.actions_box = Gtk.MenuButton()
self.actions_box.set_tooltip_text(_('More Options'))
self.actions_box.set_relief(Gtk.ReliefStyle.NONE)
self.actions_box.set_margin_top(11)
self.actions_box.set_margin_bottom(11)
self.actions_box.set_margin_right(2)
self.actions_img = Gtk.Image()
self.actions_img.set_from_icon_name("view-more-symbolic",
Gtk.IconSize.MENU)
self.actions_box.add(self.actions_img)
self.details_box.pack_start(self.actions_box, False, False, 0)
# actions menu
self.actions_menu = Gtk.Menu()
menu_item = Gtk.MenuItem()
menu_item.set_label(_("Send Message"))
menu_item.connect("activate", self.on_dm)
self.actions_menu.append(menu_item)
menu_item = Gtk.MenuItem()
menu_item.set_label(_("Add/Remove from Lists"))
menu_item.connect("activate", self.on_add_remove_from_lists)
self.actions_menu.append(menu_item)
menu_item = Gtk.MenuItem()
menu_item.set_label(_("View profile on Twitter"))
menu_item.connect("activate", self.on_view_profile_on_twitter)
self.actions_menu.append(menu_item)
menu_item = Gtk.MenuItem()
menu_item.set_label(_("Block"))
menu_item.connect("activate", self.on_block)
self.actions_menu.append(menu_item)
self.actions_menu.show_all()
self.actions_box.set_popup(self.actions_menu)
self.status = Gtk.Button(_("Follow"))
self.status.set_margin_top(11)
self.status.set_margin_bottom(11)
self.status.get_style_context().add_class("suggested-action")
self.details_box.pack_start(self.status, True, True, 0)
self.user_box.pack_start(self.details_box, True, False, 0)
self.add(self.user_box)
self.show_all()
def set(self, data, active_account=None, friendship_data=None):
self.avatar_img.set_from_file(BIRDIE_CACHE_PATH
+ os.path.basename(
data['profile_image_url']))
txt = "<span color='#000000' font_weight='bold' size='x-large'>"
txt += data['name'] + "</span>"
self.name_label.set_markup(txt)
txt = "<span color='#999' font_weight='bold'>@"
txt += data['screen_name'] + "</span>"
self.user_name_label.set_markup(txt)
txt = "<span color='#999' size='small'>"
txt += data['location'] + "</span>"
self.local_label.set_markup(txt)
txt = "<span color='#999' font_weight='bold' size='large'>"
txt += str(data['statuses_count']) + "</span>"
self.tweets_label.set_markup(txt)
txt = "<span color='#999' font_weight='bold' size='large'>"
txt += str(data['friends_count']) + "</span>"
self.follow_label.set_markup(txt)
txt = "<span color='#999' font_weight='bold' size='large'>"
txt += str(data['followers_count']) + "</span>"
self.followers_label.set_markup(txt)
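        # Drop the handler left over from a previous set() call, if any, so the
        # follow button does not fire twice for the same click.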
try:
self.status.disconnect(self.follow_signal)
except:
pass
self.follow_signal = self.status.connect("clicked",
self.on_follow,
data['screen_name'])
if data['following']:
self.status.get_style_context().remove_class("suggested-action")
self.status.get_style_context().add_class("destructive-action")
self.status.set_label("Unfollow")
else:
self.status.get_style_context().remove_class("destructive-action")
self.status.get_style_context().add_class("suggested-action")
self.status.set_label("Follow")
# is our own profile?
if data['screen_name'] == active_account.screen_name:
self.status.set_label(_("Edit"))
try:
self.status.disconnect(self.follow_signal)
except:
pass
self.follow_signal = self.status.connect("clicked", self.on_edit)
# are we following this user?
if friendship_data:
for user in friendship_data:
if 'following' in user['connections']:
self.toggle_follow(True)
else:
self.toggle_follow(False)
self.show_all()
if not data['verified']:
self.verified_img.hide()
def toggle_follow(self, following=False):
if following:
self.status.set_label(_("Unfollow"))
self.status.get_style_context().remove_class("suggested-action")
self.status.get_style_context().add_class("destructive-action")
else:
self.status.set_label(_("Follow"))
self.status.get_style_context().remove_class("destructive-action")
self.status.get_style_context().add_class("suggested-action")
self.following = following
def on_follow(self, _, screen_name):
if self.following:
self.emit_signal_with_arg("unfollow", screen_name)
else:
self.emit_signal_with_arg("follow", screen_name)
def on_dm(self, _):
print "dm"
def on_add_remove_from_lists(self, _):
print "add/remove from lists"
def on_view_profile_on_twitter(self, _):
Gtk.show_uri(None, 'http://www.twitter.com/' + self.screen_name,
Gdk.CURRENT_TIME)
def on_block(self, _):
print "Block!"
def on_edit(self, _):
Gtk.show_uri(None, 'https://twitter.com/settings/account',
                     Gdk.CURRENT_TIME)
| mskala/birdie | birdieapp/gui/userbox.py | Python | mit | 10,112 |
import enum
class BugStatus(enum.Enum):
new = 7
incomplete = 6
invalid = 5
wont_fix = 4
in_progress = 3
fix_committed = 2
fix_released = 1
print('\nMember name: {}'.format(BugStatus.wont_fix.name))
print('Member value: {}'.format(BugStatus.wont_fix.value))
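# A hypothetical extension of this example: Enum members iterate in definition
# order, so every status can be listed directly.
for status in BugStatus:
    print('{:15} = {}'.format(status.name, status.value))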
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_data_structures/enum_create.py | Python | apache-2.0 | 287 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Test the speed of rapidly updating multiple plot curves
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
from pyqtgraph.ptime import time
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication([])
#mw = QtGui.QMainWindow()
#mw.resize(800,800)
p = pg.plot()
p.setWindowTitle('pyqtgraph example: MultiPlotSpeedTest')
#p.setRange(QtCore.QRectF(0, -10, 5000, 20))
p.setLabel('bottom', 'Index', units='B')
nPlots = 10
#curves = [p.plot(pen=(i,nPlots*1.3)) for i in range(nPlots)]
curves = [pg.PlotCurveItem(pen=(i,nPlots*1.3)) for i in range(nPlots)]
for c in curves:
p.addItem(c)
rgn = pg.LinearRegionItem([1,100])
p.addItem(rgn)
data = np.random.normal(size=(53, 5000 // nPlots))  # floor division keeps an integer shape (needed on Python 3)
ptr = 0
lastTime = time()
fps = None
count = 0
def update():
global curve, data, ptr, p, lastTime, fps, nPlots, count
count += 1
#print "---------", count
for i in range(nPlots):
curves[i].setData(i+data[(ptr+i)%data.shape[0]])
#print " setData done."
ptr += nPlots
now = time()
dt = now - lastTime
lastTime = now
if fps is None:
fps = 1.0/dt
else:
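        # Exponentially smoothed FPS estimate: the smoothing weight s grows
        # with the frame time dt, so slow frames update the estimate faster.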
s = np.clip(dt*3., 0, 1)
fps = fps * (1-s) + (1.0/dt) * s
p.setTitle('%0.2f fps' % fps)
#app.processEvents() ## force complete redraw for every plot
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| ibressler/pyqtgraph | examples/MultiPlotSpeedTest.py | Python | mit | 1,754 |
import sys,os
from pymongo import MongoClient
import importlib
import SwitchHandler
import CameraHandler
import SensorHandler
from pprint import pprint
class CommandHandler(object):
"""docstring for CommandHandler"""
def __init__(self, dbconn, command):
super(CommandHandler, self).__init__()
self.dbconn = dbconn
self.command = command
self.params = {}
self.params["callback"] = command["sendmessage"] #callback
self.params["callback2"] = command["sendphoto"] #callback2
self.params["command"] = self.command
self.AppConfig = self.command["AppConfig"]
self.callback = self.params["callback"]
self.callback2 = self.params["callback2"]
# self.callbacks = [self.callback,self.callback2]
def execute(self):
commandstr = self.command["message"][1:]
if " " in commandstr:
commandstr = commandstr[:commandstr.find(" ")]
# print "Command : '%s'" % commandstr
        print "querying command mapping from db"
        cCommand = self.dbconn.commandmapper.find({"commandkey": commandstr}).limit(1)
        print "db query finished"
        # if the command is found, acknowledge and execute it; otherwise report an unknown command
if cCommand.count() > 0:
cCommand = cCommand[0]
self.callback(self.command["account_id"],"hii %s, you just sent command name : '%s' and this is callback!" % (self.command["fullname"],cCommand["commandname"]))
try:
#execute command
#get package
self.modules = cCommand["class_ref"].split(".")
#fill params
self.params["class"] = self.modules[0]
self.params["method"] = self.modules[1]
self.params["id"] = cCommand["_id"]
# module = sys.modules[self.modules[0]]
# pprint(module)
module = eval(self.modules[0])
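                # eval() maps the class name string to one of the handler
                # modules imported above (SwitchHandler, CameraHandler,
                # SensorHandler); getattr below then fetches the identically
                # named class from that module.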
#get class
class_ = getattr(module, self.modules[0])
#init
instance = class_(self.dbconn,self.params)
#exec
instance.execute()
except Exception, e:
self.callback(self.command["account_id"],"Unhandled Command [%s]" % e)
raise e
else:
            self.callback(self.command["account_id"],"Unknown Command.")
| t1g0r/ramey | src/backend/command/CommandHandler.py | Python | gpl-3.0 | 1,966 |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
import re
from odoo.tools.misc import formatLang
class AccountMove(models.Model):
_inherit = "account.move"
l10n_latam_amount_untaxed = fields.Monetary(compute='_compute_l10n_latam_amount_and_taxes')
l10n_latam_tax_ids = fields.One2many(compute="_compute_l10n_latam_amount_and_taxes", comodel_name='account.move.line')
l10n_latam_available_document_type_ids = fields.Many2many('l10n_latam.document.type', compute='_compute_l10n_latam_available_document_types')
l10n_latam_document_type_id = fields.Many2one(
'l10n_latam.document.type', string='Document Type', readonly=False, auto_join=True, index=True,
states={'posted': [('readonly', True)]}, compute='_compute_l10n_latam_document_type', store=True)
l10n_latam_document_number = fields.Char(
compute='_compute_l10n_latam_document_number', inverse='_inverse_l10n_latam_document_number',
string='Document Number', readonly=True, states={'draft': [('readonly', False)]})
l10n_latam_use_documents = fields.Boolean(related='journal_id.l10n_latam_use_documents')
l10n_latam_manual_document_number = fields.Boolean(compute='_compute_l10n_latam_manual_document_number', string='Manual Number')
@api.depends('l10n_latam_document_type_id')
    def _compute_name(self):
        """ Change the way the name of moves that use documents is computed:
        * If the move uses documents but has no document type selected, then name = '/' so no name is shown.
        * If the move uses documents and is numbered manually, do not compute the name at all (it will be set manually).
        * If the move uses documents, is in draft state and has not been posted before, reset the name to '/' (this
        happens when the document type is changed) """
without_doc_type = self.filtered(lambda x: x.journal_id.l10n_latam_use_documents and not x.l10n_latam_document_type_id)
manual_documents = self.filtered(lambda x: x.journal_id.l10n_latam_use_documents and x.l10n_latam_manual_document_number)
(without_doc_type + manual_documents.filtered(lambda x: not x.name or x.name and x.state == 'draft' and not x.posted_before)).name = '/'
# if we change document or journal and we are in draft and not posted, we clean number so that is recomputed in super
self.filtered(
lambda x: x.journal_id.l10n_latam_use_documents and x.l10n_latam_document_type_id
and not x.l10n_latam_manual_document_number and x.state == 'draft' and not x.posted_before).name = '/'
super(AccountMove, self - without_doc_type - manual_documents)._compute_name()
@api.depends('l10n_latam_document_type_id', 'journal_id')
def _compute_l10n_latam_manual_document_number(self):
""" Indicates if this document type uses a sequence or if the numbering is made manually """
recs_with_journal_id = self.filtered(lambda x: x.journal_id and x.journal_id.l10n_latam_use_documents)
for rec in recs_with_journal_id:
rec.l10n_latam_manual_document_number = self._is_manual_document_number(rec.journal_id)
remaining = self - recs_with_journal_id
remaining.l10n_latam_manual_document_number = False
    def _is_manual_document_number(self, journal):
        return journal.type == 'purchase'
@api.depends('name')
def _compute_l10n_latam_document_number(self):
recs_with_name = self.filtered(lambda x: x.name != '/')
for rec in recs_with_name:
name = rec.name
doc_code_prefix = rec.l10n_latam_document_type_id.doc_code_prefix
if doc_code_prefix and name:
name = name.split(" ", 1)[-1]
rec.l10n_latam_document_number = name
remaining = self - recs_with_name
remaining.l10n_latam_document_number = False
@api.onchange('l10n_latam_document_type_id', 'l10n_latam_document_number')
def _inverse_l10n_latam_document_number(self):
for rec in self.filtered(lambda x: x.l10n_latam_document_type_id and (x.l10n_latam_manual_document_number or not x.highest_name)):
if not rec.l10n_latam_document_number:
rec.name = '/'
else:
l10n_latam_document_number = rec.l10n_latam_document_type_id._format_document_number(rec.l10n_latam_document_number)
if rec.l10n_latam_document_number != l10n_latam_document_number:
rec.l10n_latam_document_number = l10n_latam_document_number
rec.name = "%s %s" % (rec.l10n_latam_document_type_id.doc_code_prefix, l10n_latam_document_number)
@api.depends('journal_id', 'l10n_latam_document_type_id')
def _compute_highest_name(self):
manual_records = self.filtered('l10n_latam_manual_document_number')
manual_records.highest_name = ''
super(AccountMove, self - manual_records)._compute_highest_name()
@api.model
def _deduce_sequence_number_reset(self, name):
if self.l10n_latam_use_documents:
return 'never'
return super(AccountMove, self)._deduce_sequence_number_reset(name)
def _get_starting_sequence(self):
if self.journal_id.l10n_latam_use_documents:
if self.l10n_latam_document_type_id:
return "%s 00000000" % (self.l10n_latam_document_type_id.doc_code_prefix)
# There was no pattern found, propose one
return ""
return super(AccountMove, self)._get_starting_sequence()
def _compute_l10n_latam_amount_and_taxes(self):
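        # For latam document types some taxes are shown as included in the
        # untaxed amount: their balance is added back (the sign depends on the
        # move direction) and only the remaining taxes are exposed through
        # l10n_latam_tax_ids.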
recs_invoice = self.filtered(lambda x: x.is_invoice())
for invoice in recs_invoice:
tax_lines = invoice.line_ids.filtered('tax_line_id')
included_taxes = invoice.l10n_latam_document_type_id and \
invoice.l10n_latam_document_type_id._filter_taxes_included(tax_lines.mapped('tax_line_id'))
if not included_taxes:
l10n_latam_amount_untaxed = invoice.amount_untaxed
not_included_invoice_taxes = tax_lines
else:
included_invoice_taxes = tax_lines.filtered(lambda x: x.tax_line_id in included_taxes)
not_included_invoice_taxes = tax_lines - included_invoice_taxes
if invoice.is_inbound():
sign = -1
else:
sign = 1
l10n_latam_amount_untaxed = invoice.amount_untaxed + sign * sum(included_invoice_taxes.mapped('balance'))
invoice.l10n_latam_amount_untaxed = l10n_latam_amount_untaxed
invoice.l10n_latam_tax_ids = not_included_invoice_taxes
remaining = self - recs_invoice
remaining.l10n_latam_amount_untaxed = False
remaining.l10n_latam_tax_ids = [(5, 0)]
def post(self):
for rec in self.filtered(lambda x: x.l10n_latam_use_documents and (not x.name or x.name == '/')):
if rec.move_type in ('in_receipt', 'out_receipt'):
                raise UserError(_('Document types are not supported on receipts yet.'))
return super().post()
@api.constrains('name', 'journal_id', 'state')
    def _check_unique_sequence_number(self):
        """ This uniqueness verification is only valid for customer invoices and for vendor bills that do not use
        documents. A dedicated constraint method, _check_unique_vendor_number, has been created to validate vendor bills """
vendor = self.filtered(lambda x: x.is_purchase_document() and x.l10n_latam_use_documents)
return super(AccountMove, self - vendor)._check_unique_sequence_number()
@api.constrains('state', 'l10n_latam_document_type_id')
    def _check_l10n_latam_documents(self):
        """ This constraint raises an error if an invoice is posted without a document type configured. It only
        applies to invoices related to journals that have "Use Documents" set to True.
        If the document type is set, it also checks that the invoice number has been set, because a posted invoice
        without a document number is not valid when the related journal has "Use Documents" set to True """
validated_invoices = self.filtered(lambda x: x.l10n_latam_use_documents and x.state == 'posted')
without_doc_type = validated_invoices.filtered(lambda x: not x.l10n_latam_document_type_id)
if without_doc_type:
raise ValidationError(_(
                'The journal requires a document type but no document type has been selected on invoices %s.' % (
without_doc_type.ids)))
without_number = validated_invoices.filtered(
lambda x: not x.l10n_latam_document_number and x.l10n_latam_manual_document_number)
if without_number:
raise ValidationError(_('Please set the document number on the following invoices %s.' % (
without_number.ids)))
@api.constrains('move_type', 'l10n_latam_document_type_id')
def _check_invoice_type_document_type(self):
for rec in self.filtered('l10n_latam_document_type_id.internal_type'):
internal_type = rec.l10n_latam_document_type_id.internal_type
invoice_type = rec.move_type
if internal_type in ['debit_note', 'invoice'] and invoice_type in ['out_refund', 'in_refund'] and \
rec.l10n_latam_document_type_id.code != '99':
raise ValidationError(_('You can not use a %s document type with a refund invoice', internal_type))
elif internal_type == 'credit_note' and invoice_type in ['out_invoice', 'in_invoice']:
                raise ValidationError(_('You can not use a %s document type with an invoice') % (internal_type))
def _get_l10n_latam_documents_domain(self):
self.ensure_one()
if self.move_type in ['out_refund', 'in_refund']:
internal_types = ['credit_note']
else:
internal_types = ['invoice', 'debit_note']
return [('internal_type', 'in', internal_types), ('country_id', '=', self.company_id.country_id.id)]
@api.depends('journal_id', 'partner_id', 'company_id', 'move_type')
def _compute_l10n_latam_available_document_types(self):
self.l10n_latam_available_document_type_ids = False
for rec in self.filtered(lambda x: x.journal_id and x.l10n_latam_use_documents and x.partner_id):
rec.l10n_latam_available_document_type_ids = self.env['l10n_latam.document.type'].search(rec._get_l10n_latam_documents_domain())
@api.depends('l10n_latam_available_document_type_ids')
@api.depends_context('internal_type')
def _compute_l10n_latam_document_type(self):
internal_type = self._context.get('internal_type', False)
for rec in self.filtered(lambda x: x.state == 'draft'):
document_types = rec.l10n_latam_available_document_type_ids._origin
document_types = internal_type and document_types.filtered(lambda x: x.internal_type == internal_type) or document_types
rec.l10n_latam_document_type_id = document_types and document_types[0].id
def _compute_invoice_taxes_by_group(self):
        report_or_portal_view = 'commit_assetsbundle' in self.env.context or \
            self.env.context.get('params', {}).get('view_type') != 'form'
if not report_or_portal_view:
return super()._compute_invoice_taxes_by_group()
move_with_doc_type = self.filtered('l10n_latam_document_type_id')
for move in move_with_doc_type:
lang_env = move.with_context(lang=move.partner_id.lang).env
tax_lines = move.l10n_latam_tax_ids
res = {}
# There are as many tax line as there are repartition lines
done_taxes = set()
for line in tax_lines:
res.setdefault(line.tax_line_id.tax_group_id, {'base': 0.0, 'amount': 0.0})
res[line.tax_line_id.tax_group_id]['amount'] += line.price_subtotal
tax_key_add_base = tuple(move._get_tax_key_for_group_add_base(line))
if tax_key_add_base not in done_taxes:
# The base should be added ONCE
res[line.tax_line_id.tax_group_id]['base'] += line.tax_base_amount
done_taxes.add(tax_key_add_base)
res = sorted(res.items(), key=lambda l: l[0].sequence)
move.amount_by_group = [(
group.name, amounts['amount'],
amounts['base'],
formatLang(lang_env, amounts['amount'], currency_obj=move.currency_id),
formatLang(lang_env, amounts['base'], currency_obj=move.currency_id),
len(res),
group.id,
) for group, amounts in res]
super(AccountMove, self - move_with_doc_type)._compute_invoice_taxes_by_group()
@api.constrains('name', 'partner_id', 'company_id', 'posted_before')
def _check_unique_vendor_number(self):
""" The constraint _check_unique_sequence_number is valid for customer bills but not valid for us on vendor
bills because the uniqueness must be per partner """
for rec in self.filtered(
lambda x: x.name and x.name != '/' and x.is_purchase_document() and x.l10n_latam_use_documents):
domain = [
('move_type', '=', rec.move_type),
# by validating name we validate l10n_latam_document_type_id
('name', '=', rec.name),
('company_id', '=', rec.company_id.id),
('id', '!=', rec.id),
('commercial_partner_id', '=', rec.commercial_partner_id.id),
                # allow two equal numbers if they are cancelled
('state', '!=', 'cancel'),
]
if rec.search(domain):
raise ValidationError(_('Vendor bill number must be unique per vendor and company.'))
| ddico/odoo | addons/l10n_latam_invoice_document/models/account_move.py | Python | agpl-3.0 | 14,050 |
from distutils.core import setup, Extension
ext_modules = [
Extension(
name = 'cthreading',
sources = ['cthreading.c', 'src/threadpool.c', 'src/threading.c'],
include_dirs = ['inc'],
)
]
setup(
name = 'cthreading',
author = 'Continuum Analytics, Inc.',
ext_modules = ext_modules,
)
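# Typical build step for this extension module (assuming the listed C sources
# exist in the repository):
#   python setup.py build_ext --inplace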
| pykit/pykit-threads | pykit_threads/threadpool/setup.py | Python | bsd-3-clause | 328 |
# by TR
from matplotlib.mlab import psd
from numpy.fft.helper import fftfreq
from obspy.core import Trace as ObsPyTrace
from obspy.signal.util import nextpow2
from scipy.fftpack import fft, ifft
import scipy.interpolate
from sito import util
from sito.util import filterResp, fillArray
from sito.xcorr import timeNorm, spectralWhitening
import copy
import logging
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate
import scipy.signal
# from mystream import read
log = logging.getLogger(__name__)
# statshf = 'sampling_rate delta calib npts network location station channel starttime'
# statshf_r = statshf + ' endtime'
# shhf_int = 'SIGN EVENTNO MARK'
# shhf_float = 'DISTANCE AZIMUTH SLOWNESS INCI DEPTH MAGNITUDE LAT LON SIGNOISE PWDW DCVREG DCVINCI'
# shhf_str = 'COMMENT OPINFO FILTER QUALITY BYTEORDER P-ONSET S-ONSET ORIGIN'
# shhf = ' '.join([shhf_int, shhf_float, shhf_str])
class Trace(ObsPyTrace):
"""
Class derieved from obspy.core.Trace with some additional functionality.
"""
# @classmethod
# def read(cls, pathname_or_url, format=None, headonly=False, ** kwargs):
# """Read first trace of waveform file into an Trace object.
#
# If there are more than one trace, tries to glue them together.
# See obspy.core.read.
# """
#
# mystream = read(pathname_or_url, format=None, headonly=False,
# ** kwargs)
# mytrace = mystream[0]
# if len(mystream) > 1:
# log.warning('File ' + pathname_or_url + ' contains ' +
# str(len(mystream)) + ' traces.')
# mystream.merge()
# mytrace = mystream[0]
# if len(mystream) > 1:
# log.error('File ' + pathname_or_url + ' contains ' +
# str(len(mystream)) + ' traces of different id.')
# return cls(mytrace)
def __init__(self, trace=None, data=np.array([]), header={}):
"""
Extend Trace.__init__, Trace object can be an argument.
"""
        if trace is None:
super(Trace, self).__init__(data=data, header=header)
else:
self.stats = trace.stats
# set data without changing npts in stats object
super(Trace, self).__setattr__('data', trace.data)
if not 'is_fft' in self.stats:
self.stats.is_fft = False
def write(self, filename, format_, **kwargs):
"""
Saves current trace into a filename.
Parameters
----------
filename : string
Name of the output filename.
format_ : string
Name of the output format_.
See :meth:`~obspy.core.stream.Stream.write` method for all possible
formats.
Basic Usage
-----------
>>> tr = Trace()
>>> tr.write("out.mseed", format_="MSEED") # doctest: +SKIP
"""
# we need to import here in order to prevent a circular import of
# Stream and Trace classes
from sito.stream import Stream
Stream([self]).write(filename, format_, **kwargs)
def print_(self, mod=0):
"""
Print some header information.
:param mod: 0-2
:return: string with information
"""
dic = copy.deepcopy(self.stats)
dic['dur'] = self.stats.endtime - self.stats.starttime
dic['pon'] = self.stats.ponset - self.stats.starttime
out = '%(network)s.%(station)s.%(location)s.%(channel)s.%(event_id)s ' \
'| %(dur).1fs %(sampling_rate).1f Hz '
out2 = ' dist %(dist).1f, pon %(pon).1fs, ' \
'azi %(azi).1f, inci %(inci).1f'
if 'event' in dic.keys():
dic['event_id'] = dic.event.id
else:
dic['event_id'] = ''
if 'mark' in dic.keys():
dic['mark'] = dic.mark
else:
dic['mark'] = 9
if mod == 0:
out += '%(npts)d samples | %(filter)s'
# dic['st'] = self.stats.starttime.isoformat()
elif mod == 1:
out += '| %(filter)s | ' + out2
# dic['st'] = self.stats.starttime.date.isoformat()
else:
out += '| ' + out2 + ', lazi %(lazi).1f, linci %(linci).1f, marked:%(mark)d' # , razi %(razi).1f, azi2 %(azi2).1f'
# dic['st'] = self.stats.starttime.date.isoformat()
# dic['razi'] = (-90-dic['lazi']) % 360
# dic['azi2'] = (dic['lazi']-180) % 360
return out % (dic)
def addZeros(self, secs_before, secs_after=None):
if secs_after is None:
secs_after = secs_before
self.data = np.hstack((np.zeros(secs_before * self.stats.sampling_rate),
self.data,
np.zeros(secs_after * self.stats.sampling_rate)))
self.stats.npts = len(self.data)
self.stats.starttime = self.stats.starttime - secs_before
def getArgMax(self, ret='index', spline=False, spline_enhance=100, func=None):
data = self.data
if func is not None:
data = func(data.copy())
if not spline:
argmax = np.argmax(data)
datmax = data[argmax]
else:
n = len(self)
x2 = np.linspace(0, n, n * spline_enhance + 1)
f = scipy.interpolate.InterpolatedUnivariateSpline(np.arange(n), data)
argmax = x2[np.argmax(f(x2))]
datmax = f(argmax)
if ret == 'time':
argmax = argmax * self.stats.delta
elif ret == 'utc':
argmax = self.stats.starttime + argmax * self.stats.delta
return argmax, datmax
def fft(self, nfft=None):
if self.stats.is_fft:
raise ValueError('Trace already ffted.')
self.stats.npts_data = self.stats.npts
if nfft is None:
nfft = nextpow2(self.stats.npts_data)
self.stats.nfft = nfft
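        # Keep the L2 norm of the time-domain data; consumers can use it to
        # renormalize after frequency-domain processing.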
self.stats.stdev = (np.sum(self.data ** 2)) ** 0.5
self.data = fft(self.data, nfft, overwrite_x=False)
self.stats.is_fft = True
self.stats.filter += 'FFT'
def ifft(self):
if not self.stats.is_fft:
raise ValueError('Trace is no fft.')
self.data = np.real(ifft(self.data, self.stats.nfft,
overwrite_x=False)[:self.stats.npts_data])
self.stats.is_fft = False
self.stats.filter += 'IFFT'
def fftfreq(self):
if not self.stats.is_fft:
raise ValueError('Trace is no fft.')
return fftfreq(self.stats.npts, 1. / self.stats.sampling_rate)
def ffttrim(self, min_freq=0, max_freq=100, dec=1):
if 'freq_min' in self.stats:
raise ValueError('you can use ffttrim only once.')
freqs = self.fftfreq()
freq_bool = (min_freq <= freqs) * (freqs <= max_freq)
self.data = self.data[freq_bool][::dec]
self.stats.sampling_rate /= dec
self.stats.freq_min = min_freq
self.stats.freq_max = max_freq
def demean(self):
"""
Subtract the mean from the trace.
"""
self.data = self.data - self.data.mean()
self.stats.filter += 'Dm'
def detrend(self):
"""
Detrend trace.
"""
self.data = scipy.signal.detrend(self.data) # , axis=-1, type='linear', bp=0)
self.stats.filter += 'Dt'
def integrate(self):
"""
Integrate trace.
"""
self.data = np.concatenate((self.data[:1], scipy.integrate.cumtrapz(self.data, dx=1. / self.stats.sampling_rate)))
self.stats.filter += 'Int'
def filter2(self, freqmin=None, freqmax=None, corners=2, zerophase=False):
"""
Wrapper for Trace.filter, make entry in self.stats.filter.
"""
if self.stats.is_fft:
self.data *= filterResp(freqmin, freqmax, corners=corners, zerophase=zerophase,
sr=self.stats.sampling_rate, N=self.stats.nfft,
whole=True)[1]
self.stats.filter += 'BP%4.2f,%4.2f,%d,%d' % (freqmin, freqmax, corners, zerophase)
else:
mask = np.ma.getmask(self.data)
if freqmin and freqmax:
self.filter("bandpass", freqmin=freqmin, freqmax=freqmax, corners=corners, zerophase=zerophase)
self.stats.filter += 'BP%4.2f,%4.2f,%d,%d' % (freqmin, freqmax, corners, zerophase)
elif not freqmin and freqmax:
self.filter("lowpass", freq=freqmax, corners=corners, zerophase=zerophase)
self.stats.filter += 'LP%4.2f,%d,%d' % (freqmax, corners, zerophase)
elif freqmin and not freqmax:
self.filter("highpass", freq=freqmin, corners=corners, zerophase=zerophase)
self.stats.filter += 'HP%4.2f,%d,%d' % (freqmin, corners, zerophase)
self.data = fillArray(self.data, mask=mask, fill_value=0.)
def gauss(self, gauss, hp=None):
"""
Gauss low pass filter
"""
assert not self.stats.is_fft
import sito.rf
self.data = sito.rf.gauss(self.data, self.stats.sampling_rate, gauss, hp=hp)
self.stats.gauss = gauss
self.stats.filter += 'Gauss%4.2f' % gauss
def downsample2(self, new_sampling_rate):
"""
Wrapper for Trace.decimate, make entry in trace.stats.filter.
"""
if self.stats.sampling_rate >= 2 * new_sampling_rate:
factor = int(self.stats.sampling_rate / new_sampling_rate)
self.decimate(factor, strict_length=False, no_filter=True)
self.stats.filter += 'DS%d' % factor
def taper2(self, zeros=0, taper=0):
window = self.stats.endtime - self.stats.starttime
assert window > 2 * zeros + 2 * taper
w_z = self.stats.starttime + zeros, self.stats.endtime - zeros
self.slice(None, w_z[0]).data[:] = 0
self.slice(w_z[1], None).data[:] = 0
temp = self.slice(*w_z)
temp.taper(p=2.*taper / (window - 2 * zeros))
self.slice(*w_z).data[:] = temp.data
def acorr(self, shift=None, normalize=True, oneside=False):
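        # Autocorrelation via the Wiener-Khinchin theorem: the inverse FFT of
        # the power spectrum X * conj(X) is the autocorrelation of the signal.
        # Zero-padding to the next power of two avoids circular wrap-around.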
if shift is None:
shift = len(self.data)
else:
shift = int(shift * self.stats.sampling_rate)
size = max(2 * shift + 1, len(self.data) + shift)
nfft = nextpow2(size)
IN1 = fft(self.data, nfft)
IN1 *= np.conjugate(IN1)
ret = ifft(IN1).real
# shift data for time lag 0 to index 'shift'
ret = np.roll(ret, shift)[:2 * shift + 1]
# normalize xcorr
if normalize:
stdev = (np.sum(self.data ** 2)) ** 0.5
if stdev == 0:
log.warning('Data is zero!!')
ret[:] = 0.
else:
ret /= stdev ** 2
if oneside:
ret = ret[shift:]
self.data = ret
self.stats.npts = len(ret)
def timeNorm(self, method=None, param=None):
str_param = str(param)
if param == None:
str_param = ''
self.data = timeNorm(self.data, method=method, param=param)
self.stats.filter += 'TimeNorm%s%s' % (method, str_param)
def spectralWhitening(self, smoothi=None, apply_filter=None):
self.data = spectralWhitening(self.data, sr=self.stats.sampling_rate, smoothi=smoothi, freq_domain=self.stats.is_fft, apply_filter=apply_filter)
self.stats.filter += 'SpecWhite'
def moveout(self, model='iasp91', phase='Ps', p0=6.4):
"""
In-place moveout correction.
:param p0: reference slowness
"""
if model == 'iasp91':
model = util.Iasp91()
phc = util.phase_dict[phase]
st = self.stats
i0 = int((st.ponset - st.starttime) * st.sampling_rate)
self.data[i0:] = util.mocorr(self.data[i0:],
model.z, model.vp, model.vs,
st.slowness, p0, st.sampling_rate, phc).astype(self.data.dtype)
# trc.data[i0:] = util.mocorr(trc.data[i0:],
# model.z, model.vp, model.vs,
# p, p0, fs, phc).astype(trc.data.dtype)
# i0 = -int(float(trc.time)*trc.fsamp)
st.filter += 'MC%s,%s' % (phase, p0)
def zmigr(self, model='iasp91', phase='Ps', zmax=750, dz=0.5):
"""
Z-migration
It's not working!
"""
if model == 'iasp91':
model = util.Iasp91()
phc = util.phase_dict[phase]
st = self.stats
i0 = int((st.ponset - st.starttime) * st.sampling_rate)
fs = st.sampling_rate
self.data = util.zmigr(model.z, model.vp, model.vs,
self.data[i0:],
st.slowness, fs, 0., zmax, dz, phc)
        1 / 0  # intentional abort: z-migration is not working yet (see docstring above)
st.npts = len(self.data)
st.sampling_rate = 1. / dz
st.filter += 'ZM%s' % phase
def norm(self, value=1., fak=None):
"""
Normalize trace.
"""
if not fak:
fak = value / np.max(np.abs(self.data))
self.data = self.data * fak
self.stats.filter += 'N%s' % fak
def signoise(self, winsig, winnoise, relative='ponset'):
"""
Determine signal noise ratio by dividing the maximum in the two windows.
"""
st = self.stats
if relative in ('ponset', 'middle'):
if relative == 'ponset':
rel_time = getattr(st, relative)
else:
rel_time = st.starttime + (st.endtime - st.starttime) / 2
winsig0 = rel_time - st.starttime + winsig[0]
winsig1 = rel_time - st.starttime + winsig[1]
winnoise0 = rel_time - st.starttime + winnoise[0]
winnoise1 = rel_time - st.starttime + winnoise[1]
else:
winsig0 = winsig[0] - st.starttime
winsig1 = winsig[1] - st.starttime
winnoise0 = winnoise[0] - st.starttime
winnoise1 = winnoise[1] - st.starttime
t = np.arange(self.stats.npts) * 1. / st.sampling_rate
datasig = self.data[(t >= winsig0) * (t <= winsig1)]
datanoise = self.data[(t >= winnoise0) * (t <= winnoise1)]
# ipshell()
st.signoise = max(abs(datasig)) / max(abs(datanoise))
def _window(self, start, end, window='tukey', lenslope=10):
"""
Window between start and end with args passed to util.getWindow function.
"""
# if relative != 'ok':
# start, end = util.getTimeIntervall(Stream([self]), start, end, relative, ttype='secstart')
# start = start[0]
# end = end[0]
t = np.linspace(0, self.stats.endtime - self.stats.starttime, self.stats.npts)
boolarray = (t >= start) * (t <= end)
lenwindow = len(boolarray[boolarray])
alpha = 2. * lenslope / (end - start) # inserted 1- !!!
if alpha > 1:
alpha = 1
elif alpha < 0:
alpha = 0
if window == 'tukey':
self.data[boolarray] *= util.cosTaper(lenwindow, alpha)
else:
self.data[boolarray] *= util.get_window(window, lenwindow)
self.data[boolarray == False] = 0
def plotTrace(self, *args, **kwargs):
from sito import imaging
return imaging.plotTrace(self, *args, **kwargs)
@util.add_doc(psd)
def plotPSD(self, ax=None, x_time=True, scale_by_freq=True, Nfft=256 * 16 * 16,
pad_to=None,
xscale='log', yscale='log', grid=True,
xlabel='time (s)', ylabel=None,
figtitle='PSD station component date',
title_in_axis=False, smooth=True,
just_calculate=False, ** kwargs):
"""
Plot PSD of first trace.
Doc matplotlib.mlab.psd:
"""
if self.stats.is_fft:
pxx = self.data
if 'freq_min' in self.stats:
freqs = np.linspace(self.stats.freq_min, self.stats.freq_max, self.stats.npts)
else:
freqs = self.fftfreq()
else:
pxx, freqs = psd(self.data, NFFT=Nfft, Fs=self.stats.sampling_rate,
scale_by_freq=scale_by_freq, pad_to=pad_to)
if just_calculate:
return pxx, freqs
if x_time:
pxx = pxx[::-1]
freqs = 1. / freqs[::-1]
elif 'time' in xlabel:
xlabel = 'freq (Hz)'
if smooth:
pxx = util.smooth(pxx, smooth)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
# ## print title
if figtitle is not None:
figtitle = figtitle.replace('station', self.stats.station)
figtitle = figtitle.replace('component', self.stats.channel[-1])
try:
starttime = self.stats.starttime + 0.5
figtitle = figtitle.replace('time', '%s' % starttime)
figtitle = figtitle.replace('date', '%s' % starttime.date)
figtitle = figtitle.replace('year', '%d' % starttime.year)
figtitle = figtitle.replace('nfft', '%d' % Nfft)
except:
pass
if not title_in_axis:
fig.suptitle(figtitle, x=0.5,
horizontalalignment='center')
# fig.text(title, 0., 0.95, horizontalalignment = 'left' )
else:
ax.text(0.1, 1, figtitle, verticalalignment='top',
transform=ax.transAxes)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.grid(grid)
ax.plot(freqs, pxx, **kwargs)
return ax
| trichter/sito | trace.py | Python | mit | 17,972 |
from uuid import uuid4
from django.test import TestCase, RequestFactory
from shame.models import Store, StoreTemplate
from shame.views import renderwithstoretemplate
class RenderWithStoreTemplateTest(TestCase):
def test_usefile(self):
request = RequestFactory().get('/some/page')
request.store = object()
response = renderwithstoretemplate(
request,
'products.html',
{ 'products': [{
'name': 'thing',
'sku': uuid4(),
'price': 'free' }],
'cartsize': 2 })
self.assertIn(b'<html>', response.content)
def test_usefilewhennostore(self):
StoreTemplate.objects.create(
store=Store.objects.create(subdomain='test'),
name='products.html',
content='empty template')
request = RequestFactory().get('/some/page')
request.store = None
response = renderwithstoretemplate(
request,
'products.html',
{ 'products': [{
'name': 'thing',
'sku': uuid4(),
'price': 'free' }],
'cartsize': 2 })
self.assertIn(b'<html>', response.content)
def test_usedbtemplateforstore(self):
store = Store.objects.create(subdomain='test')
StoreTemplate.objects.create(
store=store,
name='products.html',
content='empty template')
request = RequestFactory().get('/some/page')
request.store = store
response = renderwithstoretemplate(
request,
'products.html',
{ 'products': [{
'name': 'thing',
'sku': uuid4(),
'price': 'free' }],
'cartsize': 2 })
self.assertEqual(response.content, b'empty template')
| tps12/freezing-shame | freezing/shame/tests/views/test_renderwithstoretemplate.py | Python | gpl-3.0 | 1,868 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from datetime import datetime, timedelta
import unittest
from trac.core import Component, TracError, implements
from trac.test import MockPerm
from trac.util.datefmt import utc
from trac.versioncontrol.api import (
Changeset, DbRepositoryProvider, IRepositoryConnector, Node,
NoSuchChangeset, Repository)
from trac.versioncontrol.web_ui.log import LogModule
from trac.web.api import parse_arg_list
from trac.web.tests.api import RequestHandlerPermissionsTestCaseBase
mock_repotype = 'mock:' + __name__
class MockRepositoryConnector(Component):
implements(IRepositoryConnector)
def get_supported_types(self):
yield mock_repotype, 8
def get_repository(self, repos_type, repos_dir, params):
return MockRepository('mock:' + repos_dir, params, self.log)
class MockRepository(Repository):
has_linear_changesets = True
def get_youngest_rev(self):
return 100
def normalize_path(self, path):
return path.strip('/') if path else ''
def normalize_rev(self, rev):
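        # This mock only accepts revisions of the form 3n + 1
        # (1, 4, 7, ..., 100); anything else raises NoSuchChangeset.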
if rev is None or rev == '':
return self.youngest_rev
try:
nrev = int(rev)
        except (TypeError, ValueError):
raise NoSuchChangeset(rev)
else:
if not (1 <= nrev <= self.youngest_rev) or nrev % 3 != 1:
raise NoSuchChangeset(rev)
return nrev
def get_node(self, path, rev):
assert rev % 3 == 1 # allow only 3n + 1
assert path in ('file', 'file-old')
return MockNode(self, path, rev, Node.FILE)
def get_changeset(self, rev):
assert rev % 3 == 1 # allow only 3n + 1
return MockChangeset(self, rev, 'message-%d' % rev, 'author-%d' % rev,
datetime(2001, 1, 1, tzinfo=utc) +
timedelta(seconds=rev))
def previous_rev(self, rev, path=''):
assert rev % 3 == 1 # allow only 3n + 1
return rev - 3 if rev > 0 else None
def get_path_history(self, path, rev=None, limit=None):
histories = [(path, 100, Changeset.DELETE),
(path, 40, Changeset.MOVE),
(path + '-old', 1, Changeset.ADD)]
for history in histories:
if limit is not None and limit <= 0:
break
if rev is None or rev >= history[1]:
yield history
if limit is not None:
limit -= 1
def rev_older_than(self, rev1, rev2):
return self.normalize_rev(rev1) < self.normalize_rev(rev2)
def close(self):
pass
def _not_implemented(self, *args, **kwargs):
raise NotImplementedError
get_changes = _not_implemented
get_oldest_rev = _not_implemented
next_rev = _not_implemented
class MockChangeset(Changeset):
def get_changes(self):
raise StopIteration
class MockNode(Node):
def __init__(self, repos, path, rev, kind):
super(MockNode, self).__init__(repos, path, rev, kind)
self.created_path = path
self.created_rev = rev
def get_history(self, limit=None):
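        # Synthesized history: revisions descend in steps of 3; the youngest
        # rev is a DELETE, rev 40 is the MOVE after which the path becomes
        # 'file-old', and rev 1 is the ADD.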
youngest_rev = self.repos.youngest_rev
rev = self.rev
path = self.path
while rev > 0:
if limit is not None:
if limit <= 0:
return
limit -= 1
if rev == 1:
change = Changeset.ADD
elif rev == 40:
change = Changeset.MOVE
elif rev == youngest_rev:
change = Changeset.DELETE
else:
change = Changeset.EDIT
yield path, rev, change
if rev == 40:
path += '-old'
rev -= 3
def _not_implemented(self, *args, **kwargs):
raise NotImplementedError
get_annotations = _not_implemented
get_content = _not_implemented
get_content_length = _not_implemented
get_content_type = _not_implemented
get_entries = _not_implemented
get_last_modified = _not_implemented
get_properties = _not_implemented
class LogModuleTestCase(RequestHandlerPermissionsTestCaseBase):
def setUp(self):
self._super = super(LogModuleTestCase, self)
self._super.setUp(LogModule)
provider = DbRepositoryProvider(self.env)
provider.add_repository('mock', '/', mock_repotype)
def create_request(self, **kwargs):
kwargs.setdefault('path_info', '/log/mock')
kwargs.setdefault('perm', MockPerm())
return self._super.create_request(**kwargs)
def test_default_repository_not_configured(self):
"""Test for regression of http://trac.edgewall.org/ticket/11599."""
req = self.create_request(path_info='/log/', args={'new_path': '/'})
self.assertRaises(TracError, self.process_request, req)
def test_without_rev(self):
req = self.create_request(path_info='/log/mock/file',
args={'limit': '4'})
template, data, ctype = self.process_request(req)
self.assertEqual('revisionlog.html', template)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([100, 97, 94, 91, 88],
[item['rev'] for item in items])
self.assertEqual(['delete'] + ['edit'] * 3 + [None],
[item['change'] for item in items])
links = req.chrome['links']['next']
self.assertEqual('/trac.cgi/log/mock/file?limit=4&rev=88&'
'mode=stop_on_copy', links[0]['href'])
self.assertEqual(1, len(links))
def test_with_rev(self):
req = self.create_request(path_info='/log/mock/file',
args={'rev': '49'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([49, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 4 + ['file-old'],
[item['path'] for item in items])
self.assertEqual(['edit'] * 3 + ['move', 'edit'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_with_rev_and_limit(self):
req = self.create_request(path_info='/log/mock/file',
args={'rev': '49', 'limit': '4'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([49, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 4 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 4 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 4 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit'] * 3 + ['move', None],
[item['change'] for item in items])
links = req.chrome['links']['next']
self.assertEqual('/trac.cgi/log/mock/file-old?limit=4&rev=37&'
'mode=stop_on_copy', links[0]['href'])
self.assertEqual(1, len(links))
def test_with_rev_on_start(self):
req = self.create_request(path_info='/log/mock/file-old',
args={'rev': '10'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(4, len(items))
self.assertEqual([10, 7, 4, 1],
[item['rev'] for item in items])
self.assertEqual(['file-old'] * 4, [item['path'] for item in items])
self.assertEqual([1] * 4, [item['depth'] for item in items])
self.assertEqual([None] * 4,
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit'] * 3 + ['add'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_with_rev_and_limit_on_start(self):
req = self.create_request(path_info='/log/mock/file-old',
args={'rev': '10', 'limit': '4'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(4, len(items))
self.assertEqual([10, 7, 4, 1],
[item['rev'] for item in items])
self.assertEqual(['file-old'] * 4, [item['path'] for item in items])
self.assertEqual([1] * 4, [item['depth'] for item in items])
self.assertEqual([None] * 4,
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit'] * 3 + ['add'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_with_invalid_rev(self):
def fn(message, **kwargs):
req = self.create_request(path_info='/log/mock/file', **kwargs)
try:
self.process_request(req)
except NoSuchChangeset as e:
self.assertEqual(message, unicode(e))
fn('No changeset 101 in the repository', args={'rev': '101'})
fn('No changeset 0 in the repository', args={'rev': '0'})
fn('No changeset 43-46 in the repository', args={'rev': '43-46'})
def test_revranges_1(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '70,79-82,94-100'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(9, len(items))
self.assertEqual([100, 97, 94, 91, 82, 79, 76, 70, 67],
[item['rev'] for item in items])
self.assertEqual(['file'] * 9,
[item['path'] for item in items])
self.assertEqual([1] * 9, [item['depth'] for item in items])
self.assertEqual([None] * 9,
[item.get('copyfrom_path') for item in items])
self.assertEqual(['delete', 'edit', 'edit', None, 'edit', 'edit', None,
'edit', None],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_revranges_2(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '22-49'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([49, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 4 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 4 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 4 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit'] * 3 + ['move', 'edit'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_revranges_3(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '22-46,55-61'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(8, len(items))
self.assertEqual([61, 58, 55, 52, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 7 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 7 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 7 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit', 'edit', 'edit', None,
'edit', 'edit', 'move', 'edit'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_revranges_4(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '40-46,55-61'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(8, len(items))
self.assertEqual([61, 58, 55, 52, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 7 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 7 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 7 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit', 'edit', 'edit', None,
'edit', 'edit', 'move', None],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_revranges_1_with_limit(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '70,79-82,94-100',
'limit': '4'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(6, len(items))
self.assertEqual([100, 97, 94, 91, 82, 79],
[item['rev'] for item in items])
self.assertEqual(['file'] * 6,
[item['path'] for item in items])
self.assertEqual([1] * 6, [item['depth'] for item in items])
self.assertEqual([None] * 6,
[item.get('copyfrom_path') for item in items])
self.assertEqual(['delete', 'edit', 'edit', None, 'edit', None],
[item['change'] for item in items])
self.assertIn('next', req.chrome['links'])
links = req.chrome['links']['next']
self.assertEqual('/trac.cgi/log/mock/file?limit=4&revs=70%2C79&'
'rev=79&mode=stop_on_copy', links[0]['href'])
self.assertEqual(1, len(links))
def test_revranges_1_next_link_with_limits(self):
def next_link_args(limit):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '70,79-82,94-100',
'limit': str(limit)})
template, data, ctype = self.process_request(req)
links = req.chrome['links']
if 'next' in links:
link = links['next'][0]['href']
path_info, query_string = link.split('?', 1)
return dict(parse_arg_list(query_string))
else:
return None
self.assertEqual({'limit': '1', 'rev': '97', 'revs': '70,79-82,94-97',
'mode': 'stop_on_copy'}, next_link_args(1))
self.assertEqual({'limit': '2', 'rev': '94', 'revs': '70,79-82,94',
'mode': 'stop_on_copy'}, next_link_args(2))
self.assertEqual({'limit': '3', 'rev': '91', 'revs': '70,79-82',
'mode': 'stop_on_copy'}, next_link_args(3))
self.assertEqual({'limit': '4', 'rev': '79', 'revs': '70,79',
'mode': 'stop_on_copy'}, next_link_args(4))
self.assertEqual({'limit': '5', 'rev': '76', 'revs': '70',
'mode': 'stop_on_copy'}, next_link_args(5))
self.assertEqual(None, next_link_args(6))
def test_revranges_2_with_limit(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '22-49', 'limit': '4'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([49, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 4 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 4 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 4 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit'] * 3 + ['move', None],
[item['change'] for item in items])
self.assertIn('next', req.chrome['links'])
links = req.chrome['links']['next']
self.assertEqual('/trac.cgi/log/mock/file-old?limit=4&revs=22-37&'
'rev=37&mode=stop_on_copy', links[0]['href'])
self.assertEqual(1, len(links))
def test_revranges_3_with_limit(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '22-46,55-61', 'limit': '7'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(8, len(items))
self.assertEqual([61, 58, 55, 52, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 7 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 7 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 7 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit', 'edit', 'edit', None,
'edit', 'edit', 'move', 'edit'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_revranges_4_with_limit(self):
req = self.create_request(path_info='/log/mock/file',
args={'revs': '40-46,55-61', 'limit': '7'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(8, len(items))
self.assertEqual([61, 58, 55, 52, 46, 43, 40, 37],
[item['rev'] for item in items])
self.assertEqual(['file'] * 7 + ['file-old'],
[item['path'] for item in items])
self.assertEqual([1] * 7 + [2], [item['depth'] for item in items])
self.assertEqual([None] * 7 + ['file-old'],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit', 'edit', 'edit', None,
'edit', 'edit', 'move', None],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def test_invalid_revranges(self):
def fn(message, **kwargs):
req = self.create_request(path_info='/log/mock/file', **kwargs)
            try:
                self.process_request(req)
            except NoSuchChangeset as e:
                self.assertEqual(message, unicode(e))
            else:
                self.fail('NoSuchChangeset not raised')
fn('No changeset 101 in the repository', args={'revs': '101'})
fn('No changeset 0 in the repository', args={'revs': '0'})
fn('No changeset 0 in the repository', args={'revs': '0-43'})
fn('No changeset 101 in the repository', args={'revs': '43-101'})
fn('No changeset 43-46-49 in the repository',
args={'revs': '43-46-49'})
fn('No changeset 50 in the repository',
args={'revs': '43-46,50,52-55'})
def test_follow_copy(self):
req = self.create_request(path_info='/log/mock/file',
args={'rev': '43', 'limit': '4',
'mode': 'follow_copy'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(5, len(items))
self.assertEqual([43, 40, 37, 34, 31],
[item['rev'] for item in items])
self.assertEqual(['file', 'file', 'file-old', 'file-old', 'file-old'],
[item['path'] for item in items])
self.assertEqual([1, 1, 2, 2, 2], [item['depth'] for item in items])
self.assertEqual([None, None, 'file-old', None, None],
[item.get('copyfrom_path') for item in items])
self.assertEqual(['edit', 'move', 'edit', 'edit', None],
[item['change'] for item in items])
links = req.chrome['links']['next']
self.assertEqual('/trac.cgi/log/mock/file-old?limit=4&rev=31&'
'mode=follow_copy', links[0]['href'])
self.assertEqual(1, len(links))
def test_path_history(self):
req = self.create_request(path_info='/log/mock/file',
args={'mode': 'path_history'})
template, data, ctype = self.process_request(req)
items = data['items']
self.assertEqual(3, len(items))
self.assertEqual(['delete', 'move', 'add'],
[item['change'] for item in items])
self.assertNotIn('next', req.chrome['links'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LogModuleTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| pkdevbox/trac | trac/versioncontrol/web_ui/tests/log.py | Python | bsd-3-clause | 21,838 |
import numpy as np
import pytest
import scipy as sp
import scipy.special
import scipy.stats
from scipy.stats import shapiro
import openmc
import openmc.lib
def test_t_percentile():
# Permutations include 1 DoF, 2 DoF, and > 2 DoF
    # We will test 5 p-values at each of the 3 DoF values
test_ps = [0.02, 0.4, 0.5, 0.6, 0.98]
test_dfs = [1, 2, 5]
# The reference solutions come from Scipy
ref_ts = [[sp.stats.t.ppf(p, df) for p in test_ps] for df in test_dfs]
test_ts = [[openmc.lib.math.t_percentile(p, df) for p in test_ps]
for df in test_dfs]
# The 5 DoF approximation in openmc.lib.math.t_percentile is off by up to
# 8e-3 from the scipy solution, so test that one separately with looser
# tolerance
assert np.allclose(ref_ts[:-1], test_ts[:-1])
assert np.allclose(ref_ts[-1], test_ts[-1], atol=1e-2)
def test_calc_pn():
max_order = 10
test_xs = np.linspace(-1., 1., num=5, endpoint=True)
# Reference solutions from scipy
ref_vals = np.array([sp.special.eval_legendre(n, test_xs)
for n in range(0, max_order + 1)])
test_vals = []
for x in test_xs:
test_vals.append(openmc.lib.math.calc_pn(max_order, x).tolist())
test_vals = np.swapaxes(np.array(test_vals), 0, 1)
assert np.allclose(ref_vals, test_vals)
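# For context, the Legendre values compared above obey Bonnet's recurrence,
#     (n + 1) * P_{n+1}(x) = (2n + 1) * x * P_n(x) - n * P_{n-1}(x),
# which is the usual way such tables are built. A minimal pure-Python sketch
# of that recurrence (an illustration only -- it is an assumption that the
# compiled calc_pn uses this exact scheme):
def _legendre_recurrence(max_order, x):
    # P_0 = 1 and P_1 = x seed the recurrence.
    vals = [1.0, x]
    for n in range(1, max_order):
        vals.append(((2 * n + 1) * x * vals[n] - n * vals[n - 1]) / (n + 1))
    return vals[:max_order + 1]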
def test_evaluate_legendre():
max_order = 10
# Coefficients are set to 1, but will incorporate the (2l+1)/2 norm factor
# for the reference solution
test_coeffs = [0.5 * (2. * l + 1.) for l in range(max_order + 1)]
test_xs = np.linspace(-1., 1., num=5, endpoint=True)
ref_vals = np.polynomial.legendre.legval(test_xs, test_coeffs)
# Set the coefficients back to 1s for the test values since
# evaluate legendre incorporates the (2l+1)/2 term on its own
test_coeffs = [1. for l in range(max_order + 1)]
test_vals = np.array([openmc.lib.math.evaluate_legendre(test_coeffs, x)
for x in test_xs])
assert np.allclose(ref_vals, test_vals)
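# The quantity tested above is the finite Legendre series
#     f(x) = sum_{l=0}^{N} (2l + 1)/2 * c_l * P_l(x);
# numpy's legval needs the (2l + 1)/2 factor folded into its coefficients,
# while evaluate_legendre applies it internally, hence the two different
# coefficient lists.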
def test_calc_rn():
max_order = 10
test_ns = np.array([i for i in range(0, max_order + 1)])
    azi = 0.1  # Azimuthal angle (longitude)
    pol = 0.2  # Polar angle from the z-axis (colatitude, not latitude)
test_uvw = np.array([np.sin(pol) * np.cos(azi),
np.sin(pol) * np.sin(azi),
np.cos(pol)])
# Reference solutions from the equations
def coeff(n, m):
return np.sqrt((2. * n + 1) * sp.special.factorial(n - m) /
(sp.special.factorial(n + m)))
def pnm_bar(n, m, mu):
val = coeff(n, m)
if m != 0:
val *= np.sqrt(2.)
val *= sp.special.lpmv([m], [n], [mu])
return val[0]
ref_vals = []
for n in test_ns:
for m in range(-n, n + 1):
if m < 0:
ylm = pnm_bar(n, np.abs(m), np.cos(pol)) * \
np.sin(np.abs(m) * azi)
else:
ylm = pnm_bar(n, m, np.cos(pol)) * np.cos(m * azi)
# Un-normalize for comparison
ylm /= np.sqrt(2. * n + 1.)
ref_vals.append(ylm)
test_vals = openmc.lib.math.calc_rn(max_order, test_uvw)
assert np.allclose(ref_vals, test_vals)
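# Written out, pnm_bar builds the real (tesseral) spherical harmonics
#     Y_{n,m}(pol, azi) = N(n, |m|) * P_n^{|m|}(cos(pol))
#                         * (cos(m * azi) if m >= 0 else sin(|m| * azi)),
# with N(n, m) = sqrt((2n + 1) * (n - m)! / (n + m)!) plus an extra sqrt(2)
# for m != 0; the sqrt(2n + 1) is then divided back out because calc_rn
# returns the un-normalized values.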
def test_calc_zn():
n = 10
rho = 0.5
phi = 0.5
# Reference solution from running the C++ implementation
ref_vals = np.array([
1.00000000e+00, 2.39712769e-01, 4.38791281e-01,
2.10367746e-01, -5.00000000e-01, 1.35075576e-01,
1.24686873e-01, -2.99640962e-01, -5.48489101e-01,
8.84215021e-03, 5.68310892e-02, -4.20735492e-01,
-1.25000000e-01, -2.70151153e-01, -2.60091773e-02,
1.87022545e-02, -3.42888902e-01, 1.49820481e-01,
2.74244551e-01, -2.43159131e-02, -2.50357380e-02,
2.20500013e-03, -1.98908812e-01, 4.07587508e-01,
4.37500000e-01, 2.61708929e-01, 9.10321205e-02,
-1.54686328e-02, -2.74049397e-03, -7.94845816e-02,
4.75368705e-01, 7.11647284e-02, 1.30266162e-01,
3.37106977e-02, 1.06401886e-01, -7.31606787e-03,
-2.95625975e-03, -1.10250006e-02, 3.55194307e-01,
-1.44627826e-01, -2.89062500e-01, -9.28644588e-02,
-1.62557358e-01, 7.73431638e-02, -2.55329539e-03,
-1.90923851e-03, 1.57578403e-02, 1.72995854e-01,
-3.66267690e-01, -1.81657333e-01, -3.32521518e-01,
-2.59738162e-02, -2.31580576e-01, 4.20673902e-02,
-4.11710546e-04, -9.36449487e-04, 1.92156884e-02,
2.82515641e-02, -3.90713738e-01, -1.69280296e-01,
-8.98437500e-02, -1.08693628e-01, 1.78813094e-01,
-1.98191857e-01, 1.65964201e-02, 2.77013853e-04])
test_vals = openmc.lib.math.calc_zn(n, rho, phi)
assert np.allclose(ref_vals, test_vals)
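# Sanity check on the reference length: orders n = 0..10 with
# m = -n, -n + 2, ..., n give sum(n + 1) = 66 Zernike terms, matching the
# 66 values above.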
def test_calc_zn_rad():
n = 10
rho = 0.5
# Reference solution from running the C++ implementation
ref_vals = np.array([
1.00000000e+00, -5.00000000e-01, -1.25000000e-01,
        4.37500000e-01, -2.89062500e-01, -8.98437500e-02])
test_vals = openmc.lib.math.calc_zn_rad(n, rho)
assert np.allclose(ref_vals, test_vals)
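# Only even orders contribute a purely radial Zernike term (m = 0 requires
# n - m to be even), so orders 0..10 reduce to the six values above
# (n = 0, 2, 4, 6, 8, 10).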
def test_rotate_angle():
uvw0 = np.array([1., 0., 0.])
phi = 0.
mu = 0.
    # reference: mu of 0 pulls the vector to the bottom, so:
ref_uvw = np.array([0., 0., -1.])
test_uvw = openmc.lib.math.rotate_angle(uvw0, mu, phi)
assert np.array_equal(ref_uvw, test_uvw)
# Repeat for mu = 1 (no change)
mu = 1.
ref_uvw = np.array([1., 0., 0.])
test_uvw = openmc.lib.math.rotate_angle(uvw0, mu, phi)
assert np.array_equal(ref_uvw, test_uvw)
# Now to test phi is None
mu = 0.9
phi = None
prn_seed = 1
# When seed = 1, phi will be sampled as 1.9116495709698769
# The resultant reference is from hand-calculations given the above
ref_uvw = [0.9, -0.422746750548505, 0.10623175090659095]
test_uvw = openmc.lib.math.rotate_angle(uvw0, mu, phi, prn_seed)
assert np.allclose(ref_uvw, test_uvw)
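# Independent of the sampled azimuth, a rotated direction must keep the
# requested cosine against the original vector and stay unit length. A quick
# property check along those lines (hypothetical helper, not part of the
# suite above):
def _check_rotation(uvw0, mu, prn_seed=1):
    uvw = openmc.lib.math.rotate_angle(np.asarray(uvw0, dtype=float), mu,
                                       None, prn_seed)
    assert np.isclose(np.dot(uvw0, uvw), mu)
    assert np.isclose(np.linalg.norm(uvw), 1.0)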
def test_maxwell_spectrum():
prn_seed = 1
T = 0.5
ref_val = 0.27767406743161277
test_val = openmc.lib.math.maxwell_spectrum(T, prn_seed)
assert ref_val == test_val
def test_watt_spectrum():
prn_seed = 1
a = 0.5
b = 0.75
ref_val = 0.30957476387766697
test_val = openmc.lib.math.watt_spectrum(a, b, prn_seed)
assert ref_val == test_val
def test_normal_dist():
# When standard deviation is zero, sampled value should be mean
prn_seed = 1
mean = 14.08
stdev = 0.0
ref_val = 14.08
test_val = openmc.lib.math.normal_variate(mean, stdev, prn_seed)
assert ref_val == pytest.approx(test_val)
    # Use Shapiro-Wilk test to ensure normality of sampled variates
stdev = 1.0
samples = []
num_samples = 10000
for _ in range(num_samples):
# sample the normal distribution from openmc
samples.append(openmc.lib.math.normal_variate(mean, stdev, prn_seed))
prn_seed += 1
stat, p = shapiro(samples)
assert p > 0.05
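    # Shapiro-Wilk tests the null hypothesis that the samples are normal;
    # p > 0.05 means normality is not rejected at the 5% level, which is
    # the (deliberately weak) guarantee this test settles for.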
def test_broaden_wmp_polynomials():
# Two branches of the code to worry about, beta > 6 and otherwise
# beta = sqrtE * dopp
    # First let's do beta > 6
test_E = 0.5
test_dopp = 100. # approximately U235 at room temperature
n = 6
ref_val = [2., 1.41421356, 1.0001, 0.70731891, 0.50030001, 0.353907]
test_val = openmc.lib.math.broaden_wmp_polynomials(test_E, test_dopp, n)
assert np.allclose(ref_val, test_val)
# now beta < 6
test_dopp = 5.
ref_val = [1.99999885, 1.41421356, 1.04, 0.79195959, 0.6224, 0.50346003]
test_val = openmc.lib.math.broaden_wmp_polynomials(test_E, test_dopp, n)
assert np.allclose(ref_val, test_val)
| nelsonag/openmc | tests/unit_tests/test_math.py | Python | mit | 7,654 |
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011 uTest Inc.
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from ..core import decorators as dec
from ..core.filters import KeywordFilter
from ..core.util import get_object_or_404
from ..environments.filters import EnvironmentFilter
from ..products.filters import ProductFieldFilter
from ..static.filters import TestResultStatusFilter
from ..static.status import TestCycleStatus, TestRunStatus
from ..testexecution.models import (
TestCycleList, TestRunList, TestRunIncludedTestCaseList, TestResultList)
from ..users.decorators import login_redirect
from ..users.filters import UserFieldFilter, TeamFieldFilter
from .finder import ResultsFinder
from . import filters
@dec.finder(ResultsFinder)
def home(request):
return redirect(reverse("results_testcycles") + "?openfinder=1&status=2")
@login_redirect
@dec.finder(ResultsFinder)
@dec.filter("cycles",
("status", filters.NonDraftTestCycleStatusFilter),
("product", ProductFieldFilter),
("name", KeywordFilter),
("tester", TeamFieldFilter),
("environment", EnvironmentFilter),
)
@dec.paginate("cycles")
@dec.sort("cycles", "product")
def testcycles(request):
cycles = TestCycleList.ours(auth=request.auth).filter(status=[
TestCycleStatus.ACTIVE,
TestCycleStatus.LOCKED,
TestCycleStatus.CLOSED])
return TemplateResponse(
request, "results/testcycle/cycles.html", {"cycles": cycles})
@login_redirect
def testcycle_details(request, cycle_id):
cycle = get_object_or_404(TestCycleList, cycle_id, auth=request.auth)
return TemplateResponse(
request,
"results/testcycle/_cycle_details.html",
{"cycle": cycle})
@login_redirect
@dec.finder(ResultsFinder)
@dec.filter("runs",
("status", filters.NonDraftTestRunStatusFilter),
("product", ProductFieldFilter),
("testCycle", filters.NonDraftTestCycleFieldFilter),
("name", KeywordFilter),
("tester", TeamFieldFilter),
("environment", EnvironmentFilter),
)
@dec.paginate("runs")
@dec.sort("runs")
def testruns(request):
runs = TestRunList.ours(auth=request.auth).filter(status=[
TestRunStatus.ACTIVE,
TestRunStatus.LOCKED,
TestRunStatus.CLOSED])
return TemplateResponse(
request, "results/testrun/runs.html", {"runs": runs})
@login_redirect
def testrun_details(request, run_id):
run = get_object_or_404(TestRunList, run_id, auth=request.auth)
return TemplateResponse(
request,
"results/testrun/_run_details.html",
{"run": run})
@login_redirect
@dec.finder(ResultsFinder)
@dec.filter("includedcases",
("status", filters.NonDraftTestCaseStatusFilter),
("testRun", filters.NonDraftTestRunFieldFilter),
("product", ProductFieldFilter),
("testSuite", filters.TestSuiteFieldFilter),
("name", KeywordFilter),
("environment", EnvironmentFilter),
)
@dec.paginate("includedcases")
@dec.sort("includedcases")
def testcases(request):
includedcases = TestRunIncludedTestCaseList.ours(auth=request.auth)
return TemplateResponse(
request,
"results/testcase/cases.html",
{"includedcases": includedcases})
@login_redirect
def testcase_details(request, itc_id):
itc = get_object_or_404(TestRunIncludedTestCaseList, itc_id, auth=request.auth)
return TemplateResponse(
request,
"results/testcase/_case_details.html",
{"itc": itc})
@login_redirect
@dec.finder(ResultsFinder)
@dec.filter("results",
("tester", UserFieldFilter),
("status", TestResultStatusFilter),
("comment", KeywordFilter),
("environment", EnvironmentFilter),
)
@dec.paginate("results")
@dec.sort("results", "status", "desc")
def testresults(request, itc_id):
itc = get_object_or_404(TestRunIncludedTestCaseList, itc_id, auth=request.auth)
results = TestResultList.ours(auth=request.auth).filter(
testCaseVersion=itc.testCaseVersion.id,
testRun=itc.testRun.id)
return TemplateResponse(
request,
"results/testcase/included_case_detail.html",
{"includedcase": itc, "results": results})
| mozilla/caseconductor-ui | ccui/results/views.py | Python | gpl-3.0 | 5,180 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Race.size'
db.alter_column('dnd_race', 'size_id', self.gf('django.db.models.fields.related.ForeignKey')(default=5, to=orm['dnd.RaceSize']))
# Changing field 'Race.space'
db.alter_column('dnd_race', 'space', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5))
# Changing field 'Race.reach'
db.alter_column('dnd_race', 'reach', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5))
def backwards(self, orm):
# Changing field 'Race.size'
db.alter_column('dnd_race', 'size_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.RaceSize'], null=True))
# Changing field 'Race.space'
db.alter_column('dnd_race', 'space', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))
# Changing field 'Race.reach'
db.alter_column('dnd_race', 'reach', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True))
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.characterclassvariant': {
'Meta': {'ordering': "['character_class__name']", 'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_features_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False', 'blank': 'True'}),
'hit_die': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required_bab': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'requirements_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'starting_gold': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresfeat': {
'Meta': {'object_name': 'CharacterClassVariantRequiresFeat'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresrace': {
'Meta': {'object_name': 'CharacterClassVariantRequiresRace'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_races'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresskill': {
'Meta': {'object_name': 'CharacterClassVariantRequiresSkill'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ranks': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'benefit_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'normal_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.featrequiresfeat': {
'Meta': {'object_name': 'FeatRequiresFeat'},
'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
},
'dnd.featrequiresskill': {
'Meta': {'object_name': 'FeatRequiresSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'dnd.item': {
'Meta': {'ordering': "['name']", 'object_name': 'Item'},
'activation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemActivationType']", 'null': 'True', 'blank': 'True'}),
'aura': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemAuraType']", 'null': 'True', 'blank': 'True'}),
'aura_dc': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'aura_schools': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellSchool']", 'symmetrical': 'False', 'blank': 'True'}),
'body_slot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemSlot']", 'null': 'True', 'blank': 'True'}),
'caster_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_to_create': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_bonus': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_gp': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemProperty']", 'null': 'True', 'blank': 'True'}),
'required_extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'required_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'symmetrical': 'False', 'blank': 'True'}),
'required_spells': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Spell']", 'symmetrical': 'False', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'synergy_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Item']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'visual_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'dnd.itemactivationtype': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemActivationType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemauratype': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemAuraType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemproperty': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemProperty'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemslot': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemSlot'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.monster': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Monster'},
'advancement': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'armor_class': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'attack': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'base_attack': ('django.db.models.fields.SmallIntegerField', [], {}),
'cha': ('django.db.models.fields.SmallIntegerField', [], {}),
'challenge_rating': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'combat_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {}),
'environment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'flat_footed_armor_class': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'fort_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'fort_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'full_attack': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'grapple': ('django.db.models.fields.SmallIntegerField', [], {}),
'hit_dice': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initiative': ('django.db.models.fields.SmallIntegerField', [], {}),
'int': ('django.db.models.fields.SmallIntegerField', [], {}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'reflex_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'reflex_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'special_attacks': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'special_qualities': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'str': ('django.db.models.fields.SmallIntegerField', [], {}),
'subtypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.MonsterSubtype']", 'symmetrical': 'False', 'blank': 'True'}),
'touch_armor_class': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'treasure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.MonsterType']"}),
'will_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'will_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {})
},
'dnd.monsterhasfeat': {
'Meta': {'object_name': 'MonsterHasFeat'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feats'", 'to': "orm['dnd.Monster']"})
},
'dnd.monsterhasskill': {
'Meta': {'object_name': 'MonsterHasSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skills'", 'to': "orm['dnd.Monster']"}),
'ranks': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.monsterspeed': {
'Meta': {'object_name': 'MonsterSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Monster']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.monstersubtype': {
'Meta': {'ordering': "['name']", 'object_name': 'MonsterSubtype'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.monstertype': {
'Meta': {'ordering': "['name']", 'object_name': 'MonsterType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.newsentry': {
'Meta': {'ordering': "['-published']", 'object_name': 'NewsEntry'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.DateField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.race': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
'automatic_languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'races_with_automatic'", 'blank': 'True', 'to': "orm['dnd.Language']"}),
'bonus_languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'races_with_bonus'", 'blank': 'True', 'to': "orm['dnd.Language']"}),
'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'combat_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'natural_armor': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'race_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceType']", 'null': 'True', 'blank': 'True'}),
'racial_hit_dice_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'racial_traits': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'racial_traits_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'dnd.racefavoredcharacterclass': {
'Meta': {'object_name': 'RaceFavoredCharacterClass'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
},
'dnd.racesize': {
'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'dnd.racespeed': {
'Meta': {'object_name': 'RaceSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.racespeedtype': {
'Meta': {'ordering': "['name', 'extra']", 'object_name': 'RaceSpeedType'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.racetype': {
'Meta': {'ordering': "['name']", 'object_name': 'RaceType'},
'base_attack_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'base_fort_save_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'base_reflex_save_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'base_will_save_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'hit_die_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.rule': {
'Meta': {'object_name': 'Rule'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page_from': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'page_to': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.skillvariant': {
'Meta': {'unique_together': "(('skill', 'rulebook'),)", 'object_name': 'SkillVariant'},
'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'action_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'restriction': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'restriction_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'corrupt_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'corrupt_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'dnd.textfeatprerequisite': {
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
| gregpechiro/dndtools | dndtools/dnd/migrations/0092_auto__chg_field_race_size__chg_field_race_space__chg_field_race_reach.py | Python | mit | 43,290 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon.tables import MultiTableView # noqa
from horizon.utils import memoized
class ResourceBrowserView(MultiTableView):
browser_class = None
def __init__(self, *args, **kwargs):
if not self.browser_class:
raise ValueError("You must specify a ResourceBrowser subclass "
"for the browser_class attribute on %s."
% self.__class__.__name__)
self.table_classes = (self.browser_class.navigation_table_class,
self.browser_class.content_table_class)
self.navigation_selection = False
super(ResourceBrowserView, self).__init__(*args, **kwargs)
@memoized.memoized_method
def get_browser(self):
browser = self.browser_class(self.request, **self.kwargs)
browser.set_tables(self.get_tables())
if not self.navigation_selection:
ct = browser.content_table
item = browser.navigable_item_name.lower()
ct._no_data_message = _("Select a %s to browse.") % item
return browser
def get_context_data(self, **kwargs):
context = super(ResourceBrowserView, self).get_context_data(**kwargs)
browser = self.get_browser()
context["%s_browser" % browser.name] = browser
return context
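# A minimal usage sketch (class and template names below are hypothetical):
# a concrete view only has to point ``browser_class`` at a ResourceBrowser
# subclass, and the navigation/content table classes are picked up from it.
#
#     class ContainerBrowserView(ResourceBrowserView):
#         browser_class = ContainerBrowser
#         template_name = "project/containers/index.html"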
| spandanb/horizon | horizon/browsers/views.py | Python | apache-2.0 | 1,982 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from mox import IsA
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import test
class InstanceViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.nova: ('flavor_list', 'server_list',),
api.keystone: ('tenant_list',)})
def test_index(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list',),
api.keystone: ('tenant_list',)})
def test_index_flavor_list_exception(self):
servers = self.servers.list()
tenants = self.tenants.list()
flavors = self.flavors.list()
full_flavors = SortedDict([(f.id, f) for f in flavors])
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndRaise(self.exceptions.nova)
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndReturn(full_flavors[server.flavor["id"]])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
instances = res.context['table'].data
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
'server_list',),
api.keystone: ('tenant_list',)})
def test_index_flavor_get_exception(self):
servers = self.servers.list()
flavors = self.flavors.list()
tenants = self.tenants.list()
max_id = max([int(flavor.id) for flavor in flavors])
for server in servers:
max_id += 1
server.flavor["id"] = max_id
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndReturn(servers)
api.nova.flavor_list(IsA(http.HttpRequest)). \
AndReturn(flavors)
api.keystone.tenant_list(IsA(http.HttpRequest), admin=True).\
AndReturn(tenants)
for server in servers:
api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
instances = res.context['table'].data
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertMessageCount(res, error=len(servers))
self.assertItemsEqual(instances, servers)
@test.create_stubs({api.nova: ('server_list',)})
def test_index_server_list_exception(self):
api.nova.server_list(IsA(http.HttpRequest),
all_tenants=True).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:instances:index'))
self.assertTemplateUsed(res, 'admin/instances/index.html')
self.assertEqual(len(res.context['instances_table'].data), 0)
@test.create_stubs({api: ('server_get', 'flavor_get',),
api.keystone: ('tenant_get',)})
def test_ajax_loading_instances(self):
server = self.servers.first()
flavor = self.flavors.list()[0]
tenant = self.tenants.list()[0]
api.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
api.flavor_get(IsA(http.HttpRequest),
server.flavor['id']).AndReturn(flavor)
api.keystone.tenant_get(IsA(http.HttpRequest),
server.tenant_id,
admin=True).AndReturn(tenant)
self.mox.ReplayAll()
url = reverse('horizon:admin:instances:index') + \
"?action=row_update&table=instances&obj_id=" + server.id
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
self.assertContains(res, "test_tenant", 1, 200)
self.assertContains(res, "instance-host", 1, 200)
self.assertContains(res, "server_1", 1, 200)
self.assertContains(res, "10.0.0.1", 1, 200)
self.assertContains(res, "512MB RAM | 1 VCPU | 0 Disk", 1, 200)
self.assertContains(res, "Active", 1, 200)
self.assertContains(res, "Running", 1, 200)
| 1ukash/horizon | horizon/dashboards/admin/instances/tests.py | Python | apache-2.0 | 6,189 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._blob_services_operations import build_get_service_properties_request, build_set_service_properties_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobServicesOperations:
"""BlobServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def set_service_properties(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.BlobServiceProperties",
**kwargs: Any
) -> "_models.BlobServiceProperties":
"""Sets the properties of a storage account’s Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The properties of a storage account’s Blob service, including properties for
Storage Analytics and CORS (Cross-Origin Resource Sharing) rules.
:type parameters: ~azure.mgmt.storage.v2018_07_01.models.BlobServiceProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'BlobServiceProperties')
request = build_set_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.set_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'} # type: ignore
@distributed_trace_async
async def get_service_properties(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> "_models.BlobServiceProperties":
"""Gets the properties of a storage account’s Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobServiceProperties, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_service_properties_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.get_service_properties.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_service_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/{BlobServicesName}'} # type: ignore
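# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; not part of the generated file).
# These operations are normally reached through the versioned async
# StorageManagementClient rather than instantiated directly. The resource
# names below are placeholders, and the azure-identity credential is an
# assumption about the caller's environment.
#
# import asyncio
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.storage.v2018_07_01.aio import StorageManagementClient
#
# async def main():
#     async with DefaultAzureCredential() as credential:
#         async with StorageManagementClient(credential, "<subscription-id>") as client:
#             props = await client.blob_services.get_service_properties(
#                 "<resource-group>", "<storage-account>")
#             print(props.cors)
#
# asyncio.run(main())
# ---------------------------------------------------------------------------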
| Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_07_01/aio/operations/_blob_services_operations.py | Python | mit | 7,843 |
import fresh_tomatoes
import media
toy_story = media.Movie("Toy Story",
"A story of a boy and his toys that come to life",
"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",
"https://www.youtube.com/watch?v=vwyZH85NQC4")
#print(toy_story.storyline)
avatar = media.Movie("Avatar","A marine on an alien planet",
"http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",
"http://www.youtube.com/watch?v=5PSNL1qE6VY")
dawn = media.Movie("Dawn Of The Planet Of The Apes",
"A story about an ape",
"http://upload.wikimedia.org/wikipedia/en/7/77/Dawn_of_the_Planet_of_the_Apes.jpg",
"http://www.youtube.com/watch?v=eq1sTNGDXo0")
gonegirl = media.Movie("Gone Girl",
"A sad story",
"http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg",
"http://www.youtube.com/watch?v=Ym3LB0lOJ0o")
avenger = media.Movie("Avenger",
"A story about superheroes",
"http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg",
"http://www.youtube.com/watch?v=hIR8Ar-Z4hw")
dark_knight = media.Movie("Dark knight rises",
"A story about batman",
"http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg",
"http://www.youtube.com/watch?v=g8evyE9TuYk")
movies = [toy_story, avatar, dawn, gonegirl, avenger, dark_knight]
#fresh_tomatoes.open_movies_page(movies)
#print(media.Movie.VALID_RATINGS)
print(media.Movie.__doc__)
| tuanvu216/udacity-course | programming_foudations_with_python/entertainment_center.py | Python | mit | 1,734 |
from logging.config import dictConfig
from environs import Env
from importlib.metadata import Distribution
pkg = Distribution.from_name(__package__)
class Config:
"""
Base configuration
"""
DEBUG = False
TESTING = False
SERVER_VERSION = pkg.version
def __init__(self):
# Environment variables
env = Env()
env.read_env() # also read .env file, if it exists
self.CONTAINER_ENV = env('CONTAINER_ENV', 'swarm')
self.STORAGE_TYPE = env('STORAGE_TYPE', 'host')
if self.STORAGE_TYPE == 'host' or self.STORAGE_TYPE == 'nfs':
self.STOREBASE = env('STOREBASE')
if self.STORAGE_TYPE == 'nfs':
self.NFS_SERVER = env('NFS_SERVER')
if self.CONTAINER_ENV == 'swarm':
docker_host = env('DOCKER_HOST', '')
if docker_host:
self.DOCKER_HOST = docker_host
docker_tls_verify = env.int('DOCKER_TLS_VERIFY', None)
if docker_tls_verify is not None:
self.DOCKER_TLS_VERIFY = docker_tls_verify
docker_cert_path = env('DOCKER_CERT_PATH', '')
if docker_cert_path:
self.DOCKER_CERT_PATH = docker_cert_path
if self.CONTAINER_ENV == 'kubernetes':
self.JOB_NAMESPACE = env('JOB_NAMESPACE', 'default')
self.SECURITYCONTEXT_RUN_AS_USER = env.int('SECURITYCONTEXT_RUN_AS_USER', None)
self.SECURITYCONTEXT_RUN_AS_GROUP = env.int('SECURITYCONTEXT_RUN_AS_GROUP', None)
if self.CONTAINER_ENV == 'cromwell':
self.CROMWELL_URL = env('CROMWELL_URL')
self.TIMELIMIT_MINUTES = env.int('TIMELIMIT_MINUTES')
self.env = env
class DevConfig(Config):
"""
Development configuration
"""
ENV = 'development'
DEBUG = True
TESTING = True
def __init__(self):
super().__init__()
# DEV LOGGING CONFIGURATION
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] [%(levelname)s]'
'[%(module)s:%(lineno)d %(process)d %(thread)d] %(message)s'
},
},
'handlers': {
'console_simple': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/tmp/debug.log',
'formatter': 'simple'
}
},
'loggers': {
'': { # root logger
'level': 'INFO',
'handlers': ['console_simple'],
},
'pman': { # pman package logger
'level': 'DEBUG',
'handlers': ['console_simple', 'file'],
'propagate': False
# required to avoid double logging with root logger
},
}
})
class ProdConfig(Config):
"""
Production configuration
"""
ENV = 'production'
def __init__(self):
super().__init__()
# PROD LOGGING CONFIGURATION
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] [%(levelname)s]'
'[%(module)s:%(lineno)d %(process)d %(thread)d] %(message)s'
},
},
'handlers': {
'console_simple': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/tmp/debug.log',
'formatter': 'simple'
}
},
'loggers': {
'': { # root logger
'level': 'INFO',
'handlers': ['console_simple'],
},
'pman': { # pman package logger
'level': 'INFO',
'handlers': ['file'],
'propagate': False
},
}
})
# Environment variables-based secrets
# SECURITY WARNING: keep the secret key used in production secret!
env = self.env
self.SECRET_KEY = env('SECRET_KEY')
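# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative; not part of pman itself). An app
# factory would typically pick one of the two classes above from an
# environment variable; the APPLICATION_MODE name used here is an assumption
# made for this example, not necessarily what pman's factory uses.
#
# import os
# config_class = DevConfig if os.environ.get('APPLICATION_MODE') == 'development' \
#     else ProdConfig
# app.config.from_object(config_class())
# ---------------------------------------------------------------------------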
| FNNDSC/pman | pman/config.py | Python | mit | 4,754 |
#
# This file is part of Dragonfly.
# (c) Copyright 2019 by David Zurow
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Audio input/output classes for Kaldi backend
"""
from __future__ import division, print_function
import collections, contextlib, datetime, itertools, logging, os, time, threading, wave
from io import open
from six import PY2, binary_type, text_type, print_
from six.moves import queue, range
import sounddevice
import webrtcvad
from ..base import EngineError
_log = logging.getLogger("engine")
class MicAudio(object):
"""Streams raw audio from microphone. Data is received in a separate thread, and stored in a buffer, to be read from."""
FORMAT = 'int16'
SAMPLE_WIDTH = 2
SAMPLE_RATE = 16000
CHANNELS = 1
BLOCKS_PER_SECOND = 100
BLOCK_SIZE_SAMPLES = int(SAMPLE_RATE / float(BLOCKS_PER_SECOND)) # Block size in number of samples
BLOCK_DURATION_MS = int(1000 * BLOCK_SIZE_SAMPLES // SAMPLE_RATE) # Block duration in milliseconds
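    # Worked example of the two constants above: 16000 Hz / 100 blocks per
    # second = 160 samples per block, and 1000 * 160 // 16000 = 10 ms per
    # block, which is one of the frame durations webrtcvad accepts (10/20/30 ms).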
def __init__(self, callback=None, buffer_s=0, flush_queue=True, start=True, input_device=None, self_threaded=None, reconnect_callback=None):
self.callback = callback if callback is not None else lambda in_data: self.buffer_queue.put(in_data, block=False)
self.flush_queue = bool(flush_queue)
self.input_device = input_device
self.self_threaded = bool(self_threaded)
if reconnect_callback is not None and not callable(reconnect_callback):
_log.error("Invalid reconnect_callback not callable: %r", reconnect_callback)
reconnect_callback = None
self.reconnect_callback = reconnect_callback
self.buffer_queue = queue.Queue(maxsize=(buffer_s * 1000 // self.BLOCK_DURATION_MS))
self.stream = None
self.thread = None
self.thread_cancelled = False
self.device_info = None
try:
device_list = sounddevice.query_devices(device=self.input_device)
if not device_list:
raise EngineError("No audio devices found.")
except ValueError as e:
message = e.args[0]
message += "\nAvailable devices are:\n" + str(sounddevice.query_devices())
raise ValueError(message)
self._connect(start=start)
def _connect(self, start=None):
callback = self.callback
def proxy_callback(in_data, frame_count, time_info, status):
callback(bytes(in_data)) # Must copy data from temporary C buffer!
self.stream = sounddevice.RawInputStream(
samplerate=self.SAMPLE_RATE,
channels=self.CHANNELS,
dtype=self.FORMAT,
blocksize=self.BLOCK_SIZE_SAMPLES,
# latency=80,
device=self.input_device,
callback=proxy_callback if not self.self_threaded else None,
)
if self.self_threaded:
self.thread_cancelled = False
self.thread = threading.Thread(target=self._reader_thread, args=(callback,))
self.thread.daemon = True
self.thread.start()
if start:
self.start()
device_info = sounddevice.query_devices(self.stream.device)
hostapi_info = sounddevice.query_hostapis(device_info['hostapi'])
_log.info("streaming audio from '%s' using %s: %i sample_rate, %i block_duration_ms, %i latency_ms",
device_info['name'], hostapi_info['name'], self.stream.samplerate, self.BLOCK_DURATION_MS, int(self.stream.latency*1000))
self.device_info = device_info
def _reader_thread(self, callback):
while not self.thread_cancelled and self.stream and not self.stream.closed:
if self.stream.active and self.stream.read_available >= self.stream.blocksize:
in_data, overflowed = self.stream.read(self.stream.blocksize)
# print('_reader_thread', read_available, len(in_data), overflowed, self.stream.blocksize)
if overflowed:
_log.warning("audio stream overflow")
callback(bytes(in_data)) # Must copy data from temporary C buffer!
else:
time.sleep(0.001)
def _cancel_reader_thread(self):
self.thread_cancelled = True
if self.thread:
self.thread.join()
self.thread = None
def destroy(self):
self._cancel_reader_thread()
if self.stream:
self.stream.close()
self.stream = None
def reconnect(self):
# FIXME: flapping
old_device_info = self.device_info
self._cancel_reader_thread()
self.stream.close()
self._connect(start=True)
if self.reconnect_callback is not None:
self.reconnect_callback(self)
if self.device_info != old_device_info:
raise EngineError("Audio reconnect could not reconnect to the same device")
def start(self):
self.stream.start()
def stop(self):
self.stream.stop()
def read(self, nowait=False):
"""Return a block of audio data. If nowait==False, waits for a block if necessary; else, returns False immediately if no block is available."""
if self.stream or (self.flush_queue and not self.buffer_queue.empty()):
if nowait:
try:
return self.buffer_queue.get_nowait() # Return good block if available
except queue.Empty as e:
return False # Queue is empty for now
else:
return self.buffer_queue.get() # Wait for a good block and return it
else:
return None # We are done
def read_loop(self, callback):
"""Block looping reading, repeatedly passing a block of audio data to callback."""
for block in iter(self):
callback(block)
def iter(self, nowait=False):
"""Generator that yields all audio blocks from microphone."""
while True:
block = self.read(nowait=nowait)
if block is None:
break
yield block
def __iter__(self):
"""Generator that yields all audio blocks from microphone."""
return self.iter()
def get_wav_length_s(self, data):
assert isinstance(data, binary_type)
length_bytes = len(data)
assert self.FORMAT == 'int16'
length_samples = length_bytes / self.SAMPLE_WIDTH
return (float(length_samples) / self.SAMPLE_RATE)
def write_wav(self, filename, data):
# _log.debug("write wav %s", filename)
wf = wave.open(filename, 'wb')
wf.setnchannels(self.CHANNELS)
# wf.setsampwidth(self.pa.get_sample_size(FORMAT))
assert self.FORMAT == 'int16'
wf.setsampwidth(self.SAMPLE_WIDTH)
wf.setframerate(self.SAMPLE_RATE)
wf.writeframes(data)
wf.close()
@staticmethod
def print_list():
print_("")
print_("LISTING ALL INPUT DEVICES SUPPORTED BY PORTAUDIO")
print_("(any device numbers not shown are for output only)")
print_("")
devices = sounddevice.query_devices()
print_(devices)
# for i in range(0, pa.get_device_count()):
# info = pa.get_device_info_by_index(i)
# if info['maxInputChannels'] > 0: # microphone? or just speakers
# print_("DEVICE #%d" % info['index'])
# print_(" %s" % info['name'])
# print_(" input channels = %d, output channels = %d, defaultSampleRate = %d" %
# (info['maxInputChannels'], info['maxOutputChannels'], info['defaultSampleRate']))
# # print_(info)
# try:
# supports16k = pa.is_format_supported(16000, # sample rate
# input_device = info['index'],
# input_channels = info['maxInputChannels'],
# input_format = pyaudio.paInt16)
# except ValueError:
# print_(" NOTE: 16k sampling not supported, configure pulseaudio to use this device")
print_("")
class VADAudio(MicAudio):
"""Filter & segment audio with voice activity detection."""
def __init__(self, aggressiveness=3, **kwargs):
super(VADAudio, self).__init__(**kwargs)
self.vad = webrtcvad.Vad(aggressiveness)
def vad_collector(self, start_window_ms=150, start_padding_ms=100,
end_window_ms=150, end_padding_ms=None, complex_end_window_ms=None,
ratio=0.8, blocks=None, nowait=False, audio_auto_reconnect=False,
):
"""Generator/coroutine that yields series of consecutive audio blocks comprising each phrase, separated by yielding a single None.
Determines voice activity by ratio of blocks in window_ms. Uses a buffer to include window_ms prior to being triggered.
Example: (block, ..., block, None, block, ..., block, None, ...)
|----phrase-----| |----phrase-----|
"""
        assert end_padding_ms is None, "end_padding_ms not supported yet"
num_start_window_blocks = max(1, int(start_window_ms // self.BLOCK_DURATION_MS))
num_start_padding_blocks = max(0, int((start_padding_ms or 0) // self.BLOCK_DURATION_MS))
num_end_window_blocks = max(1, int(end_window_ms // self.BLOCK_DURATION_MS))
num_complex_end_window_blocks = max(1, int((complex_end_window_ms or end_window_ms) // self.BLOCK_DURATION_MS))
num_end_padding_blocks = max(0, int((end_padding_ms or 0) // self.BLOCK_DURATION_MS))
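        # Worked example: with 10 ms blocks, start_window_ms=150 gives
        # num_start_window_blocks = 15, so at ratio=0.8 a phrase start needs
        # at least 12 voiced blocks within the most recent 150 ms.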
_log.debug("%s: vad_collector: num_start_window_blocks=%s num_end_window_blocks=%s num_complex_end_window_blocks=%s",
self, num_start_window_blocks, num_end_window_blocks, num_complex_end_window_blocks)
audio_reconnect_threshold_blocks = 5
audio_reconnect_threshold_time = 50 * self.BLOCK_DURATION_MS / 1000
ring_buffer = collections.deque(maxlen=max(
(num_start_window_blocks + num_start_padding_blocks),
(num_end_window_blocks + num_end_padding_blocks),
(num_complex_end_window_blocks + num_end_padding_blocks),
))
ring_buffer_recent_slice = lambda num_blocks: itertools.islice(ring_buffer, max(0, (len(ring_buffer) - num_blocks)), None)
triggered = False
in_complex_phrase = False
num_empty_blocks = 0
last_good_block_time = time.time()
if blocks is None: blocks = self.iter(nowait=nowait)
for block in blocks:
if block is False or block is None:
# Bad/empty block
num_empty_blocks += 1
if audio_auto_reconnect and (num_empty_blocks >= audio_reconnect_threshold_blocks) and (time.time() - last_good_block_time >= audio_reconnect_threshold_time):
_log.warning("%s: no good block received recently, so reconnecting audio", self)
self.reconnect()
num_empty_blocks = 0
last_good_block_time = time.time()
in_complex_phrase = yield False
else:
# Good block
num_empty_blocks = 0
last_good_block_time = time.time()
is_speech = self.vad.is_speech(block, self.SAMPLE_RATE)
if not triggered:
# Between phrases
ring_buffer.append((block, is_speech))
num_voiced = len([1 for (_, speech) in ring_buffer_recent_slice(num_start_window_blocks) if speech])
if num_voiced >= (num_start_window_blocks * ratio):
# Start of phrase
triggered = True
for block, _ in ring_buffer_recent_slice(num_start_padding_blocks + num_start_window_blocks):
# print('|' if is_speech else '.', end='')
# print('|' if in_complex_phrase else '.', end='')
in_complex_phrase = yield block
# print('#', end='')
ring_buffer.clear()
else:
# Ongoing phrase
in_complex_phrase = yield block
# print('|' if is_speech else '.', end='')
# print('|' if in_complex_phrase else '.', end='')
ring_buffer.append((block, is_speech))
num_unvoiced = len([1 for (_, speech) in ring_buffer_recent_slice(num_end_window_blocks) if not speech])
num_complex_unvoiced = len([1 for (_, speech) in ring_buffer_recent_slice(num_complex_end_window_blocks) if not speech])
if (not in_complex_phrase and num_unvoiced >= (num_end_window_blocks * ratio)) or \
(in_complex_phrase and num_complex_unvoiced >= (num_complex_end_window_blocks * ratio)):
# End of phrase
triggered = False
in_complex_phrase = yield None
# print('*')
ring_buffer.clear()
if triggered:
# We were in a phrase, so we must terminate it (this may be abrupt!)
yield None
def debug_print_simple(self):
print("block_duration_ms=%s" % self.BLOCK_DURATION_MS)
for block in self.iter(nowait=False):
is_speech = self.vad.is_speech(block, self.SAMPLE_RATE)
print('|' if is_speech else '.', end='')
    def debug_loop(self, *args, **kwargs):
        """Debug helper: drives vad_collector forever, always reporting a non-complex phrase via send()."""
        audio_iter = self.vad_collector(*args, **kwargs)
        next(audio_iter)
        while True:
            audio_iter.send(False)  # discard the yielded block; this only exercises the coroutine
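# ---------------------------------------------------------------------------
# Hedged usage sketch for vad_collector (illustrative; not part of the
# module). The generator is a coroutine: the value passed to send() tells it
# whether the current utterance is "complex", which selects the longer
# complex_end_window_ms end-of-phrase window:
#
# audio = VADAudio(aggressiveness=3)
# phrases = audio.vad_collector()
# block = next(phrases)            # first yielded block (or False)
# while True:
#     if block is None:
#         print("end of phrase")
#     elif block:
#         pass                     # feed raw int16 bytes to a decoder here
#     block = phrases.send(False)  # False = not in a complex phrase
# ---------------------------------------------------------------------------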
class AudioStore(object):
"""
Stores the current audio data being recognized, which is cleared upon calling `finalize()`.
Also, optionally stores the last `maxlen` recognitions as `AudioStoreEntry` objects,
indexed in reverse order (0 is most recent), and advanced upon calling `finalize()`.
Note: `finalize()` should be called after the recognition has been parsed and its actions executed.
Constructor arguments:
- *maxlen* (*int*, default *None*): if set, the number of previous recognitions to temporarily store.
- *save_dir* (*str*, default *None*): if set, the directory to save the `retain.tsv` file and optionally wav files.
- *save_metadata* (*bool*, default *None*): whether to automatically save the recognition metadata.
- *save_audio* (*bool*, default *None*): whether to automatically save the recognition audio data (in addition to just the recognition metadata).
- *retain_approval_func* (*Callable*, default *None*): if set, will be called with the `AudioStoreEntry` object about to be saved,
and should return `bool` whether to actually save. Example: `retain_approval_func=lambda entry: bool(entry.grammar_name != 'noisegrammar')`
"""
def __init__(self, audio_obj, maxlen=None, save_dir=None, save_audio=None, save_metadata=None, retain_approval_func=None):
self.audio_obj = audio_obj
self.maxlen = maxlen
self.save_dir = save_dir
self.save_audio = save_audio
self.save_metadata = save_metadata
if self.save_dir:
_log.info("retaining recognition audio and/or metadata to '%s'", self.save_dir)
self.retain_approval_func = retain_approval_func
self.deque = collections.deque(maxlen=maxlen) if maxlen else None
self.blocks = []
    current_audio_data = property(lambda self: b''.join(bytes(b) for b in self.blocks) if PY2 else b''.join(self.blocks))
current_audio_length_ms = property(lambda self: len(self.blocks) * self.audio_obj.BLOCK_DURATION_MS)
def add_block(self, block):
self.blocks.append(block)
def finalize(self, text, grammar_name, rule_name, likelihood=None, tag='', has_dictation=None):
""" Finalizes current utterance, creating its AudioStoreEntry and saving it (if enabled). """
entry = AudioStoreEntry(self.current_audio_data, grammar_name, rule_name, text, likelihood, tag, has_dictation)
if self.deque is not None:
if len(self.deque) == self.deque.maxlen:
self.save(-1) # Save oldest, which is about to be evicted
self.deque.appendleft(entry)
self.blocks = []
def cancel(self):
self.blocks = []
def save(self, index):
""" Saves AudioStoreEntry for given index (0 is most recent). """
if slice(index).indices(len(self.deque))[1] >= len(self.deque):
raise EngineError("Invalid index to save in AudioStore")
if not self.save_dir:
return
if not os.path.isdir(self.save_dir):
_log.warning("Recognition data was not retained because '%s' was not a directory" % self.save_dir)
return
entry = self.deque[index]
if (not self.save_audio) and (not self.save_metadata) and (not entry.force_save):
return
if self.retain_approval_func and not self.retain_approval_func(entry):
return
if self.save_audio or entry.force_save:
filename = os.path.join(self.save_dir, "retain_%s.wav" % datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S_%f"))
self.audio_obj.write_wav(filename, entry.audio_data)
else:
filename = ''
with open(os.path.join(self.save_dir, "retain.tsv"), 'a', encoding='utf-8') as tsv_file:
tsv_file.write(u'\t'.join([
filename,
text_type(self.audio_obj.get_wav_length_s(entry.audio_data)),
entry.grammar_name,
entry.rule_name,
entry.text,
text_type(entry.likelihood),
text_type(entry.tag),
text_type(entry.has_dictation),
]) + '\n')
def save_all(self, remove=True):
if self.deque:
for i in reversed(range(len(self.deque))):
self.save(i)
if remove:
self.deque.clear()
def __getitem__(self, key):
return self.deque[key]
def __len__(self):
return len(self.deque)
def __bool__(self):
return True
__nonzero__ = __bool__ # PY2 compatibility
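# ---------------------------------------------------------------------------
# Hedged usage sketch for AudioStore (illustrative). A recognizer loop would
# roughly do the following; the text/grammar values are placeholders:
#
# store = AudioStore(audio_obj=mic, maxlen=10, save_dir="retain",
#                    save_audio=True, save_metadata=True)
# for block in mic.iter():
#     store.add_block(block)
#     # ... once the decoder finalizes the utterance ...
# store.finalize("hello world", "my_grammar", "my_rule", likelihood=0.9)
# store.save(0)  # persist the most recent entry immediately
# ---------------------------------------------------------------------------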
class AudioStoreEntry(object):
__slots__ = ('audio_data', 'grammar_name', 'rule_name', 'text', 'likelihood', 'tag', 'has_dictation', 'force_save')
def __init__(self, audio_data, grammar_name, rule_name, text, likelihood, tag, has_dictation, force_save=False):
self.audio_data = audio_data
self.grammar_name = grammar_name
self.rule_name = rule_name
self.text = text
self.likelihood = likelihood
self.tag = tag
self.has_dictation = has_dictation
self.force_save = force_save
def set(self, key, value):
""" Sets given key (as *str*) to value, returning the AudioStoreEntry for chaining; usable in lambda functions. """
setattr(self, key, value)
return self
class WavAudio(object):
""" Class for mimicking normal microphone input, but from wav files. """
@classmethod
def read_file(cls, filename, realtime=False):
""" Yields raw audio blocks from wav file, terminated by a None element. """
if not os.path.isfile(filename):
raise IOError("'%s' is not a file. Please use a different file path.")
with contextlib.closing(wave.open(filename, 'rb')) as file:
# Validate the wave file's header
if file.getnchannels() != MicAudio.CHANNELS:
raise ValueError("WAV file '%s' should use %d channel(s), not %d!"
% (filename, MicAudio.CHANNELS, file.getnchannels()))
elif file.getsampwidth() != MicAudio.SAMPLE_WIDTH:
raise ValueError("WAV file '%s' should use sample width %d, not "
"%d!" % (filename, MicAudio.SAMPLE_WIDTH, file.getsampwidth()))
elif file.getframerate() != MicAudio.SAMPLE_RATE:
raise ValueError("WAV file '%s' should use sample rate %d, not "
"%d!" % (filename, MicAudio.SAMPLE_RATE, file.getframerate()))
next_time = time.time()
for _ in range(0, int(file.getnframes() / MicAudio.BLOCK_SIZE_SAMPLES) + 1):
data = file.readframes(MicAudio.BLOCK_SIZE_SAMPLES)
if not data:
break
if realtime:
time_behind = next_time - time.time()
if time_behind > 0:
time.sleep(time_behind)
next_time += float(MicAudio.BLOCK_SIZE_SAMPLES) / MicAudio.SAMPLE_RATE
yield data
yield None
@classmethod
def read_file_with_vad(cls, filename, realtime=False, **kwargs):
""" Yields raw audio blocks from wav file, after processing by VAD, terminated by a None element. """
vad_audio = VADAudio()
vad_audio_iter = vad_audio.vad_collector(blocks=cls.read_file(filename, realtime=realtime), **kwargs)
return vad_audio_iter
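# ---------------------------------------------------------------------------
# Hedged usage sketch for WavAudio (illustrative). Replays a 16 kHz mono
# int16 wav file through the VAD as if it were live microphone input:
#
# for block in WavAudio.read_file_with_vad("utterance.wav", realtime=False):
#     if block is None:
#         print("phrase boundary")
# ---------------------------------------------------------------------------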
| Versatilus/dragonfly | dragonfly/engines/backend_kaldi/audio.py | Python | lgpl-3.0 | 21,923 |
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
import os
import re
import threading
import traceback
import sickbeard
from sickbeard import clients, common, db, failed_history, helpers, history, logger, notifiers, nzbget, nzbSplitter, sab, show_name_helpers, ui
from sickbeard.common import MULTI_EP_RESULT, Quality, SEASON_RESULT, SNATCHED, SNATCHED_BEST, SNATCHED_PROPER
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import AuthException, ex
from sickrage.providers.GenericProvider import GenericProvider
def _downloadResult(result):
"""
Downloads a result to the appropriate black hole folder.
:param result: SearchResult instance to download.
:return: boolean, True on success
"""
resProvider = result.provider
if resProvider is None:
logger.log("Invalid provider name - this is a coding error, report it please", logger.ERROR)
return False
# nzbs/torrents with an URL can just be downloaded from the provider
if result.resultType in (GenericProvider.NZB, GenericProvider.TORRENT):
newResult = resProvider.download_result(result)
# if it's an nzb data result
elif result.resultType == GenericProvider.NZBDATA:
# get the final file path to the nzb
fileName = ek(os.path.join, sickbeard.NZB_DIR, result.name + ".nzb")
logger.log("Saving NZB to " + fileName)
newResult = True
# save the data to disk
try:
with ek(open, fileName, 'w') as fileOut:
fileOut.write(result.extraInfo[0])
helpers.chmodAsParent(fileName)
except EnvironmentError as e:
logger.log("Error trying to save NZB to black hole: " + ex(e), logger.ERROR)
newResult = False
else:
logger.log("Invalid provider type - this is a coding error, report it please", logger.ERROR)
newResult = False
return newResult
def snatchEpisode(result, endStatus=SNATCHED): # pylint: disable=too-many-branches, too-many-statements
"""
Contains the internal logic necessary to actually "snatch" a result that
has been found.
:param result: SearchResult instance to be snatched.
:param endStatus: the episode status that should be used for the episode object once it's snatched.
:return: boolean, True on success
"""
if result is None:
return False
result.priority = 0 # -1 = low, 0 = normal, 1 = high
if sickbeard.ALLOW_HIGH_PRIORITY:
# if it aired recently make it high priority
for curEp in result.episodes:
if datetime.date.today() - curEp.airdate <= datetime.timedelta(days=7):
result.priority = 1
endStatus = SNATCHED_PROPER if re.search(r'\b(proper|repack|real)\b', result.name, re.I) else endStatus
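    # e.g. "Show.S01E01.PROPER.720p" matches the case-insensitive pattern
    # above, so the snatch is recorded as SNATCHED_PROPER instead of SNATCHED.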
# NZBs can be sent straight to SAB or saved to disk
if result.resultType in (GenericProvider.NZB, GenericProvider.NZBDATA):
if sickbeard.NZB_METHOD == "blackhole":
dlResult = _downloadResult(result)
elif sickbeard.NZB_METHOD == "sabnzbd":
dlResult = sab.sendNZB(result)
elif sickbeard.NZB_METHOD == "nzbget":
            is_proper = endStatus == SNATCHED_PROPER
dlResult = nzbget.sendNZB(result, is_proper)
elif sickbeard.NZB_METHOD == "download_station":
client = clients.getClientInstance(sickbeard.NZB_METHOD)(
sickbeard.SYNOLOGY_DSM_HOST, sickbeard.SYNOLOGY_DSM_USERNAME, sickbeard.SYNOLOGY_DSM_PASSWORD)
dlResult = client.sendNZB(result)
else:
logger.log("Unknown NZB action specified in config: " + sickbeard.NZB_METHOD, logger.ERROR)
dlResult = False
# Torrents can be sent to clients or saved to disk
elif result.resultType == GenericProvider.TORRENT:
# torrents are saved to disk when blackhole mode
if sickbeard.TORRENT_METHOD == "blackhole":
dlResult = _downloadResult(result)
else:
if not result.content and not result.url.startswith('magnet'):
if result.provider.login():
result.content = result.provider.get_url(result.url, returns='content')
if result.content or result.url.startswith('magnet'):
client = clients.getClientInstance(sickbeard.TORRENT_METHOD)()
dlResult = client.sendTORRENT(result)
else:
logger.log("Torrent file content is empty", logger.WARNING)
dlResult = False
else:
logger.log("Unknown result type, unable to download it ({0!r})".format(result.resultType), logger.ERROR)
dlResult = False
if not dlResult:
return False
if sickbeard.USE_FAILED_DOWNLOADS:
failed_history.logSnatch(result)
ui.notifications.message('Episode snatched', result.name)
history.logSnatch(result)
# don't notify when we re-download an episode
sql_l = []
trakt_data = []
for curEpObj in result.episodes:
with curEpObj.lock:
if isFirstBestMatch(result):
curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
else:
curEpObj.status = Quality.compositeStatus(endStatus, result.quality)
sql_l.append(curEpObj.get_sql())
if curEpObj.status not in Quality.DOWNLOADED:
try:
notifiers.notify_snatch("{0} from {1}".format(curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN'), result.provider.name)) # pylint: disable=protected-access
except Exception:
                    # Without this, when a notification fails, it crashes the snatch thread and SR will
# keep snatching until notification is sent
logger.log("Failed to send snatch notification", logger.DEBUG)
trakt_data.append((curEpObj.season, curEpObj.episode))
data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log("Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(result.show.name) + " to Traktv Watchlist", logger.DEBUG)
if data:
notifiers.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
return True
def pickBestResult(results, show): # pylint: disable=too-many-branches
"""
Find the best result out of a list of search results for a show
:param results: list of result objects
:param show: Shows we check for
:return: best result object
"""
results = results if isinstance(results, list) else [results]
logger.log("Picking the best result out of " + str([x.name for x in results]), logger.DEBUG)
bestResult = None
# find the best result for the current episode
for cur_result in results:
if show and cur_result.show is not show:
continue
        # build the black and white list
if show.is_anime:
if not show.release_groups.is_valid(cur_result):
continue
logger.log("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])
anyQualities, bestQualities = Quality.splitQuality(show.quality)
if cur_result.quality not in anyQualities + bestQualities:
logger.log(cur_result.name + " is a quality we know we don't want, rejecting it", logger.DEBUG)
continue
if not show_name_helpers.filter_bad_releases(cur_result.name, parse=False, show=show):
continue
if hasattr(cur_result, 'size'):
if sickbeard.USE_FAILED_DOWNLOADS and failed_history.hasFailed(cur_result.name, cur_result.size,
cur_result.provider.name):
logger.log(cur_result.name + " has previously failed, rejecting it")
continue
if not bestResult:
bestResult = cur_result
elif cur_result.quality in bestQualities and (bestResult.quality < cur_result.quality or bestResult.quality not in bestQualities):
bestResult = cur_result
elif cur_result.quality in anyQualities and bestResult.quality not in bestQualities and bestResult.quality < cur_result.quality:
bestResult = cur_result
elif bestResult.quality == cur_result.quality:
if "proper" in cur_result.name.lower() or "real" in cur_result.name.lower() or "repack" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (repack/proper/real over nuked)")
bestResult = cur_result
elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (normal instead of internal)")
bestResult = cur_result
elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
logger.log("Preferring " + cur_result.name + " (x264 over xvid)")
bestResult = cur_result
if bestResult:
logger.log("Picked " + bestResult.name + " as the best", logger.DEBUG)
else:
logger.log("No result picked.", logger.DEBUG)
return bestResult
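# Illustration of the preference order implemented by pickBestResult, using
# bare quality values (higher = better): given candidates of quality 480, 720
# and 1080 with bestQualities = [720] (and the others in anyQualities), the
# 720 result wins, because a quality on the preferred list beats one that is
# merely numerically higher.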
def isFinalResult(result):
"""
Checks if the given result is good enough quality that we can stop searching for other ones.
:param result: quality to check
:return: True if the result is the highest quality in both the any/best quality lists else False
"""
logger.log("Checking if we should keep searching after we've found " + result.name, logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities, best_qualities = Quality.splitQuality(show_obj.quality)
# if there is a re-download that's higher than this then we definitely need to keep looking
if best_qualities and result.quality < max(best_qualities):
return False
    # if it does not match the show's black and white list it's no good
elif show_obj.is_anime and show_obj.release_groups.is_valid(result):
return False
# if there's no re-download that's higher (above) and this is the highest initial download then we're good
elif any_qualities and result.quality in any_qualities:
return True
elif best_qualities and result.quality == max(best_qualities):
return True
# if we got here than it's either not on the lists, they're empty, or it's lower than the highest required
else:
return False
def isFirstBestMatch(result):
"""
Checks if the given result is a best quality match and if we want to stop searching providers here.
:param result: to check
:return: True if the result is the best quality match else False
"""
logger.log("Checking if we should stop searching for a better quality for for episode " + result.name,
logger.DEBUG)
show_obj = result.episodes[0].show
any_qualities_, best_qualities = Quality.splitQuality(show_obj.quality)
return result.quality in best_qualities if best_qualities else False
def wantedEpisodes(show, fromDate):
"""
Get a list of episodes that we want to download
:param show: Show these episodes are from
:param fromDate: Search from a certain date
:return: list of wanted episodes
"""
wanted = []
if show.paused:
logger.log("Not checking for episodes of {0} because the show is paused".format(show.name), logger.DEBUG)
return wanted
allowed_qualities, preferred_qualities = common.Quality.splitQuality(show.quality)
all_qualities = list(set(allowed_qualities + preferred_qualities))
logger.log("Seeing if we need anything from " + show.name, logger.DEBUG)
con = db.DBConnection()
sql_results = con.select(
"SELECT status, season, episode FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?",
[show.indexerid, fromDate.toordinal()]
)
# check through the list of statuses to see if we want any
for result in sql_results:
cur_status, cur_quality = common.Quality.splitCompositeStatus(int(result[b"status"] or -1))
if cur_status not in {common.WANTED, common.DOWNLOADED, common.SNATCHED, common.SNATCHED_PROPER}:
continue
if cur_status != common.WANTED:
if preferred_qualities:
if cur_quality in preferred_qualities:
continue
elif cur_quality in allowed_qualities:
continue
epObj = show.getEpisode(result[b"season"], result[b"episode"])
epObj.wantedQuality = [i for i in all_qualities if i > cur_quality and i != common.Quality.UNKNOWN]
wanted.append(epObj)
return wanted
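# ---------------------------------------------------------------------------
# Hedged illustration of the query shape used in wantedEpisodes (standalone
# sqlite3, not SickRage code; the table row is made up):
#
# import sqlite3, datetime
# con = sqlite3.connect(':memory:')
# con.execute("CREATE TABLE tv_episodes (showid, status, season, episode, airdate)")
# con.execute("INSERT INTO tv_episodes VALUES (1, 3, 1, 1, ?)",
#             [datetime.date(2017, 1, 2).toordinal()])
# rows = con.execute(
#     "SELECT status, season, episode FROM tv_episodes "
#     "WHERE showid = ? AND season > 0 and airdate > ?",
#     [1, datetime.date(2017, 1, 1).toordinal()]).fetchall()
# ---------------------------------------------------------------------------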
def searchForNeededEpisodes():
"""
Check providers for details on wanted episodes
:return: episodes we have a search hit for
"""
foundResults = {}
didSearch = False
show_list = sickbeard.showList
fromDate = datetime.date.fromordinal(1)
episodes = []
for curShow in show_list:
if not curShow.paused:
sickbeard.name_cache.buildNameCache(curShow)
episodes.extend(wantedEpisodes(curShow, fromDate))
if not episodes:
        # nothing wanted so early out, i.e. avoid whatever arbitrarily
# complex thing a provider cache update entails, for example,
# reading rss feeds
logger.log("No episodes needed.", logger.INFO)
return foundResults.values()
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_daily]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curFoundResults = {}
try:
curFoundResults = curProvider.search_rss(episodes)
except AuthException as e:
logger.log("Authentication error: " + ex(e), logger.WARNING)
continue
except Exception as e:
logger.log("Error while searching " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
# pick a single result for each episode, respecting existing results
for curEp in curFoundResults:
if not curEp.show or curEp.show.paused:
logger.log("Skipping {0} because the show is paused ".format(curEp.prettyName()), logger.DEBUG)
continue
bestResult = pickBestResult(curFoundResults[curEp], curEp.show)
# if all results were rejected move on to the next episode
if not bestResult:
logger.log("All found results for " + curEp.prettyName() + " were rejected.", logger.DEBUG)
continue
# if it's already in the list (from another provider) and the newly found quality is no better then skip it
if curEp in foundResults and bestResult.quality <= foundResults[curEp].quality:
continue
foundResults[curEp] = bestResult
threading.currentThread().name = origThreadName
if not didSearch:
logger.log(
"No NZB/Torrent providers found or enabled in the sickrage config for daily searches. Please check your settings.",
logger.INFO)
return foundResults.values()
def searchProviders(show, episodes, manualSearch=False, downCurQuality=False): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
"""
Walk providers for information on shows
:param show: Show we are looking for
:param episodes: Episodes we hope to find
:param manualSearch: Boolean, is this a manual search?
:param downCurQuality: Boolean, should we re-download currently available quality file
:return: results for search
"""
foundResults = {}
finalResults = []
didSearch = False
# build name cache for show
sickbeard.name_cache.buildNameCache(show)
origThreadName = threading.currentThread().name
providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active() and x.enable_backlog]
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
curProvider.cache.update_cache()
threading.currentThread().name = origThreadName
for curProvider in providers:
threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"
if curProvider.anime_only and not show.is_anime:
logger.log("" + str(show.name) + " is not an anime, skipping", logger.DEBUG)
continue
foundResults[curProvider.name] = {}
searchCount = 0
search_mode = curProvider.search_mode
# Always search for episode when manually searching when in sponly
if search_mode == 'sponly' and manualSearch is True:
search_mode = 'eponly'
while True:
searchCount += 1
if search_mode == 'eponly':
logger.log("Performing episode search for " + show.name)
else:
logger.log("Performing season pack search for " + show.name)
try:
searchResults = curProvider.find_search_results(show, episodes, search_mode, manualSearch, downCurQuality)
except AuthException as error:
logger.log("Authentication error: {0!r}".format(error), logger.WARNING)
break
except Exception as error:
logger.log("Exception while searching {0}. Error: {1!r}".format(curProvider.name, error), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
break
didSearch = True
if len(searchResults):
# make a list of all the results for this provider
for curEp in searchResults:
if curEp in foundResults[curProvider.name]:
foundResults[curProvider.name][curEp] += searchResults[curEp]
else:
foundResults[curProvider.name][curEp] = searchResults[curEp]
break
elif not curProvider.search_fallback or searchCount == 2:
break
if search_mode == 'sponly':
logger.log("Fallback episode search initiated", logger.DEBUG)
search_mode = 'eponly'
else:
logger.log("Fallback season pack search initiate", logger.DEBUG)
search_mode = 'sponly'
# skip to next provider if we have no results to process
if not foundResults[curProvider.name]:
continue
# pick the best season NZB
bestSeasonResult = None
if SEASON_RESULT in foundResults[curProvider.name]:
bestSeasonResult = pickBestResult(foundResults[curProvider.name][SEASON_RESULT], show)
highest_quality_overall = 0
for cur_episode in foundResults[curProvider.name]:
for cur_result in foundResults[curProvider.name][cur_episode]:
if cur_result.quality != Quality.UNKNOWN and cur_result.quality > highest_quality_overall:
highest_quality_overall = cur_result.quality
logger.log("The highest quality of any match is " + Quality.qualityStrings[highest_quality_overall],
logger.DEBUG)
# see if every episode is wanted
if bestSeasonResult:
searchedSeasons = {str(x.season) for x in episodes}
# get the quality of the season nzb
seasonQual = bestSeasonResult.quality
logger.log(
"The quality of the season " + bestSeasonResult.provider.provider_type + " is " + Quality.qualityStrings[
seasonQual], logger.DEBUG)
main_db_con = db.DBConnection()
allEps = [int(x[b"episode"])
for x in main_db_con.select("SELECT episode FROM tv_episodes WHERE showid = ? AND ( season IN ( " + ','.join(searchedSeasons) + " ) )",
[show.indexerid])]
logger.log(
"Executed query: [SELECT episode FROM tv_episodes WHERE showid = {0} AND season in {1}]".format(show.indexerid, ','.join(searchedSeasons)))
logger.log("Episode list: " + str(allEps), logger.DEBUG)
allWanted = True
anyWanted = False
for curEpNum in allEps:
for season in {x.season for x in episodes}:
if not show.wantEpisode(season, curEpNum, seasonQual, downCurQuality):
allWanted = False
else:
anyWanted = True
# if we need every ep in the season and there's nothing better then just download this and be done with it (unless single episodes are preferred)
if allWanted and bestSeasonResult.quality == highest_quality_overall:
logger.log(
"Every ep in this season is needed, downloading the whole " + bestSeasonResult.provider.provider_type + " " + bestSeasonResult.name)
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return [bestSeasonResult]
elif not anyWanted:
logger.log(
"No eps from this season are wanted at this quality, ignoring the result of " + bestSeasonResult.name,
logger.DEBUG)
else:
if bestSeasonResult.provider.provider_type == GenericProvider.NZB:
logger.log("Breaking apart the NZB and adding the individual ones to our results", logger.DEBUG)
# if not, break it apart and add them as the lowest priority results
individualResults = nzbSplitter.split_result(bestSeasonResult)
for curResult in individualResults:
if len(curResult.episodes) == 1:
epNum = curResult.episodes[0].episode
elif len(curResult.episodes) > 1:
epNum = MULTI_EP_RESULT
if epNum in foundResults[curProvider.name]:
foundResults[curProvider.name][epNum].append(curResult)
else:
foundResults[curProvider.name][epNum] = [curResult]
# If this is a torrent all we can do is leech the entire torrent, user will have to select which eps not do download in his torrent client
else:
# Season result from Torrent Provider must be a full-season torrent, creating multi-ep result for it.
logger.log(
"Adding multi-ep result for full-season torrent. Set the episodes you don't want to 'don't download' in your torrent client if desired!")
epObjs = []
for curEpNum in allEps:
for season in {x.season for x in episodes}:
epObjs.append(show.getEpisode(season, curEpNum))
bestSeasonResult.episodes = epObjs
if MULTI_EP_RESULT in foundResults[curProvider.name]:
foundResults[curProvider.name][MULTI_EP_RESULT].append(bestSeasonResult)
else:
foundResults[curProvider.name][MULTI_EP_RESULT] = [bestSeasonResult]
# go through multi-ep results and see if we really want them or not, get rid of the rest
multiResults = {}
if MULTI_EP_RESULT in foundResults[curProvider.name]:
for _multiResult in foundResults[curProvider.name][MULTI_EP_RESULT]:
logger.log("Seeing if we want to bother with multi-episode result " + _multiResult.name, logger.DEBUG)
# Filter result by ignore/required/whitelist/blacklist/quality, etc
multiResult = pickBestResult(_multiResult, show)
if not multiResult:
continue
# see how many of the eps that this result covers aren't covered by single results
neededEps = []
notNeededEps = []
for epObj in multiResult.episodes:
# if we have results for the episode
if epObj.episode in foundResults[curProvider.name] and len(foundResults[curProvider.name][epObj.episode]) > 0:
notNeededEps.append(epObj.episode)
else:
neededEps.append(epObj.episode)
logger.log(
"Single-ep check result is neededEps: " + str(neededEps) + ", notNeededEps: " + str(notNeededEps),
logger.DEBUG)
if not neededEps:
logger.log("All of these episodes were covered by single episode results, ignoring this multi-episode result", logger.DEBUG)
continue
# check if these eps are already covered by another multi-result
multiNeededEps = []
multiNotNeededEps = []
for epObj in multiResult.episodes:
if epObj.episode in multiResults:
multiNotNeededEps.append(epObj.episode)
else:
multiNeededEps.append(epObj.episode)
logger.log(
"Multi-ep check result is multiNeededEps: " + str(multiNeededEps) + ", multiNotNeededEps: " + str(
multiNotNeededEps), logger.DEBUG)
if not multiNeededEps:
logger.log(
"All of these episodes were covered by another multi-episode nzbs, ignoring this multi-ep result",
logger.DEBUG)
continue
# don't bother with the single result if we're going to get it with a multi result
for epObj in multiResult.episodes:
multiResults[epObj.episode] = multiResult
if epObj.episode in foundResults[curProvider.name]:
logger.log(
"A needed multi-episode result overlaps with a single-episode result for ep #" + str(
epObj.episode) + ", removing the single-episode results from the list", logger.DEBUG)
del foundResults[curProvider.name][epObj.episode]
# of all the single ep results narrow it down to the best one for each episode
finalResults += set(multiResults.values())
for curEp in foundResults[curProvider.name]:
if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
continue
if not foundResults[curProvider.name][curEp]:
continue
# if all results were rejected move on to the next episode
bestResult = pickBestResult(foundResults[curProvider.name][curEp], show)
if not bestResult:
continue
        # add result if it's not a duplicate
found = False
for i, result in enumerate(finalResults):
for bestResultEp in bestResult.episodes:
if bestResultEp in result.episodes:
if result.quality < bestResult.quality:
finalResults.pop(i)
else:
found = True
if not found:
finalResults += [bestResult]
# check that we got all the episodes we wanted first before doing a match and snatch
wantedEpCount = 0
for wantedEp in episodes:
for result in finalResults:
if wantedEp in result.episodes and isFinalResult(result):
wantedEpCount += 1
# make sure we search every provider for results unless we found everything we wanted
if wantedEpCount == len(episodes):
break
if not didSearch:
logger.log("No NZB/Torrent providers found or enabled in the sickrage config for backlog searches. Please check your settings.",
logger.INFO)
# Remove provider from thread name before return results
threading.currentThread().name = origThreadName
return finalResults
| b0ttl3z/SickRage | sickbeard/search.py | Python | gpl-3.0 | 30,123 |
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import _check_cv as check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
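# Illustrative sketch (added; not part of the original module): the length
# computation above multiplies the number of values per parameter within
# each sub-grid and sums across sub-grids:
#
#   >>> len(ParameterGrid({'a': [1, 2], 'b': [True, False]}))
#   4
#   >>> len(ParameterGrid([{'kernel': ['linear']},
#   ...                    {'kernel': ['rbf'], 'gamma': [1, 10]}]))
#   3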
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# get complete grid and yield from it
param_grid = list(ParameterGrid(self.param_distributions))
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
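# Illustrative sketch (added; not part of the original module): calling
# fit_grid_point directly for a single train/test split. The estimator,
# data and split below are assumptions for demonstration only.
#
#   >>> import numpy as np
#   >>> from sklearn.svm import SVC
#   >>> from sklearn.metrics.scorer import check_scoring
#   >>> X, y = np.random.rand(10, 2), np.array([0, 1] * 5)
#   >>> train, test = np.arange(8), np.arange(8, 10)
#   >>> scorer = check_scoring(SVC(), scoring='accuracy')
#   >>> score, params, n_test = fit_grid_point(
#   ...     X, y, SVC(), {'C': 1.0}, train, test, scorer, verbose=0)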
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance; declaring static
    # __slots__ tells the Python interpreter not to recreate it. Furthermore,
    # we don't need any additional slot in the subclass, so we set __slots__
    # to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class ChangedBehaviorWarning(UserWarning):
pass
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets:
        # (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
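            # Worked example (added; values hypothetical): fold scores
            # [0.8, 0.9] on [100, 50] test samples average to
            # (0.8*100 + 0.9*50) / 150 = 0.833... when iid=True, versus the
            # unweighted mean (0.8 + 0.9) / 2 = 0.85 when iid=False.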
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
        Dictionary with parameter names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
degree=..., gamma=..., kernel='rbf', max_iter=-1,
probability=False, random_state=None, shrinking=True,
tol=..., verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
        Dictionary with parameter names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
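# Illustrative sketch (added; not part of the original module): a minimal
# randomized search. The estimator, distribution and data are assumptions
# for demonstration only.
#
#   >>> from scipy.stats import expon
#   >>> from sklearn.svm import SVC
#   >>> from sklearn.datasets import load_iris
#   >>> iris = load_iris()
#   >>> search = RandomizedSearchCV(SVC(), {'C': expon(scale=10)},
#   ...                             n_iter=5, random_state=0)
#   >>> search = search.fit(iris.data, iris.target)
#   >>> sorted(search.best_params_.keys())
#   ['C']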
| WangWenjun559/Weiss | summary/sumy/sklearn/grid_search.py | Python | apache-2.0 | 34,641 |
from common.models import Position, Region, TeamMember
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APIRequestFactory, APITestCase
from teams.models import Team
from tf_auth.models import TFUser
from .serializers import MembershipSerializer, PositionSerializer, RegionSerializer
User = get_user_model()
# Some of these are defined by DRF
ALLOWED_LIST_METHODS = ('head', 'options', 'get', 'post', )
ALLOWED_DETAIL_METHODS = ('head', 'options', 'get', 'put', 'patch', 'delete', )
AUTHORIZED_LIST_METHODS = ('head', 'options', 'get', )
AUTHORIZED_DETAIL_METHODS = ('head', 'options', 'get', )
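# Illustrative sketch (added; these tuples are not exercised directly in the
# tests below): they could drive a parametrized assertion such as
#
#   for method in ALLOWED_LIST_METHODS:
#       response = getattr(self.client, method)(url)
#       self.assertNotEqual(response.status_code,
#                           status.HTTP_405_METHOD_NOT_ALLOWED)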
class CommonApiSerializerTests(APITestCase):
"""
Serializer tests.
"""
def setUp(self):
self.factory = APIRequestFactory()
def test_RegionSerializer(self):
name = 'Test Region'
region = Region.objects.create(name=name)
url = reverse('region-detail', (region.id, ))
request = self.factory.get(url)
absolute_url = request.build_absolute_uri()
serializer = RegionSerializer(instance=region, context={'request': request})
self.assertEqual(serializer.data, {'id': str(region.id),
'name': name,
'url': absolute_url})
def test_PositionSerializer(self):
name = 'Test Position'
position = Position.objects.create(name=name)
url = reverse('position-detail', (position.id, ))
request = self.factory.get(url)
absolute_url = request.build_absolute_uri()
serializer = PositionSerializer(instance=position, context={'request': request})
self.assertEqual(serializer.data, {'id': str(position.id),
'name': name,
'secondary': False,
'url': absolute_url})
def test_MembershipSerializer(self):
user = TFUser.objects.create_user(11, 'admin', 'lenny+tftests@prattdev.net', '12345678')
player = user.player
team = Team.objects.create(name='team')
member = TeamMember.objects.create(player=player, team=team)
url = reverse('teammember-detail', (member.id, ))
request = self.factory.get(url)
absolute_url = request.build_absolute_uri()
serializer = MembershipSerializer(instance=member, context={'request': request})
expected_data = {
'id': str(member.id),
'player': member.player.id,
'team': member.team.id,
'position': None,
'url': absolute_url
}
self.assertLessEqual(expected_data.items(), serializer.data.items())
class BaseTestCases:
class AuthenticatedTests(APITestCase):
"""
Base test case for an authenticated user.
"""
@classmethod
def setUpTestData(cls):
super(BaseTestCases.AuthenticatedTests, cls).setUpTestData()
cls.user = User.objects.create_user(11, 'admin', 'lenny+tftests@prattdev.net', '01234567')
token, _ = Token.objects.get_or_create(user=cls.user)
cls.token = token
def setUp(self):
# Authenticate user
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
class CommonModelTests:
"""
Test cases for HTTP methods against the common models (Position and Region)
"""
class UnauthenticatedListViewTests(APITestCase):
url = ''
def test_head(self):
response = self.client.head(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_options(self):
response = self.client.options(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_get(self):
response = self.client.get(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_post(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put(self):
response = self.client.put(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch(self):
response = self.client.patch(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_trace(self):
response = self.client.trace(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class UnauthenticatedDetailViewTests(APITestCase):
url = ''
def test_head(self):
response = self.client.head(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_options(self):
response = self.client.options(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_get(self):
response = self.client.get(self.url)
self.assertTrue(200 <= response.status_code < 300)
def test_post(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put(self):
response = self.client.put(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch(self):
response = self.client.patch(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_trace(self):
response = self.client.trace(self.url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class UnauthenticatedPositionListViewTests(CommonModelTests.UnauthenticatedListViewTests):
url = reverse('position-list')
class UnauthenticatedPositionDetailViewTests(CommonModelTests.UnauthenticatedDetailViewTests):
@classmethod
def setUpTestData(cls):
super(UnauthenticatedPositionDetailViewTests, cls).setUpTestData()
cls.url = reverse('position-detail', (Position.objects.first().pk, ))
class UnauthenticatedRegionListViewTests(CommonModelTests.UnauthenticatedListViewTests):
url = reverse('region-list')
class UnauthenticatedRegionDetailViewTests(CommonModelTests.UnauthenticatedDetailViewTests):
@classmethod
def setUpTestData(cls):
super(UnauthenticatedRegionDetailViewTests, cls).setUpTestData()
cls.url = reverse('region-detail', (Region.objects.first().pk, ))
class AuthenticatedPositionListViewTests(BaseTestCases.AuthenticatedTests,
CommonModelTests.UnauthenticatedListViewTests):
url = reverse('position-list')
class AuthenticatedPositionDetailViewTests(BaseTestCases.AuthenticatedTests,
CommonModelTests.UnauthenticatedDetailViewTests):
    @classmethod
    def setUpTestData(cls):
        # resolve the URL once the test database is ready, matching the
        # sibling test cases above
        super(AuthenticatedPositionDetailViewTests, cls).setUpTestData()
        cls.url = reverse('position-detail', (Position.objects.first().pk, ))
class AuthenticatedRegionListViewTests(BaseTestCases.AuthenticatedTests,
CommonModelTests.UnauthenticatedListViewTests):
url = reverse('region-list')
class AuthenticatedRegionDetailViewTests(BaseTestCases.AuthenticatedTests,
CommonModelTests.UnauthenticatedDetailViewTests):
@classmethod
def setUpTestData(cls):
super(AuthenticatedRegionDetailViewTests, cls).setUpTestData()
cls.url = reverse('region-detail', (Region.objects.first().pk, ))
# Viewset test cases
class UnauthenticatedRegionListViewSetTests(APITestCase):
url = reverse('region-list')
def test_head(self):
response = self.client.head(self.url)
self.assertEqual(response.data['count'], Region.objects.count())
def test_options(self):
response = self.client.options(self.url)
self.assertEqual(response.data['name'], 'Region List')
self.assertTrue('application/json' in response.data['renders'])
def test_get(self):
response = self.client.get(self.url)
self.assertEqual(response.data['count'], Region.objects.count())
class UnauthenticatedRegionDetailViewSetTests(APITestCase):
@classmethod
def setUpTestData(cls):
super(UnauthenticatedRegionDetailViewSetTests, cls).setUpTestData()
cls.region = Region.objects.first()
cls.url = reverse('region-detail', (cls.region.pk, ))
def assertDataEqualsInstance(self, data):
self.assertEqual(data['id'], str(self.region.id))
self.assertEqual(data['name'], str(self.region.name))
def test_head(self):
response = self.client.head(self.url)
self.assertDataEqualsInstance(response.data)
def test_options(self):
response = self.client.options(self.url)
self.assertEqual(response.data['name'], 'Region Instance')
self.assertTrue('application/json' in response.data['renders'])
def test_get(self):
response = self.client.get(self.url)
self.assertDataEqualsInstance(response.data)
class UnauthenticatedPositionListViewSetTests(APITestCase):
url = reverse('position-list')
def test_head(self):
response = self.client.head(self.url)
self.assertEqual(response.data['count'], Position.objects.count())
def test_options(self):
response = self.client.options(self.url)
self.assertEqual(response.data['name'], 'Position List')
self.assertTrue('application/json' in response.data['renders'])
def test_get(self):
response = self.client.get(self.url)
self.assertEqual(response.data['count'], Position.objects.count())
class UnauthenticatedPositionDetailViewSetTests(APITestCase):
@classmethod
def setUpTestData(cls):
super(UnauthenticatedPositionDetailViewSetTests, cls).setUpTestData()
cls.position = Position.objects.first()
cls.url = reverse('position-detail', (cls.position.pk, ))
def assertDataEqualsInstance(self, data):
self.assertEqual(data['id'], str(self.position.id))
self.assertEqual(data['name'], str(self.position.name))
def test_head(self):
response = self.client.head(self.url)
self.assertDataEqualsInstance(response.data)
def test_options(self):
response = self.client.options(self.url)
self.assertEqual(response.data['name'], 'Position Instance')
self.assertTrue('application/json' in response.data['renders'])
def test_get(self):
response = self.client.get(self.url)
self.assertDataEqualsInstance(response.data)
class AuthenticatedRegionListViewSetTests(BaseTestCases.AuthenticatedTests,
UnauthenticatedRegionListViewSetTests):
pass
class AuthenticatedRegionDetailViewSetTests(BaseTestCases.AuthenticatedTests,
UnauthenticatedRegionDetailViewSetTests):
pass
class AuthenticatedPositionListViewSetTests(BaseTestCases.AuthenticatedTests,
UnauthenticatedPositionListViewSetTests):
pass
class AuthenticatedPositionDetailViewSetTests(BaseTestCases.AuthenticatedTests,
UnauthenticatedPositionDetailViewSetTests):
pass
| prattl/teamfinder | api/common/api/tests.py | Python | apache-2.0 | 11,895 |
#!/usr/bin/python
# Copyright 2018 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import requests
import sys
# read arguments
if len(sys.argv) != 5:
sys.exit("Received incorrect number of arguments to script, 4 expected.")
conf_user = sys.argv[1]
conf_token = sys.argv[2]
page_id = sys.argv[3]
html_href = sys.argv[4]
print("page_id is: %(page_id)s" % {'page_id': page_id})
print("html_href is: %(html_href)s" % {'html_href': html_href})
basic_auth = (conf_user, conf_token)
print("basic_auth is %(auth)s" % {'auth': basic_auth})
headers = {'content-type': 'application/json'}
# get newton existing page with body and version number
url = ('https://bigswitch.atlassian.net/wiki/rest/api/content/%(page_id)s'
'?expand=body.storage,version' % {'page_id': page_id})
r = requests.get(url, headers=headers, auth=basic_auth)
json_out = r.json()
version_number = json_out['version']['number']
new_ver_num = version_number + 1
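# note (added): datetime.now() returns local time; the "PST" suffix in the
# format string below is hard-coded, not derived from the host timezone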
datetime_str = datetime.datetime.now().strftime("%B %d, %Y at %I:%M%p PST")
new_value = html_href + '<br/>' + datetime_str
# create new data to be uploaded
data = ('{"id":"%(page_id)s",'
'"body":{"storage":{"value": "%(new_value)s",'
'"representation":"storage"}},'
'"version": {"number": %(new_ver_num)s},'
'"type":"%(type)s","title":"%(title)s"}'
% {'page_id': page_id,
'new_value': new_value,
'new_ver_num': new_ver_num,
'type': json_out['type'],
'title': json_out['title']})
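# Note (added): building the JSON payload by string interpolation breaks if
# the title or html_href contain quotes or backslashes. A safer sketch using
# only the standard library would be:
#
#   import json
#   data = json.dumps({
#       'id': page_id,
#       'body': {'storage': {'value': new_value,
#                            'representation': 'storage'}},
#       'version': {'number': new_ver_num},
#       'type': json_out['type'],
#       'title': json_out['title'],
#   })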
p = requests.put(url, data=data, headers=headers, auth=basic_auth)
print(p.json())
| wolverineav/networking-bigswitch | build_packages/update_wiki.py | Python | apache-2.0 | 2,178 |
import sys
from greenery.lego import parse
regexes = sys.argv[1:]
if len(regexes) < 2:
print("Please supply several regexes to compute their intersection, union and concatenation.")
print("E.g. \"19.*\" \"\\d{4}-\\d{2}-\\d{2}\"")
else:
p = parse(regexes[0])
for regex in regexes[1:]:
p &= parse(regex)
print("Intersection: %s" % ( p ))
p = parse(regexes[0])
for regex in regexes[1:]:
p |= parse(regex)
print("Union: %s" % ( p ))
p = parse(regexes[0])
for regex in regexes[1:]:
p += parse(regex)
print("Concatenation: %s" % ( p ))
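# Illustrative run (added; the intersection line follows greenery's
# documented example, the other outputs are elided):
#
#   $ python main.py "19.*" "\d{4}-\d{2}-\d{2}"
#   Intersection: 19\d{2}-\d{2}-\d{2}
#   Union: ...
#   Concatenation: ...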
| AdeebNqo/sublimegen | src/greenery/main.py | Python | apache-2.0 | 562 |
import numpy as np
NR_PER_CONDITION = 1000
neuro_sigma = 4
neuro_mean = 0
satis_sigma = 4
satis_mean = 0
print "Start drawing"
bins = {
5: np.array([-6, -3, 0, 3, 6]),
7: np.array([-6, -4, -2, 0, 2, 4, 6])
}
borders = {
5: np.array([-4.5,-1.5,1.5,4.5]),
7: np.array([-5.,-3.,-1.,1,3,5])
}
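# Worked example of the binning below (added for clarity): with the 5-point
# scale, np.digitize([-2.0], borders[5]) yields index 1 and bins[5][1] == -3,
# so a raw normal draw of -2.0 snaps to the scale point -3; draws below -4.5
# map to -6, and draws between -1.5 and 1.5 map to 0.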
conditions = [
{'cond': 1, 'first': 5, 'second': 5},
{'cond': 2, 'first': 7, 'second': 7},
{'cond': 3, 'first': 5, 'second': 7},
{'cond': 4, 'first': 7, 'second': 5}
]
neuro_vals = np.empty([12,NR_PER_CONDITION])
satis_vals = np.empty([12,NR_PER_CONDITION])
outfile = file('output.dat', 'w')
outfile.write('cond')
for i in range(12):
outfile.write('\tneuro'+str(i+1))
for i in range(12):
outfile.write('\tsatis'+str(i+1))
outfile.write('\n')
for cond in conditions:
print "Writing condition ", cond['cond']
for i in range(12):
neuro = neuro_sigma * np.random.randn(NR_PER_CONDITION) + neuro_mean
neuro_index = np.digitize(neuro, borders[cond['first']])
neuro_vals[i] = bins[cond['first']][neuro_index]
satis = satis_sigma * np.random.randn(NR_PER_CONDITION) + satis_mean
satis_index = np.digitize(satis, borders[cond['second']])
satis_vals[i] = bins[cond['second']][satis_index]
cond_arr = np.full([1,NR_PER_CONDITION], cond['cond'])
output = np.concatenate((cond_arr, neuro_vals, satis_vals) )
np.savetxt(outfile, output.transpose(), fmt="%2i")
outfile.close()
print "Finished"
| mpauly/psych-simulation | simulate.py | Python | apache-2.0 | 1,499 |
from .settings import * # NOQA
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
DEBUG = True
TASTYPIE_FULL_DEBUG = True
| phalt/pokeapi | config/local.py | Python | bsd-3-clause | 322 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1FlowSchemaSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'distinguisher_method': 'V1beta1FlowDistinguisherMethod',
'matching_precedence': 'int',
'priority_level_configuration': 'V1beta1PriorityLevelConfigurationReference',
'rules': 'list[V1beta1PolicyRulesWithSubjects]'
}
attribute_map = {
'distinguisher_method': 'distinguisherMethod',
'matching_precedence': 'matchingPrecedence',
'priority_level_configuration': 'priorityLevelConfiguration',
'rules': 'rules'
}
def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None): # noqa: E501
"""V1beta1FlowSchemaSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._distinguisher_method = None
self._matching_precedence = None
self._priority_level_configuration = None
self._rules = None
self.discriminator = None
if distinguisher_method is not None:
self.distinguisher_method = distinguisher_method
if matching_precedence is not None:
self.matching_precedence = matching_precedence
self.priority_level_configuration = priority_level_configuration
if rules is not None:
self.rules = rules
@property
def distinguisher_method(self):
"""Gets the distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:return: The distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: V1beta1FlowDistinguisherMethod
"""
return self._distinguisher_method
@distinguisher_method.setter
def distinguisher_method(self, distinguisher_method):
"""Sets the distinguisher_method of this V1beta1FlowSchemaSpec.
:param distinguisher_method: The distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:type: V1beta1FlowDistinguisherMethod
"""
self._distinguisher_method = distinguisher_method
@property
def matching_precedence(self):
"""Gets the matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
:return: The matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: int
"""
return self._matching_precedence
@matching_precedence.setter
def matching_precedence(self, matching_precedence):
"""Sets the matching_precedence of this V1beta1FlowSchemaSpec.
`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
:param matching_precedence: The matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
:type: int
"""
self._matching_precedence = matching_precedence
@property
def priority_level_configuration(self):
"""Gets the priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:return: The priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: V1beta1PriorityLevelConfigurationReference
"""
return self._priority_level_configuration
@priority_level_configuration.setter
def priority_level_configuration(self, priority_level_configuration):
"""Sets the priority_level_configuration of this V1beta1FlowSchemaSpec.
:param priority_level_configuration: The priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:type: V1beta1PriorityLevelConfigurationReference
"""
if self.local_vars_configuration.client_side_validation and priority_level_configuration is None: # noqa: E501
raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`") # noqa: E501
self._priority_level_configuration = priority_level_configuration
@property
def rules(self):
"""Gets the rules of this V1beta1FlowSchemaSpec. # noqa: E501
`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
:return: The rules of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: list[V1beta1PolicyRulesWithSubjects]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1beta1FlowSchemaSpec.
`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
:param rules: The rules of this V1beta1FlowSchemaSpec. # noqa: E501
:type: list[V1beta1PolicyRulesWithSubjects]
"""
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1FlowSchemaSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1FlowSchemaSpec):
return True
return self.to_dict() != other.to_dict()
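# Illustrative sketch (added; not generated code): constructing a minimal
# spec. The priority level name is an assumption for demonstration only.
#
#   from kubernetes import client
#   ref = client.V1beta1PriorityLevelConfigurationReference(
#       name='workload-low')
#   spec = client.V1beta1FlowSchemaSpec(priority_level_configuration=ref,
#                                       matching_precedence=1000)
#   print(spec.to_dict())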
| kubernetes-client/python | kubernetes/client/models/v1beta1_flow_schema_spec.py | Python | apache-2.0 | 8,042 |
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('wa')
J.url = 'http://wa.gov'
| sunlightlabs/billy | billy2pupa/wa.py | Python | bsd-3-clause | 110 |
# -*- coding: utf-8 -*-
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import ldap
import mock
from oslo_config import cfg
from testtools import matchers
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
from keystone.common import sql
from keystone import exception
from keystone import identity
from keystone.identity.mapping_backends import mapping as map
from keystone import resource
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import fakeldap
from keystone.tests.unit import identity_mapping as mapping_sql
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import test_backend
CONF = cfg.CONF
def create_group_container(identity_api):
# Create the groups base entry (ou=Groups,cn=example,cn=com)
group_api = identity_api.driver.group
conn = group_api.get_connection()
dn = 'ou=Groups,cn=example,cn=com'
conn.add_s(dn, [('objectclass', ['organizationalUnit']),
('ou', ['Groups'])])
class BaseLDAPIdentity(test_backend.IdentityTests):
def setUp(self):
super(BaseLDAPIdentity, self).setUp()
self.clear_database()
common_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.load_backends()
self.load_fixtures(default_fixtures)
self.addCleanup(common_ldap_core._HANDLERS.clear)
def _get_domain_fixture(self):
"""Domains in LDAP are read-only, so just return the static one."""
return self.resource_api.get_domain(CONF.identity.default_domain_id)
def clear_database(self):
for shelf in fakeldap.FakeShelves:
fakeldap.FakeShelves[shelf].clear()
def reload_backends(self, domain_id):
# Only one backend unless we are using separate domain backends
self.load_backends()
def get_config(self, domain_id):
# Only one conf structure unless we are using separate domain backends
return CONF
def config_overrides(self):
super(BaseLDAPIdentity, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
def config_files(self):
config_files = super(BaseLDAPIdentity, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
return config_files
def get_user_enabled_vals(self, user):
user_dn = (
self.identity_api.driver.user._id_to_dn_string(user['id']))
enabled_attr_name = CONF.ldap.user_enabled_attribute
ldap_ = self.identity_api.driver.user.get_connection()
res = ldap_.search_s(user_dn,
ldap.SCOPE_BASE,
u'(sn=%s)' % user['name'])
if enabled_attr_name in res[0][1]:
return res[0][1][enabled_attr_name]
else:
return None
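        # Note (added for clarity): with the fake LDAP driver used by these
        # tests, search_s returns a list of (dn, attributes) tuples, so
        # res[0][1] above is the attribute dict of the matched user entry.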
def test_build_tree(self):
"""Regression test for building the tree names
"""
user_api = identity.backends.ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn)
def test_configurable_allowed_user_actions(self):
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id,
'tenants': ['bar']}
user = self.identity_api.create_user(user)
self.identity_api.get_user(user['id'])
user['password'] = u'fäképass2'
self.identity_api.update_user(user['id'], user)
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
def test_configurable_forbidden_user_actions(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_allow_create = False
conf.ldap.user_allow_update = False
conf.ldap.user_allow_delete = False
self.reload_backends(CONF.identity.default_domain_id)
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id,
'tenants': ['bar']}
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
user)
self.user_foo['password'] = u'fäképass2'
self.assertRaises(exception.ForbiddenAction,
self.identity_api.update_user,
self.user_foo['id'],
self.user_foo)
self.assertRaises(exception.ForbiddenAction,
self.identity_api.delete_user,
self.user_foo['id'])
def test_configurable_forbidden_create_existing_user(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_allow_create = False
self.reload_backends(CONF.identity.default_domain_id)
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
self.user_foo)
def test_user_filter(self):
user_ref = self.identity_api.get_user(self.user_foo['id'])
self.user_foo.pop('password')
self.assertDictEqual(user_ref, self.user_foo)
conf = self.get_config(user_ref['domain_id'])
conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
self.reload_backends(user_ref['domain_id'])
# invalidate the cache if the result is cached.
self.identity_api.get_user.invalidate(self.identity_api,
self.user_foo['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
self.user_foo['id'])
def test_remove_role_grant_from_user_and_project(self):
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'])
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'])
self.assertEqual(0, len(roles_ref))
self.assertRaises(exception.RoleAssignmentNotFound,
self.assignment_api.delete_grant,
user_id=self.user_foo['id'],
project_id=self.tenant_baz['id'],
role_id='member')
def test_get_and_remove_role_grant_by_group_and_project(self):
new_domain = self._get_domain_fixture()
new_group = {'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
new_user = {'name': 'new_user', 'enabled': True,
'domain_id': new_domain['id']}
new_user = self.identity_api.create_user(new_user)
self.identity_api.add_user_to_group(new_user['id'],
new_group['id'])
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual([], roles_ref)
self.assertEqual(0, len(roles_ref))
self.assignment_api.create_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertNotEmpty(roles_ref)
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual(0, len(roles_ref))
self.assertRaises(exception.RoleAssignmentNotFound,
self.assignment_api.delete_grant,
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
def test_get_and_remove_role_grant_by_group_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_role_assignment_by_domain_not_found(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_del_role_assignment_by_domain_not_found(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_role_grant_by_user_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_correct_role_grant_from_a_mix(self):
self.skipTest('Blocked by bug 1101287')
def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_role_grant_by_group_and_cross_domain_project(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_role_grant_by_user_and_cross_domain_project(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_multi_role_grant_by_user_group_on_project_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_delete_role_with_user_and_group_grants(self):
self.skipTest('Blocked by bug 1101287')
def test_delete_user_with_group_project_domain_links(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_delete_group_with_user_project_domain_links(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_list_projects_for_user(self):
domain = self._get_domain_fixture()
user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
user1 = self.identity_api.create_user(user1)
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertThat(user_projects, matchers.HasLength(0))
# new grant(user1, role_member, tenant_bar)
self.assignment_api.create_grant(user_id=user1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
# new grant(user1, role_member, tenant_baz)
self.assignment_api.create_grant(user_id=user1['id'],
project_id=self.tenant_baz['id'],
role_id=self.role_member['id'])
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertThat(user_projects, matchers.HasLength(2))
        # Now, check the number of projects granted through groups
user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
user2 = self.identity_api.create_user(user2)
group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group1 = self.identity_api.create_group(group1)
self.identity_api.add_user_to_group(user2['id'], group1['id'])
# new grant(group1(user2), role_member, tenant_bar)
self.assignment_api.create_grant(group_id=group1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
# new grant(group1(user2), role_member, tenant_baz)
self.assignment_api.create_grant(group_id=group1['id'],
project_id=self.tenant_baz['id'],
role_id=self.role_member['id'])
user_projects = self.assignment_api.list_projects_for_user(user2['id'])
self.assertThat(user_projects, matchers.HasLength(2))
# new grant(group1(user2), role_other, tenant_bar)
self.assignment_api.create_grant(group_id=group1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_other['id'])
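        # list_projects_for_user de-duplicates projects: the extra role on
        # tenant_bar must not add a second entry for the same project, as
        # the length check below verifies.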
user_projects = self.assignment_api.list_projects_for_user(user2['id'])
self.assertThat(user_projects, matchers.HasLength(2))
def test_list_projects_for_user_and_groups(self):
domain = self._get_domain_fixture()
# Create user1
user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'domain_id': domain['id'], 'enabled': True}
user1 = self.identity_api.create_user(user1)
# Create new group for user1
group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group1 = self.identity_api.create_group(group1)
# Add user1 to group1
self.identity_api.add_user_to_group(user1['id'], group1['id'])
# Now, add grant to user1 and group1 in tenant_bar
self.assignment_api.create_grant(user_id=user1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(group_id=group1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
# The result is user1 has only one project granted
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertThat(user_projects, matchers.HasLength(1))
# Now, delete user1 grant into tenant_bar and check
self.assignment_api.delete_grant(user_id=user1['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
# The result is user1 has only one project granted.
# Granted through group1.
user_projects = self.assignment_api.list_projects_for_user(user1['id'])
self.assertThat(user_projects, matchers.HasLength(1))
def test_list_projects_for_user_with_grants(self):
domain = self._get_domain_fixture()
new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': domain['id']}
new_user = self.identity_api.create_user(new_user)
group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group1 = self.identity_api.create_group(group1)
group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group2 = self.identity_api.create_group(group2)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.resource_api.create_project(project1['id'], project1)
project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain['id']}
self.resource_api.create_project(project2['id'], project2)
self.identity_api.add_user_to_group(new_user['id'],
group1['id'])
self.identity_api.add_user_to_group(new_user['id'],
group2['id'])
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(group_id=group2['id'],
project_id=project2['id'],
role_id=self.role_admin['id'])
user_projects = self.assignment_api.list_projects_for_user(
new_user['id'])
self.assertEqual(3, len(user_projects))
def test_create_duplicate_user_name_in_different_domains(self):
self.skipTest('Domains are read-only against LDAP')
def test_create_duplicate_project_name_in_different_domains(self):
self.skipTest('Domains are read-only against LDAP')
def test_create_duplicate_group_name_in_different_domains(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_move_user_between_domains(self):
self.skipTest('Domains are read-only against LDAP')
def test_move_user_between_domains_with_clashing_names_fails(self):
self.skipTest('Domains are read-only against LDAP')
def test_move_group_between_domains(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_move_group_between_domains_with_clashing_names_fails(self):
self.skipTest('Domains are read-only against LDAP')
def test_move_project_between_domains(self):
self.skipTest('Domains are read-only against LDAP')
def test_move_project_between_domains_with_clashing_names_fails(self):
self.skipTest('Domains are read-only against LDAP')
def test_get_roles_for_user_and_domain(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_get_roles_for_groups_on_domain(self):
self.skipTest('Blocked by bug: 1390125')
def test_get_roles_for_groups_on_project(self):
self.skipTest('Blocked by bug: 1390125')
def test_list_domains_for_groups(self):
self.skipTest('N/A: LDAP does not support multiple domains')
def test_list_projects_for_groups(self):
self.skipTest('Blocked by bug: 1390125')
def test_domain_delete_hierarchy(self):
self.skipTest('Domains are read-only against LDAP')
def test_list_role_assignments_unfiltered(self):
new_domain = self._get_domain_fixture()
new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': new_domain['id']}
new_user = self.identity_api.create_user(new_user)
new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
new_project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': new_domain['id']}
self.resource_api.create_project(new_project['id'], new_project)
        # First check how many role grants already exist
existing_assignments = len(self.assignment_api.list_role_assignments())
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=new_project['id'],
role_id='other')
self.assignment_api.create_grant(group_id=new_group['id'],
project_id=new_project['id'],
role_id='admin')
        # Read back the list of assignments - check it has gone up by 2
after_assignments = len(self.assignment_api.list_role_assignments())
self.assertEqual(existing_assignments + 2, after_assignments)
def test_list_role_assignments_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
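        # use_dumb_member=True makes the driver pad otherwise-empty groups
        # with a placeholder member (CONF.ldap.dumb_member) so that schemas
        # requiring at least one member stay valid. _dn_to_id() extracts the
        # RDN value from that DN; the assertions below check that the
        # placeholder never leaks into API results.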
new_domain = self._get_domain_fixture()
new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': new_domain['id']}
new_user = self.identity_api.create_user(new_user)
new_project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': new_domain['id']}
self.resource_api.create_project(new_project['id'], new_project)
self.assignment_api.create_grant(user_id=new_user['id'],
project_id=new_project['id'],
role_id='other')
# Read back the list of assignments and ensure
# that the LDAP dumb member isn't listed.
assignment_ids = [a['user_id'] for a in
self.assignment_api.list_role_assignments()]
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertNotIn(dumb_id, assignment_ids)
def test_list_user_ids_for_project_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': test_backend.DEFAULT_DOMAIN_ID}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
user['id'])
user_ids = self.assignment_api.list_user_ids_for_project(
self.tenant_baz['id'])
self.assertIn(user['id'], user_ids)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertNotIn(dumb_id, user_ids)
def test_multi_group_grants_on_project_domain(self):
self.skipTest('Blocked by bug 1101287')
def test_list_group_members_missing_entry(self):
"""List group members with deleted user.
If a group has a deleted entry for a member, the non-deleted members
are returned.
"""
# Create a group
group = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group)['id']
# Create a couple of users and add them to the group.
user = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_1_id = self.identity_api.create_user(user)['id']
self.identity_api.add_user_to_group(user_1_id, group_id)
user = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_2_id = self.identity_api.create_user(user)['id']
self.identity_api.add_user_to_group(user_2_id, group_id)
# Delete user 2
# NOTE(blk-u): need to go directly to user interface to keep from
# updating the group.
unused, driver, entity_id = (
self.identity_api._get_domain_driver_and_entity_id(user_2_id))
driver.user.delete(entity_id)
# List group users and verify only user 1.
res = self.identity_api.list_users_in_group(group_id)
self.assertEqual(1, len(res), "Expected 1 entry (user_1)")
self.assertEqual(user_1_id, res[0]['id'], "Expected user 1 id")
def test_list_group_members_when_no_members(self):
# List group members when there is no member in the group.
# No exception should be raised.
group = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
# If this doesn't raise, then the test is successful.
self.identity_api.list_users_in_group(group['id'])
def test_list_group_members_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
# Create a group
group = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group)['id']
# Create a user
user = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_id = self.identity_api.create_user(user)['id']
# Add user to the group
self.identity_api.add_user_to_group(user_id, group_id)
user_ids = self.identity_api.list_users_in_group(group_id)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertNotIn(dumb_id, user_ids)
def test_list_domains(self):
domains = self.resource_api.list_domains()
self.assertEqual(
[resource.calc_default_domain()],
domains)
def test_list_domains_non_default_domain_id(self):
        # If the default_domain_id config option is changed, the ID of the
        # default domain returned by list_domains is the new
        # default_domain_id.
new_domain_id = uuid.uuid4().hex
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
domains = self.resource_api.list_domains()
self.assertEqual(new_domain_id, domains[0]['id'])
def test_authenticate_requires_simple_bind(self):
user = {
'name': 'NO_META',
'domain_id': test_backend.DEFAULT_DOMAIN_ID,
'password': 'no_meta2',
'enabled': True,
}
user = self.identity_api.create_user(user)
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
user['id'])
driver = self.identity_api._select_identity_driver(
user['domain_id'])
driver.user.LDAP_USER = None
driver.user.LDAP_PASSWORD = None
self.assertRaises(AssertionError,
self.identity_api.authenticate,
context={},
user_id=user['id'],
password=None)
    # (spzala) The group and domain crud tests below override the standard
    # ones in test_backend.py so that we can exclude the update name test,
    # since we do not yet support the update of either group or domain
    # names with LDAP. In the tests below, the update is demonstrated by
    # updating description. Refer to bug 1136403 for more detail.
def test_group_crud(self):
group = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictEqual(group_ref, group)
group['description'] = uuid.uuid4().hex
self.identity_api.update_group(group['id'], group)
group_ref = self.identity_api.get_group(group['id'])
self.assertDictEqual(group_ref, group)
self.identity_api.delete_group(group['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group,
group['id'])
@tests.skip_if_cache_disabled('identity')
def test_cache_layer_group_crud(self):
group = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
# cache the result
group_ref = self.identity_api.get_group(group['id'])
# delete the group bypassing identity api.
domain_id, driver, entity_id = (
self.identity_api._get_domain_driver_and_entity_id(group['id']))
driver.delete_group(entity_id)
self.assertEqual(group_ref,
self.identity_api.get_group(group['id']))
self.identity_api.get_group.invalidate(self.identity_api, group['id'])
self.assertRaises(exception.GroupNotFound,
self.identity_api.get_group, group['id'])
group = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
# cache the result
self.identity_api.get_group(group['id'])
group['description'] = uuid.uuid4().hex
group_ref = self.identity_api.update_group(group['id'], group)
self.assertDictContainsSubset(self.identity_api.get_group(group['id']),
group_ref)
def test_create_user_none_mapping(self):
        # When creating a user where an attribute maps to None, the entry is
        # created without that attribute and it doesn't fail with a
        # TypeError.
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_attribute_ignore = ['enabled', 'email',
'tenants', 'tenantId']
self.reload_backends(CONF.identity.default_domain_id)
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id,
'default_project_id': 'maps_to_none',
}
# If this doesn't raise, then the test is successful.
user = self.identity_api.create_user(user)
def test_create_user_with_boolean_string_names(self):
        # Ensure that any attribute that is equal to the string 'TRUE'
        # or 'FALSE' will not be converted to a boolean value; it
        # should be returned as-is.
        boolean_strings = ['TRUE', 'FALSE', 'true', 'false', 'True', 'False',
                           'TrUe', 'FaLse']
for name in boolean_strings:
user = {
'name': name,
'domain_id': CONF.identity.default_domain_id}
user_ref = self.identity_api.create_user(user)
user_info = self.identity_api.get_user(user_ref['id'])
self.assertEqual(name, user_info['name'])
            # Delete the user to ensure that the Keystone uniqueness
            # requirements combined with the case-insensitive nature of a
            # typical LDAP schema do not cause subsequent names in
            # boolean_strings to clash.
self.identity_api.delete_user(user_ref['id'])
def test_unignored_user_none_mapping(self):
        # Ensure that an attribute that maps to None and is not explicitly
        # ignored in configuration is implicitly ignored without triggering
        # an error.
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_attribute_ignore = ['enabled', 'email',
'tenants', 'tenantId']
self.reload_backends(CONF.identity.default_domain_id)
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id,
}
user_ref = self.identity_api.create_user(user)
# If this doesn't raise, then the test is successful.
self.identity_api.get_user(user_ref['id'])
def test_update_user_name(self):
"""A user's name cannot be changed through the LDAP driver."""
self.assertRaises(exception.Conflict,
super(BaseLDAPIdentity, self).test_update_user_name)
def test_arbitrary_attributes_are_returned_from_get_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_new_arbitrary_attributes_are_returned_from_update_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
self.skipTest("Using arbitrary attributes doesn't work under LDAP")
def test_cache_layer_domain_crud(self):
# TODO(morganfainberg): This also needs to be removed when full LDAP
# implementation is submitted. No need to duplicate the above test,
# just skip this time.
self.skipTest('Domains are read-only against LDAP')
def test_user_id_comma(self):
"""Even if the user has a , in their ID, groups can be listed."""
# Create a user with a , in their ID
# NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
# Since we want to fake up this special ID, we'll squirt this
# direct into the driver and bypass the manager layer.
user_id = u'Doe, John'
user = {
'id': user_id,
'name': self.getUniqueString(),
'password': self.getUniqueString(),
'domain_id': CONF.identity.default_domain_id,
}
user = self.identity_api.driver.create_user(user_id, user)
# Now we'll use the manager to discover it, which will create a
# Public ID for it.
ref_list = self.identity_api.list_users()
public_user_id = None
for ref in ref_list:
if ref['name'] == user['name']:
public_user_id = ref['id']
break
# Create a group
group_id = uuid.uuid4().hex
group = {
'id': group_id,
'name': self.getUniqueString(prefix='tuidc'),
'description': self.getUniqueString(),
'domain_id': CONF.identity.default_domain_id,
}
group = self.identity_api.driver.create_group(group_id, group)
# Now we'll use the manager to discover it, which will create a
# Public ID for it.
ref_list = self.identity_api.list_groups()
public_group_id = None
for ref in ref_list:
if ref['name'] == group['name']:
public_group_id = ref['id']
break
# Put the user in the group
self.identity_api.add_user_to_group(public_user_id, public_group_id)
# List groups for user.
ref_list = self.identity_api.list_groups_for_user(public_user_id)
group['id'] = public_group_id
self.assertThat(ref_list, matchers.Equals([group]))
def test_user_id_comma_grants(self):
"""Even if the user has a , in their ID, can get user and group grants.
"""
# Create a user with a , in their ID
# NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
# Since we want to fake up this special ID, we'll squirt this
# direct into the driver and bypass the manager layer
user_id = u'Doe, John'
user = {
'id': user_id,
'name': self.getUniqueString(),
'password': self.getUniqueString(),
'domain_id': CONF.identity.default_domain_id,
}
self.identity_api.driver.create_user(user_id, user)
# Now we'll use the manager to discover it, which will create a
# Public ID for it.
ref_list = self.identity_api.list_users()
public_user_id = None
for ref in ref_list:
if ref['name'] == user['name']:
public_user_id = ref['id']
break
# Grant the user a role on a project.
role_id = 'member'
project_id = self.tenant_baz['id']
self.assignment_api.create_grant(role_id, user_id=public_user_id,
project_id=project_id)
role_ref = self.assignment_api.get_grant(role_id,
user_id=public_user_id,
project_id=project_id)
self.assertEqual(role_id, role_ref['id'])
def test_user_enabled_ignored_disable_error(self):
# When the server is configured so that the enabled attribute is
# ignored for users, users cannot be disabled.
self.config_fixture.config(group='ldap',
user_attribute_ignore=['enabled'])
# Need to re-load backends for the config change to take effect.
self.load_backends()
# Attempt to disable the user.
self.assertRaises(exception.ForbiddenAction,
self.identity_api.update_user, self.user_foo['id'],
{'enabled': False})
user_info = self.identity_api.get_user(self.user_foo['id'])
# If 'enabled' is ignored then 'enabled' isn't returned as part of the
# ref.
self.assertNotIn('enabled', user_info)
def test_group_enabled_ignored_disable_error(self):
# When the server is configured so that the enabled attribute is
# ignored for groups, groups cannot be disabled.
self.config_fixture.config(group='ldap',
group_attribute_ignore=['enabled'])
# Need to re-load backends for the config change to take effect.
self.load_backends()
# There's no group fixture so create a group.
new_domain = self._get_domain_fixture()
new_group = {'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
# Attempt to disable the group.
self.assertRaises(exception.ForbiddenAction,
self.identity_api.update_group, new_group['id'],
{'enabled': False})
group_info = self.identity_api.get_group(new_group['id'])
# If 'enabled' is ignored then 'enabled' isn't returned as part of the
# ref.
self.assertNotIn('enabled', group_info)
def test_project_enabled_ignored_disable_error(self):
# When the server is configured so that the enabled attribute is
# ignored for projects, projects cannot be disabled.
self.config_fixture.config(group='ldap',
project_attribute_ignore=['enabled'])
# Need to re-load backends for the config change to take effect.
self.load_backends()
# Attempt to disable the project.
self.assertRaises(exception.ForbiddenAction,
self.resource_api.update_project,
self.tenant_baz['id'], {'enabled': False})
project_info = self.resource_api.get_project(self.tenant_baz['id'])
# Unlike other entities, if 'enabled' is ignored then 'enabled' is
# returned as part of the ref.
self.assertIs(True, project_info['enabled'])
class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
def setUp(self):
# NOTE(dstanek): The database must be setup prior to calling the
# parent's setUp. The parent's setUp uses services (like
# credentials) that require a database.
self.useFixture(database.Database())
super(LDAPIdentity, self).setUp()
def load_fixtures(self, fixtures):
# Override super impl since need to create group container.
create_group_container(self.identity_api)
super(LDAPIdentity, self).load_fixtures(fixtures)
def test_configurable_allowed_project_actions(self):
tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True}
self.resource_api.create_project(u'fäké1', tenant)
tenant_ref = self.resource_api.get_project(u'fäké1')
self.assertEqual(u'fäké1', tenant_ref['id'])
tenant['enabled'] = False
self.resource_api.update_project(u'fäké1', tenant)
self.resource_api.delete_project(u'fäké1')
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
u'fäké1')
def test_configurable_subtree_delete(self):
self.config_fixture.config(group='ldap', allow_subtree_delete=True)
self.load_backends()
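        # With allow_subtree_delete=True, deleting a project issues an LDAP
        # subtree delete, so the role-assignment entries stored beneath the
        # project are removed along with it. Re-creating the project should
        # therefore start with an empty role list, as asserted below.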
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id}
self.resource_api.create_project(project1['id'], project1)
role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.role_api.create_role(role1['id'], role1)
user1 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
user1 = self.identity_api.create_user(user1)
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role1['id'])
self.resource_api.delete_project(project1['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project1['id'])
self.resource_api.create_project(project1['id'], project1)
        roles = self.assignment_api.get_roles_for_user_and_project(
            user1['id'],
            project1['id'])
        self.assertEqual(0, len(roles))
def test_configurable_forbidden_project_actions(self):
self.config_fixture.config(
group='ldap', project_allow_create=False,
project_allow_update=False, project_allow_delete=False)
self.load_backends()
tenant = {'id': u'fäké1', 'name': u'fäké1'}
self.assertRaises(exception.ForbiddenAction,
self.resource_api.create_project,
u'fäké1',
tenant)
self.tenant_bar['enabled'] = False
self.assertRaises(exception.ForbiddenAction,
self.resource_api.update_project,
self.tenant_bar['id'],
self.tenant_bar)
self.assertRaises(exception.ForbiddenAction,
self.resource_api.delete_project,
self.tenant_bar['id'])
def test_project_filter(self):
tenant_ref = self.resource_api.get_project(self.tenant_bar['id'])
self.assertDictEqual(tenant_ref, self.tenant_bar)
self.config_fixture.config(group='ldap',
project_filter='(CN=DOES_NOT_MATCH)')
self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.project_filter will not be
        # dynamically changed at runtime. This invalidate is a work-around
        # for the assumption that it is safe to change config values in
        # tests that could affect what the drivers return up to the
        # manager. That assumption breaks down with aggressive (on-create)
        # cache population.
self.role_api.get_role.invalidate(self.role_api,
self.role_member['id'])
self.role_api.get_role(self.role_member['id'])
self.resource_api.get_project.invalidate(self.resource_api,
self.tenant_bar['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
self.tenant_bar['id'])
def test_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
dumb_id)
def test_project_attribute_mapping(self):
self.config_fixture.config(
group='ldap', project_name_attribute='ou',
project_desc_attribute='description',
project_enabled_attribute='enabled')
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
        # CONF.ldap.project_desc_attribute, and
        # CONF.ldap.project_enabled_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around
        # for the assumption that it is safe to change config values in
        # tests that could affect what the drivers return up to the
        # manager. That assumption breaks down with aggressive (on-create)
        # cache population.
self.resource_api.get_project.invalidate(self.resource_api,
self.tenant_baz['id'])
tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
self.assertEqual(self.tenant_baz['name'], tenant_ref['name'])
self.assertEqual(
self.tenant_baz['description'],
tenant_ref['description'])
self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
self.config_fixture.config(group='ldap',
project_name_attribute='description',
project_desc_attribute='ou')
self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
        # CONF.ldap.project_desc_attribute, and
        # CONF.ldap.project_enabled_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around
        # for the assumption that it is safe to change config values in
        # tests that could affect what the drivers return up to the
        # manager. That assumption breaks down with aggressive (on-create)
        # cache population.
self.resource_api.get_project.invalidate(self.resource_api,
self.tenant_baz['id'])
tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
self.assertEqual(self.tenant_baz['description'], tenant_ref['name'])
self.assertEqual(self.tenant_baz['name'], tenant_ref['description'])
self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
def test_project_attribute_ignore(self):
self.config_fixture.config(
group='ldap',
project_attribute_ignore=['name', 'description', 'enabled'])
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.project_attribute_ignore will not
        # be dynamically changed at runtime. This invalidate is a
        # work-around for the assumption that it is safe to change config
        # values in tests that could affect what the drivers return up to
        # the manager. That assumption breaks down with aggressive
        # (on-create) cache population.
self.resource_api.get_project.invalidate(self.resource_api,
self.tenant_baz['id'])
tenant_ref = self.resource_api.get_project(self.tenant_baz['id'])
self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
self.assertNotIn('name', tenant_ref)
self.assertNotIn('description', tenant_ref)
self.assertNotIn('enabled', tenant_ref)
def test_user_enable_attribute_mask(self):
self.config_fixture.config(group='ldap', user_enabled_mask=2,
user_enabled_default='512')
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
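        # With user_enabled_mask=2 and user_enabled_default='512', the
        # driver toggles only bit 0b10 of the stored integer value, e.g.:
        #   enabled:   512            (bit 2 clear)
        #   disabled:  512 | 2 == 514
        #   re-enable: 514 & ~2 == 512
        # The raw stored values are exposed to this test through
        # get_user_enabled_vals().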
user = {'name': u'fäké1', 'enabled': True,
'domain_id': CONF.identity.default_domain_id}
user_ref = self.identity_api.create_user(user)
# Use assertIs rather than assertTrue because assertIs will assert the
# value is a Boolean as expected.
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([512], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
user['enabled'] = False
user_ref = self.identity_api.update_user(user_ref['id'], user)
self.assertIs(user_ref['enabled'], False)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([514], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(user_ref['enabled'], False)
self.assertNotIn('enabled_nomask', user_ref)
user['enabled'] = True
user_ref = self.identity_api.update_user(user_ref['id'], user)
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([512], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(user_ref['enabled'], True)
self.assertNotIn('enabled_nomask', user_ref)
def test_user_enabled_invert(self):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
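        # With user_enabled_invert=True the stored LDAP attribute is the
        # logical negation of the API-level 'enabled' flag: an enabled user
        # is stored as False and a disabled user as True, as the
        # get_user_enabled_vals() checks below demonstrate.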
user1 = {'name': u'fäké1', 'enabled': True,
'domain_id': CONF.identity.default_domain_id}
user2 = {'name': u'fäké2', 'enabled': False,
'domain_id': CONF.identity.default_domain_id}
user3 = {'name': u'fäké3',
'domain_id': CONF.identity.default_domain_id}
# Ensure that the LDAP attribute is False for a newly created
# enabled user.
user_ref = self.identity_api.create_user(user1)
self.assertIs(True, user_ref['enabled'])
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([False], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(True, user_ref['enabled'])
# Ensure that the LDAP attribute is True for a disabled user.
user1['enabled'] = False
user_ref = self.identity_api.update_user(user_ref['id'], user1)
self.assertIs(False, user_ref['enabled'])
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([True], enabled_vals)
# Enable the user and ensure that the LDAP attribute is True again.
user1['enabled'] = True
user_ref = self.identity_api.update_user(user_ref['id'], user1)
self.assertIs(True, user_ref['enabled'])
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([False], enabled_vals)
# Ensure that the LDAP attribute is True for a newly created
# disabled user.
user_ref = self.identity_api.create_user(user2)
self.assertIs(False, user_ref['enabled'])
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([True], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(False, user_ref['enabled'])
# Ensure that the LDAP attribute is inverted for a newly created
# user when the user_enabled_default setting is used.
user_ref = self.identity_api.create_user(user3)
self.assertIs(True, user_ref['enabled'])
enabled_vals = self.get_user_enabled_vals(user_ref)
self.assertEqual([False], enabled_vals)
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(True, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_invert_no_enabled_value(self, mock_ldap_get):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
# Mock the search results to return an entry with
# no enabled value.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'email': [uuid.uuid4().hex],
'cn': ['junk']
}
)
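        # _ldap_get returns a (dn, attributes) tuple, which is the shape
        # mocked above; note the entry carries no enabled attribute at all.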
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('junk')
# Ensure that the model enabled attribute is inverted
# from the resource default.
self.assertIs(not CONF.ldap.user_enabled_default, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_invert_default_str_value(self, mock_ldap_get):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default='False')
# Mock the search results to return an entry with
# no enabled value.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'email': [uuid.uuid4().hex],
'cn': ['junk']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('junk')
# Ensure that the model enabled attribute is inverted
# from the resource default.
self.assertIs(True, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_attribute_handles_expired(self, mock_ldap_get):
        # If 'passwordisexpired' is used as the enabled attribute and
        # inverted, then a user with an expired password (unauthorized)
        # should not be enabled.
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_attribute='passwordisexpired')
mock_ldap_get.return_value = (
u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
{
'uid': [123456789],
'mail': ['shaun@acme.com'],
'passwordisexpired': ['TRUE'],
'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('123456789')
self.assertIs(False, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get):
        # If 'passwordisexpired' is used as the enabled attribute, inverted,
        # and the result is utf8 encoded, then an authorized user should
        # be enabled.
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_attribute='passwordisexpired')
mock_ldap_get.return_value = (
u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
{
'uid': [123456789],
'mail': [u'shaun@acme.com'],
'passwordisexpired': [u'false'],
'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('123456789')
self.assertIs(True, user_ref['enabled'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
def test_user_api_get_connection_no_user_password(self, mocked_method):
"""Don't bind in case the user and password are blank."""
# Ensure the username/password are in-fact blank
self.config_fixture.config(group='ldap', user=None, password=None)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
self.assertFalse(mocked_method.called,
msg='`simple_bind_s` method was unexpectedly called')
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_chase_referrals_off(self, mocked_fakeldap):
self.config_fixture.config(
group='ldap',
url='fake://memory',
chase_referrals=False)
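        # chase_referrals controls whether the LDAP client automatically
        # follows referrals returned by the server; the value should be
        # passed through to the connection handler unchanged, which is what
        # the call_args check below verifies.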
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# chase_referrals. Check to make sure the value of chase_referrals
# is as expected.
self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_chase_referrals_on(self, mocked_fakeldap):
self.config_fixture.config(
group='ldap',
url='fake://memory',
chase_referrals=True)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# chase_referrals. Check to make sure the value of chase_referrals
# is as expected.
self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_debug_level_set(self, mocked_fakeldap):
level = 12345
self.config_fixture.config(
group='ldap',
url='fake://memory',
debug_level=level)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# debug_level. Check to make sure the value of debug_level
# is as expected.
self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level'])
def test_wrong_ldap_scope(self):
self.config_fixture.config(group='ldap', query_scope=uuid.uuid4().hex)
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope,
identity.backends.ldap.Identity)
def test_wrong_alias_dereferencing(self):
self.config_fixture.config(group='ldap',
alias_dereferencing=uuid.uuid4().hex)
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing,
identity.backends.ldap.Identity)
def test_is_dumb_member(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'cn=dumb,dc=nonexistent'
self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_upper_case_keys(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'CN=dumb,DC=nonexistent'
self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_with_false_use_dumb_member(self):
self.config_fixture.config(group='ldap',
use_dumb_member=False)
self.load_backends()
dn = 'cn=dumb,dc=nonexistent'
self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_not_dumb(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'ou=some,dc=example.com'
self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
def test_user_extra_attribute_mapping(self):
self.config_fixture.config(
group='ldap',
user_additional_attribute_mapping=['description:name'])
self.load_backends()
user = {
'name': 'EXTRA_ATTRIBUTES',
'password': 'extra',
'domain_id': CONF.identity.default_domain_id
}
user = self.identity_api.create_user(user)
dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
self.assertThat([user['name']], matchers.Equals(attrs['description']))
def test_user_extra_attribute_mapping_description_is_returned(self):
# Given a mapping like description:description, the description is
# returned.
self.config_fixture.config(
group='ldap',
user_additional_attribute_mapping=['description:description'])
self.load_backends()
description = uuid.uuid4().hex
user = {
'name': uuid.uuid4().hex,
'description': description,
'password': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id
}
user = self.identity_api.create_user(user)
res = self.identity_api.driver.user.get_all()
new_user = [u for u in res if u['id'] == user['id']][0]
self.assertThat(new_user['description'], matchers.Equals(description))
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_mixed_case_attribute(self, mock_ldap_get):
# Mock the search results to return attribute names
# with unexpected case.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sN': [uuid.uuid4().hex],
'MaIl': [uuid.uuid4().hex],
'cn': ['junk']
}
)
user = self.identity_api.get_user('junk')
self.assertEqual(mock_ldap_get.return_value[1]['sN'][0],
user['name'])
self.assertEqual(mock_ldap_get.return_value[1]['MaIl'][0],
user['email'])
def test_parse_extra_attribute_mapping(self):
option_list = ['description:name', 'gecos:password',
'fake:invalid', 'invalid1', 'invalid2:',
'description:name:something']
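        # Expected parsing rules, as exercised by expected_dict below:
        # well-formed '<ldap_attr>:<keystone_attr>' pairs are kept (even if
        # the target is unknown, e.g. 'fake:invalid'), entries without a
        # colon are dropped, a trailing colon maps to the empty string, and
        # entries with more than one colon are discarded.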
mapping = self.identity_api.driver.user._parse_extra_attrs(option_list)
expected_dict = {'description': 'name', 'gecos': 'password',
'fake': 'invalid', 'invalid2': ''}
self.assertDictEqual(expected_dict, mapping)
# TODO(henry-nash): These need to be removed when the full LDAP implementation
# is submitted - see Bugs 1092187, 1101287, 1101276, 1101289
def test_domain_crud(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'enabled': True, 'description': uuid.uuid4().hex}
self.assertRaises(exception.Forbidden,
self.resource_api.create_domain,
domain['id'],
domain)
self.assertRaises(exception.Conflict,
self.resource_api.create_domain,
CONF.identity.default_domain_id,
domain)
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain['id'])
domain['description'] = uuid.uuid4().hex
self.assertRaises(exception.DomainNotFound,
self.resource_api.update_domain,
domain['id'],
domain)
self.assertRaises(exception.Forbidden,
self.resource_api.update_domain,
CONF.identity.default_domain_id,
domain)
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain['id'])
self.assertRaises(exception.DomainNotFound,
self.resource_api.delete_domain,
domain['id'])
self.assertRaises(exception.Forbidden,
self.resource_api.delete_domain,
CONF.identity.default_domain_id)
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain['id'])
@tests.skip_if_no_multiple_domains_support
def test_create_domain_case_sensitivity(self):
# domains are read-only, so case sensitivity isn't an issue
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assertRaises(exception.Forbidden,
self.resource_api.create_domain,
ref['id'],
ref)
def test_cache_layer_domain_crud(self):
# TODO(morganfainberg): This also needs to be removed when full LDAP
# implementation is submitted. No need to duplicate the above test,
# just skip this time.
self.skipTest('Domains are read-only against LDAP')
def test_domain_rename_invalidates_get_domain_by_name_cache(self):
parent = super(LDAPIdentity, self)
self.assertRaises(
exception.Forbidden,
parent.test_domain_rename_invalidates_get_domain_by_name_cache)
def test_project_rename_invalidates_get_project_by_name_cache(self):
parent = super(LDAPIdentity, self)
self.assertRaises(
exception.Forbidden,
parent.test_project_rename_invalidates_get_project_by_name_cache)
def test_project_crud(self):
        # NOTE(topol): LDAP implementation does not currently support the
        #              updating of a project name, so this method override
        #              provides a different update test.
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex,
'enabled': True,
'parent_id': None}
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
project['description'] = uuid.uuid4().hex
self.resource_api.update_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project['id'])
@tests.skip_if_cache_disabled('assignment')
def test_cache_layer_project_crud(self):
# NOTE(morganfainberg): LDAP implementation does not currently support
# updating project names. This method override provides a different
# update test.
project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex}
project_id = project['id']
# Create a project
self.resource_api.create_project(project_id, project)
self.resource_api.get_project(project_id)
updated_project = copy.deepcopy(project)
updated_project['description'] = uuid.uuid4().hex
# Update project, bypassing resource manager
self.resource_api.driver.update_project(project_id,
updated_project)
# Verify get_project still returns the original project_ref
self.assertDictContainsSubset(
project, self.resource_api.get_project(project_id))
# Invalidate cache
self.resource_api.get_project.invalidate(self.resource_api,
project_id)
# Verify get_project now returns the new project
self.assertDictContainsSubset(
updated_project,
self.resource_api.get_project(project_id))
# Update project using the resource_api manager back to original
self.resource_api.update_project(project['id'], project)
# Verify get_project returns the original project_ref
self.assertDictContainsSubset(
project, self.resource_api.get_project(project_id))
# Delete project bypassing resource_api
self.resource_api.driver.delete_project(project_id)
# Verify get_project still returns the project_ref
self.assertDictContainsSubset(
project, self.resource_api.get_project(project_id))
# Invalidate cache
self.resource_api.get_project.invalidate(self.resource_api,
project_id)
# Verify ProjectNotFound now raised
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project_id)
# recreate project
self.resource_api.create_project(project_id, project)
self.resource_api.get_project(project_id)
# delete project
self.resource_api.delete_project(project_id)
# Verify ProjectNotFound is raised
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project_id)
def _assert_create_hierarchy_not_allowed(self):
domain = self._get_domain_fixture()
project1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
'domain_id': domain['id'],
'enabled': True,
'parent_id': None}
self.resource_api.create_project(project1['id'], project1)
        # Try creating project2 under project1. LDAP will not allow
        # the creation of a project with parent_id set.
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
'domain_id': domain['id'],
'enabled': True,
'parent_id': project1['id']}
self.assertRaises(exception.InvalidParentProject,
self.resource_api.create_project,
project2['id'],
project2)
# Now, we'll create project 2 with no parent
project2['parent_id'] = None
self.resource_api.create_project(project2['id'], project2)
# Returning projects to be used across the tests
return [project1, project2]
def test_check_leaf_projects(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
self.assertTrue(self.resource_api.is_leaf_project(project))
def test_list_projects_in_subtree(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
subtree_list = self.resource_api.list_projects_in_subtree(
project)
self.assertEqual(0, len(subtree_list))
def test_list_project_parents(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
parents_list = self.resource_api.list_project_parents(project)
self.assertEqual(0, len(parents_list))
def test_hierarchical_projects_crud(self):
self._assert_create_hierarchy_not_allowed()
def test_create_project_under_disabled_one(self):
self._assert_create_hierarchy_not_allowed()
def test_create_project_with_invalid_parent(self):
self._assert_create_hierarchy_not_allowed()
def test_create_leaf_project_with_invalid_domain(self):
self._assert_create_hierarchy_not_allowed()
def test_update_project_parent(self):
self._assert_create_hierarchy_not_allowed()
def test_enable_project_with_disabled_parent(self):
self._assert_create_hierarchy_not_allowed()
def test_disable_hierarchical_leaf_project(self):
self._assert_create_hierarchy_not_allowed()
def test_disable_hierarchical_not_leaf_project(self):
self._assert_create_hierarchy_not_allowed()
def test_delete_hierarchical_leaf_project(self):
self._assert_create_hierarchy_not_allowed()
def test_delete_hierarchical_not_leaf_project(self):
self._assert_create_hierarchy_not_allowed()
def test_check_hierarchy_depth(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
depth = self._get_hierarchy_depth(project['id'])
self.assertEqual(1, depth)
def test_multi_role_grant_by_user_group_on_project_domain(self):
# This is a partial implementation of the standard test that
# is defined in test_backend.py. It omits both domain and
        # group grants, since neither of these is yet supported by
        # the LDAP backend.
role_list = []
for _ in range(2):
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.role_api.create_role(role['id'], role)
role_list.append(role)
user1 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
user1 = self.identity_api.create_user(user1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id}
self.resource_api.create_project(project1['id'], project1)
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role_list[0]['id'])
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role_list[1]['id'])
        # Although list_grants is not yet supported, we can test the
        # alternate way of getting back lists of grants, where user
        # and group roles are combined. Only directly assigned user
        # roles are available, since group grants are not yet supported.
combined_list = self.assignment_api.get_roles_for_user_and_project(
user1['id'],
project1['id'])
self.assertEqual(2, len(combined_list))
self.assertIn(role_list[0]['id'], combined_list)
self.assertIn(role_list[1]['id'], combined_list)
# Finally, although domain roles are not implemented, check we can
        # issue the combined get roles call with benign results, since this is
        # used in token generation.
combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
user1['id'], CONF.identity.default_domain_id)
self.assertEqual(0, len(combined_role_list))
def test_list_projects_for_alternate_domain(self):
self.skipTest(
'N/A: LDAP does not support multiple domains')
def test_get_default_domain_by_name(self):
domain = self._get_domain_fixture()
domain_ref = self.resource_api.get_domain_by_name(domain['name'])
self.assertEqual(domain_ref, domain)
def test_base_ldap_connection_deref_option(self):
def get_conn(deref_name):
self.config_fixture.config(group='ldap',
alias_dereferencing=deref_name)
base_ldap = common_ldap.BaseLdap(CONF)
return base_ldap.get_connection()
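        # Each alias_dereferencing name should map onto the corresponding
        # python-ldap constant ('always' -> ldap.DEREF_ALWAYS, etc.), while
        # 'default' inherits whatever the process-wide ldap.OPT_DEREF is.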
conn = get_conn('default')
self.assertEqual(ldap.get_option(ldap.OPT_DEREF),
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('always')
self.assertEqual(ldap.DEREF_ALWAYS,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('finding')
self.assertEqual(ldap.DEREF_FINDING,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('never')
self.assertEqual(ldap.DEREF_NEVER,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('searching')
self.assertEqual(ldap.DEREF_SEARCHING,
conn.get_option(ldap.OPT_DEREF))
def test_list_users_no_dn(self):
users = self.identity_api.list_users()
self.assertEqual(len(default_fixtures.USERS), len(users))
user_ids = set(user['id'] for user in users)
expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
for user in default_fixtures.USERS)
for user_ref in users:
self.assertNotIn('dn', user_ref)
self.assertEqual(expected_user_ids, user_ids)
def test_list_groups_no_dn(self):
# Create some test groups.
domain = self._get_domain_fixture()
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
# Fetch the test groups and ensure that they don't contain a dn.
groups = self.identity_api.list_groups()
self.assertEqual(numgroups, len(groups))
group_ids = set(group['id'] for group in groups)
for group_ref in groups:
self.assertNotIn('dn', group_ref)
self.assertEqual(set(expected_group_ids), group_ids)
def test_list_groups_for_user_no_dn(self):
# Create a test user.
user = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex, 'enabled': True}
user = self.identity_api.create_user(user)
# Create some test groups and add the test user as a member.
domain = self._get_domain_fixture()
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
self.identity_api.add_user_to_group(user['id'], group['id'])
# Fetch the groups for the test user
# and ensure they don't contain a dn.
groups = self.identity_api.list_groups_for_user(user['id'])
self.assertEqual(numgroups, len(groups))
group_ids = set(group['id'] for group in groups)
for group_ref in groups:
self.assertNotIn('dn', group_ref)
self.assertEqual(set(expected_group_ids), group_ids)
def test_user_id_attribute_in_create(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'mail'
self.reload_backends(CONF.identity.default_domain_id)
user = {'name': u'fäké1',
'password': u'fäképass1',
'domain_id': CONF.identity.default_domain_id}
user = self.identity_api.create_user(user)
user_ref = self.identity_api.get_user(user['id'])
        # The 'email' attribute should have been created because it is also
        # being used as the user_id.
self.assertEqual(user_ref['id'], user_ref['email'])
def test_user_id_attribute_map(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'mail'
self.reload_backends(CONF.identity.default_domain_id)
user_ref = self.identity_api.get_user(self.user_foo['email'])
        # The user_id_attribute mapping should be honored, which means
        # user_ref['id'] should contain the email attribute.
self.assertEqual(self.user_foo['email'], user_ref['id'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'mail'
self.reload_backends(CONF.identity.default_domain_id)
# make 'email' multivalued so we can test the error condition
email1 = uuid.uuid4().hex
email2 = uuid.uuid4().hex
mock_ldap_get.return_value = (
'cn=nobodycares,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'mail': [email1, email2],
'cn': 'nobodycares'
}
)
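        # The driver cannot deterministically pick one of the two 'mail'
        # values as the ID, so it falls back to the RDN value of the DN:
        # 'cn=nobodycares,dc=example,dc=com' -> 'nobodycares'.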
user_ref = self.identity_api.get_user(email1)
# make sure we get the ID from DN (old behavior) if the ID attribute
# has multiple values
self.assertEqual('nobodycares', user_ref['id'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_id_attribute_not_found(self, mock_ldap_get):
mock_ldap_get.return_value = (
'cn=nobodycares,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
}
)
user_api = identity.backends.ldap.UserApi(CONF)
self.assertRaises(exception.NotFound,
user_api.get,
'nobodycares')
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_id_not_in_dn(self, mock_ldap_get):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'uid'
conf.ldap.user_name_attribute = 'cn'
self.reload_backends(CONF.identity.default_domain_id)
mock_ldap_get.return_value = (
'foo=bar,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'foo': ['bar'],
'cn': ['junk'],
'uid': ['crap']
}
)
user_ref = self.identity_api.get_user('crap')
self.assertEqual('crap', user_ref['id'])
self.assertEqual('junk', user_ref['name'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_name_in_dn(self, mock_ldap_get):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'sAMAccountName'
conf.ldap.user_name_attribute = 'cn'
self.reload_backends(CONF.identity.default_domain_id)
mock_ldap_get.return_value = (
'cn=Foo Bar,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'cn': ['Foo Bar'],
'SAMAccountName': ['crap']
}
)
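        # Note the mocked entry returns 'SAMAccountName' while the config
        # uses 'sAMAccountName'; LDAP attribute names are case-insensitive,
        # so the driver is expected to match them regardless of case.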
user_ref = self.identity_api.get_user('crap')
self.assertEqual('crap', user_ref['id'])
self.assertEqual('Foo Bar', user_ref['name'])
class LDAPIdentityEnabledEmulation(LDAPIdentity):
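    # With enabled emulation, no enabled attribute is stored on the entry
    # itself; instead, membership in a dedicated group (enabled via
    # user_enabled_emulation / project_enabled_emulation in
    # config_overrides below) marks a user or project as enabled. Several
    # inherited tests are overridden or skipped accordingly.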
def setUp(self):
super(LDAPIdentityEnabledEmulation, self).setUp()
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
self.user_two, self.user_badguy]:
obj.setdefault('enabled', True)
def load_fixtures(self, fixtures):
# Override super impl since need to create group container.
create_group_container(self.identity_api)
super(LDAPIdentity, self).load_fixtures(fixtures)
def config_files(self):
config_files = super(LDAPIdentityEnabledEmulation, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
return config_files
def config_overrides(self):
super(LDAPIdentityEnabledEmulation, self).config_overrides()
self.config_fixture.config(group='ldap',
user_enabled_emulation=True,
project_enabled_emulation=True)
def test_project_crud(self):
# NOTE(topol): LDAPIdentityEnabledEmulation will create an
# enabled key in the project dictionary so this
# method override handles this side-effect
project = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex,
'parent_id': None}
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
# self.resource_api.create_project adds an enabled
# key with a value of True when LDAPIdentityEnabledEmulation
# is used so we now add this expected key to the project dictionary
project['enabled'] = True
self.assertDictEqual(project_ref, project)
project['description'] = uuid.uuid4().hex
self.resource_api.update_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.resource_api.get_project,
project['id'])
def test_user_crud(self):
user_dict = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex}
user = self.identity_api.create_user(user_dict)
user_dict['enabled'] = True
user_ref = self.identity_api.get_user(user['id'])
del user_dict['password']
user_ref_dict = {x: user_ref[x] for x in user_ref}
self.assertDictContainsSubset(user_dict, user_ref_dict)
user_dict['password'] = uuid.uuid4().hex
self.identity_api.update_user(user['id'], user)
user_ref = self.identity_api.get_user(user['id'])
del user_dict['password']
user_ref_dict = {x: user_ref[x] for x in user_ref}
self.assertDictContainsSubset(user_dict, user_ref_dict)
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
def test_user_auth_emulated(self):
self.config_fixture.config(group='ldap',
user_enabled_emulation_dn='cn=test,dc=test')
self.reload_backends(CONF.identity.default_domain_id)
self.identity_api.authenticate(
context={},
user_id=self.user_foo['id'],
password=self.user_foo['password'])
def test_user_enable_attribute_mask(self):
self.skipTest(
"Enabled emulation conflicts with enabled mask")
def test_user_enabled_invert(self):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
user1 = {'name': u'fäké1', 'enabled': True,
'domain_id': CONF.identity.default_domain_id}
user2 = {'name': u'fäké2', 'enabled': False,
'domain_id': CONF.identity.default_domain_id}
user3 = {'name': u'fäké3',
'domain_id': CONF.identity.default_domain_id}
# Ensure that the enabled LDAP attribute is not set for a
# newly created enabled user.
user_ref = self.identity_api.create_user(user1)
self.assertIs(True, user_ref['enabled'])
self.assertIsNone(self.get_user_enabled_vals(user_ref))
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(True, user_ref['enabled'])
# Ensure that an enabled LDAP attribute is not set for a disabled user.
user1['enabled'] = False
user_ref = self.identity_api.update_user(user_ref['id'], user1)
self.assertIs(False, user_ref['enabled'])
self.assertIsNone(self.get_user_enabled_vals(user_ref))
# Enable the user and ensure that the LDAP enabled
# attribute is not set.
user1['enabled'] = True
user_ref = self.identity_api.update_user(user_ref['id'], user1)
self.assertIs(True, user_ref['enabled'])
self.assertIsNone(self.get_user_enabled_vals(user_ref))
# Ensure that the LDAP enabled attribute is not set for a
# newly created disabled user.
user_ref = self.identity_api.create_user(user2)
self.assertIs(False, user_ref['enabled'])
self.assertIsNone(self.get_user_enabled_vals(user_ref))
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(False, user_ref['enabled'])
# Ensure that the LDAP enabled attribute is not set for a newly created
# user when the user_enabled_default setting is used.
user_ref = self.identity_api.create_user(user3)
self.assertIs(True, user_ref['enabled'])
self.assertIsNone(self.get_user_enabled_vals(user_ref))
user_ref = self.identity_api.get_user(user_ref['id'])
self.assertIs(True, user_ref['enabled'])
def test_user_enabled_invert_no_enabled_value(self):
self.skipTest(
"N/A: Covered by test_user_enabled_invert")
def test_user_enabled_invert_default_str_value(self):
self.skipTest(
"N/A: Covered by test_user_enabled_invert")
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get):
# Since user_enabled_emulation is enabled in this test, this test will
# fail since it's using user_enabled_invert.
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_attribute='passwordisexpired')
mock_ldap_get.return_value = (
u'uid=123456789,c=us,ou=our_ldap,o=acme.com',
{
'uid': [123456789],
'mail': [u'shaun@acme.com'],
'passwordisexpired': [u'false'],
'cn': [u'uid=123456789,c=us,ou=our_ldap,o=acme.com']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('123456789')
self.assertIs(False, user_ref['enabled'])
class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
tests.TestCase):
def config_files(self):
config_files = super(LdapIdentitySqlAssignment, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf'))
return config_files
def setUp(self):
self.useFixture(database.Database())
super(LdapIdentitySqlAssignment, self).setUp()
self.clear_database()
self.load_backends()
cache.configure_cache_region(cache.REGION)
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
def config_overrides(self):
super(LdapIdentitySqlAssignment, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
self.config_fixture.config(
group='resource',
driver='keystone.resource.backends.sql.Resource')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
def test_domain_crud(self):
pass
def test_list_domains(self):
domains = self.resource_api.list_domains()
self.assertEqual([resource.calc_default_domain()], domains)
def test_list_domains_non_default_domain_id(self):
        # If we change the default_domain_id, the ID of the default domain
# returned by list_domains doesn't change because the SQL identity
# backend reads it from the database, which doesn't get updated by
# config change.
orig_default_domain_id = CONF.identity.default_domain_id
new_domain_id = uuid.uuid4().hex
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
domains = self.resource_api.list_domains()
self.assertEqual(orig_default_domain_id, domains[0]['id'])
def test_create_domain(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'enabled': True}
self.assertRaises(exception.Forbidden,
self.resource_api.create_domain,
domain['id'],
domain)
def test_get_and_remove_role_grant_by_group_and_domain(self):
# TODO(henry-nash): We should really rewrite the tests in test_backend
# to be more flexible as to where the domains are sourced from, so
# that we would not need to override such tests here. This is raised
# as bug 1373865.
new_domain = self._get_domain_fixture()
new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
'enabled': True, 'domain_id': new_domain['id']}
new_user = self.identity_api.create_user(new_user)
self.identity_api.add_user_to_group(new_user['id'],
new_group['id'])
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
domain_id=new_domain['id'])
self.assertEqual(0, len(roles_ref))
self.assignment_api.create_grant(group_id=new_group['id'],
domain_id=new_domain['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
domain_id=new_domain['id'])
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(group_id=new_group['id'],
domain_id=new_domain['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
domain_id=new_domain['id'])
self.assertEqual(0, len(roles_ref))
self.assertRaises(exception.NotFound,
self.assignment_api.delete_grant,
group_id=new_group['id'],
domain_id=new_domain['id'],
role_id='member')
def test_project_enabled_ignored_disable_error(self):
# Override
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
"""Class to test mapping of default LDAP backend.
The default configuration is not to enable mapping when using a single
backend LDAP driver. However, a cloud provider might want to enable
the mapping, hence hiding the LDAP IDs from any clients of keystone.
Setting backward_compatible_ids to False will enable this mapping.
"""
def config_overrides(self):
super(LdapIdentitySqlAssignmentWithMapping, self).config_overrides()
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
def test_dynamic_mapping_build(self):
"""Test to ensure entities not create via controller are mapped.
Many LDAP backends will, essentially, by Read Only. In these cases
the mapping is not built by creating objects, rather from enumerating
the entries. We test this here my manually deleting the mapping and
then trying to re-read the entries.
"""
initial_mappings = len(mapping_sql.list_id_mappings())
user1 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex, 'enabled': True}
user1 = self.identity_api.create_user(user1)
user2 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex, 'enabled': True}
user2 = self.identity_api.create_user(user2)
mappings = mapping_sql.list_id_mappings()
self.assertEqual(initial_mappings + 2, len(mappings))
# Now delete the mappings for the two users above
self.id_mapping_api.purge_mappings({'public_id': user1['id']})
self.id_mapping_api.purge_mappings({'public_id': user2['id']})
# We should no longer be able to get these users via their old IDs
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user1['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user2['id'])
# Now enumerate all users...this should re-build the mapping, and
# we should be able to find the users via their original public IDs.
self.identity_api.list_users()
self.identity_api.get_user(user1['id'])
self.identity_api.get_user(user2['id'])
def test_get_roles_for_user_and_project_user_group_same_id(self):
self.skipTest('N/A: We never generate the same ID for a user and '
'group in our mapping table')
class BaseMultiLDAPandSQLIdentity(object):
"""Mixin class with support methods for domain-specific config testing."""
def create_user(self, domain_id):
user = {'name': uuid.uuid4().hex,
'domain_id': domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
user_ref = self.identity_api.create_user(user)
# Put the password back in, since this is used later by tests to
# authenticate.
user_ref['password'] = user['password']
return user_ref
def create_users_across_domains(self):
"""Create a set of users, each with a role on their own domain."""
# We also will check that the right number of id mappings get created
initial_mappings = len(mapping_sql.list_id_mappings())
self.users['user0'] = self.create_user(
self.domains['domain_default']['id'])
self.assignment_api.create_grant(
user_id=self.users['user0']['id'],
domain_id=self.domains['domain_default']['id'],
role_id=self.role_member['id'])
for x in range(1, self.domain_count):
self.users['user%s' % x] = self.create_user(
self.domains['domain%s' % x]['id'])
self.assignment_api.create_grant(
user_id=self.users['user%s' % x]['id'],
domain_id=self.domains['domain%s' % x]['id'],
role_id=self.role_member['id'])
# So how many new id mappings should have been created? One for each
        # user created in a domain that is using a non-default driver.
self.assertEqual(initial_mappings + self.domain_specific_count,
len(mapping_sql.list_id_mappings()))
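        # For example, in the MultiLDAPandSQLIdentity setup below
        # (domain_count = 5, domain_specific_count = 3), creating one user in
        # each of the five domains adds exactly three new id mappings -- one
        # per user stored in an LDAP-backed (non-default driver) domain.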
def check_user(self, user, domain_id, expected_status):
"""Check user is in correct backend.
As part of the tests, we want to force ourselves to manually
select the driver for a given domain, to make sure the entity
ended up in the correct backend.
"""
driver = self.identity_api._select_identity_driver(domain_id)
unused, unused, entity_id = (
self.identity_api._get_domain_driver_and_entity_id(
user['id']))
if expected_status == 200:
ref = driver.get_user(entity_id)
ref = self.identity_api._set_domain_id_and_mapping(
ref, domain_id, driver, map.EntityType.USER)
user = user.copy()
del user['password']
self.assertDictEqual(ref, user)
else:
            # TODO(henry-nash): Use assertRaises() here, although
# there appears to be an issue with using driver.get_user
# inside that construct
try:
driver.get_user(entity_id)
except expected_status:
pass
def setup_initial_domains(self):
def create_domain(domain):
try:
ref = self.resource_api.create_domain(
domain['id'], domain)
except exception.Conflict:
ref = (
self.resource_api.get_domain_by_name(domain['name']))
return ref
self.domains = {}
for x in range(1, self.domain_count):
domain = 'domain%s' % x
self.domains[domain] = create_domain(
{'id': uuid.uuid4().hex, 'name': domain})
self.domains['domain_default'] = create_domain(
resource.calc_default_domain())
def test_authenticate_to_each_domain(self):
"""Test that a user in each domain can authenticate."""
for user_num in range(self.domain_count):
user = 'user%s' % user_num
self.identity_api.authenticate(
context={},
user_id=self.users[user]['id'],
password=self.users[user]['password'])
class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
tests.TestCase, BaseMultiLDAPandSQLIdentity):
"""Class to test common SQL plus individual LDAP backends.
We define a set of domains and domain-specific backends:
- A separate LDAP backend for the default domain
- A separate LDAP backend for domain1
- domain2 shares the same LDAP as domain1, but uses a different
tree attach point
- An SQL backend for all other domains (which will include domain3
and domain4)
Normally one would expect that the default domain would be handled as
part of the "other domains" - however the above provides better
test coverage since most of the existing backend tests use the default
domain.
"""
def setUp(self):
self.useFixture(database.Database())
super(MultiLDAPandSQLIdentity, self).setUp()
self.load_backends()
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.domain_count = 5
self.domain_specific_count = 3
self.setup_initial_domains()
self._setup_initial_users()
# All initial test data setup complete, time to switch on support
# for separate backends per domain.
self.enable_multi_domain()
self.clear_database()
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
def config_overrides(self):
super(MultiLDAPandSQLIdentity, self).config_overrides()
# Make sure identity and assignment are actually SQL drivers,
# BaseLDAPIdentity sets these options to use LDAP.
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.sql.Identity')
self.config_fixture.config(
group='resource',
driver='keystone.resource.backends.sql.Resource')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
def _setup_initial_users(self):
# Create some identity entities BEFORE we switch to multi-backend, so
# we can test that these are still accessible
self.users = {}
self.users['userA'] = self.create_user(
self.domains['domain_default']['id'])
self.users['userB'] = self.create_user(
self.domains['domain1']['id'])
self.users['userC'] = self.create_user(
self.domains['domain3']['id'])
def enable_multi_domain(self):
"""Enable the chosen form of multi domain configuration support.
This method enables the file-based configuration support. Child classes
that wish to use the database domain configuration support should
override this method and set the appropriate config_fixture option.
"""
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
domain_config_dir=tests.TESTCONF + '/domain_configs_multi_ldap')
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
def reload_backends(self, domain_id):
        # Just reload the driver for this domain - which will pick up
# any updated cfg
self.identity_api.domain_configs.reload_domain_driver(domain_id)
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
# if no specific config defined for this domain
return self.identity_api.domain_configs.get_domain_conf(domain_id)
def test_list_domains(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_domains_non_default_domain_id(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_users(self):
# Override the standard list users, since we have added an extra user
# to the default domain, so the number of expected users is one more
# than in the standard test.
users = self.identity_api.list_users(
domain_scope=self._set_domain_scope(
CONF.identity.default_domain_id))
self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
user_ids = set(user['id'] for user in users)
expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
for user in default_fixtures.USERS)
expected_user_ids.add(self.users['user0']['id'])
for user_ref in users:
self.assertNotIn('password', user_ref)
self.assertEqual(expected_user_ids, user_ids)
def test_domain_segregation(self):
"""Test that separate configs have segregated the domain.
Test Plan:
- Users were created in each domain as part of setup, now make sure
you can only find a given user in its relevant domain/backend
- Make sure that for a backend that supports multiple domains
you can get the users via any of its domains
"""
# Check that I can read a user with the appropriate domain-selected
# driver, but won't find it via any other domain driver
check_user = self.check_user
check_user(self.users['user0'],
self.domains['domain_default']['id'], 200)
for domain in [self.domains['domain1']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user0'], domain, exception.UserNotFound)
check_user(self.users['user1'], self.domains['domain1']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user1'], domain, exception.UserNotFound)
check_user(self.users['user2'], self.domains['domain2']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user2'], domain, exception.UserNotFound)
# domain3 and domain4 share the same backend, so you should be
# able to see user3 and user4 from either.
check_user(self.users['user3'], self.domains['domain3']['id'], 200)
check_user(self.users['user3'], self.domains['domain4']['id'], 200)
check_user(self.users['user4'], self.domains['domain3']['id'], 200)
check_user(self.users['user4'], self.domains['domain4']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
self.domains['domain2']['id']]:
check_user(self.users['user3'], domain, exception.UserNotFound)
check_user(self.users['user4'], domain, exception.UserNotFound)
# Finally, going through the regular manager layer, make sure we
# only see the right number of users in each of the non-default
# domains. One might have expected two users in domain1 (since we
# created one before we switched to multi-backend), however since
# that domain changed backends in the switch we don't find it anymore.
# This is as designed - we don't support moving domains between
# backends.
#
# The listing of the default domain is already handled in the
        # test_list_users() method.
for domain in [self.domains['domain1']['id'],
self.domains['domain2']['id'],
self.domains['domain4']['id']]:
self.assertThat(
self.identity_api.list_users(domain_scope=domain),
matchers.HasLength(1))
# domain3 had a user created before we switched on
# multiple backends, plus one created afterwards - and its
# backend has not changed - so we should find two.
self.assertThat(
self.identity_api.list_users(
domain_scope=self.domains['domain3']['id']),
matchers.HasLength(2))
def test_existing_uuids_work(self):
"""Test that 'uni-domain' created IDs still work.
Throwing the switch to domain-specific backends should not cause
existing identities to be inaccessible via ID.
"""
self.identity_api.get_user(self.users['userA']['id'])
self.identity_api.get_user(self.users['userB']['id'])
self.identity_api.get_user(self.users['userC']['id'])
def test_scanning_of_config_dir(self):
"""Test the Manager class scans the config directory.
The setup for the main tests above load the domain configs directly
so that the test overrides can be included. This test just makes sure
that the standard config directory scanning does pick up the relevant
domain config files.
"""
# Confirm that config has drivers_enabled as True, which we will
# check has been set to False later in this test
self.assertTrue(CONF.identity.domain_specific_drivers_enabled)
self.load_backends()
# Execute any command to trigger the lazy loading of domain configs
self.identity_api.list_users(
domain_scope=self.domains['domain1']['id'])
# ...and now check the domain configs have been set up
self.assertIn('default', self.identity_api.domain_configs)
self.assertIn(self.domains['domain1']['id'],
self.identity_api.domain_configs)
self.assertIn(self.domains['domain2']['id'],
self.identity_api.domain_configs)
self.assertNotIn(self.domains['domain3']['id'],
self.identity_api.domain_configs)
self.assertNotIn(self.domains['domain4']['id'],
self.identity_api.domain_configs)
# Finally check that a domain specific config contains items from both
# the primary config and the domain specific config
conf = self.identity_api.domain_configs.get_domain_conf(
self.domains['domain1']['id'])
# This should now be false, as is the default, since this is not
# set in the standard primary config file
self.assertFalse(conf.identity.domain_specific_drivers_enabled)
        # ...and make sure a domain-specific option is also set
self.assertEqual('fake://memory1', conf.ldap.url)
def test_delete_domain_with_user_added(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'enabled': True}
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'description': uuid.uuid4().hex,
'parent_id': None,
'enabled': True}
self.resource_api.create_domain(domain['id'], domain)
self.resource_api.create_project(project['id'], project)
project_ref = self.resource_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=project['id'],
role_id=self.role_member['id'])
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
project_id=project['id'],
role_id=self.role_member['id'])
domain['enabled'] = False
self.resource_api.update_domain(domain['id'], domain)
self.resource_api.delete_domain(domain['id'])
self.assertRaises(exception.DomainNotFound,
self.resource_api.get_domain,
domain['id'])
def test_user_enabled_ignored_disable_error(self):
# Override.
self.skipTest("Doesn't apply since LDAP config has no affect on the "
"SQL identity backend.")
def test_group_enabled_ignored_disable_error(self):
# Override.
self.skipTest("Doesn't apply since LDAP config has no affect on the "
"SQL identity backend.")
def test_project_enabled_ignored_disable_error(self):
# Override
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity):
"""Class to test the use of domain configs stored in the database.
Repeat the same tests as MultiLDAPandSQLIdentity, but instead of using the
domain specific config files, store the domain specific values in the
database.
"""
def enable_multi_domain(self):
# The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
default_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
}
domain1_config = {
'ldap': {'url': 'fake://memory1',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=example,cn=com'},
'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
}
domain2_config = {
'ldap': {'url': 'fake://memory',
'user': 'cn=Admin',
'password': 'password',
'suffix': 'cn=myroot,cn=com',
'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}
}
self.domain_config_api.create_config(CONF.identity.default_domain_id,
default_config)
self.domain_config_api.create_config(self.domains['domain1']['id'],
domain1_config)
self.domain_config_api.create_config(self.domains['domain2']['id'],
domain2_config)
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
domain_configurations_from_database=True)
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
def test_domain_config_has_no_impact_if_database_support_disabled(self):
"""Ensure database domain configs have no effect if disabled.
Set reading from database configs to false, restart the backends
        and then try to set and use database configs.
"""
self.config_fixture.config(
group='identity', domain_configurations_from_database=False)
self.load_backends()
new_config = {'ldap': {'url': uuid.uuid4().hex}}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, new_config)
# Trigger the identity backend to initialise any domain specific
# configurations
self.identity_api.list_users()
# Check that the new config has not been passed to the driver for
# the default domain.
default_config = (
self.identity_api.domain_configs.get_domain_conf(
CONF.identity.default_domain_id))
self.assertEqual(CONF.ldap.url, default_config.ldap.url)
def test_reloading_domain_config(self):
"""Ensure domain drivers are reloaded on a config modification."""
domain_cfgs = self.identity_api.domain_configs
# Create a new config for the default domain, hence overwriting the
# current settings.
new_config = {
'ldap': {'url': uuid.uuid4().hex},
'identity': {'driver': 'keystone.identity.backends.ldap.Identity'}}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, new_config)
default_config = (
domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
self.assertEqual(new_config['ldap']['url'], default_config.ldap.url)
# Ensure updating is also honored
updated_config = {'url': uuid.uuid4().hex}
self.domain_config_api.update_config(
CONF.identity.default_domain_id, updated_config,
group='ldap', option='url')
default_config = (
domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
self.assertEqual(updated_config['url'], default_config.ldap.url)
# ...and finally ensure delete causes the driver to get the standard
# config again.
self.domain_config_api.delete_config(CONF.identity.default_domain_id)
default_config = (
domain_cfgs.get_domain_conf(CONF.identity.default_domain_id))
self.assertEqual(CONF.ldap.url, default_config.ldap.url)
def test_setting_sql_driver_raises_exception(self):
"""Ensure setting of domain specific sql driver is prevented."""
new_config = {
'identity': {'driver': 'keystone.identity.backends.sql.Identity'}}
self.domain_config_api.create_config(
CONF.identity.default_domain_id, new_config)
self.assertRaises(exception.InvalidDomainConfig,
self.identity_api.domain_configs.get_domain_conf,
CONF.identity.default_domain_id)
class DomainSpecificLDAPandSQLIdentity(
BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase,
BaseMultiLDAPandSQLIdentity):
"""Class to test when all domains use specific configs, including SQL.
We define a set of domains and domain-specific backends:
- A separate LDAP backend for the default domain
- A separate SQL backend for domain1
Although the default driver still exists, we don't use it.
"""
def setUp(self):
self.useFixture(database.Database())
super(DomainSpecificLDAPandSQLIdentity, self).setUp()
self.initial_setup()
def initial_setup(self):
# We aren't setting up any initial data ahead of switching to
# domain-specific operation, so make the switch straight away.
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
domain_config_dir=(
tests.TESTCONF + '/domain_configs_one_sql_one_ldap'))
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
self.load_backends()
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.domain_count = 2
self.domain_specific_count = 2
self.setup_initial_domains()
self.users = {}
self.clear_database()
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
def config_overrides(self):
super(DomainSpecificLDAPandSQLIdentity, self).config_overrides()
# Make sure resource & assignment are actually SQL drivers,
        # BaseLDAPIdentity sets these options to use LDAP.
self.config_fixture.config(
group='resource',
driver='keystone.resource.backends.sql.Resource')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
def reload_backends(self, domain_id):
        # Just reload the driver for this domain - which will pick up
# any updated cfg
self.identity_api.domain_configs.reload_domain_driver(domain_id)
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
# if no specific config defined for this domain
return self.identity_api.domain_configs.get_domain_conf(domain_id)
def test_list_domains(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_domains_non_default_domain_id(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_domain_crud(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_users(self):
# Override the standard list users, since we have added an extra user
# to the default domain, so the number of expected users is one more
# than in the standard test.
users = self.identity_api.list_users(
domain_scope=self._set_domain_scope(
CONF.identity.default_domain_id))
self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
user_ids = set(user['id'] for user in users)
expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
for user in default_fixtures.USERS)
expected_user_ids.add(self.users['user0']['id'])
for user_ref in users:
self.assertNotIn('password', user_ref)
self.assertEqual(expected_user_ids, user_ids)
def test_domain_segregation(self):
"""Test that separate configs have segregated the domain.
Test Plan:
- Users were created in each domain as part of setup, now make sure
you can only find a given user in its relevant domain/backend
- Make sure that for a backend that supports multiple domains
you can get the users via any of its domains
"""
# Check that I can read a user with the appropriate domain-selected
# driver, but won't find it via any other domain driver
self.check_user(self.users['user0'],
self.domains['domain_default']['id'], 200)
self.check_user(self.users['user0'],
self.domains['domain1']['id'], exception.UserNotFound)
self.check_user(self.users['user1'],
self.domains['domain1']['id'], 200)
self.check_user(self.users['user1'],
self.domains['domain_default']['id'],
exception.UserNotFound)
# Finally, going through the regular manager layer, make sure we
# only see the right number of users in the non-default domain.
self.assertThat(
self.identity_api.list_users(
domain_scope=self.domains['domain1']['id']),
matchers.HasLength(1))
def test_add_role_grant_to_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_get_role_grants_for_user_and_project_404(self):
self.skipTest('Blocked by bug 1101287')
def test_list_projects_for_user_with_grants(self):
self.skipTest('Blocked by bug 1221805')
def test_get_roles_for_user_and_project_user_group_same_id(self):
self.skipTest('N/A: We never generate the same ID for a user and '
'group in our mapping table')
def test_user_id_comma(self):
self.skipTest('Only valid if it is guaranteed to be talking to '
'the fakeldap backend')
def test_user_id_comma_grants(self):
self.skipTest('Only valid if it is guaranteed to be talking to '
'the fakeldap backend')
def test_user_enabled_ignored_disable_error(self):
# Override.
self.skipTest("Doesn't apply since LDAP config has no affect on the "
"SQL identity backend.")
def test_group_enabled_ignored_disable_error(self):
# Override.
self.skipTest("Doesn't apply since LDAP config has no affect on the "
"SQL identity backend.")
def test_project_enabled_ignored_disable_error(self):
# Override
self.skipTest("Doesn't apply since LDAP configuration is ignored for "
"SQL assignment backend.")
class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
"""Class to test simplest use of domain-specific SQL driver.
The simplest use of an SQL domain-specific backend is when it is used to
augment the standard case when LDAP is the default driver defined in the
main config file. This would allow, for example, service users to be
stored in SQL while LDAP handles the rest. Hence we define:
- The default driver uses the LDAP backend for the default domain
- A separate SQL backend for domain1
"""
def initial_setup(self):
# We aren't setting up any initial data ahead of switching to
# domain-specific operation, so make the switch straight away.
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
domain_config_dir=(
tests.TESTCONF + '/domain_configs_default_ldap_one_sql'))
# Part of the testing counts how many new mappings get created as
# we create users, so ensure we are NOT using mapping for the default
# LDAP domain so this doesn't confuse the calculation.
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=True)
self.load_backends()
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.domain_count = 2
self.domain_specific_count = 1
self.setup_initial_domains()
self.users = {}
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
def config_overrides(self):
super(DomainSpecificSQLIdentity, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
self.config_fixture.config(
group='resource',
driver='keystone.resource.backends.sql.Resource')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
def get_config(self, domain_id):
if domain_id == CONF.identity.default_domain_id:
return CONF
else:
return self.identity_api.domain_configs.get_domain_conf(domain_id)
def reload_backends(self, domain_id):
if domain_id == CONF.identity.default_domain_id:
self.load_backends()
else:
# Just reload the driver for this domain - which will pickup
# any updated cfg
self.identity_api.domain_configs.reload_domain_driver(domain_id)
def test_default_sql_plus_sql_specific_driver_fails(self):
# First confirm that if ldap is default driver, domain1 can be
# loaded as sql
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
self.load_backends()
# Make any identity call to initiate the lazy loading of configs
self.identity_api.list_users(
domain_scope=CONF.identity.default_domain_id)
self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
# Now re-initialize, but with sql as the default identity driver
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.sql.Identity')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
self.load_backends()
# Make any identity call to initiate the lazy loading of configs, which
# should fail since we would now have two sql drivers.
self.assertRaises(exception.MultipleSQLDriversInConfig,
self.identity_api.list_users,
domain_scope=CONF.identity.default_domain_id)
def test_multiple_sql_specific_drivers_fails(self):
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
self.load_backends()
# Ensure default, domain1 and domain2 exist
self.domain_count = 3
self.setup_initial_domains()
# Make any identity call to initiate the lazy loading of configs
self.identity_api.list_users(
domain_scope=CONF.identity.default_domain_id)
# This will only load domain1, since the domain2 config file is
# not stored in the same location
self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
# Now try and manually load a 2nd sql specific driver, for domain2,
# which should fail.
self.assertRaises(
exception.MultipleSQLDriversInConfig,
self.identity_api.domain_configs._load_config_from_file,
self.resource_api,
[tests.TESTCONF + '/domain_configs_one_extra_sql/' +
'keystone.domain2.conf'],
'domain2')
class LdapFilterTests(test_backend.FilterTests, tests.TestCase):
def setUp(self):
super(LdapFilterTests, self).setUp()
self.useFixture(database.Database())
self.clear_database()
common_ldap.register_handler('fake://', fakeldap.FakeLdap)
self.load_backends()
self.load_fixtures(default_fixtures)
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.addCleanup(common_ldap_core._HANDLERS.clear)
def config_overrides(self):
super(LdapFilterTests, self).config_overrides()
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.ldap.Identity')
def config_files(self):
config_files = super(LdapFilterTests, self).config_files()
config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
return config_files
def clear_database(self):
for shelf in fakeldap.FakeShelves:
fakeldap.FakeShelves[shelf].clear()
| yanheven/keystone | keystone/tests/unit/test_backend_ldap.py | Python | apache-2.0 | 134,073 |
import logging
import os
import numpy as np
import parmap
import networkx as nx
from yass.cluster.getptp import GETPTP, GETCLEANPTP
from yass import mfm
# from yass import read_config
# CONFIG = read_config()
def run_split_on_ptp(savedir,
fname_spike_index,
CONFIG,
raw_data=True,
fname_labels=None,
fname_templates=None,
fname_shifts=None,
fname_scales=None,
reader_raw=None,
reader_residual=None,
denoiser=None):
logger = logging.getLogger(__name__)
os.environ["CUDA_VISIBLE_DEVICES"] = str(CONFIG.resources.gpu_id)
# save results
fname_spike_index_new = os.path.join(savedir, 'spike_index.npy')
fname_labels_new = os.path.join(savedir, 'labels.npy')
fname_ptp = os.path.join(savedir, 'ptp.npy')
if os.path.exists(fname_labels_new):
if fname_labels is None:
fname_labels_input = None
else:
fname_labels_input = os.path.join(savedir, 'input_labels.npy')
        return fname_spike_index_new, fname_labels_new, fname_labels_input
if not os.path.exists(savedir):
os.makedirs(savedir)
# get ptp
logger.info("Get Spike PTP")
if raw_data:
getptp = GETPTP(fname_spike_index, reader_raw, CONFIG, denoiser)
ptp_raw, ptp_deno = getptp.compute_ptps()
else:
getcleanptp = GETCLEANPTP(fname_spike_index,
fname_labels,
fname_templates,
fname_shifts,
fname_scales,
reader_residual,
denoiser)
ptp_raw, ptp_deno = getcleanptp.compute_ptps()
np.savez(os.path.join(savedir, 'ptps_input.npz'),
ptp_raw=ptp_raw,
ptp_deno=ptp_deno)
# if there is an input label, load it
# otherwise, max channel becomes the input label
spike_index = np.load(fname_spike_index)
if fname_labels is None:
labels = spike_index[:, 1]
else:
labels = np.load(fname_labels)
    # triage only if a denoiser is in use
# Triage out badly collided ones
if denoiser is not None:
idx = np.where(ptp_raw < ptp_deno*1.5)[0]
#idx = np.arange(len(ptp_deno))
ptps = ptp_deno[idx]
labels = labels[idx]
spike_index = spike_index[idx]
else:
idx = np.arange(len(ptp_raw))
ptps = ptp_raw
np.save(os.path.join(savedir, 'idx_keep.npy'), idx)
logger.info("Run Split")
new_labels = run_split_parallel(ptps, labels, CONFIG, ptp_cut=5)
np.save(fname_spike_index_new, spike_index)
np.save(fname_labels_new, new_labels)
np.save(fname_ptp, ptps)
# if there is an input labels, update it
if fname_labels is None:
fname_labels_input = None
else:
fname_labels_input = os.path.join(savedir, 'input_labels.npy')
np.save(fname_labels_input, labels)
return fname_spike_index_new, fname_labels_new, fname_labels_input
def run_split_parallel(ptps, labels, CONFIG, ptp_cut=5):
all_units = np.unique(labels)
new_labels = np.ones(len(ptps), 'int32')*-1
n_processors = CONFIG.resources.n_processors
if CONFIG.resources.multi_processing:
units_in = []
for j in range(n_processors):
units_in.append(all_units[slice(j, len(all_units), n_processors)])
results = parmap.map(run_split,
units_in,
ptps,
labels,
CONFIG,
ptp_cut,
processes=n_processors)
        n_labels = 0
for rr in results:
for rr2 in rr:
ii_ = rr2[:, 0]
lab_ = rr2[:, 1]
new_labels[ii_] = lab_ + n_labels
n_labels += len(np.unique(lab_))
else:
results = run_split(all_units, ptps, labels, CONFIG, ptp_cut)
        n_labels = 0
for rr in results:
ii_ = rr[:, 0]
lab_ = rr[:, 1]
new_labels[ii_] = lab_ + n_labels
n_labels += len(np.unique(lab_))
return new_labels
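# Example of the label-offsetting merge above (illustrative): if one worker
# returns clusters labelled {0, 1} and the next returns {0, 1, 2}, the merged
# assignment relabels them to {0, 1} and {2, 3, 4} respectively, so unit
# labels stay globally unique across processors.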
def run_split(units_in, ptps, labels, CONFIG, ptp_cut=5):
spike_index_list = []
for unit in units_in:
idx_ = np.where(labels == unit)[0]
ptps_ = ptps[idx_]
new_assignment = np.zeros(len(idx_), 'int32')
        idx_big = np.where(ptps_ > ptp_cut)[0]
if len(idx_big) > 10:
mask = np.ones((len(idx_big), 1))
group = np.arange(len(idx_big))
vbParam = mfm.spikesort(ptps_[idx_big,None,None],
mask,
group,
CONFIG)
cc_assignment, stability, cc = anneal_clusters(vbParam)
# get ptp per cc
mean_ptp_cc = np.zeros(len(cc))
for k in range(len(cc)):
mean_ptp_cc[k] = np.mean(ptps_[idx_big][cc_assignment == k])
# reorder cc label by mean ptp
cc_assignment_ordered = np.zeros_like(cc_assignment)
for ii, k in enumerate(np.argsort(mean_ptp_cc)):
cc_assignment_ordered[cc_assignment == k] = ii
            # the cc with the smallest mean ptp keeps label 0, i.e. the same
            # assignment given to spikes with ptp below ptp_cut
new_assignment[idx_big] = cc_assignment_ordered
spike_index_list.append(np.vstack((idx_, new_assignment)).T)
return spike_index_list
def anneal_clusters(vbParam):
N, K = vbParam.rhat.shape
stability = calculate_stability(vbParam.rhat)
if np.all(stability > 0.8):
cc = [[k] for k in range(K)]
return vbParam.rhat.argmax(1), stability, cc
maha = mfm.calc_mahalonobis(vbParam, vbParam.muhat.transpose((1,0,2)))
maha = np.maximum(maha, maha.T)
maha_thresh_min = 0
for k_target in range(K-1, 0, -1):
# get connected components with k_target number of them
cc, maha_thresh_min = get_k_cc(maha, maha_thresh_min, k_target)
# calculate soft assignment for each cc
rhat_cc = np.zeros([N,len(cc)])
for i, units in enumerate(cc):
rhat_cc[:, i] = np.sum(vbParam.rhat[:, units], axis=1)
rhat_cc[rhat_cc<0.001] = 0.0
rhat_cc = rhat_cc/np.sum(rhat_cc,axis =1 ,keepdims = True)
# calculate stability for each component
# and make decision
stability = calculate_stability(rhat_cc)
if np.all(stability>0.8) or k_target == 1:
return rhat_cc.argmax(1), stability, cc
def calculate_stability(rhat):
K = rhat.shape[1]
mask = rhat > 0.05
stability = np.zeros(K)
for clust in range(stability.size):
if mask[:,clust].sum() == 0.0:
continue
stability[clust] = np.average(mask[:,clust] * rhat[:,clust], axis=0, weights = mask[:,clust])
return stability
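# Worked example for calculate_stability (illustrative numbers): with
#   rhat = [[0.97, 0.03],
#           [0.80, 0.20],
#           [0.60, 0.40]]
# cluster 0 averages its soft assignments over all three spikes passing the
# 0.05 mask, (0.97 + 0.80 + 0.60) / 3 ~= 0.79, while cluster 1 only averages
# the last two rows, (0.20 + 0.40) / 2 = 0.30 -- so cluster 1 counts as
# unstable against the 0.8 threshold used above.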
def get_cc(maha, maha_thresh):
row, column = np.where(maha<maha_thresh)
G = nx.DiGraph()
for i in range(maha.shape[0]):
G.add_node(i)
for i, j in zip(row,column):
G.add_edge(i, j)
cc = [list(units) for units in nx.strongly_connected_components(G)]
return cc
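# Illustration of get_cc: with three units where only maha[0, 1] and
# maha[1, 0] fall below the threshold, the graph has the edge pair 0<->1, so
# the strongly connected components come back as [[0, 1], [2]] -- two
# candidate clusters for the annealing loop above.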
def get_k_cc(maha, maha_thresh_min, k_target):
    # assumes that maha_thresh_min yields exactly
    # k_target + 1 connected components
k_now = k_target + 1
if len(get_cc(maha, maha_thresh_min)) != k_now:
raise ValueError("something is not right")
maha_thresh = maha_thresh_min
while k_now > k_target:
maha_thresh += 1
cc = get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
else:
maha_thresh_max = maha_thresh
maha_thresh_min = maha_thresh - 1
if len(get_cc(maha, maha_thresh_min)) <= k_target:
raise ValueError("something is not right")
ctr = 0
maha_thresh_max_init = maha_thresh_max
while True:
ctr += 1
maha_thresh = (maha_thresh_max + maha_thresh_min)/2.0
cc = get_cc(maha, maha_thresh)
k_now = len(cc)
if k_now == k_target:
return cc, maha_thresh
elif k_now > k_target:
maha_thresh_min = maha_thresh
elif k_now < k_target:
maha_thresh_max = maha_thresh
if ctr > 1000:
print(k_now, k_target, maha_thresh, maha_thresh_max_init)
print(cc)
print(len(get_cc(maha, maha_thresh+0.001)))
print(len(get_cc(maha, maha_thresh-0.001)))
raise ValueError("something is not right")
| paninski-lab/yass | src/yass/cluster/ptp_split.py | Python | apache-2.0 | 8,988 |
"""
Authorization rules related to content management.
"""
import user_tasks.rules
user_tasks.rules.add_rules()
| edx/edx-platform | cms/djangoapps/contentstore/rules.py | Python | agpl-3.0 | 115 |
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the path to the compiler executables, e.g. CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil'
elif CROSS_TOOL == 'iar':
print('================ERROR============================')
print('Not support iar yet!')
print('=================================================')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
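# Typical usage (assuming the usual RT-Thread scons workflow): select the
# toolchain from the shell before building, e.g.
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin
#   scons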
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -g -Wall -D__ASSEMBLY__ -D__FPU_USED'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread-k64f.map,-cref,-u,Reset_Handler -T K64FN1M0xxx12.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-k64f.map --scatter MK64F.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' --c99 -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
| aozima/rt-thread | bsp/frdm-k64f/rtconfig.py | Python | gpl-2.0 | 2,384 |
import unittest
import utils
from tree import TreeNode
def height(root):
if not root:
return -1
return 1 + max(height(root.left), height(root.right))
def match(a, b):
if not a:
return not b
if not b:
return False
return a.val == b.val and match(a.left, b.left) and match(a.right, b.right)
# O(n) time. O(log(n)) space. Recursive DFS.
class Solution:
def isSubtree(self, root: TreeNode, subRoot: TreeNode) -> bool:
subtree_height = height(subRoot)
def dfs(curr):
if not curr:
return False, -1
left_match, left_height = dfs(curr.left)
if left_match:
return True, -1
right_match, right_height = dfs(curr.right)
if right_match:
return True, -1
curr_height = 1 + max(left_height, right_height)
if curr_height == subtree_height and match(curr, subRoot):
return True, -1
return False, curr_height
return dfs(root)[0]
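# Worked example: root = [3,4,5,1,2], subRoot = [4,1,2]. height(subRoot) is 1,
# so match() is only attempted at nodes whose computed height is also 1 --
# here only the node with value 4 -- rather than at every node, which keeps
# the redundant full-subtree comparisons to a minimum.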
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution, process_args=self.process_args)
@staticmethod
def process_args(args):
args.root = TreeNode.from_array(args.root)
args.subRoot = TreeNode.from_array(args.subRoot)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0572_recursive_dfs.py | Python | unlicense | 1,389 |
from flask import current_app, flash, abort
from willow.app import willow_signals
from slugify import slugify
from willow.models import db, mixins
class Venue(db.Model, mixins.WLWMixin):
pass
| undergroundtheater/willow | willow/models/venue.py | Python | mpl-2.0 | 198 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.securitycenter.v1", manifest={"RunAssetDiscoveryResponse",},
)
class RunAssetDiscoveryResponse(proto.Message):
r"""Response of asset discovery run
Attributes:
state (google.cloud.securitycenter_v1.types.RunAssetDiscoveryResponse.State):
The state of an asset discovery run.
duration (google.protobuf.duration_pb2.Duration):
The duration between asset discovery run
start and end
"""
class State(proto.Enum):
r"""The state of an asset discovery run."""
STATE_UNSPECIFIED = 0
COMPLETED = 1
SUPERSEDED = 2
TERMINATED = 3
state = proto.Field(proto.ENUM, number=1, enum=State,)
duration = proto.Field(proto.MESSAGE, number=2, message=duration_pb2.Duration,)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-securitycenter | google/cloud/securitycenter_v1/types/run_asset_discovery_response.py | Python | apache-2.0 | 1,567 |
# Copyright 2010-2015 RethinkDB, all rights reserved.
import os, setuptools, sys
modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rethinkdb')
sys.path.insert(0, modulePath)
from version import version
sys.path.remove(modulePath)
conditionalPackages = []
if 'upload' in sys.argv: # ensure that uploads always include everything when publishing to PyPI
conditionalPackages = ['rethinkdb.asyncio_net']
else:
try: # only add asyncio when it is supported per #4702
import asyncio
conditionalPackages = ['rethinkdb.asyncio_net']
except ImportError: pass
setuptools.setup(
name="rethinkdb",
zip_safe=True,
version=version,
description="Python driver library for the RethinkDB database server.",
url="http://rethinkdb.com",
maintainer="RethinkDB Inc.",
maintainer_email="bugs@rethinkdb.com",
packages=['rethinkdb', 'rethinkdb.tornado_net', 'rethinkdb.twisted_net', 'rethinkdb.backports.ssl_match_hostname'] + conditionalPackages,
package_dir={'rethinkdb':'rethinkdb'},
package_data={ 'rethinkdb':['backports/ssl_match_hostname/*.txt'] },
entry_points={
'console_scripts':[
'rethinkdb-import = rethinkdb._import:main',
'rethinkdb-dump = rethinkdb._dump:main',
'rethinkdb-export = rethinkdb._export:main',
'rethinkdb-restore = rethinkdb._restore:main',
'rethinkdb-index-rebuild = rethinkdb._index_rebuild:main'
]
}
)
| niieani/rethinkdb | drivers/python/setup.py | Python | agpl-3.0 | 1,483 |
# config.py ---
#
# Filename: config.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 4 14:46:29 2012 (+0530)
# Version:
# Last-Updated: Fri May 4 21:05:04 2012 (+0530)
# By: Subhasis Ray
# Update #: 140
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
# Code:
from datetime import datetime
import ConfigParser as configparser
import logging
import moose  # used by init() below for moose.seed()
import numpy
import os
# runtime info
timestamp = datetime.now()
mypid = os.getpid()
# Unit Conversion Factors
uS = 1e-6 # micro Siemens to Siemens
ms = 1e-3 # milli second to second
mV = 1e-3 # milli Volt to Volt
# limits on HH-gate tables
vmin = -120 * mV
vmax = 40 * mV
ndivs = 640
dv = (vmax - vmin)/ndivs
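# i.e. the gate interpolation tables span -120 mV..+40 mV in 640 steps,
# giving dv = (0.04 - (-0.12)) / 640 = 0.25 mV per division.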
# element to contain prototypes
libpath = '/library'
# defined channels to be initialized in prototypes
channel_names = ['AR',
'CaPool',
'CaL',
'CaT',
'CaT_A',
'K2',
'KA',
'KA_IB',
'KAHP',
'KAHP_DP',
'KAHP_SLOWER',
'KC',
'KC_FAST',
'KDR',
'KDR_FS',
'KM',
'NaF',
'NaF2',
'NaF_TCR',
'NaP',
'NaPF',
'NaPF_SS',
'NaPF_TCR',
'NaF2_nRT']
############################################
# Parse configuration file
############################################
_parser = configparser.SafeConfigParser()
_parser.optionxform = str
_parser.read(['defaults.ini', 'custom.ini'])
# seed for random number generator in MOOSE
moose_rngseed = _parser.get('numeric', 'moose_rngseed')
# seed for random number generator in numpy
numpy_rngseed = _parser.get('numeric', 'numpy_rngseed')
# flag if the simulation uses stochastic synchans
stochastic = _parser.get('numeric', 'stochastic') in ['Yes', 'yes', 'True', 'true', '1']
reseed = _parser.get('numeric', 'reseed') in ['Yes', 'yes', 'True', 'true', '1']
solver = _parser.get('numeric', 'solver')
simtime = float(_parser.get('scheduling', 'simtime'))
simdt = float(_parser.get('scheduling', 'simdt'))
plotdt = float(_parser.get('scheduling', 'plotdt'))
######################################################################
# configuration for saving simulation data
######################################################################
datadir = os.path.join(_parser.get('directories', 'data'),
timestamp.strftime('%Y_%m_%d'))
if not os.access(datadir, os.F_OK):
    os.makedirs(datadir)
protodir = _parser.get('directories', 'proto')
datafileprefix = 'data'
netfileprefix = 'network'
filesuffix = '_%s_%d' % (timestamp.strftime('%Y%m%d_%H%M%S'), mypid)
datafilepath = os.path.join(datadir, datafileprefix + filesuffix + '.h5')
netfilepath = os.path.join(datadir, netfileprefix + filesuffix + '.h5')
#####################################################################
# Logging
#####################################################################
logfileprefix = 'traub2005'
logfilename = os.path.join(datadir, logfileprefix + filesuffix + '.log')
loglevel = int(_parser.get('logging', 'level'))
logger = logging.getLogger(logfileprefix)
logging.basicConfig(filename=logfilename,
level=loglevel,
format='%(asctime)s \
%(levelname)s \
%(name)s \
%(filename)s \
%(funcName)s: \
%(message)s',
filemode='w')
benchmark = int(_parser.get('logging', 'benchmark'))
benchmarker = logging.getLogger(logfileprefix + '.benchmark')
benchmarker.setLevel(logging.DEBUG)
_inited = False
def init():
    global _inited
    if _inited:
        return
    _inited = True
if reseed:
if moose_rngseed:
moose.seed(int(moose_rngseed))
else:
moose.seed(0)
if numpy_rngseed:
numpy.random.seed(int(numpy_rngseed))
init()
#
# config.py ends here
| dilawar/moose-full | moose-examples/traub_2005/py/trbconfig.py | Python | gpl-2.0 | 4,129 |
import serial
PORT = "/dev/ttyACM3"
beginStr = "101010101010101001"
communication = serial.Serial(PORT, 115200, timeout=4)
class Decodeur(object):
def __init__(self):
self.communication = serial.Serial(PORT, 115200, timeout=4)
self.longueurMessage = 32
self.longueurEncapsulation = 18
def demanderCode(self):
data = b''
conteur = 0
test = []
while (data == b''):
            # avoid re-sending the command on every loop iteration
if conteur % 2 == 0:
commande = "Manchester"
self._send_command(commande, 0, 0)
conteur += 1
else:
conteur = conteur + 1
data = communication.readline()
test = data.split()
        # (the bytes returned by data.split() are parsed directly by int(float(...)) below)
for i in range(0, len(test)):
test[i] = int(float(test[i]))
test[i] = "{0:08b}".format(test[i])
if i == len(test) - 1:
codelong = "".join(test)
index = codelong.find(beginStr)
if codelong.index(beginStr, index + 1) == (index + self.longueurMessage):
decode = ""
for j in range(index + self.longueurEncapsulation, index + self.longueurMessage, 2):
if codelong[j] == "1" and codelong[j + 1] == "0":
decode += "1"
elif codelong[j] == "0" and codelong[j + 1] == "1":
decode += "0"
else:
decode = ""
break
asciiInt = int(decode, 2)
ascii = chr(asciiInt)
return ascii
def manchester(self):
commande = "Manchester"
self._send_command(commande, 0, 0)
data = communication.readline()
test = data.split()
for i in range(0, len(test)):
test[i] = int(float(test[i]))
test[i] = "{0:08b}".format(test[i])
if i == len(test) - 1:
codelong = "".join(test)
index = codelong.find(beginStr)
if codelong.index(beginStr, index + 1) == (index + self.longueurMessage):
decode = ""
for j in range(index + self.longueurEncapsulation, index + self.longueurMessage, 2):
if codelong[j] == "1" and codelong[j + 1] == "0":
decode += "1"
elif codelong[j] == "0" and codelong[j + 1] == "1":
decode += "0"
else:
decode = ""
break
asciiInt = int(decode, 2)
ascii = chr(asciiInt)
#print(ascii)
return ascii
def _send_command(self, commande, args1, args2, retry_countdown=3):
self.communication.flushInput()
write = "{}({},{});".format(commande, args1, args2)
        write1 = bytes(write, encoding="UTF-8")
self.communication.write(write1)
self.communication.flushOutput()
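
# A minimal usage sketch (an assumption, not part of the original module:
# it presumes a device is attached at PORT that keeps emitting Manchester
# frames containing the preamble defined in `beginStr`):
#
#     if __name__ == '__main__':
#         decodeur = Decodeur()
#         print(decodeur.demanderCode())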
| phil888/Design3 | Livrable3/Remise/Code source et tests/source/Decodeur.py | Python | gpl-3.0 | 3,262 |
import os
__version__ = (0, 2, 2)
os.environ.setdefault('PGCLIENTENCODING', 'UTF-8')
os.environ.setdefault('SHAPE_ENCODING', 'UTF-8')
os.environ.setdefault('PG_USE_COPY', 'yes') | davisc/django-osgeo-importer | osgeo_importer/__init__.py | Python | gpl-3.0 | 179 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy import log
from funs.items import FunsItem
class FunsSpider(Spider):
#log.start("log",loglevel='INFO')
name="funs"
    allowed_domains = ["ent.sina.com.cn"]  # domain names only, not full URLs
start_urls = [
"http://ent.sina.com.cn/",
]
    def parse_start(self, response):
        # iterate the spider's own start URLs; the bare `start_urls` name was a NameError
        for start_url in self.start_urls:
            yield Request(start_url, callback=self.parse)
    def parse(self, response):
        sel = Selector(response)  # do not shadow `self` with the selector
        sites = sel.xpath('//div[@id="listZone"]/div[@class="nrC"]')
        items = []
        for site in sites:
            item = FunsItem()  # `MovienewsItem` was undefined; FunsItem is the imported item class
url = site.xpath('a/@href').extract()
image = site.xpath('a/img[@class="nrPic"]/@src').extract()
title = site.xpath('a/img[@class="nrPic"]/@alt').extract()
src = site.xpath('p/text()').extract()
pdate = site.xpath('p/span[@class="date"]/text()').extract()
content = site.xpath('div[@class="nrP"]/text()').extract()
item['title'] = [t.encode('utf-8') for t in title]
url_p = str(url)[4:25]
url_home = "http://ent.qq.com/"
url_home += url_p
url=[]
url.append(url_home)
item['url'] = [u.encode('utf-8') for u in url]
item['src'] = [s.encode('utf-8') for s in src]
item['pdate'] = [p.encode('utf-8') for p in pdate]
item['image'] = [i.encode('utf-8') for i in image]
item['content'] = [c.encode('utf-8') for c in content]
items.append(item)
#return item
yield item
#url_next = "http://trace.qq.com/collect?pj=8888&url=http%3A//ent.qq.com/movie/news_index.shtml&w=1280&x=410&y=4675&v=1&u=951184213"
#Request(url_next)
#return items
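# To run the spider, use Scrapy's CLI from the project root:
#     scrapy crawl funs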
| huran2014/huran.github.io | program_learning/python/Scrapy/funs/funs/spiders/funs_spider.py | Python | gpl-2.0 | 1,760 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
import os
from unittest import TestCase
from mock import patch
from pants.base.exceptions import ErrorWhileTesting
from pants.task.task import TaskBase
from pants.task.testrunner_task_mixin import TestRunnerTaskMixin
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants.util.process_handler import ProcessHandler
from pants.util.timeout import TimeoutReached
from pants.util.xml_parser import XmlParser
from pants_test.tasks.task_test_base import TaskTestBase
class DummyTestTarget(object):
def __init__(self, name, timeout=None):
self.name = name
self.timeout = timeout
self.address = collections.namedtuple('address', ['spec'])(name)
targetA = DummyTestTarget('TargetA')
targetB = DummyTestTarget('TargetB', timeout=1)
targetC = DummyTestTarget('TargetC', timeout=10)
class TestRunnerTaskMixinTest(TaskTestBase):
@classmethod
def task_type(cls):
class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
call_list = []
def _execute(self, all_targets):
self.call_list.append(['_execute', all_targets])
self._spawn_and_wait()
def _spawn(self, *args, **kwargs):
self.call_list.append(['_spawn', args, kwargs])
class FakeProcessHandler(ProcessHandler):
def wait(_):
self.call_list.append(['process_handler.wait'])
return 0
def kill(_):
self.call_list.append(['process_handler.kill'])
def terminate(_):
self.call_list.append(['process_handler.terminate'])
def poll(_):
self.call_list.append(['process_handler.poll'])
return FakeProcessHandler()
def _get_targets(self):
return [targetA, targetB]
def _test_target_filter(self):
def target_filter(target):
self.call_list.append(['target_filter', target])
if target.name == 'TargetA':
return False
else:
return True
return target_filter
def _validate_target(self, target):
self.call_list.append(['_validate_target', target])
return TestRunnerTaskMixinTask
def test_execute_normal(self):
task = self.create_task(self.context())
task.execute()
# Confirm that everything ran as expected
self.assertIn(['target_filter', targetA], task.call_list)
self.assertIn(['target_filter', targetB], task.call_list)
self.assertIn(['_validate_target', targetB], task.call_list)
self.assertIn(['_execute', [targetA, targetB]], task.call_list)
def test_execute_skip(self):
# Set the skip option
self.set_options(skip=True)
task = self.create_task(self.context())
task.execute()
# Ensure nothing got called
self.assertListEqual(task.call_list, [])
def test_get_timeouts_no_default(self):
"""If there is no default and one of the targets has no timeout, then there is no timeout for the entire run."""
self.set_options(timeouts=True, timeout_default=None)
task = self.create_task(self.context())
self.assertIsNone(task._timeout_for_targets([targetA, targetB]))
def test_get_timeouts_disabled(self):
"""If timeouts are disabled, there is no timeout for the entire run."""
self.set_options(timeouts=False, timeout_default=2)
task = self.create_task(self.context())
self.assertIsNone(task._timeout_for_targets([targetA, targetB]))
def test_get_timeouts_with_default(self):
"""If there is a default timeout, use that for targets which have no timeout set."""
self.set_options(timeouts=True, timeout_default=2)
task = self.create_task(self.context())
self.assertEquals(task._timeout_for_targets([targetA, targetB]), 3)
def test_get_timeouts_with_maximum(self):
"""If a timeout exceeds the maximum, set it to that."""
self.set_options(timeouts=True, timeout_maximum=1)
task = self.create_task(self.context())
self.assertEquals(task._timeout_for_targets([targetC]), 1)
def test_default_maximum_conflict(self):
"""If the default exceeds the maximum, throw an error."""
self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)
task = self.create_task(self.context())
with self.assertRaises(ErrorWhileTesting):
task.execute()
class TestRunnerTaskMixinSimpleTimeoutTest(TaskTestBase):
@classmethod
def task_type(cls):
class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
call_list = []
def _execute(self, all_targets):
self.call_list.append(['_execute', all_targets])
self._spawn_and_wait()
def _spawn(self, *args, **kwargs):
self.call_list.append(['_spawn', args, kwargs])
class FakeProcessHandler(ProcessHandler):
def wait(_):
self.call_list.append(['process_handler.wait'])
return 0
def kill(_):
self.call_list.append(['process_handler.kill'])
def terminate(_):
self.call_list.append(['process_handler.terminate'])
def poll(_):
self.call_list.append(['process_handler.poll'])
return 0
return FakeProcessHandler()
def _get_targets(self):
return [targetB]
def _test_target_filter(self):
def target_filter(target):
return True
return target_filter
def _validate_target(self, target):
self.call_list.append(['_validate_target', target])
return TestRunnerTaskMixinTask
def test_timeout(self):
self.set_options(timeouts=True)
task = self.create_task(self.context())
with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:
mock_timeout().__exit__.side_effect = TimeoutReached(1)
with self.assertRaises(ErrorWhileTesting):
task.execute()
# Ensures that Timeout is instantiated with a 1 second timeout.
args, kwargs = mock_timeout.call_args
self.assertEqual(args, (1,))
def test_timeout_disabled(self):
self.set_options(timeouts=False)
task = self.create_task(self.context())
with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:
task.execute()
# Ensures that Timeout is instantiated with no timeout.
args, kwargs = mock_timeout.call_args
self.assertEqual(args, (None,))
class TestRunnerTaskMixinGracefulTimeoutTest(TaskTestBase):
def create_process_handler(self, return_none_first=True):
class FakeProcessHandler(ProcessHandler):
call_list = []
poll_called = False
def wait(self):
self.call_list.append(['process_handler.wait'])
return 0
def kill(self):
self.call_list.append(['process_handler.kill'])
def terminate(self):
self.call_list.append(['process_handler.terminate'])
def poll(self):
print("poll called")
self.call_list.append(['process_handler.poll'])
if not self.poll_called and return_none_first:
self.poll_called = True
return None
else:
return 0
return FakeProcessHandler()
def task_type(cls):
class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
call_list = []
def _execute(self, all_targets):
self.call_list.append(['_execute', all_targets])
self._spawn_and_wait()
def _spawn(self, *args, **kwargs):
self.call_list.append(['_spawn', args, kwargs])
return cls.process_handler
def _get_targets(self):
return [targetA, targetB]
def _test_target_filter(self):
def target_filter(target):
self.call_list.append(['target_filter', target])
if target.name == 'TargetA':
return False
else:
return True
return target_filter
def _validate_target(self, target):
self.call_list.append(['_validate_target', target])
return TestRunnerTaskMixinTask
def test_graceful_terminate_if_poll_is_none(self):
self.process_handler = self.create_process_handler(return_none_first=True)
self.set_options(timeouts=True)
task = self.create_task(self.context())
with patch('pants.task.testrunner_task_mixin.Timer') as mock_timer:
def set_handler(dummy, handler):
mock_timer_instance = mock_timer.return_value
mock_timer_instance.start.side_effect = handler
return mock_timer_instance
mock_timer.side_effect = set_handler
with self.assertRaises(ErrorWhileTesting):
task.execute()
    # Ensure that all the calls needed to kill the process gracefully are made.
self.assertEqual(self.process_handler.call_list,
[[u'process_handler.terminate'], [u'process_handler.poll'], [u'process_handler.kill'], [u'process_handler.wait']])
def test_graceful_terminate_if_poll_is_zero(self):
self.process_handler = self.create_process_handler(return_none_first=False)
self.set_options(timeouts=True)
task = self.create_task(self.context())
with patch('pants.task.testrunner_task_mixin.Timer') as mock_timer:
def set_handler(dummy, handler):
mock_timer_instance = mock_timer.return_value
mock_timer_instance.start.side_effect = handler
return mock_timer_instance
mock_timer.side_effect = set_handler
with self.assertRaises(ErrorWhileTesting):
task.execute()
# Ensure that we only call terminate, and not kill.
self.assertEqual(self.process_handler.call_list,
[[u'process_handler.terminate'], [u'process_handler.poll'], [u'process_handler.wait']])
class TestRunnerTaskMixinMultipleTargets(TaskTestBase):
@classmethod
def task_type(cls):
class TestRunnerTaskMixinMultipleTargetsTask(TestRunnerTaskMixin, TaskBase):
def _execute(self, all_targets):
self._spawn_and_wait()
def _spawn(self, *args, **kwargs):
class FakeProcessHandler(ProcessHandler):
def wait(self):
return 0
def kill(self):
pass
def terminate(self):
pass
def poll(self):
pass
return FakeProcessHandler()
def _test_target_filter(self):
return lambda target: True
def _validate_target(self, target):
pass
def _get_targets(self):
return [targetA, targetB]
def _get_test_targets_for_spawn(self):
return self.current_targets
return TestRunnerTaskMixinMultipleTargetsTask
def test_multiple_targets_single_target_timeout(self):
with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:
mock_timeout().__exit__.side_effect = TimeoutReached(1)
self.set_options(timeouts=True)
task = self.create_task(self.context())
task.current_targets = [targetA]
with self.assertRaises(ErrorWhileTesting) as cm:
task.execute()
self.assertEqual(len(cm.exception.failed_targets), 1)
self.assertEqual(cm.exception.failed_targets[0].address.spec, 'TargetA')
task.current_targets = [targetB]
with self.assertRaises(ErrorWhileTesting) as cm:
task.execute()
self.assertEqual(len(cm.exception.failed_targets), 1)
self.assertEqual(cm.exception.failed_targets[0].address.spec, 'TargetB')
class TestRunnerTaskMixinXmlParsing(TestRunnerTaskMixin, TestCase):
@staticmethod
def _raise_handler(e):
raise e
class CollectHandler(object):
def __init__(self):
self._errors = []
def __call__(self, e):
self._errors.append(e)
@property
def errors(self):
return self._errors
def test_parse_test_info_no_files(self):
with temporary_dir() as xml_dir:
test_info = self.parse_test_info(xml_dir, self._raise_handler)
self.assertEqual({}, test_info)
def test_parse_test_info_all_testcases(self):
with temporary_dir() as xml_dir:
with open(os.path.join(xml_dir, 'TEST-a.xml'), 'w') as fp:
fp.write("""
<testsuite failures="1" errors="1">
<testcase classname="org.pantsbuild.Green" name="testOK" time="1.290"/>
<testcase classname="org.pantsbuild.Failure" name="testFailure" time="0.27">
<failure/>
</testcase>
<testcase classname="org.pantsbuild.Error" name="testError" time="0.932">
<error/>
</testcase>
<testcase classname="org.pantsbuild.Skipped" name="testSkipped" time="0.1">
<skipped/>
</testcase>
</testsuite>
""")
tests_info = self.parse_test_info(xml_dir, self._raise_handler)
self.assertEqual(
{
'testOK': {
'result_code': 'success',
'time': 1.290
},
'testFailure': {
'result_code': 'failure',
'time': 0.27
},
'testError': {
'result_code': 'error',
'time': 0.932
},
'testSkipped': {
'result_code': 'skipped',
'time': 0.1
}
}, tests_info)
def test_parse_test_info_invalid_file_name(self):
with temporary_dir() as xml_dir:
with open(os.path.join(xml_dir, 'random.xml'), 'w') as fp:
fp.write('<invalid></xml>')
tests_info = self.parse_test_info(xml_dir, self._raise_handler)
self.assertEqual({}, tests_info)
def test_parse_test_info_invalid_dir(self):
with temporary_dir() as xml_dir:
with safe_open(os.path.join(xml_dir, 'subdir', 'TEST-c.xml'), 'w') as fp:
fp.write('<invalid></xml>')
tests_info = self.parse_test_info(xml_dir, self._raise_handler)
self.assertEqual({}, tests_info)
def test_parse_test_info_error_raise(self):
with temporary_dir() as xml_dir:
xml_file = os.path.join(xml_dir, 'TEST-bad.xml')
with open(xml_file, 'w') as fp:
fp.write('<invalid></xml>')
with self.assertRaises(Exception) as exc:
self.parse_test_info(xml_dir, self._raise_handler)
self.assertEqual(xml_file, exc.exception.xml_path)
self.assertIsInstance(exc.exception.cause, XmlParser.XmlError)
def test_parse_test_info_error_continue(self):
with temporary_dir() as xml_dir:
bad_file1 = os.path.join(xml_dir, 'TEST-bad1.xml')
with open(bad_file1, 'w') as fp:
fp.write("""
<testsuite failures="0" errors="1">
<testcase classname="org.pantsbuild.Error" name="testError" time="zero">
<error/>
</testcase>
</testsuite>
""")
with open(os.path.join(xml_dir, 'TEST-good.xml'), 'w') as fp:
fp.write("""
<testsuite failures="0" errors="1">
<testcase classname="org.pantsbuild.Error" name="testError" time="1.2">
<error/>
</testcase>
</testsuite>
""")
bad_file2 = os.path.join(xml_dir, 'TEST-bad2.xml')
with open(bad_file2, 'w') as fp:
fp.write('<invalid></xml>')
collect_handler = self.CollectHandler()
tests_info = self.parse_test_info(xml_dir, collect_handler)
self.assertEqual(2, len(collect_handler.errors))
self.assertEqual({bad_file1, bad_file2}, {e.xml_path for e in collect_handler.errors})
self.assertEqual(
{'testError':
{
'result_code': 'error',
'time': 1.2
}
}, tests_info)
def test_parse_test_info_extra_attributes(self):
with temporary_dir() as xml_dir:
with open(os.path.join(xml_dir, 'TEST-a.xml'), 'w') as fp:
fp.write("""
<testsuite errors="1">
<testcase classname="org.pantsbuild.Green" name="testOK" time="1.290" file="file.py"/>
<testcase classname="org.pantsbuild.Error" name="testError" time="0.27" file="file.py">
<error/>
</testcase>
</testsuite>
""")
tests_info = self.parse_test_info(xml_dir, self._raise_handler, ['file', 'classname'])
self.assertEqual(
{
'testOK': {
'file': 'file.py',
'classname': 'org.pantsbuild.Green',
'result_code': 'success',
'time': 1.290
},
'testError': {
'file': 'file.py',
'classname': 'org.pantsbuild.Error',
'result_code': 'error',
'time': 0.27
}
}, tests_info)
| pombredanne/pants | tests/python/pants_test/task/test_testrunner_task_mixin.py | Python | apache-2.0 | 16,660 |
from metakernel.tests.utils import get_kernel, get_log_text
def test_reload_magics_magic():
kernel = get_kernel()
kernel.do_execute("%reload_magics")
text = get_log_text(kernel)
for item in "%cd %connect_info %download %edit %help %html %install_magic %javascript %kernel %kx %latex %load %lsmagic %magic %parallel %plot %pmap %px %python %reload_magics %restart %run %shell %macro %%debug %%file %%help %%html %%javascript %%kx %%latex %%processing %%px %%python %%shell %%show %%macro %%time".split():
assert item in text, ("load_magic didn't list '%s'" % item)
| Calysto/metakernel | metakernel/magics/tests/test_reload_magics_magic.py | Python | bsd-3-clause | 592 |
#
# Cookie jar for sequence building
#
# Author: Gregory Fleischer (gfleischer@gmail.com)
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from core.network.InMemoryCookieJar import InMemoryCookieJar
from urllib import parse as urlparse
class SequenceBuilderCookieJar(InMemoryCookieJar):
def __init__(self, framework, parent = None):
InMemoryCookieJar.__init__(self, framework, parent)
self.framework = framework
self.cookie_tracking = False
self.cookie_items = {}
self.tracked_cookies = []
def start_tracking(self):
self.cookie_tracking = True
def stop_tracking(self):
self.cookie_tracking = False
def clear_cookies(self):
self.cookie_items.clear()
InMemoryCookieJar.clear_cookies(self)
def setCookiesFromUrl(self, cookieList, url):
if self.cookie_tracking:
for cookie in cookieList:
cookie_domain = str(cookie.domain())
if not cookie_domain:
cookie_domain = str(url.encodedHost())
if cookie_domain not in self.cookie_items:
self.cookie_items[cookie_domain] = {}
self.cookie_items[cookie_domain][str(cookie.name())] = str(cookie.value())
return InMemoryCookieJar.setCookiesFromUrl(self, cookieList, url)
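    # Lookup note: setCookiesFromUrl keys entries by the bare host when the
    # cookie carried no Domain attribute, so a dotted domain such as
    # ".example.com" must also be checked against "example.com" below.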
def is_cookie_tracked(self, cookie_domain, cookie_name):
if cookie_domain in self.cookie_items:
if cookie_name in self.cookie_items[cookie_domain]:
return True
elif cookie_domain.startswith('.'):
cookie_domain = cookie_domain[1:]
if cookie_domain in self.cookie_items and cookie_name in self.cookie_items[cookie_domain]:
return True
print(('OOPS', cookie_domain, list(self.cookie_items.keys())))
return False
| chp1084/raft | core/web/SequenceBuilderCookieJar.py | Python | gpl-3.0 | 2,503 |
from io import StringIO
import re
import types
import sys
from contexts.plugins import argv_forwarder
from contexts.plugins.reporting import cli
from contexts.plugins.reporting import teamcity
from contexts import setup, action, assertion
from .. import tools
class TeamCitySharedContext:
def shared_context(self):
self.stringio = StringIO()
self.reporter = teamcity.TeamCityReporter(self.stringio)
self.outputs = []
def parse_line(self, n):
if n < 0: # to hide the fact that the last line will be empty
n -= 1
return teamcity_parse(self.stringio.getvalue().split('\n')[n])
class WhenLocatingThePlugin:
def when_framework_asks_where_the_plugin_wants_to_be(self):
self.result = teamcity.TeamCityReporter.locate()
def it_should_not_override_the_argv_forwarder(self):
assert self.result[0] == argv_forwarder.ArgvForwarder
def it_should_override_the_command_line_reporter(self):
assert self.result[1] == cli.DotsReporter
###########################################################
# Suite tests
###########################################################
class WhenASuiteStartsInTeamCity(TeamCitySharedContext):
def context(self):
self.module = types.ModuleType('test_suite')
def because_the_suite_starts(self):
self.reporter.suite_started(self.module)
def it_should_tell_team_city_the_suite_started(self):
assert teamcity_parse(self.stringio.getvalue()) == ("testSuiteStarted", {'name': self.module.__name__})
class WhenASuiteEndsInTeamCity(TeamCitySharedContext):
def context(self):
self.module = types.ModuleType('mah_suite')
def because_the_suite_ends(self):
self.reporter.suite_ended(self.module)
def it_should_tell_team_city_the_suite_ended(self):
assert teamcity_parse(self.stringio.getvalue()) == ("testSuiteFinished", {'name': self.module.__name__})
###########################################################
# Suite tests
###########################################################
class WhenATestClassStartsInTeamCity(TeamCitySharedContext):
def context(self):
self.class_name = 'abc'
def because_the_suite_starts(self):
self.reporter.test_class_started(type(self.class_name, (), {}))
def it_should_tell_team_city_the_class_started(self):
assert teamcity_parse(self.stringio.getvalue()) == ("testClassStarted", {'name': self.class_name})
class WhenATestClassEndsInTeamCity(TeamCitySharedContext):
def context(self):
self.class_name = 'abc'
def because_the_suite_ends(self):
self.reporter.test_class_ended(type(self.class_name, (), {}))
def it_should_tell_team_city_the_class_ended(self):
assert teamcity_parse(self.stringio.getvalue()) == ("testClassFinished", {'name': self.class_name})
class WhenATestClassErrorsInTeamCity(TeamCitySharedContext):
def context(self):
self.class_name = 'abc'
tb = [('made_up_file.py', 3, 'made_up_function', 'frame1'),
('another_made_up_file.py', 2, 'another_made_up_function', 'frame2')]
self.exception = tools.build_fake_exception(tb, "Gotcha")
self.formatted_tb = (
'Traceback (most recent call last):|n'
' File "made_up_file.py", line 3, in made_up_function|n'
' frame1|n'
' File "another_made_up_file.py", line 2, in another_made_up_function|n'
' frame2|n'
'plugin_tests.tools.FakeException: Gotcha')
def because_the_suite_ends(self):
self.reporter.test_class_errored(type(self.class_name, (), {}), self.exception)
def it_should_tell_team_city_a_test_started(self):
assert self.parse_line(0) == ("testStarted", {'name': self.class_name})
def it_should_tell_team_city_the_test_failed(self):
assert self.parse_line(1) == (
"testFailed",
{
'name': self.class_name,
'message': 'plugin_tests.tools.FakeException: Gotcha',
'details': self.formatted_tb
})
def it_should_tell_team_city_the_test_finished(self):
assert self.parse_line(2) == ("testFinished", {'name': self.class_name})
def it_should_tell_team_city_the_class_finished(self):
assert self.parse_line(3) == ("testClassFinished", {'name': self.class_name})
###########################################################
# assertion_started tests
###########################################################
class WhenAnAssertionStartsInTeamCity(TeamCitySharedContext):
def establish_that_a_context_is_running(self):
context = tools.create_context('MyNiceContext')
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_starts(self):
self.reporter.assertion_started(self.assertion)
def it_should_tell_team_city_it_started(self):
assert self.parse_line(0) == ("testStarted", {'name': 'My nice context -> a lovely assertion'})
class WhenAnAssertionInAContextWithExamplesStartsInTeamCity(TeamCitySharedContext):
@setup
def establish_that_a_context_with_an_example_is_running(self):
context = tools.create_context('ContextWithExamples', 12.3)
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_starts(self):
self.reporter.assertion_started(self.assertion)
@assertion
def it_should_report_the_example(self):
assert self.parse_line(0)[1]['name'] == 'Context with examples -> 12.3 -> a lovely assertion'
###########################################################
# assertion_passed tests
###########################################################
class WhenAnAssertionPassesInTeamCity(TeamCitySharedContext):
def establish_that_a_context_is_running(self):
context = tools.create_context('MyNiceContext')
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_ends(self):
self.reporter.assertion_passed(self.assertion)
def it_should_tell_team_city_it_passed(self):
assert self.parse_line(0) == ("testFinished", {'name': 'My nice context -> a lovely assertion'})
class WhenAnAssertionInAContextWithExamplesPassesInTeamCity(TeamCitySharedContext):
@setup
def establish_that_a_context_with_an_example_is_running(self):
context = tools.create_context('ContextWithExamples', 12.3)
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_passes(self):
self.reporter.assertion_passed(self.assertion)
@assertion
def it_should_report_the_example(self):
assert self.parse_line(0)[1]['name'] == 'Context with examples -> 12.3 -> a lovely assertion'
class WhenSomethingGetsPrintedDuringAPassingAssertionInTeamCity(TeamCitySharedContext):
def establish_that_something_has_been_printed(self):
self.real_stdout, self.real_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.fake_stdout, self.fake_stderr = StringIO(), StringIO()
context = tools.create_context('Context')
self.reporter.context_started(context.cls, context.example)
print("to stdout")
print("to stderr", file=sys.stderr)
self.assertion = lambda: None
self.assertion.__name__ = "assertion"
def because_the_assertion_passes(self):
self.reporter.assertion_passed(self.assertion)
def it_should_not_print_anything_to_stdout(self):
assert self.fake_stdout.getvalue() == ''
def it_should_not_print_anything_to_stderr(self):
assert self.fake_stderr.getvalue() == ''
def it_should_report_what_went_to_stdout(self):
assert self.parse_line(0) == ("testStdOut", {'name': 'Context -> assertion', 'out': 'to stdout|n'})
def it_should_report_what_went_to_stderr(self):
assert self.parse_line(1) == ("testStdErr", {'name': 'Context -> assertion', 'out': 'to stderr|n'})
def cleanup_stdout_and_stderr(self):
sys.stdout, sys.stderr = self.real_stdout, self.real_stderr
###########################################################
# assertion_failed tests
###########################################################
class WhenAnAssertionFailsInTeamCity(TeamCitySharedContext):
def establish_that_a_context_is_running(self):
context = tools.create_context('FakeContext')
tb = [('made_up_file.py', 3, 'made_up_function', 'frame1'),
('another_made_up_file.py', 2, 'another_made_up_function', 'frame2')]
self.exception = tools.build_fake_assertion_error(tb, "Gotcha")
self.formatted_tb = (
'Traceback (most recent call last):|n'
' File "made_up_file.py", line 3, in made_up_function|n'
' frame1|n'
' File "another_made_up_file.py", line 2, in another_made_up_function|n'
' frame2|n'
'plugin_tests.tools.FakeAssertionError: Gotcha')
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'Fake_assertion'
def because_the_assertion_fails(self):
self.reporter.assertion_failed(self.assertion, self.exception)
def it_should_tell_team_city_it_failed(self):
assert self.parse_line(0) == (
"testFailed",
{
'name': 'Fake context -> Fake assertion',
'message': 'plugin_tests.tools.FakeAssertionError: Gotcha',
'details': self.formatted_tb
})
def it_should_tell_team_city_it_finished(self):
assert self.parse_line(1) == ("testFinished", {'name': 'Fake context -> Fake assertion'})
class WhenAnAssertionInAContextWithExamplesFailsInTeamCity(TeamCitySharedContext):
@setup
def establish_that_a_context_with_an_example_is_running(self):
ctx = tools.create_context('ContextWithExamples', 12.3)
self.reporter.context_started(ctx.cls, ctx.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_fails(self):
self.reporter.assertion_failed(self.assertion, Exception())
@assertion
def it_should_report_the_example(self):
assert self.parse_line(0)[1]['name'] == 'Context with examples -> 12.3 -> a lovely assertion'
class WhenSomethingGetsPrintedDuringAFailingAssertionInTeamCity(TeamCitySharedContext):
def establish_that_something_has_been_printed(self):
self.real_stdout, self.real_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.fake_stdout, self.fake_stderr = StringIO(), StringIO()
context = tools.create_context()
self.reporter.context_started(context.cls, context.example)
print("to stdout")
print("to stderr", file=sys.stderr)
self.assertion = lambda: None
self.assertion.__name__ = "assertion"
def because_the_assertion_fails(self):
self.reporter.assertion_failed(self.assertion, Exception())
def it_should_not_print_anything_to_stdout(self):
assert self.fake_stdout.getvalue() == ''
def it_should_not_print_anything_to_stderr(self):
assert self.fake_stderr.getvalue() == ''
def it_should_report_what_went_to_stdout(self):
assert self.parse_line(0) == ("testStdOut", {'name': 'context -> assertion', 'out': 'to stdout|n'})
def it_should_report_what_went_to_stderr(self):
assert self.parse_line(1) == ("testStdErr", {'name': 'context -> assertion', 'out': 'to stderr|n'})
def cleanup_stdout_and_stderr(self):
sys.stdout, sys.stderr = self.real_stdout, self.real_stderr
###########################################################
# assertion_errored tests
###########################################################
class WhenAnAssertionErrorsInTeamCity(TeamCitySharedContext):
def establish_that_a_context_is_running(self):
context = tools.create_context('FakeContext')
tb = [('made_up_file.py', 3, 'made_up_function', 'frame1'),
('another_made_up_file.py', 2, 'another_made_up_function', 'frame2')]
self.exception = tools.build_fake_assertion_error(tb, "Gotcha")
self.formatted_tb = (
'Traceback (most recent call last):|n'
' File "made_up_file.py", line 3, in made_up_function|n'
' frame1|n'
' File "another_made_up_file.py", line 2, in another_made_up_function|n'
' frame2|n'
'plugin_tests.tools.FakeAssertionError: Gotcha')
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'Fake_assertion'
def because_the_assertion_errors(self):
self.reporter.assertion_errored(self.assertion, self.exception)
def it_should_tell_team_city_it_failed(self):
assert self.parse_line(0) == (
"testFailed",
{
'name': 'Fake context -> Fake assertion',
'message': 'plugin_tests.tools.FakeAssertionError: Gotcha',
'details': self.formatted_tb
})
def it_should_tell_team_city_it_finished(self):
assert self.parse_line(1) == ("testFinished", {'name': 'Fake context -> Fake assertion'})
class WhenAnAssertionInAContextWithExamplesErrorsInTeamCity(TeamCitySharedContext):
@setup
def establish_that_a_context_with_an_example_is_running(self):
context = tools.create_context('ContextWithExamples', 12.3)
self.reporter.context_started(context.cls, context.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def because_the_assertion_errors(self):
self.reporter.assertion_errored(self.assertion, Exception())
@assertion
def it_should_report_the_example(self):
assert self.parse_line(0)[1]['name'] == 'Context with examples -> 12.3 -> a lovely assertion'
class WhenSomethingGetsPrintedDuringAnErroringAssertionInTeamCity(TeamCitySharedContext):
def establish_that_something_has_been_printed(self):
self.real_stdout, self.real_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.fake_stdout, self.fake_stderr = StringIO(), StringIO()
self.context = tools.create_context("FakeContext")
self.reporter.context_started(self.context.cls, self.context.example)
print("to stdout")
print("to stderr", file=sys.stderr)
self.assertion = lambda: None
self.assertion.__name__ = "FakeAssertion4"
def because_we_run_an_assertion(self):
self.reporter.assertion_errored(self.assertion, Exception())
def it_should_not_print_anything_to_the_real_stdout(self):
assert self.fake_stdout.getvalue() == ''
def it_should_not_print_anything_to_the_real_stderr(self):
assert self.fake_stdout.getvalue() == ''
def it_should_tell_team_city_what_went_to_stdout(self):
assert self.parse_line(0) == ("testStdOut", {'name': 'Fake context -> Fake assertion 4', 'out': 'to stdout|n'})
def it_should_tell_team_city_what_went_to_stderr(self):
assert self.parse_line(1) == ("testStdErr", {'name': 'Fake context -> Fake assertion 4', 'out': 'to stderr|n'})
def cleanup_stdout_and_stderr(self):
sys.stdout, sys.stderr = self.real_stdout, self.real_stderr
###########################################################
# context_errored tests
###########################################################
class WhenAContextErrorsInTeamCity(TeamCitySharedContext):
def establish_that_a_context_is_running(self):
self.context = tools.create_context('FakeContext')
tb = [('made_up_file.py', 3, 'made_up_function', 'frame1'),
('another_made_up_file.py', 2, 'another_made_up_function', 'frame2')]
self.exception = tools.build_fake_exception(tb, "Gotcha")
self.formatted_tb = (
'Traceback (most recent call last):|n'
' File "made_up_file.py", line 3, in made_up_function|n'
' frame1|n'
' File "another_made_up_file.py", line 2, in another_made_up_function|n'
' frame2|n'
'plugin_tests.tools.FakeException: Gotcha')
self.reporter.context_started(self.context.cls, self.context.example)
def because_we_run_an_assertion(self):
self.reporter.context_errored(self.context.cls, self.context.example, self.exception)
def it_should_tell_team_city_a_test_started(self):
assert self.parse_line(0) == ("testStarted", {'name': 'Fake context'})
def it_should_tell_team_city_the_test_failed(self):
assert self.parse_line(1) == (
"testFailed",
{
'name': 'Fake context',
'message': 'plugin_tests.tools.FakeException: Gotcha',
'details': self.formatted_tb
})
def it_should_tell_team_city_the_test_finished(self):
assert self.parse_line(2) == ("testFinished", {'name': 'Fake context'})
class WhenAContextWithExamplesErrorsInTeamCity(TeamCitySharedContext):
@setup
def establish_that_a_context_with_an_example_is_running(self):
self.context = tools.create_context('ContextWithExamples', 12.3)
self.reporter.context_started(self.context.cls, self.context.example)
@action
def because_the_context_errors(self):
self.reporter.context_errored(self.context.cls, self.context.example, Exception())
@assertion
def it_should_report_the_example(self):
assert self.parse_line(0)[1]['name'] == 'Context with examples -> 12.3'
class WhenSomethingGetsPrintedDuringAnErroringContextInTeamCity(TeamCitySharedContext):
def establish_that_something_has_been_printed(self):
self.real_stdout, self.real_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.fake_stdout, self.fake_stderr = StringIO(), StringIO()
self.context = tools.create_context("FakeContext")
self.reporter.context_started(self.context.cls, self.context.example)
print("to stdout")
print("to stderr", file=sys.stderr)
def because_we_run_an_assertion(self):
self.reporter.context_errored(self.context.cls, self.context.example, Exception())
def it_should_not_print_anything_to_the_real_stdout(self):
assert self.fake_stdout.getvalue() == ''
def it_should_not_print_anything_to_the_real_stderr(self):
assert self.fake_stdout.getvalue() == ''
def it_should_tell_team_city_what_went_to_stdout(self):
assert self.parse_line(1) == ("testStdOut", {'name': 'Fake context', 'out': 'to stdout|n'})
def it_should_tell_team_city_what_went_to_stderr(self):
assert self.parse_line(2) == ("testStdErr", {'name': 'Fake context', 'out': 'to stderr|n'})
def cleanup_stdout_and_stderr(self):
sys.stdout, sys.stderr = self.real_stdout, self.real_stderr
###########################################################
# unexpected_error tests
###########################################################
class WhenAnUnexpectedErrorOccursInTeamCity(TeamCitySharedContext):
def establish_the_exception(self):
tb = [('made_up_file_7.py', 1, 'made_up_function_7', 'frame7'),
('made_up_file_8.py', 2, 'made_up_function_8', 'frame8')]
self.exception = tools.build_fake_exception(tb, "another exception")
self.formatted_tb = (
'Traceback (most recent call last):|n'
' File "made_up_file_7.py", line 1, in made_up_function_7|n'
' frame7|n'
' File "made_up_file_8.py", line 2, in made_up_function_8|n'
' frame8|n'
'plugin_tests.tools.FakeException: another exception')
def because_an_unexpected_error_occurs(self):
self.reporter.unexpected_error(self.exception)
def it_should_tell_team_city_a_test_started(self):
assert self.parse_line(0) == ("testStarted", {'name': 'Test error'})
def it_should_tell_team_city_the_test_failed(self):
assert self.parse_line(1) == (
"testFailed",
{
'name': 'Test error',
'message': 'plugin_tests.tools.FakeException: another exception',
'details': self.formatted_tb
})
def it_should_tell_team_city_the_test_finished(self):
assert self.parse_line(2) == ("testFinished", {'name': 'Test error'})
###########################################################
# tests for sequential calls
###########################################################
# these are really testing context_ended and context_errored,
# but sadly we can only observe it through assertion_started
class WhenASecondContextRuns(TeamCitySharedContext):
def establish_that_a_context_has_run_and_ended(self):
context1 = tools.create_context('the_first_context')
self.reporter.context_started(context1.cls, context1.example)
self.reporter.context_ended(context1.cls, context1.example)
self.context2 = tools.create_context('the_second_context')
self.reporter.context_started(self.context2.cls, self.context2.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def when_something_gets_sent_to_team_city(self):
self.reporter.assertion_started(self.assertion)
@assertion
def it_should_report_the_name_of_the_current_context(self):
assert self.parse_line(0)[1]['name'] == 'the second context -> a lovely assertion'
class WhenASecondContextRunsAfterAnError(TeamCitySharedContext):
def establish_that_a_context_has_run_and_errored(self):
context1 = tools.create_context('the_first_context')
self.reporter.context_started(context1.cls, context1.example)
self.reporter.context_errored(context1.cls, context1.example, Exception())
context2 = tools.create_context('the_second_context')
self.reporter.context_started(context2.cls, context2.example)
self.assertion = lambda: None
self.assertion.__name__ = 'aLovelyAssertion'
def when_something_gets_sent_to_team_city(self):
self.reporter.assertion_started(self.assertion)
@assertion
def it_should_report_the_name_of_the_current_context(self):
assert self.parse_line(-1)[1]['name'] == 'the second context -> a lovely assertion'
class WhenParsingATeamCityMessage:
# tests for the test helper method
@classmethod
def examples(self):
yield "##teamcity[hello]", ('hello', {})
yield "##teamcity[hello2 ]", ('hello2', {})
yield "##teamcity[msgName one='value one' two='value two']", ('msgName', {'one': 'value one', 'two': 'value two'})
yield "##teamcity[escaped1 name='|'']", ('escaped1', {'name': "|'"})
yield "##teamcity[escaped2 name='|]']", ('escaped2', {'name': "|]"})
def context(self, example):
self.msg, (self.expected_name, self.expected_values) = example
def because_we_parse_a_message(self):
self.name, self.values = teamcity_parse(self.msg)
def it_should_return_the_correct_name(self):
assert self.name == self.expected_name
def it_should_return_the_correct_values(self):
assert self.values == self.expected_values
def teamcity_parse(string):
outer_match = re.match(r"##teamcity\[(\S+)( .*)?(?<!\|)\]", string)
assignments_string = outer_match.group(2)
if assignments_string is not None and assignments_string.strip():
assignment_matches = re.findall(r"(\w+)='(.*?)(?<!\|)'", assignments_string)
assignments = dict(assignment_matches)
else:
assignments = {}
return (outer_match.group(1), assignments)
| benjamin-hodgson/Contexts | test/plugin_tests/reporting_tests/teamcity_tests.py | Python | mit | 24,240 |
from django.core.management.base import NoArgsCommand
from notifier.models import Notifier
from sms.views import sendsms # OH NO COUPLING
class Command(NoArgsCommand):
""" Finds all notifications that can be sent out, updates them,
and sends them out.
This will only send notifications that haven't been triggered recently.
See the Notifier manager for more details. """
def build_message_dict(self, notifier):
""" Given a Notifier model, returns a dict with keys of the phone numbers
of the related NotifierNumbers, and values of the notification.
If no such numbers exist, we are supposed to send it to ourselves, so
we just return a string for the sms app. """
numbers = notifier.notifiernumber_set.all()
if numbers:
return {n.phone_number: notifier.notify_text for n in numbers}
else:
return notifier.notify_text
def handle_noargs(self, **options):
for notifier in Notifier.objects.due_notifications():
notifier.notifies_left -= 1
notifier.save()
sendsms(self.build_message_dict(notifier))
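
# A worked example of build_message_dict's output shape (numbers and text are
# illustrative only, not from a real database):
#
#     {'+15551230001': 'Please check in.',
#      '+15551230002': 'Please check in.'}
#
# With no associated NotifierNumbers it returns the bare notify_text string,
# which the sms app treats as "send this to ourselves".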
| hwayne/safehouse | notifier/management/commands/notify.py | Python | apache-2.0 | 1,149 |
import coloredlogs
import logging
import sys
coloredlogs.install(fmt='%(message)s')
def create_logger():
#logging.basicConfig(format='%(levelname)s - %(message)s')
logging.basicConfig(format='%(message)s')
root = logging.getLogger()
root.setLevel(logging.getLevelName('INFO'))
#Add handler for standard output (console) any debug+
#ch = logging.StreamHandler(sys.stdout)
#ch.setLevel(logging.getLevelName('DEBUG'))
#formatter = logging.Formatter('%(message)s')
#ch.setFormatter(formatter)
#handler = ColorStreamHandler()
#handler.setLevel(logging.getLevelName("DEBUG"))
#root.addHandler(handler)
return root
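
# A minimal usage sketch (assumes callers import this module and then use the
# standard logging API on the returned root logger):
#
#     from postreview.logger import create_logger
#     log = create_logger()
#     log.info('review posted')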
| ericforbes/post-review | postreview/logger.py | Python | mit | 669 |
import yaml
with open('config.yaml', 'r') as f:
loaded = yaml.load(f.read())
locals().update(loaded)
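
# Every top-level key of config.yaml becomes a module-level name here; this
# works because locals() is globals() at module scope. A hypothetical
# config.yaml (keys below are illustrative only):
#
#     token: 'abc123'
#     prefix: '!'
#
# would make `config.token` and `config.prefix` importable elsewhere. Note
# that yaml.load without an explicit Loader is unsafe on untrusted input;
# yaml.safe_load is the usual alternative.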
| b1naryth1ef/rowboat | rowboat/config.py | Python | mit | 110 |
tickets = []
# allocate new tickets
def make_tickets():
for i in range(10):
tickets.append("ticket_" + str(i))
# log all tickets currently in the system to terminal
def show_all_tickets():
for ticket in tickets:
print ticket
if '__main__' == __name__:
make_tickets()
show_all_tickets() | kouritron/uvs | examples/break_3way_merge/tickets_ca.py | Python | bsd-3-clause | 328 |
# -*- coding: utf-8 -*-
import time
from modules.core.props import Property, StepProperty
from modules.core.step import StepBase
from modules import cbpi
@cbpi.step
class HERMSStep(StepBase):
'''
    HERMS mash step: heats the HLT above the mash target and rests at the target temperature for the configured time.
'''
# Properties
target_temp = Property.Number("Target Temp", configurable=True, description="Target Temperature of Mash Step")
mash_tun = StepProperty.Kettle("Mash Tun", description="Kettle in which the mashing takes place")
hlt = StepProperty.Kettle("HLT", description="Kettle used for heat transfer, hot liquor tank")
timer = Property.Number("Rest (m)", configurable=True, description="Minutes of rest at target temp")
hlt_offset = Property.Number("HLT Offset", configurable=True, default_value=8, description="Temp relative to mash target to maintain in HLT while rising")
def init(self):
'''
Initialize Step. This method is called once at the beginning of the step
:return:
'''
# set target temp
self.set_target_temp(self.target_temp, self.mash_tun)
self.set_target_temp(int(self.target_temp) + int(self.hlt_offset), self.hlt)
@cbpi.action("Start Timer Now")
def start(self):
'''
        Custom action which can be executed from the brewing dashboard.
        All methods with the decorator @cbpi.action("YOUR CUSTOM NAME") will be available in the user interface.
:return:
'''
if self.is_timer_finished() is None:
self.start_timer(int(self.timer) * 60)
def reset(self):
self.stop_timer()
self.set_target_temp(self.target_temp, self.mash_tun)
self.set_target_temp(int(self.target_temp) + int(self.hlt_offset), self.hlt)
def finish(self):
self.set_target_temp(0, self.mash_tun)
self.set_target_temp(0, self.hlt)
def execute(self):
'''
        This method is executed at a regular interval
:return:
'''
# Check if Target Temp is reached
if self.get_kettle_temp(self.mash_tun) >= int(self.target_temp):
if self.is_timer_finished() is None:
self.start_timer(int(self.timer) * 60)
# Check if timer finished and go to next step
if self.is_timer_finished() == True:
self.notify("Mash Step Completed!", "Starting the next step", timeout=None)
self.next()
| tlareywi/HERMSStep | __init__.py | Python | mit | 2,411 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2011 Lucas D'Avila - email lucassdvl@gmail.com / twitter @lucadavila
This file is part of web2py-cms.
web2py-cms is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License (LGPL v3) as published by
the Free Software Foundation, on version 3 of the License.
web2py-cms is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with web2py-cms. If not, see <http://www.gnu.org/licenses/>.
"""
#crud table post
crud_post = Crud(globals(),db)
crud_post.settings.auth = None
crud_post.settings.create_next = URL(f='index')
crud_post.settings.update_next = URL(f='post', args=request.args[1:])
crud_post.settings.delete_next = URL(f='index')
#restrictions
t_post.post.requires = IS_NOT_EMPTY()
t_post.posted_by.requires = IS_IN_DB(db, t_user.id, '%(first_name)s')
t_post.datetime.requires = IS_DATETIME()
t_post.posted_by.writable = False
t_post.permalink.writable = False
t_post.permalink.readable = False
def set_permalink(post_id):
"""Retorna o perma link para os posts"""
import string
safe = string.ascii_letters + string.digits + '_-'
post = t_post[post_id]
title = ''.join([char if char in safe else '' for char in post.title]).lower() if post.title else post.id
if post: post.update_record(permalink='%s/%s/%s/%s'%(post.datetime.year, post.datetime.month, post.datetime.day, title))
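
# Example: a post titled "Ola, web2py!" dated 2011-05-09 yields the permalink
# "2011/5/9/olaweb2py" (characters outside [a-zA-Z0-9_-] are dropped, then
# lowercased; an empty title falls back to the post id).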
def get_permalink(args):
if len(args) < 4: redirect(URL_INDEX_PAGE)
return '%s/%s/%s/%s'%(args[0],args[1],args[2],args[3],)
def get_post(args):
return t_post._db(t_post.permalink==get_permalink(args=args)).select().first()
| gilsondev/web2py-cms | models/t_post.py | Python | lgpl-3.0 | 1,900 |
#!/usr/bin/env python
#
# cloudlet infrastructure for mobile computing
#
# author: kiryong ha <krha@cmu.edu>
#
# copyright (c) 2011-2013 carnegie mellon university
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
#
import socket
import os
import time
import sys
import struct
import threading
import multiprocessing
import msgpack
import ctypes
#if os.path.exists("../provisioning"):
# sys.path.insert(0, "../../")
#try:
# from elijah.provisioning.server import NetworkUtil
# from elijah.provisioning.configuration import Const
# from elijah.provisioning.package import VMOverlayPackage
# from elijah.provisioning.synthesis_protocol import Protocol
#except ImportError as e:
# sys.stderr.write("%s\n" % str(e))
# sys.exit(1)
from server import NetworkUtil
from configuration import Const
from configuration import VMOverlayCreationMode
from synthesis_protocol import Protocol
import process_manager
import log as logging
LOG = logging.getLogger(__name__)
ACK_DATA_SIZE = 100*1024
class StreamSynthesisClientError(Exception):
pass
class NetworkMeasurementThread(threading.Thread):
def __init__(self, sock, blob_sent_time_dict, monitor_network_bw, vm_resume_time_at_dest):
self.sock = sock
self.blob_sent_time_dict = blob_sent_time_dict
# shared memory
self.monitor_network_bw = monitor_network_bw
self.vm_resume_time_at_dest = vm_resume_time_at_dest
threading.Thread.__init__(self, target=self.receiving)
@staticmethod
def time_average(measure_history, start_time, cur_time):
sum_value = float(0)
counter = 0
for (measured_time, value) in reversed(measure_history):
# average only last 2s from current and after 3 seconds since start
#if (cur_time - measured_time) > 2 or (measured_time - start_time) < 3:
if (measured_time - start_time) < 3:
break
if counter > 5:
break
sum_value += value
counter += 1
if len(measure_history) > 2:
#LOG.debug("bandwidth\t%f\t%f\t%0.4f --> %0.4f\t%d/%d" % (cur_time,
# start_time,
# measure_history[-2][-1],
# measure_history[-1][-1],
# counter,
# len(measure_history)))
pass
if counter == 0:
return measure_history[-1][1]
return sum_value/counter
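
    # Worked example for time_average (illustrative numbers): with
    # start_time=0.0 and measure_history=[(1.0, 10), (4.0, 20), (5.0, 30)],
    # a call at cur_time=5.0 walks the history backwards, keeps the samples
    # taken at least 3 s after the start ((5.0, 30) then (4.0, 20)), stops
    # at (1.0, 10), and returns their mean: 25.0.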
    def receiving(self):
        # Ack protocol from the destination: 0x01 marks the start of a blob's
        # acks, 0x02 the end of that blob, and 0x10 is followed by an 8-byte
        # double holding the VM resume time; any other 8-byte ack carries a
        # received byte count used for the bandwidth estimate.
        ack_size = 8
        time_start = 0
        measured_bw_list = list()
while True:
ack_data = self.sock.recv(ack_size)
ack = struct.unpack("!Q", ack_data)[0]
time_recv_prev = time.time()
if (ack == 0x01):
# start receiving acks of new blob
measure_bw_blob = list()
while True:
ack_data = self.sock.recv(ack_size)
ack_recved_data = struct.unpack("!Q", ack_data)[0]
if ack_recved_data == 0x02:
break
if time_start == 0:
time_start = time.time()
time_recv_cur = time.time()
receive_duration = time_recv_cur - time_recv_prev
bw_mbps = 8*ack_recved_data/receive_duration/1024.0/1024
#print "ack: %f, %f, %f, %ld, %f mbps" % (
# time_recv_cur,
# time_recv_prev,
# receive_duration,
# ack_recved_data,
# bw_mbps)
time_recv_prev = time_recv_cur
# filter out error bw
if len(measured_bw_list) > 0:
prev_bw = measured_bw_list[-1][-1]
#LOG.debug("bandwidth\t%f\tvalue change: %f --> %f" % (time_recv_cur, prev_bw, bw_mbps))
# assume error for one-order of magnitude changes
if bw_mbps > prev_bw*10 or bw_mbps < prev_bw*0.1 or\
bw_mbps > 1024:
#LOG.debug("bandwidth\t%f\tfiltered value: %f --> %f" % (time_recv_cur, prev_bw, bw_mbps))
bw_mbps = prev_bw
measure_bw_blob.append(bw_mbps)
if len(measure_bw_blob) > 0:
median_bw = measure_bw_blob[len(measure_bw_blob)/2]
measured_bw_list.append((time_recv_cur, median_bw))
self.monitor_network_bw.value = self.time_average(measured_bw_list,
time_start,
time_recv_cur)
elif (ack == 0x10):
data = self.sock.recv(8)
vm_resume_time = struct.unpack("!d", data)[0]
self.vm_resume_time_at_dest.value = float(vm_resume_time)
print "migration resume time: %f" % (vm_resume_time)
break
else:
pass
class StreamSynthesisClient(process_manager.ProcWorker):
def __init__(self, remote_addr, remote_port, metadata, compdata_queue, process_controller):
self.remote_addr = remote_addr
self.remote_port = remote_port
self.metadata = metadata
self.compdata_queue = compdata_queue
self.process_controller = process_controller
# measurement
self.monitor_network_bw = multiprocessing.RawValue(ctypes.c_double, 0)
self.monitor_network_bw.value = 0.0
self.vm_resume_time_at_dest = multiprocessing.RawValue(ctypes.c_double, 0)
self.time_finish_transmission = multiprocessing.RawValue(ctypes.c_double, 0)
self.is_first_recv = False
self.time_first_recv = 0
super(StreamSynthesisClient, self).__init__(target=self.transfer)
def transfer(self):
# connect
address = (self.remote_addr, self.remote_port)
sock = None
for index in range(5):
LOG.info("Connecting to (%s).." % str(address))
try:
sock = socket.create_connection(address, 10)
break
except Exception as e:
time.sleep(1)
pass
if sock == None:
msg = "failed to connect to %s" % str(address)
raise StreamSynthesisClientError(msg)
sock.setblocking(True)
self.blob_sent_time_dict = dict()
self.receive_thread = NetworkMeasurementThread(sock,
self.blob_sent_time_dict,
self.monitor_network_bw,
self.vm_resume_time_at_dest)
self.receive_thread.start()
# send header
header_dict = {
Protocol.KEY_SYNTHESIS_OPTION: None,
}
header_dict.update(self.metadata)
header = NetworkUtil.encoding(header_dict)
sock.sendall(struct.pack("!I", len(header)))
sock.sendall(header)
# stream blob
blob_counter = 0
while True:
comp_task = self.compdata_queue.get()
if self.is_first_recv == False:
self.is_first_recv = True
self.time_first_recv = time.time()
LOG.debug("[time] Transfer first input at : %f" % (self.time_first_recv))
transfer_size = 0
if comp_task == Const.QUEUE_SUCCESS_MESSAGE:
break
if comp_task == Const.QUEUE_FAILED_MESSAGE:
sys.stderr.write("Failed to get compressed data\n")
break
(blob_comp_type, compdata, disk_chunks, memory_chunks) = comp_task
blob_header_dict = {
Const.META_OVERLAY_FILE_COMPRESSION: blob_comp_type,
Const.META_OVERLAY_FILE_SIZE:len(compdata),
Const.META_OVERLAY_FILE_DISK_CHUNKS: disk_chunks,
Const.META_OVERLAY_FILE_MEMORY_CHUNKS: memory_chunks
}
# send
header = NetworkUtil.encoding(blob_header_dict)
sock.sendall(struct.pack("!I", len(header)))
sock.sendall(header)
self.blob_sent_time_dict[blob_counter] = (time.time(), len(compdata))
sock.sendall(compdata)
transfer_size += (4+len(header)+len(compdata))
blob_counter += 1
#send the current iteration number for use at the destination
sock.sendall(struct.pack("!I", self.process_controller.get_migration_iteration_count()))
# end message
end_header = {
"blob_type": "blob",
Const.META_OVERLAY_FILE_SIZE:0
}
header = NetworkUtil.encoding(end_header)
sock.sendall(struct.pack("!I", len(header)))
sock.sendall(header)
self.is_processing_alive.value = False
self.time_finish_transmission.value = time.time()
sys.stdout.write("Finish transmission. Waiting for finishing migration\n")
self.receive_thread.join()
sock.close()
| cmusatyalab/elijah-provisioning | elijah/provisioning/stream_client.py | Python | apache-2.0 | 10,061 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file system implementation using pybde."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import bde_file_system
from tests import test_lib as shared_test_lib
class BDEFileSystemTest(shared_test_lib.BaseTestCase):
"""Tests the BDE file system."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['bdetogo.raw'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._bde_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_BDE, parent=test_os_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenAndClose(self):
"""Test the open and close functionality."""
file_system = bde_file_system.BDEFileSystem(
self._resolver_context, self._bde_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
def testFileEntryExistsByPathSpec(self):
"""Test the file entry exists by path specification functionality."""
file_system = bde_file_system.BDEFileSystem(
self._resolver_context, self._bde_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
result = file_system.FileEntryExistsByPathSpec(self._bde_path_spec)
self.assertTrue(result)
def testGetFileEntryByPathSpec(self):
"""Tests the GetFileEntryByPathSpec function."""
file_system = bde_file_system.BDEFileSystem(
self._resolver_context, self._bde_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetFileEntryByPathSpec(self._bde_path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '')
def testGetRootFileEntry(self):
"""Test the get root file entry functionality."""
file_system = bde_file_system.BDEFileSystem(
self._resolver_context, self._bde_path_spec)
self.assertIsNotNone(file_system)
file_system.Open()
file_entry = file_system.GetRootFileEntry()
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.name, '')
if __name__ == '__main__':
unittest.main()
| joachimmetz/dfvfs | tests/vfs/bde_file_system.py | Python | apache-2.0 | 2,491 |
import logging
import sys
from easy_alert.entity.level import Level
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestLevel(unittest.TestCase):
def test_init_error(self):
def assert_err(arg, expect):
with self.assertRaises(KeyError) as cm:
Level(arg)
self.assertEqual(cm.exception.args[0], expect)
assert_err(1.23, 'Invalid level: 1.23')
assert_err('', 'Unknown level string: ')
assert_err('Info', 'Unknown level string: Info')
assert_err('INFO', 'Unknown level string: INFO')
def test_get_keyword(self):
self.assertEqual(Level(logging.DEBUG).get_keyword(), 'debug')
self.assertEqual(Level(logging.INFO).get_keyword(), 'info')
self.assertEqual(Level(logging.WARN).get_keyword(), 'warn')
self.assertEqual(Level(logging.ERROR).get_keyword(), 'error')
self.assertEqual(Level(logging.CRITICAL).get_keyword(), 'critical')
def test_cmp(self):
self.assertTrue(Level('warn') < 20)
self.assertTrue(Level('warn') < Level('error'))
self.assertTrue(Level('debug') <= Level('debug'))
self.assertTrue(Level('info') == Level(20))
self.assertTrue(Level('critical') >= Level('debug'))
self.assertTrue(Level('critical') > Level('debug'))
def test_repr(self):
self.assertEqual(repr(Level('critical')), 'Level(level=50)')
self.assertEqual(repr(Level('error')), 'Level(level=40)')
self.assertEqual(repr(Level('warn')), 'Level(level=30)')
self.assertEqual(repr(Level('info')), 'Level(level=20)')
self.assertEqual(repr(Level('debug')), 'Level(level=10)')
| mogproject/easy-alert | tests/easy_alert/entity/test_level.py | Python | apache-2.0 | 1,712 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "BoxCox", sigma = 0.0, exog_count = 0, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_BoxCox/trend_ConstantTrend/cycle_7/ar_/test_artificial_1024_BoxCox_ConstantTrend_7__0.py | Python | bsd-3-clause | 265 |
#
# QAPI visitor generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_visit_struct_fields(name, field_prefix, fn_prefix, members, base = None):
substructs = []
ret = ''
if not fn_prefix:
full_name = name
else:
full_name = "%s_%s" % (name, fn_prefix)
for argname, argentry, optional, structured in parse_args(members):
if structured:
if not fn_prefix:
nested_fn_prefix = argname
else:
nested_fn_prefix = "%s_%s" % (fn_prefix, argname)
nested_field_prefix = "%s%s." % (field_prefix, argname)
ret += generate_visit_struct_fields(name, nested_field_prefix,
nested_fn_prefix, argentry)
ret += mcgen('''
static void visit_type_%(full_name)s_fields(Visitor *m, %(name)s ** obj, Error **errp)
{
Error *err = NULL;
''',
name=name, full_name=full_name)
push_indent()
if base:
ret += mcgen('''
visit_start_implicit_struct(m, obj ? (void**) &(*obj)->%(c_name)s : NULL, sizeof(%(type)s), &err);
if (!err) {
visit_type_%(type)s_fields(m, obj ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, &err);
error_propagate(errp, err);
err = NULL;
visit_end_implicit_struct(m, &err);
}
''',
c_prefix=c_var(field_prefix),
type=type_name(base), c_name=c_var('base'))
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
visit_start_optional(m, obj ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", &err);
if (obj && (*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
ret += generate_visit_struct_body(full_name, argname, argentry)
else:
ret += mcgen('''
visit_type_%(type)s(m, obj ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", &err);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
name=argname)
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(m, &err);
''')
pop_indent()
ret += mcgen('''
error_propagate(errp, err);
}
''')
return ret
def generate_visit_struct_body(field_prefix, name, members):
ret = mcgen('''
if (!error_is_set(errp)) {
''')
push_indent()
if not field_prefix:
full_name = name
else:
full_name = "%s_%s" % (field_prefix, name)
if len(field_prefix):
ret += mcgen('''
Error **errp = &err; /* from outer scope */
Error *err = NULL;
visit_start_struct(m, NULL, "", "%(name)s", 0, &err);
''',
name=name)
else:
ret += mcgen('''
Error *err = NULL;
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
''',
name=name)
ret += mcgen('''
if (!err) {
if (!obj || *obj) {
visit_type_%(name)s_fields(m, obj, &err);
error_propagate(errp, err);
err = NULL;
}
''',
name=full_name)
pop_indent()
ret += mcgen('''
/* Always call end_struct if start_struct succeeded. */
visit_end_struct(m, &err);
}
error_propagate(errp, err);
}
''')
return ret
def generate_visit_struct(expr):
name = expr['type']
members = expr['data']
base = expr.get('base')
ret = generate_visit_struct_fields(name, "", "", members, base)
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
''',
name=name)
push_indent()
ret += generate_visit_struct_body("", name, members)
pop_indent()
ret += mcgen('''
}
''')
return ret
def generate_visit_list(name, members):
return mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i, **prev = (GenericList **)obj;
Error *err = NULL;
if (!error_is_set(errp)) {
visit_start_list(m, name, &err);
if (!err) {
for (; (i = visit_next_list(m, prev, &err)) != NULL; prev = &i) {
%(name)sList *native_i = (%(name)sList *)i;
visit_type_%(name)s(m, &native_i->value, NULL, &err);
}
error_propagate(errp, err);
err = NULL;
/* Always call end_list if start_list succeeded. */
visit_end_list(m, &err);
}
error_propagate(errp, err);
}
}
''',
name=name)
def generate_visit_enum(name, members):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp)
{
visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
}
''',
name=name)
def generate_visit_anon_union(name, members):
ret = mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
Error *err = NULL;
if (!error_is_set(errp)) {
visit_start_implicit_struct(m, (void**) obj, sizeof(%(name)s), &err);
visit_get_next_type(m, (int*) &(*obj)->kind, %(name)s_qtypes, name, &err);
switch ((*obj)->kind) {
''',
name=name)
for key in members:
assert (members[key] in builtin_types
or find_struct(members[key])
or find_union(members[key])), "Invalid anonymous union member"
ret += mcgen('''
case %(abbrev)s_KIND_%(enum)s:
visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, name, &err);
break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key),False).upper(),
c_type = type_name(members[key]),
c_name = c_fun(key))
ret += mcgen('''
default:
abort();
}
error_propagate(errp, err);
err = NULL;
visit_end_implicit_struct(m, &err);
}
}
''')
return ret
def generate_visit_union(expr):
name = expr['union']
members = expr['data']
base = expr.get('base')
discriminator = expr.get('discriminator')
if discriminator == {}:
assert not base
return generate_visit_anon_union(name, members)
ret = generate_visit_enum('%sKind' % name, members.keys())
if base:
base_fields = find_struct(base)['data']
if discriminator:
base_fields = base_fields.copy()
del base_fields[discriminator]
ret += generate_visit_struct_fields(name, "", "", base_fields)
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
Error *err = NULL;
if (!error_is_set(errp)) {
visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
if (!err) {
if (obj && *obj) {
''',
name=name)
push_indent()
push_indent()
push_indent()
if base:
ret += mcgen('''
visit_type_%(name)s_fields(m, obj, &err);
''',
name=name)
pop_indent()
if not discriminator:
desc_type = "type"
else:
desc_type = discriminator
ret += mcgen('''
visit_type_%(name)sKind(m, &(*obj)->kind, "%(type)s", &err);
if (!err) {
switch ((*obj)->kind) {
''',
name=name, type=desc_type)
for key in members:
if not discriminator:
fmt = 'visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);'
else:
fmt = '''visit_start_implicit_struct(m, (void**) &(*obj)->%(c_name)s, sizeof(%(c_type)s), &err);
if (!err) {
visit_type_%(c_type)s_fields(m, &(*obj)->%(c_name)s, &err);
error_propagate(errp, err);
err = NULL;
visit_end_implicit_struct(m, &err);
}'''
ret += mcgen('''
case %(abbrev)s_KIND_%(enum)s:
''' + fmt + '''
break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key),False).upper(),
c_type=type_name(members[key]),
c_name=c_fun(key))
ret += mcgen('''
default:
abort();
}
}
error_propagate(errp, err);
err = NULL;
}
''')
pop_indent()
ret += mcgen('''
/* Always call end_struct if start_struct succeeded. */
visit_end_struct(m, &err);
}
error_propagate(errp, err);
}
''')
    pop_indent()
ret += mcgen('''
}
''')
return ret
def generate_declaration(name, members, genlist=True, builtin_type=False):
ret = ""
if not builtin_type:
ret += mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp);
''',
name=name)
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_enum_declaration(name, members, genlist=True):
ret = ""
if genlist:
ret += mcgen('''
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp);
''',
name=name)
return ret
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void visit_type_%(name)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
name=name)
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:",
["source", "header", "builtins", "prefix=",
"output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-visit.c'
h_file = 'qapi-visit.h'
do_c = False
do_h = False
do_builtins = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
elif o in ("-b", "--builtins"):
do_builtins = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor functions
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qemu-common.h"
#include "%(header)s"
''',
header=basename(h_file)))
fdecl.write(mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI visitor function
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/visitor.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix, guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
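# Each parsed expression is a dict keyed by its kind; illustrative examples
# of the shapes handled by the loop below (old QAPI schema syntax):
#   {'type': 'TestStruct', 'data': {'integer': 'int'}}
#   {'enum': 'EnumOne', 'data': ['value1', 'value2']}
#   {'union': 'UserDefUnion', 'data': {'a': 'UserDefA', 'b': 'UserDefB'}}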
# to avoid header dependency hell, we always generate declarations
# for built-in types in our header files and simply guard them
fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL"))
for typename in builtin_types:
fdecl.write(generate_declaration(typename, None, genlist=True,
builtin_type=True))
fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL"))
# ...this doesn't work for cases where we link in multiple objects that
# have the functions defined, so we use -b option to provide control
# over these cases
if do_builtins:
fdef.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DEF"))
for typename in builtin_types:
fdef.write(generate_visit_list(typename, None))
fdef.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DEF"))
for expr in exprs:
    if 'type' in expr:
ret = generate_visit_struct(expr)
ret += generate_visit_list(expr['type'], expr['data'])
fdef.write(ret)
ret = generate_declaration(expr['type'], expr['data'])
fdecl.write(ret)
    elif 'union' in expr:
ret = generate_visit_union(expr)
ret += generate_visit_list(expr['union'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum('%sKind' % expr['union'], expr['data'].keys())
ret += generate_declaration(expr['union'], expr['data'])
fdecl.write(ret)
    elif 'enum' in expr:
ret = generate_visit_list(expr['enum'], expr['data'])
ret += generate_visit_enum(expr['enum'], expr['data'])
fdef.write(ret)
ret = generate_decl_enum(expr['enum'], expr['data'])
ret += generate_enum_declaration(expr['enum'], expr['data'])
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
| HusterWan/qemu-1.7.2-stable | scripts/qapi-visit.py | Python | gpl-2.0 | 14,043 |
#! /usr/bin/env python
# Slightly modified copy of Lib/test/regrtest.py from Python 1.5.1
"""Regression test.
This will find all modules whose name is "test_*" in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-q: quiet -- don't print anything except if a test fails
-g: generate -- write the output file for a test instead of comparing it
-x: exclude -- arguments are tests to *exclude*
If non-option arguments are present, they are names for tests to run,
unless -x is given, in which case they are names for tests not to run.
If no test names are given, all tests are run.
-v is incompatible with -g and does not compare test output files.
"""
import sys
import string
import os
import getopt
import traceback
from test import test_support
def main( tests = None, testdir = None ):
"""Execute a test suite.
This also parses command-line options and modifies its behaviour
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'vgqx')
except getopt.error, msg:
print msg
print __doc__
return 2
verbose = 0
quiet = 0
generate = 0
exclude = 0
for o, a in opts:
if o == '-v': verbose = verbose+1
if o == '-q': quiet = 1; verbose = 0
if o == '-g': generate = 1
if o == '-x': exclude = 1
if generate and verbose:
print "-g and -v don't go together!"
return 2
good = []
bad = []
skipped = []
for i in range(len(args)):
# Strip trailing ".py" from arguments
if args[i][-3:] == '.py':
args[i] = args[i][:-3]
if exclude:
NOTTESTS[:0] = args
args = []
tests = tests or args or findtests()
test_support.verbose = verbose # Tell tests to be moderately quiet
for test in tests:
if not quiet:
print test
ok = runtest(test, generate, verbose, testdir)
if ok > 0:
good.append(test)
elif ok == 0:
bad.append(test)
else:
if not quiet:
print "test", test,
print "skipped -- an optional feature could not be imported"
skipped.append(test)
if good and not quiet:
if not bad and not skipped and len(good) > 1:
print "All",
print count(len(good), "test"), "OK."
if bad:
print count(len(bad), "test"), "failed:",
print string.join(bad)
if skipped and not quiet:
print count(len(skipped), "test"), "skipped:",
print string.join(skipped)
return len(bad) > 0
# Not in PyXML...
STDTESTS = [
# 'test_grammar',
# 'test_opcodes',
# 'test_operations',
# 'test_builtin',
# 'test_exceptions',
# 'test_types',
]
NOTTESTS = [
'test_support',
'test_b1',
'test_b2',
]
def findtests(testdir = None, stdtests = STDTESTS, nottests = NOTTESTS):
"""Return a list of all applicable test modules."""
if not testdir: testdir = findtestdir()
names = os.listdir(testdir)
tests = []
for name in names:
if name[:5] == "test_" and name[-3:] == ".py":
modname = name[:-3]
if modname not in stdtests and modname not in nottests:
tests.append(modname)
tests.sort()
return stdtests + tests
def runtest(test, generate, verbose, testdir = None):
"""Run a single test.
test -- the name of the test
generate -- if true, generate output, instead of running the test
and comparing it to a previously created output file
verbose -- if true, print more messages
testdir -- test directory
"""
test_support.unload(test)
if not testdir: testdir = findtestdir()
outputdir = os.path.join(testdir, "output")
outputfile = os.path.join(outputdir, test)
try:
if generate:
cfp = open(outputfile, "w")
elif verbose:
cfp = sys.stdout
else:
cfp = Compare(outputfile)
except IOError:
cfp = None
print "Warning: can't open", outputfile
try:
save_stdout = sys.stdout
try:
if cfp:
sys.stdout = cfp
print test # Output file starts with test name
the_module = __import__(test, globals(), locals(), [])
# Most tests run to completion simply as a side-effect of
# being imported. For the benefit of tests that can't run
# that way (like test_threaded_import), explicitly invoke
# their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
if indirect_test is not None:
indirect_test()
finally:
sys.stdout = save_stdout
except ImportError, msg:
return -1
except KeyboardInterrupt, v:
raise KeyboardInterrupt, v, sys.exc_info()[2]
except test_support.TestFailed, msg:
print "test", test, "failed --", msg
return 0
except:
type, value = sys.exc_info()[:2]
print "test", test, "crashed --", type, ":", value
if verbose:
traceback.print_exc(file=sys.stdout)
return 0
else:
return 1
def findtestdir():
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
return testdir
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
class Compare:
def __init__(self, filename):
self.fp = open(filename, 'r')
def write(self, data):
expected = self.fp.read(len(data))
if data <> expected:
raise test_support.TestFailed, \
'Writing: '+`data`+', expected: '+`expected`
def flush(self):
pass
def close(self):
leftover = self.fp.read()
if leftover:
raise test_support.TestFailed, 'Unread: '+`leftover`
self.fp.close()
def isatty(self):
return 0
if __name__ == '__main__':
sys.exit(main())
| Pikecillo/genna | external/PyXML-0.8.4/test/regrtest.py | Python | gpl-2.0 | 6,734 |
from rcj_soccer.base import app, db
from rcj_soccer.models import Competition
from flask import render_template, jsonify, request
from datetime import datetime
from dateutil.parser import parse
from rcj_soccer.util import config, obj_to_dict
import logging
logger = logging.getLogger(__name__)
@app.route("/")
def list_competitions():
competitions = Competition.query.filter_by(is_active=True)\
.order_by(Competition.start_date.desc(), Competition.name).all()
return render_template("competitions.html", competitions=competitions,
year=datetime.utcnow().year)
@app.route("/api/competitions")
def api_list_competitions():
competitions = Competition.query.order_by(Competition.start_date).all()
data = []
for competition in competitions:
        logger.warning("{0}".format(str(dir(competition))))
data.append(obj_to_dict(competition))
return jsonify(data)
@app.route("/api/competitions/<comp>/<token>",
methods=["GET", "POST", "DELETE", "PUT"])
def api_competition(comp, token):
if request.method == "GET":
competition = Competition.query.filter_by(id=comp).one()
return jsonify(obj_to_dict(competition))
if token != config.get("api", "token"):
return jsonify({"error": "invalid token"})
if request.method == "POST":
body = request.get_json()
competition = Competition()
competition.id = comp
competition.name = body["name"]
competition.fb_link = body["fb_link"]
competition.twitter_link = body["twitter_link"]
competition.event_sponsor_link = body["event_sponsor"]["link"]
competition.event_sponsor_img = body["event_sponsor"]["img"]
competition.is_active = True
competition.start_date = parse(body["start_date"])
db.session.add(competition)
db.session.commit()
return jsonify({"status": "created"})
elif request.method == "DELETE":
competition = Competition.query.filter_by(id=comp).one()
db.session.delete(competition)
db.session.commit()
return jsonify({"status": "deleted"})
elif request.method == "PUT":
competition = Competition.query.filter_by(id=comp).one()
body = request.get_json()
if "name" in body:
competition.name = body["name"]
if "fb_link" in body:
competition.fb_link = body["fb_link"]
if "twitter_link" in body:
competition.twitter_link = body["twitter_link"]
if "active" in body:
competition.is_active = body["active"]
if "start_date" in body:
competition.start_date = parse(body["start_date"])
if "event_sponsor" in body:
if "link" in body["event_sponsor"]:
competition.event_sponsor_link = body["event_sponsor"]["link"]
if "img" in body["event_sponsor"]:
competition.event_sponsor_img = body["event_sponsor"]["img"]
db.session.commit()
return jsonify(obj_to_dict(competition))
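# Illustrative client calls against this endpoint (hypothetical host,
# competition id and token):
#   curl http://localhost:5000/api/competitions/nationals/secret-token
#   curl -X PUT -H 'Content-Type: application/json' \
#        -d '{"name": "Nationals 2017"}' \
#        http://localhost:5000/api/competitions/nationals/secret-token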
def get_competition(id):
competition = Competition.query.filter_by(id=id, is_active=True).first()
return competition
| rcjaustralia/rcj-soccer-platform | rcj_soccer/views/competition.py | Python | mit | 3,192 |
import enum
class ColorTransfer(enum.Enum):
UNSPECIFIED = 'UNSPECIFIED'
BT709 = 'BT709'
GAMMA22 = 'GAMMA22'
GAMMA28 = 'GAMMA28'
SMPTE170M = 'SMPTE170M'
SMPTE240M = 'SMPTE240M'
LINEAR = 'LINEAR'
LOG = 'LOG'
LOG_SQRT = 'LOG_SQRT'
IEC61966_2_4 = 'IEC61966_2_4'
BT1361_ECG = 'BT1361_ECG'
IEC61966_2_1 = 'IEC61966_2_1'
BT2020_10 = 'BT2020_10'
BT2020_12 = 'BT2020_12'
SMPTE2084 = 'SMPTE2084'
SMPTE428 = 'SMPTE428'
ARIB_STD_B67 = 'ARIB_STD_B67'
| bitmovin/bitmovin-python | bitmovin/resources/enums/color_transfer.py | Python | unlicense | 508 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines BlockPictureExplorer, a horizontal-only block explorer.
"""
from nupic.vision.regions.PictureSensor import PictureSensor
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=
# BlockPictureExplorer
class BlockPictureExplorer(PictureSensor.PictureExplorer):
"""
A base plugin class that implements "explorer" functionality for
specific categories; this functionality controls the manner in
which pictures are swept.
To add support for a new type of explorer to the PictureSensor,
perform the following:
1. Derive a sub-class from this PictureExplorer base class;
2. Implement the following mandatory methods:
initSequence() - create initial state for a new sequence
updateSequence() - update state of an existing sequence
"""
@classmethod
def queryRelevantParams(klass):
"""
Returns a sequence of parameter names that are relevant to
the operation of the explorer.
May be extended or overridden by sub-classes as appropriate.
"""
return ( 'radialLength', 'radialStep', )
def initSequence(self, state, params):
self._presentNextPosn(state, params)
def updateSequence(self, state, params):
self._presentNextPosn(state, params)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Internal helper method(s)
def _presentNextPosn(self, state, params):
"""
Compute the appropriate category and block position
deterministically based on the current iteration count.
"""
# Compute iteration indices
edgeLen = 2 * params['radialLength'] + 1
numBlocksPerCat = edgeLen
numCats = self._getNumCategories()
numBlocks = numBlocksPerCat * numCats
blockCounter = self._getIterCount() % numBlocks
catIndex = blockCounter // numBlocksPerCat
blockCatIndex = blockCounter % numBlocksPerCat
# Compute position within onion block
posnX = ((blockCatIndex % edgeLen) - params['radialLength']) * params['radialStep']
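    # Worked example (illustrative): with radialLength=2 (edgeLen=5), 3
    # categories and iteration count 7: blockCounter=7, catIndex=7//5=1,
    # blockCatIndex=7%5=2, so posnX=((2%5)-2)*radialStep=0.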
# Override default state
state['posnX'] = posnX
state['posnY'] = 0
state['velocityX'] = 0
state['velocityY'] = 0
state['angularPosn'] = 0
state['angularVelocity'] = 0
state['catIndex'] = catIndex
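# Illustrative sketch only: the smallest explorer plugin that follows the two
# steps in the PictureExplorer docstring above. The class name and its static
# (motionless) behavior are hypothetical, not part of the original module.
class StaticPictureExplorer(PictureSensor.PictureExplorer):
  """
  Presents every picture centered and motionless; a minimal example of the
  explorer plugin contract.
  """
  def initSequence(self, state, params):
    # Start each new sequence at the origin with no motion.
    state['posnX'] = 0
    state['posnY'] = 0
    state['velocityX'] = 0
    state['velocityY'] = 0
  def updateSequence(self, state, params):
    # A static explorer leaves the state untouched between iterations.
    pass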
| numenta/nupic.vision | src/nupic/vision/regions/PictureSensorExplorers/HorizontalBlock.py | Python | agpl-3.0 | 3,159 |
import copy
from django.conf import settings
from olympia.constants.promoted import RECOMMENDED
import olympia.core.logger
from olympia import amo
from olympia.amo.indexers import BaseSearchIndexer
from olympia.amo.utils import attach_trans_dict
from olympia.amo.celery import create_chunked_tasks_signatures
from olympia.lib.es.utils import create_index
from olympia.versions.compare import version_int
log = olympia.core.logger.getLogger('z.es')
class AddonIndexer(BaseSearchIndexer):
"""Fields we don't need to expose in the results, only used for filtering
or sorting."""
hidden_fields = (
'*.raw',
'boost',
'colors',
'hotness',
# Translated content that is used for filtering purposes is stored
# under 3 different fields:
# - One field with all translations (e.g., "name").
# - One field for each language, using corresponding analyzer
# (e.g., "name_l10n_en-us", "name_l10n_fr", etc.)
# - One field with all translations in separate objects for the API
# (e.g. "name_translations")
# Only that last one with all translations needs to be returned.
'name',
'description',
'name_l10n_*',
'description_l10n_*',
'summary',
'summary_l10n_*',
)
index_settings = {
'analysis': {
'analyzer': {
'standard_with_word_split': {
# This analyzer tries to split the text into words by using
# various methods. It also lowercases them and make sure
# each token is only returned once.
# Only use for short things with extremely meaningful
# content like add-on name - it makes too many
# modifications to be useful for things like descriptions,
# for instance.
'tokenizer': 'standard',
'filter': [
'standard',
'custom_word_delimiter',
'lowercase',
'stop',
'custom_dictionary_decompounder',
'unique',
],
},
'trigram': {
# Analyzer that splits the text into trigrams.
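                    # e.g. (illustrative) "Tabs" yields the tokens "tab" and
                    # "abs": the ngram_tokenizer below emits 3-character
                    # grams, which the lowercase filter then normalizes.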
'tokenizer': 'ngram_tokenizer',
'filter': [
'lowercase',
],
},
},
'tokenizer': {
'ngram_tokenizer': {
'type': 'ngram',
'min_gram': 3,
'max_gram': 3,
'token_chars': ['letter', 'digit'],
}
},
'normalizer': {
'lowercase_keyword_normalizer': {
# By default keywords are indexed 'as-is', but for exact
# name matches we need to lowercase them before indexing,
# so this normalizer does that for us.
'type': 'custom',
'filter': ['lowercase'],
},
},
'filter': {
'custom_word_delimiter': {
# This filter is useful for add-on names that have multiple
# words sticked together in a way that is easy to
# recognize, like FooBar, which should be indexed as FooBar
# and Foo Bar. (preserve_original: True makes us index both
# the original and the split version.)
'type': 'word_delimiter',
'preserve_original': True,
},
'custom_dictionary_decompounder': {
# This filter is also useful for add-on names that have
                    # multiple words stuck together, but without a pattern
# that we can automatically recognize. To deal with those,
# we use a small dictionary of common words. It allows us
# to index 'awesometabpassword' as 'awesome tab password',
# helping users looking for 'tab password' find that addon.
'type': 'dictionary_decompounder',
'word_list': [
'all',
'auto',
'ball',
'bar',
'block',
'blog',
'bookmark',
'browser',
'bug',
'button',
'cat',
'chat',
'click',
'clip',
'close',
'color',
'context',
'cookie',
'cool',
'css',
'delete',
'dictionary',
'down',
'download',
'easy',
'edit',
'fill',
'fire',
'firefox',
'fix',
'flag',
'flash',
'fly',
'forecast',
'fox',
'foxy',
'google',
'grab',
'grease',
'html',
'http',
'image',
'input',
'inspect',
'inspector',
'iris',
'js',
'key',
'keys',
'lang',
'link',
'mail',
'manager',
'map',
'mega',
'menu',
'menus',
'monkey',
'name',
'net',
'new',
'open',
'password',
'persona',
'privacy',
'query',
'screen',
'scroll',
'search',
'secure',
'select',
'smart',
'spring',
'status',
'style',
'super',
'sync',
'tab',
'text',
'think',
'this',
'time',
'title',
'translate',
'tree',
'undo',
'upload',
'url',
'user',
'video',
'window',
'with',
'word',
'zilla',
],
},
},
}
}
@classmethod
def get_model(cls):
from olympia.addons.models import Addon
return Addon
@classmethod
def get_index_alias(cls):
"""Return the index alias name."""
return settings.ES_INDEXES.get('default')
@classmethod
def get_mapping(cls):
doc_name = cls.get_doctype_name()
appver_mapping = {
'properties': {
'max': {'type': 'long'},
'min': {'type': 'long'},
'max_human': {'type': 'keyword', 'index': False},
'min_human': {'type': 'keyword', 'index': False},
}
}
version_mapping = {
'type': 'object',
'properties': {
'compatible_apps': {
'properties': {app.id: appver_mapping for app in amo.APP_USAGE}
},
# Keep '<version>.id' indexed to be able to run exists queries
# on it.
'id': {'type': 'long'},
'reviewed': {'type': 'date', 'index': False},
'files': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'created': {'type': 'date', 'index': False},
'hash': {'type': 'keyword', 'index': False},
'filename': {'type': 'keyword', 'index': False},
'is_webextension': {'type': 'boolean'},
'is_mozilla_signed_extension': {'type': 'boolean'},
'is_restart_required': {'type': 'boolean', 'index': False},
'size': {'type': 'long', 'index': False},
'strict_compatibility': {'type': 'boolean', 'index': False},
'status': {'type': 'byte'},
'permissions': {'type': 'keyword', 'index': False},
'optional_permissions': {'type': 'keyword', 'index': False},
},
},
'license': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'builtin': {'type': 'boolean', 'index': False},
'name_translations': cls.get_translations_definition(),
'url': {'type': 'text', 'index': False},
},
},
'release_notes_translations': cls.get_translations_definition(),
'version': {'type': 'keyword', 'index': False},
},
}
mapping = {
doc_name: {
'properties': {
'id': {'type': 'long'},
'app': {'type': 'byte'},
'average_daily_users': {'type': 'long'},
'bayesian_rating': {'type': 'double'},
'boost': {'type': 'float', 'null_value': 1.0},
'category': {'type': 'integer'},
'colors': {
'type': 'nested',
'properties': {
'h': {'type': 'integer'},
's': {'type': 'integer'},
'l': {'type': 'integer'},
'ratio': {'type': 'double'},
},
},
'contributions': {'type': 'text'},
'created': {'type': 'date'},
'current_version': version_mapping,
'default_locale': {'type': 'keyword', 'index': False},
'description': {'type': 'text', 'analyzer': 'snowball'},
'guid': {'type': 'keyword'},
'has_eula': {'type': 'boolean', 'index': False},
'has_privacy_policy': {'type': 'boolean', 'index': False},
'hotness': {'type': 'double'},
'icon_hash': {'type': 'keyword', 'index': False},
'icon_type': {'type': 'keyword', 'index': False},
'is_disabled': {'type': 'boolean'},
'is_experimental': {'type': 'boolean'},
'is_recommended': {'type': 'boolean'},
'last_updated': {'type': 'date'},
'listed_authors': {
'type': 'object',
'properties': {
'id': {'type': 'long'},
'name': {'type': 'text'},
'username': {'type': 'keyword'},
'is_public': {'type': 'boolean', 'index': False},
},
},
'modified': {'type': 'date', 'index': False},
'name': {
'type': 'text',
# Adding word-delimiter to split on camelcase, known
# words like 'tab', and punctuation, and eliminate
# duplicates.
'analyzer': 'standard_with_word_split',
'fields': {
# Raw field for exact matches and sorting.
'raw': cls.get_raw_field_definition(),
# Trigrams for partial matches.
'trigrams': {
'type': 'text',
'analyzer': 'trigram',
},
},
},
'previews': {
'type': 'object',
'properties': {
'id': {'type': 'long', 'index': False},
'caption_translations': cls.get_translations_definition(),
'modified': {'type': 'date', 'index': False},
'sizes': {
'type': 'object',
'properties': {
'thumbnail': {'type': 'short', 'index': False},
'image': {'type': 'short', 'index': False},
},
},
},
},
'promoted': {
'type': 'object',
'properties': {
'group_id': {'type': 'byte'},
'approved_for_apps': {'type': 'byte'},
},
},
'ratings': {
'type': 'object',
'properties': {
'count': {'type': 'short', 'index': False},
'average': {'type': 'float', 'index': False},
},
},
'slug': {'type': 'keyword'},
'requires_payment': {'type': 'boolean', 'index': False},
'status': {'type': 'byte'},
'summary': {'type': 'text', 'analyzer': 'snowball'},
'tags': {'type': 'keyword'},
'type': {'type': 'byte'},
'weekly_downloads': {'type': 'long'},
},
},
}
# Add fields that we expect to return all translations without being
# analyzed/indexed.
cls.attach_translation_mappings(
mapping,
(
'description',
'developer_comments',
'homepage',
'name',
'summary',
'support_email',
'support_url',
),
)
# Add language-specific analyzers for localized fields that are
# analyzed/indexed.
cls.attach_language_specific_analyzers(mapping, ('description', 'summary'))
cls.attach_language_specific_analyzers_with_raw_variant(mapping, ('name',))
return mapping
@classmethod
def extract_version(cls, obj, version_obj):
from olympia.versions.models import License, Version
data = (
{
'id': version_obj.pk,
'compatible_apps': cls.extract_compatibility_info(obj, version_obj),
'files': [
{
'id': file_.id,
'created': file_.created,
'filename': file_.filename,
'hash': file_.hash,
'is_webextension': file_.is_webextension,
'is_mozilla_signed_extension': (
file_.is_mozilla_signed_extension
),
'is_restart_required': file_.is_restart_required,
'size': file_.size,
'status': file_.status,
'strict_compatibility': file_.strict_compatibility,
'permissions': file_.permissions,
'optional_permissions': file_.optional_permissions,
}
for file_ in version_obj.all_files
],
'reviewed': version_obj.reviewed,
'version': version_obj.version,
}
if version_obj
else None
)
if data and version_obj:
attach_trans_dict(Version, [version_obj])
data.update(
cls.extract_field_api_translations(
version_obj, 'release_notes', db_field='release_notes_id'
)
)
if version_obj.license:
data['license'] = {
'id': version_obj.license.id,
'builtin': bool(version_obj.license.builtin),
'url': version_obj.license.url,
}
attach_trans_dict(License, [version_obj.license])
data['license'].update(
cls.extract_field_api_translations(version_obj.license, 'name')
)
return data
@classmethod
def extract_compatibility_info(cls, obj, version_obj):
"""Return compatibility info for the specified version_obj, as will be
indexed in ES."""
compatible_apps = {}
for app, appver in version_obj.compatible_apps.items():
if appver:
min_, max_ = appver.min.version_int, appver.max.version_int
min_human, max_human = appver.min.version, appver.max.version
if not version_obj.files.filter(strict_compatibility=True).exists():
# The files attached to this version are not using strict
# compatibility, so the max version essentially needs to be
# ignored - let's fake a super high one. We leave max_human
# alone to leave the API representation intact.
max_ = version_int('*')
else:
# Fake wide compatibility for add-ons with no info. We don't
# want to reindex every time a new version of the app is
# released, so we directly index a super high version as the
# max.
min_human, max_human = (
amo.D2C_MIN_VERSIONS.get(app.id, '1.0'),
amo.FAKE_MAX_VERSION,
)
min_, max_ = version_int(min_human), version_int(max_human)
compatible_apps[app.id] = {
'min': min_,
'min_human': min_human,
'max': max_,
'max_human': max_human,
}
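        # Illustrative shape of the result, keyed by application id (the
        # concrete values are hypothetical):
        #   {app.id: {'min': <version_int>, 'min_human': '57.0',
        #             'max': <version_int>, 'max_human': '*'}}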
return compatible_apps
@classmethod
def extract_document(cls, obj):
"""Extract indexable attributes from an add-on."""
from olympia.addons.models import Preview
attrs = (
'id',
'average_daily_users',
'bayesian_rating',
'contributions',
'created',
'default_locale',
'guid',
'hotness',
'icon_hash',
'icon_type',
'is_disabled',
'is_experimental',
'last_updated',
'modified',
'requires_payment',
'slug',
'status',
'type',
'weekly_downloads',
)
data = {attr: getattr(obj, attr) for attr in attrs}
data['colors'] = None
# Extract dominant colors from static themes.
if obj.type == amo.ADDON_STATICTHEME:
first_preview = obj.current_previews.first()
if first_preview:
data['colors'] = first_preview.colors
data['app'] = [app.id for app in obj.compatible_apps.keys()]
        # Boost by the number of users on a sub-linear (fifth-root) scale.
data['boost'] = float(data['average_daily_users'] ** 0.2)
        # Quadruple the boost if the add-on is public (approved, non-experimental).
if (
obj.status == amo.STATUS_APPROVED
and not obj.is_experimental
and 'boost' in data
):
data['boost'] = float(max(data['boost'], 1) * 4)
# We can use all_categories because the indexing code goes through the
# transformer that sets it.
data['category'] = [cat.id for cat in obj.all_categories]
data['current_version'] = cls.extract_version(obj, obj.current_version)
data['listed_authors'] = [
{
'name': a.name,
'id': a.id,
'username': a.username,
'is_public': a.is_public,
}
for a in obj.listed_authors
]
data['has_eula'] = bool(obj.eula)
data['has_privacy_policy'] = bool(obj.privacy_policy)
data['is_recommended'] = bool(
obj.promoted and obj.promoted.group == RECOMMENDED
)
data['previews'] = [
{'id': preview.id, 'modified': preview.modified, 'sizes': preview.sizes}
for preview in obj.current_previews
]
data['promoted'] = (
{
'group_id': obj.promoted.group_id,
# store the app approvals because .approved_applications needs it.
'approved_for_apps': [
app.id for app in obj.promoted.approved_applications
],
}
if obj.promoted
else None
)
data['ratings'] = {
'average': obj.average_rating,
'count': obj.total_ratings,
'text_count': obj.text_ratings_count,
}
# We can use tag_list because the indexing code goes through the
# transformer that sets it (attach_tags).
data['tags'] = getattr(obj, 'tag_list', [])
# Handle localized fields.
# First, deal with the 3 fields that need everything:
for field in ('description', 'name', 'summary'):
data.update(cls.extract_field_api_translations(obj, field))
data.update(
cls.extract_field_search_translation(obj, field, obj.default_locale)
)
data.update(cls.extract_field_analyzed_translations(obj, field))
# Then add fields that only need to be returned to the API without
# contributing to search relevancy.
for field in ('developer_comments', 'homepage', 'support_email', 'support_url'):
data.update(cls.extract_field_api_translations(obj, field))
if obj.type != amo.ADDON_STATICTHEME:
# Also do that for preview captions, which are set on each preview
# object.
attach_trans_dict(Preview, obj.current_previews)
for i, preview in enumerate(obj.current_previews):
data['previews'][i].update(
cls.extract_field_api_translations(preview, 'caption')
)
return data
@classmethod
def create_new_index(cls, index_name):
"""
Create a new index for addons in ES.
Intended to be used by reindexation (and tests), generally a bad idea
to call manually.
"""
index_settings = copy.deepcopy(cls.index_settings)
config = {
'mappings': {
cls.get_doctype_name(): cls.get_mapping(),
},
'settings': {
# create_index will add its own index settings like number of
# shards and replicas.
'index': index_settings
},
}
create_index(index_name, config)
@classmethod
def reindex_tasks_group(cls, index_name):
"""
Return the group of tasks to execute for a full reindex of addons on
the index called `index_name` (which is not an alias but the real
index name).
"""
from olympia.addons.tasks import index_addons
ids = cls.get_model().unfiltered.values_list('id', flat=True).order_by('id')
chunk_size = 150
return create_chunked_tasks_signatures(index_addons, list(ids), chunk_size)
| bqbn/addons-server | src/olympia/addons/indexers.py | Python | bsd-3-clause | 24,877 |
from Components.Harddisk import harddiskmanager
from config import ConfigSubsection, ConfigYesNo, config, ConfigSelection, ConfigText, ConfigNumber, ConfigSet, ConfigLocations, ConfigSelectionNumber, ConfigClock, ConfigSlider
from Tools.Directories import resolveFilename, SCOPE_HDD, defaultRecordingLocation
from enigma import setTunerTypePriorityOrder, setPreferredTuner, setSpinnerOnOff, setEnableTtCachingOnOff, eEnv, eDVBDB, Misc_Options, eBackgroundFileEraser, eServiceEvent
from Components.NimManager import nimmanager
from Components.ServiceList import refreshServiceList
from SystemInfo import SystemInfo
import os
import time
def InitUsageConfig():
config.usage = ConfigSubsection()
config.usage.showdish = ConfigYesNo(default = True)
config.usage.multibouquet = ConfigYesNo(default = True)
config.usage.alternative_number_mode = ConfigYesNo(default = False)
def alternativeNumberModeChange(configElement):
eDVBDB.getInstance().setNumberingMode(configElement.value)
refreshServiceList()
config.usage.alternative_number_mode.addNotifier(alternativeNumberModeChange)
config.usage.hide_number_markers = ConfigYesNo(default = True)
config.usage.hide_number_markers.addNotifier(refreshServiceList)
config.usage.servicetype_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.servicetype_icon_mode.addNotifier(refreshServiceList)
config.usage.crypto_icon_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename"))])
config.usage.crypto_icon_mode.addNotifier(refreshServiceList)
config.usage.record_indicator_mode = ConfigSelection(default = "0", choices = [("0", _("None")), ("1", _("Left from servicename")), ("2", _("Right from servicename")), ("3", _("Red colored"))])
config.usage.record_indicator_mode.addNotifier(refreshServiceList)
choicelist = [("-1", _("Devide")), ("0", _("Disable"))]
for i in range(100,1300,100):
choicelist.append(("%d" % i, ngettext("%d pixel wide", "%d pixels wide", i) % i))
config.usage.servicelist_column = ConfigSelection(default="0", choices=choicelist)
config.usage.servicelist_column.addNotifier(refreshServiceList)
config.usage.service_icon_enable = ConfigYesNo(default = False)
config.usage.service_icon_enable.addNotifier(refreshServiceList)
config.usage.servicelist_cursor_behavior = ConfigSelection(default = "standard", choices = [
("standard", _("Standard")),
("keep", _("Keep service")),
("reverseB", _("Reverse bouquet buttons")),
("keep reverseB", _("Keep service") + " + " + _("Reverse bouquet buttons"))])
config.usage.multiepg_ask_bouquet = ConfigYesNo(default = False)
config.usage.quickzap_bouquet_change = ConfigYesNo(default = False)
config.usage.e1like_radio_mode = ConfigYesNo(default = True)
choicelist = [("0", _("No timeout"))]
for i in range(1, 12):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
config.usage.infobar_timeout = ConfigSelection(default = "5", choices = choicelist)
config.usage.show_infobar_on_zap = ConfigYesNo(default = True)
config.usage.show_infobar_on_skip = ConfigYesNo(default = True)
config.usage.show_infobar_on_event_change = ConfigYesNo(default = False)
config.usage.show_second_infobar = ConfigSelection(default = None, choices = [(None, _("None"))] + choicelist + [("EPG",_("EPG"))])
config.usage.infobar_frontend_source = ConfigSelection(default = "tuner", choices = [("settings", _("Settings")), ("tuner", _("Tuner"))])
config.usage.oldstyle_zap_controls = ConfigYesNo(default = False)
config.usage.oldstyle_channel_select_controls = ConfigYesNo(default = False)
config.usage.zap_with_ch_buttons = ConfigYesNo(default = False)
config.usage.ok_is_channelselection = ConfigYesNo(default = False)
config.usage.show_spinner = ConfigYesNo(default = True)
config.usage.enable_tt_caching = ConfigYesNo(default = True)
choicelist = []
for i in (10, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300, 600, 1200, 1800):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
for i in (3600, 7200, 14400):
h = i / 3600
choicelist.append(("%d" % i, ngettext("%d hour", "%d hours", h) % h))
config.usage.hdd_standby = ConfigSelection(default = "300", choices = [("0", _("No standby"))] + choicelist)
config.usage.output_12V = ConfigSelection(default = "do not change", choices = [
("do not change", _("Do not change")), ("off", _("Off")), ("on", _("On")) ])
config.usage.pip_zero_button = ConfigSelection(default = "standard", choices = [
("standard", _("Standard")), ("swap", _("Swap PiP and main picture")),
("swapstop", _("Move PiP to main picture")), ("stop", _("Stop PiP")) ])
config.usage.pip_hideOnExit = ConfigSelection(default = "without popup", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
choicelist = [("-1", _("Disabled")), ("0", _("No timeout"))]
for i in [60, 300, 600, 900, 1800, 2700, 3600]:
m = i/60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
config.usage.pip_last_service_timeout = ConfigSelection(default = "0", choices = choicelist)
config.usage.default_path = ConfigText(default = resolveFilename(SCOPE_HDD))
config.usage.timer_path = ConfigText(default = "<default>")
config.usage.instantrec_path = ConfigText(default = "<default>")
config.usage.timeshift_path = ConfigText(default = "/media/hdd/")
config.usage.allowed_timeshift_paths = ConfigLocations(default = ["/media/hdd/"])
config.usage.movielist_trashcan = ConfigYesNo(default=True)
config.usage.movielist_trashcan_days = ConfigNumber(default=8)
config.usage.movielist_trashcan_reserve = ConfigNumber(default=40)
config.usage.on_movie_start = ConfigSelection(default = "resume", choices = [
("ask", _("Ask user")), ("resume", _("Resume from last position")), ("beginning", _("Start from the beginning")) ])
config.usage.on_movie_stop = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")) ])
config.usage.on_movie_eof = ConfigSelection(default = "movielist", choices = [
("ask", _("Ask user")), ("movielist", _("Return to movie list")), ("quit", _("Return to previous service")), ("pause", _("Pause movie at end")), ("playlist", _("Play next (return to movie list)")),
("playlistquit", _("Play next (return to previous service)")), ("loop", _("Continues play (loop)")), ("repeatcurrent", _("Repeat"))])
config.usage.next_movie_msg = ConfigYesNo(default = True)
config.usage.last_movie_played = ConfigText()
config.usage.leave_movieplayer_onExit = ConfigSelection(default = "popup", choices = [
("no", _("No")), ("popup", _("With popup")), ("without popup", _("Without popup")) ])
config.usage.setup_level = ConfigSelection(default = "expert", choices = [
("simple", _("Simple")),
("intermediate", _("Intermediate")),
("expert", _("Expert")) ])
config.usage.startup_to_standby = ConfigYesNo(default = False)
config.usage.on_long_powerpress = ConfigSelection(default = "show_menu", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
config.usage.on_short_powerpress = ConfigSelection(default = "standby", choices = [
("show_menu", _("Show shutdown menu")),
("shutdown", _("Immediate shutdown")),
("standby", _("Standby")) ] )
choicelist = [("0", _("Do nothing"))]
for i in range(3600, 21601, 3600):
h = abs(i / 3600)
h = ngettext("%d hour", "%d hours", h) % h
choicelist.append(("%d" % i, _("Standby in ") + h))
config.usage.inactivity_timer = ConfigSelection(default = "0", choices = choicelist)
config.usage.inactivity_timer_blocktime = ConfigYesNo(default = True)
config.usage.inactivity_timer_blocktime_begin = ConfigClock(default = time.mktime((0, 0, 0, 6, 0, 0, 0, 0, 0)))
config.usage.inactivity_timer_blocktime_end = ConfigClock(default = time.mktime((0, 0, 0, 23, 0, 0, 0, 0, 0)))
choicelist = [("0", _("Disabled")),("event_standby", _("Standby after current event"))]
for i in range(900, 7201, 900):
m = abs(i / 60)
m = ngettext("%d minute", "%d minutes", m) % m
choicelist.append(("%d" % i, _("Standby in ") + m))
config.usage.sleep_timer = ConfigSelection(default = "0", choices = choicelist)
choicelist = [("0", _("Disabled"))]
for i in [60, 300, 600] + range(900, 7201, 900):
m = abs(i / 60)
m = ngettext("%d minute", "%d minutes", m) % m
choicelist.append(("%d" % i, _("after ") + m))
config.usage.standby_to_shutdown_timer = ConfigSelection(default = "0", choices = choicelist)
config.usage.standby_to_shutdown_timer_blocktime = ConfigYesNo(default = True)
config.usage.standby_to_shutdown_timer_blocktime_begin = ConfigClock(default = time.mktime((0, 0, 0, 6, 0, 0, 0, 0, 0)))
config.usage.standby_to_shutdown_timer_blocktime_end = ConfigClock(default = time.mktime((0, 0, 0, 23, 0, 0, 0, 0, 0)))
choicelist = [("0", _("Disabled"))]
for i in (5, 30, 60, 300, 600, 900, 1200, 1800, 2700, 3600):
if i < 60:
m = ngettext("%d second", "%d seconds", i) % i
else:
m = abs(i / 60)
m = ngettext("%d minute", "%d minutes", m) % m
choicelist.append(("%d" % i, m))
config.usage.screen_saver = ConfigSelection(default = "60", choices = choicelist)
config.usage.check_timeshift = ConfigYesNo(default = True)
choicelist = [("0", _("Disabled"))]
for i in (2, 3, 4, 5, 10, 20, 30):
choicelist.append(("%d" % i, ngettext("%d second", "%d seconds", i) % i))
for i in (60, 120, 300):
m = i / 60
choicelist.append(("%d" % i, ngettext("%d minute", "%d minutes", m) % m))
config.usage.timeshift_start_delay = ConfigSelection(default = "0", choices = choicelist)
config.usage.alternatives_priority = ConfigSelection(default = "0", choices = [
("0", "DVB-S/-C/-T"),
("1", "DVB-S/-T/-C"),
("2", "DVB-C/-S/-T"),
("3", "DVB-C/-T/-S"),
("4", "DVB-T/-C/-S"),
("5", "DVB-T/-S/-C") ])
nims = [("-1", _("auto"))]
for x in nimmanager.nim_slots:
nims.append((str(x.slot), x.getSlotName()))
config.usage.frontend_priority = ConfigSelection(default = "-1", choices = list(nims))
nims.insert(0,("-2", _("Disabled")))
config.usage.recording_frontend_priority = ConfigSelection(default = "-2", choices = nims)
config.misc.disable_background_scan = ConfigYesNo(default = False)
config.usage.show_event_progress_in_servicelist = ConfigSelection(default = 'barright', choices = [
('barleft', _("Progress bar left")),
('barright', _("Progress bar right")),
('percleft', _("Percentage left")),
('percright', _("Percentage right")),
('no', _("No")) ])
config.usage.show_channel_numbers_in_servicelist = ConfigYesNo(default = True)
config.usage.show_event_progress_in_servicelist.addNotifier(refreshServiceList)
config.usage.show_channel_numbers_in_servicelist.addNotifier(refreshServiceList)
config.usage.blinking_display_clock_during_recording = ConfigYesNo(default = False)
config.usage.show_message_when_recording_starts = ConfigYesNo(default = True)
config.usage.load_length_of_movies_in_moviellist = ConfigYesNo(default = True)
config.usage.show_icons_in_movielist = ConfigSelection(default = 'i', choices = [
('o', _("Off")),
('p', _("Progress")),
('s', _("Small progress")),
('i', _("Icons")),
])
config.usage.movielist_unseen = ConfigYesNo(default = False)
config.usage.swap_snr_on_osd = ConfigYesNo(default = False)
def SpinnerOnOffChanged(configElement):
setSpinnerOnOff(int(configElement.value))
config.usage.show_spinner.addNotifier(SpinnerOnOffChanged)
def EnableTtCachingChanged(configElement):
setEnableTtCachingOnOff(int(configElement.value))
config.usage.enable_tt_caching.addNotifier(EnableTtCachingChanged)
def TunerTypePriorityOrderChanged(configElement):
setTunerTypePriorityOrder(int(configElement.value))
config.usage.alternatives_priority.addNotifier(TunerTypePriorityOrderChanged, immediate_feedback=False)
def PreferredTunerChanged(configElement):
setPreferredTuner(int(configElement.value))
config.usage.frontend_priority.addNotifier(PreferredTunerChanged)
config.usage.hide_zap_errors = ConfigYesNo(default = False)
config.usage.hide_ci_messages = ConfigYesNo(default = True)
config.usage.show_cryptoinfo = ConfigYesNo(default = True)
config.usage.show_eit_nownext = ConfigYesNo(default = True)
config.usage.show_vcr_scart = ConfigYesNo(default = False)
if SystemInfo["Fan"]:
choicelist = [('off', _("Off")), ('on', _("On")), ('auto', _("Auto"))]
if os.path.exists("/proc/stb/fp/fan_choices"):
choicelist = [x for x in choicelist if x[0] in open("/proc/stb/fp/fan_choices", "r").read().strip().split(" ")]
config.usage.fan = ConfigSelection(choicelist)
def fanChanged(configElement):
open(SystemInfo["Fan"], "w").write(configElement.value)
config.usage.fan.addNotifier(fanChanged)
if SystemInfo["FanPWM"]:
def fanSpeedChanged(configElement):
open(SystemInfo["FanPWM"], "w").write(hex(configElement.value)[2:])
config.usage.fanspeed = ConfigSlider(default=127, increment=8, limits=(0, 255))
config.usage.fanspeed.addNotifier(fanSpeedChanged)
if SystemInfo["StandbyLED"]:
def standbyLEDChanged(configElement):
open(SystemInfo["StandbyLED"], "w").write(configElement.value and "on" or "off")
config.usage.standbyLED = ConfigYesNo(default = True)
config.usage.standbyLED.addNotifier(standbyLEDChanged)
if SystemInfo["WakeOnLAN"]:
def wakeOnLANChanged(configElement):
open(SystemInfo["WakeOnLAN"], "w").write(configElement.value and "on" or "off")
config.usage.wakeOnLAN = ConfigYesNo(default = False)
config.usage.wakeOnLAN.addNotifier(wakeOnLANChanged)
config.epg = ConfigSubsection()
config.epg.eit = ConfigYesNo(default = True)
config.epg.mhw = ConfigYesNo(default = False)
config.epg.freesat = ConfigYesNo(default = True)
config.epg.viasat = ConfigYesNo(default = True)
config.epg.netmed = ConfigYesNo(default = True)
config.epg.virgin = ConfigYesNo(default = False)
config.misc.showradiopic = ConfigYesNo(default = True)
def EpgSettingsChanged(configElement):
from enigma import eEPGCache
mask = 0xffffffff
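		# Begin with every EPG source bit set, then clear the bit(s) of each
		# source the user disabled; the resulting mask selects the active
		# sources for the EPG cache below.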
if not config.epg.eit.value:
mask &= ~(eEPGCache.NOWNEXT | eEPGCache.SCHEDULE | eEPGCache.SCHEDULE_OTHER)
if not config.epg.mhw.value:
mask &= ~eEPGCache.MHW
if not config.epg.freesat.value:
mask &= ~(eEPGCache.FREESAT_NOWNEXT | eEPGCache.FREESAT_SCHEDULE | eEPGCache.FREESAT_SCHEDULE_OTHER)
if not config.epg.viasat.value:
mask &= ~eEPGCache.VIASAT
if not config.epg.netmed.value:
mask &= ~(eEPGCache.NETMED_SCHEDULE | eEPGCache.NETMED_SCHEDULE_OTHER)
if not config.epg.virgin.value:
mask &= ~(eEPGCache.VIRGIN_NOWNEXT | eEPGCache.VIRGIN_SCHEDULE)
eEPGCache.getInstance().setEpgSources(mask)
config.epg.eit.addNotifier(EpgSettingsChanged)
config.epg.mhw.addNotifier(EpgSettingsChanged)
config.epg.freesat.addNotifier(EpgSettingsChanged)
config.epg.viasat.addNotifier(EpgSettingsChanged)
config.epg.netmed.addNotifier(EpgSettingsChanged)
config.epg.virgin.addNotifier(EpgSettingsChanged)
config.epg.histminutes = ConfigSelectionNumber(min = 0, max = 120, stepwidth = 15, default = 0, wraparound = True)
def EpgHistorySecondsChanged(configElement):
from enigma import eEPGCache
eEPGCache.getInstance().setEpgHistorySeconds(config.epg.histminutes.getValue()*60)
config.epg.histminutes.addNotifier(EpgHistorySecondsChanged)
def setHDDStandby(configElement):
for hdd in harddiskmanager.HDDList():
hdd[1].setIdleTime(int(configElement.value))
config.usage.hdd_standby.addNotifier(setHDDStandby, immediate_feedback=False)
if SystemInfo["12V_Output"]:
def set12VOutput(configElement):
Misc_Options.getInstance().set_12V_output(configElement.value == "on" and 1 or 0)
config.usage.output_12V.addNotifier(set12VOutput, immediate_feedback=False)
config.usage.keymap = ConfigText(default = eEnv.resolve("${datadir}/enigma2/keymap.xml"))
config.seek = ConfigSubsection()
config.seek.selfdefined_13 = ConfigNumber(default=15)
config.seek.selfdefined_46 = ConfigNumber(default=60)
config.seek.selfdefined_79 = ConfigNumber(default=300)
config.seek.speeds_forward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_backward = ConfigSet(default=[2, 4, 8, 16, 32, 64, 128], choices=[1, 2, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128])
config.seek.speeds_slowmotion = ConfigSet(default=[2, 4, 8], choices=[2, 4, 6, 8, 12, 16, 25])
config.seek.enter_forward = ConfigSelection(default = "2", choices = ["2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.enter_backward = ConfigSelection(default = "1", choices = ["1", "2", "4", "6", "8", "12", "16", "24", "32", "48", "64", "96", "128"])
config.seek.on_pause = ConfigSelection(default = "play", choices = [
("play", _("Play")),
("step", _("Single step (GOP)")),
("last", _("Last speed")) ])
config.crash = ConfigSubsection()
config.crash.details = ConfigYesNo(default = False)
config.usage.timerlist_finished_timer_position = ConfigSelection(default = "end", choices = [("beginning", _("At beginning")), ("end", _("At end"))])
def updateEnterForward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_forward, configElement.value)
config.seek.speeds_forward.addNotifier(updateEnterForward, immediate_feedback = False)
def updateEnterBackward(configElement):
if not configElement.value:
configElement.value = [2]
updateChoices(config.seek.enter_backward, configElement.value)
config.seek.speeds_backward.addNotifier(updateEnterBackward, immediate_feedback = False)
def updateEraseSpeed(el):
eBackgroundFileEraser.getInstance().setEraseSpeed(int(el.value))
def updateEraseFlags(el):
eBackgroundFileEraser.getInstance().setEraseFlags(int(el.value))
config.misc.erase_speed = ConfigSelection(default="20", choices = [
("10", "10 MB/s"),
("20", "20 MB/s"),
("50", "50 MB/s"),
("100", "100 MB/s")])
config.misc.erase_speed.addNotifier(updateEraseSpeed, immediate_feedback = False)
config.misc.erase_flags = ConfigSelection(default="1", choices = [
("0", _("Disable")),
("1", _("Internal hdd only")),
("3", _("Everywhere"))])
config.misc.erase_flags.addNotifier(updateEraseFlags, immediate_feedback = False)
if SystemInfo["ZapMode"]:
def setZapmode(el):
open(SystemInfo["ZapMode"], "w").write(el.value)
config.misc.zapmode = ConfigSelection(default = "mute", choices = [
("mute", _("Black screen")), ("hold", _("Hold screen")), ("mutetilllock", _("Black screen till locked")), ("holdtilllock", _("Hold till locked"))])
config.misc.zapmode.addNotifier(setZapmode, immediate_feedback = False)
config.subtitles = ConfigSubsection()
config.subtitles.ttx_subtitle_colors = ConfigSelection(default = "1", choices = [
("0", _("original")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.ttx_subtitle_original_position = ConfigYesNo(default = False)
config.subtitles.subtitle_position = ConfigSelection( choices = ["0", "10", "20", "30", "40", "50", "60", "70", "80", "90", "100", "150", "200", "250", "300", "350", "400", "450"], default = "50")
config.subtitles.subtitle_alignment = ConfigSelection(choices = [("left", _("left")), ("center", _("center")), ("right", _("right"))], default = "center")
config.subtitles.subtitle_rewrap = ConfigYesNo(default = False)
config.subtitles.subtitle_borderwidth = ConfigSelection(choices = ["1", "2", "3", "4", "5"], default = "3")
config.subtitles.subtitle_fontsize = ConfigSelection(choices = ["16", "18", "20", "22", "24", "26", "28", "30", "32", "34", "36", "38", "40", "42", "44", "46", "48", "50", "52", "54"], default = "34")
subtitle_delay_choicelist = []
for i in range(-900000, 1845000, 45000):
if i == 0:
subtitle_delay_choicelist.append(("0", _("No delay")))
else:
subtitle_delay_choicelist.append(("%d" % i, "%2.1f sec" % (i / 90000.)))
config.subtitles.subtitle_noPTSrecordingdelay = ConfigSelection(default = "315000", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_yellow = ConfigYesNo(default = False)
config.subtitles.dvb_subtitles_original_position = ConfigSelection(default = "0", choices = [("0", _("Original")), ("1", _("Fixed")), ("2", _("Relative"))])
config.subtitles.dvb_subtitles_centered = ConfigYesNo(default = True)
config.subtitles.subtitle_bad_timing_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.dvb_subtitles_backtrans = ConfigSelection(default = "0", choices = [
("0", _("No transparency")),
("25", "10%"),
("50", "20%"),
("75", "30%"),
("100", "40%"),
("125", "50%"),
("150", "60%"),
("175", "70%"),
("200", "80%"),
("225", "90%"),
("255", _("Full transparency"))])
config.subtitles.pango_subtitle_colors = ConfigSelection(default = "0", choices = [
("0", _("alternative")),
("1", _("white")),
("2", _("yellow")) ])
config.subtitles.pango_subtitles_delay = ConfigSelection(default = "0", choices = subtitle_delay_choicelist)
config.subtitles.pango_subtitles_fps = ConfigSelection(default = "1", choices = [
("1", _("Original")),
("23976", _("23.976")),
("24000", _("24")),
("25000", _("25")),
("29970", _("29.97")),
("30000", _("30"))])
config.subtitles.pango_autoturnon = ConfigYesNo(default = True)
config.autolanguage = ConfigSubsection()
audio_language_choices=[
("---", _("None")),
("orj dos ory org esl qaa und mis mul ORY ORJ Audio_ORJ", _("Original")),
("ara", _("Arabic")),
("eus baq", _("Basque")),
("bul", _("Bulgarian")),
("hrv", _("Croatian")),
("ces cze", _("Czech")),
("dan", _("Danish")),
("dut ndl", _("Dutch")),
("eng qaa", _("English")),
("est", _("Estonian")),
("fin", _("Finnish")),
("fra fre", _("French")),
("deu ger", _("German")),
("ell gre", _("Greek")),
("heb", _("Hebrew")),
("hun", _("Hungarian")),
("ita", _("Italian")),
("lav", _("Latvian")),
("lit", _("Lithuanian")),
("ltz", _("Luxembourgish")),
("nor", _("Norwegian")),
("pol", _("Polish")),
("por", _("Portuguese")),
("fas per", _("Persian")),
("ron rum", _("Romanian")),
("rus", _("Russian")),
("srp", _("Serbian")),
("slk slo", _("Slovak")),
("slv", _("Slovenian")),
("spa", _("Spanish")),
("swe", _("Swedish")),
("tha", _("Thai")),
("tur Audio_TUR", _("Turkish"))]
def setEpgLanguage(configElement):
eServiceEvent.setEPGLanguage(configElement.value)
config.autolanguage.audio_epglanguage = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage.addNotifier(setEpgLanguage)
def setEpgLanguageAlternative(configElement):
eServiceEvent.setEPGLanguageAlternative(configElement.value)
config.autolanguage.audio_epglanguage_alternative = ConfigSelection(audio_language_choices[:1] + audio_language_choices [2:], default="---")
config.autolanguage.audio_epglanguage_alternative.addNotifier(setEpgLanguageAlternative)
config.autolanguage.audio_autoselect1 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect2 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect3 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_autoselect4 = ConfigSelection(choices=audio_language_choices, default="---")
config.autolanguage.audio_defaultac3 = ConfigYesNo(default = False)
config.autolanguage.audio_defaultddp = ConfigYesNo(default = False)
config.autolanguage.audio_usecache = ConfigYesNo(default = True)
subtitle_language_choices = audio_language_choices[:1] + audio_language_choices [2:]
config.autolanguage.subtitle_autoselect1 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect2 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect3 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_autoselect4 = ConfigSelection(choices=subtitle_language_choices, default="---")
config.autolanguage.subtitle_hearingimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultimpaired = ConfigYesNo(default = False)
config.autolanguage.subtitle_defaultdvb = ConfigYesNo(default = False)
config.autolanguage.subtitle_usecache = ConfigYesNo(default = True)
config.autolanguage.equal_languages = ConfigSelection(default = "15", choices = [
("0", _("None")),("1", "1"),("2", "2"),("3", "1,2"),
("4", "3"),("5", "1,3"),("6", "2,3"),("7", "1,2,3"),
("8", "4"),("9", "1,4"),("10", "2,4"),("11", "1,2,4"),
("12", "3,4"),("13", "1,3,4"),("14", "2,3,4"),("15", _("All"))])
config.streaming = ConfigSubsection()
config.streaming.stream_ecm = ConfigYesNo(default = False)
config.streaming.descramble = ConfigYesNo(default = True)
config.streaming.stream_eit = ConfigYesNo(default = True)
config.streaming.stream_ait = ConfigYesNo(default = True)
config.streaming.authentication = ConfigYesNo(default = False)
def updateChoices(sel, choices):
if choices:
defval = None
val = int(sel.value)
if not val in choices:
tmp = choices[:]
tmp.reverse()
for x in tmp:
if x < val:
defval = str(x)
break
sel.setChoices(map(str, choices), defval)
def preferredPath(path):
if config.usage.setup_level.index < 2 or path == "<default>":
return None # config.usage.default_path.value, but delay lookup until usage
elif path == "<current>":
return config.movielist.last_videodir.value
elif path == "<timer>":
return config.movielist.last_timer_videodir.value
else:
return path
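# Illustrative behaviour (labels assumed, not from this file): when
# config.usage.setup_level.index >= 2, preferredPath("<current>") resolves to
# the last movielist directory, "<timer>" to the last timer directory, and any
# other string is returned as a literal path; "<default>" (or a lower setup
# level) yields None so callers fall back to config.usage.default_path.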
def preferredTimerPath():
return preferredPath(config.usage.timer_path.value)
def preferredInstantRecordPath():
return preferredPath(config.usage.instantrec_path.value)
def defaultMoviePath():
return defaultRecordingLocation(config.usage.default_path.value)
| berny6969/enigma2 | lib/python/Components/UsageConfig.py | Python | gpl-2.0 | 26,202 |
import math
from rpython.rlib import rfloat
from ..base import BaseTopazTest
class TestMath(BaseTopazTest):
def assert_float_equal(self, result, expected, eps=1e-15):
assert abs(result - expected) < eps
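    # Usage sketch (illustrative): self.assert_float_equal(math.sqrt(2) ** 2, 2.0)
    # passes, since the rounding error (about 4.4e-16) is below the default eps.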
def test_domain_error(self, space):
space.execute("Math::DomainError")
def test_pi(self, space):
w_res = space.execute("return Math::PI")
assert space.float_w(w_res) == math.pi
def test_e(self, space):
w_res = space.execute("return Math::E")
assert space.float_w(w_res) == math.e
def test_acos(self, space):
w_res = space.execute("return [Math.acos(0), Math.acos(1)]")
assert self.unwrap(space, w_res) == [math.acos(0), 0]
def test_acosh(self, space):
w_res = space.execute("return [Math.acosh(1), Math.acosh(2)]")
assert self.unwrap(space, w_res) == [0, math.acosh(2)]
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "acosh"'):
space.execute("Math.acosh(0)")
def test_asin(self, space):
w_res = space.execute("return [Math.asin(0), Math.asin(1)]")
assert self.unwrap(space, w_res) == [0, math.asin(1)]
def test_asinh(self, space):
w_res = space.execute("return [Math.asinh(0), Math.asinh(1)]")
assert self.unwrap(space, w_res) == [math.asinh(0), math.asinh(1)]
def test_atan(self, space):
w_res = space.execute("return [Math.atan(0), Math.atan(1)]")
assert self.unwrap(space, w_res) == [0, math.atan(1)]
def test_atan2(self, space):
w_res = space.execute("return [Math.atan2(-0.0, -1.0), Math.atan2(-1, -1.0)]")
assert self.unwrap(space, w_res) == [math.atan2(-0.0, -1.0), math.atan2(-1.0, -1)]
def test_atanh(self, space):
w_res = space.execute("return [Math.atanh(1), Math.atanh(-1), Math.atanh(0), Math.atanh(0.5)]")
assert self.unwrap(space, w_res) == [float("inf"), float("-inf"), 0, math.atanh(0.5)]
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "atanh"'):
space.execute("Math.atanh(2)")
def test_cbrt(self, space):
w_res = space.execute("return [Math.cbrt(-8), Math.cbrt(-1), Math.cbrt(0)]")
assert self.unwrap(space, w_res) == [-2.0, -1.0, 0]
w_res = space.execute("return Math.cbrt(8)")
self.assert_float_equal(space.float_w(w_res), 2.0)
w_res = space.execute("return Math.cbrt(64)")
self.assert_float_equal(space.float_w(w_res), 4.0)
def test_cos(self, space):
w_res = space.execute("return [Math.cos(0), Math.cos(1)]")
assert self.unwrap(space, w_res) == [1, math.cos(1)]
def test_cosh(self, space):
w_res = space.execute("return [Math.cosh(0), Math.cosh(1), Math.cosh(123123)]")
assert self.unwrap(space, w_res) == [1, math.cosh(1), float("inf")]
def test_exp(self, space):
w_res = space.execute("return [Math.exp(0.0), Math.exp(1)]")
assert self.unwrap(space, w_res) == [1, math.exp(1)]
def test_frexp(self, space):
w_res = space.execute("return Math.frexp(1234)")
assert self.unwrap(space, w_res) == [math.frexp(1234)[0], 11]
def test_gamma(self, space):
w_res = space.execute("return Math.gamma(5.0)")
self.assert_float_equal(space.float_w(w_res), 24.0)
w_res = space.execute("return Math.gamma(6.0)")
self.assert_float_equal(space.float_w(w_res), 120.0)
w_res = space.execute("return Math.gamma(0.5)")
self.assert_float_equal(space.float_w(w_res), math.pi ** 0.5)
w_res = space.execute("return Math.gamma(1000)")
assert space.float_w(w_res) == float("inf")
w_res = space.execute("return Math.gamma(0.0)")
assert space.float_w(w_res) == float("inf")
w_res = space.execute("return Math.gamma(-0.0)")
assert space.float_w(w_res) == float("-inf")
w_res = space.execute("return Math.gamma(Float::INFINITY)")
assert space.float_w(w_res) == float("inf")
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "gamma"'):
space.execute("""Math.gamma(-1)""")
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "gamma"'):
space.execute("""Math.gamma(-Float::INFINITY)""")
w_res = space.execute("return Math.gamma(Float::NAN)")
assert math.isnan(space.float_w(w_res))
def test_lgamma(self, space):
res_w = space.execute("return Math.lgamma(6.0)")
w_res1, w_res2 = space.listview(res_w)
self.assert_float_equal(space.float_w(w_res1), math.log(120))
assert space.int_w(w_res2) == 1
res_w = space.execute("return Math.lgamma(-1)")
w_inf = space.execute("return Float::INFINITY")
w_res1, w_res2 = space.listview(res_w)
assert self.unwrap(space, w_res1) == self.unwrap(space, w_inf)
assert space.int_w(w_res2) == 1
res_w = space.execute("return Math.lgamma(Float::NAN)")
w_res1, w_res2 = space.listview(res_w)
assert math.isnan(self.unwrap(space, w_res1))
assert space.int_w(w_res2) == 1
def test_hypot(self, space):
w_res = space.execute("return Math.hypot(3, 4)")
assert self.unwrap(space, w_res) == 5
def test_ldexp(self, space):
w_res = space.execute("return Math.ldexp(Math.frexp(1234)[0], 11)")
assert self.unwrap(space, w_res) == 1234
with self.raises(space, "TypeError",
"can't convert String into Float"):
space.execute("Math.ldexp('1', 2)")
with self.raises(space, "TypeError",
"can't convert String into Integer"):
space.execute("Math.ldexp(1, '2')")
def test_log(self, space):
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "log"'):
space.execute("Math.log(-1)")
w_res = space.execute("return Math.log(0)")
assert space.float_w(w_res) == float("-inf")
w_res = space.execute("return Math.log(4, 10)")
self.assert_float_equal(space.float_w(w_res), math.log(4, 10))
w_res = space.execute("return Math.log(28)")
self.assert_float_equal(space.float_w(w_res), math.log(28))
w_res = space.execute("return Math.log(3, 4)")
self.assert_float_equal(space.float_w(w_res), math.log(3, 4))
def test_log10(self, space):
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "log10"'):
space.execute("Math.log10(-1)")
w_res = space.execute("return Math.log10(0)")
assert space.float_w(w_res) == float("-inf")
w_res = space.execute("return Math.log10(1)")
assert space.float_w(w_res) == 0.0
w_res = space.execute("return Math.log10(10)")
assert space.float_w(w_res) == 1.0
def test_log2(self, space):
with self.raises(space, "Math::DomainError", 'Numerical argument is out of domain - "log2"'):
space.execute("Math.log2(-1)")
w_res = space.execute("return Math.log2(0)")
assert space.float_w(w_res) == float("-inf")
w_res = space.execute("return Math.log2(1)")
assert space.float_w(w_res) == 0.0
w_res = space.execute("return Math.log2(2)")
assert space.float_w(w_res) == 1.0
w_res = space.execute("return Math.log2(32768)")
assert space.float_w(w_res) == 15.0
w_res = space.execute("return Math.log2(65536)")
assert space.float_w(w_res) == 16.0
def test_sin(self, space):
w_res = space.execute("return [Math.sin(0), Math.sin(1)]")
assert self.unwrap(space, w_res) == [0, math.sin(1)]
def test_sinh(self, space):
w_res = space.execute("return [Math.sinh(0), Math.sinh(2), Math.sinh(1234)]")
assert self.unwrap(space, w_res) == [0, math.sinh(2), float("inf")]
def test_sqrt(self, space):
w_res = space.execute("return [Math.sqrt(4), Math.sqrt(28)]")
assert self.unwrap(space, w_res) == [2, math.sqrt(28)]
def test_tan(self, space):
w_res = space.execute("return Math.tan(Float::INFINITY)")
assert math.isnan(space.float_w(w_res))
w_res = space.execute("return [Math.tan(0), Math.tan(1)]")
assert self.unwrap(space, w_res) == [0, math.tan(1)]
def test_tanh(self, space):
w_res = space.execute("return [Math.tanh(0), Math.tanh(1), Math.tanh(1234)]")
assert self.unwrap(space, w_res) == [0, math.tanh(1), 1.0]
def test_erf(self, space):
w_res = space.execute("return [Math.erf(0), Math.erf(10), Math.erf(-10)]")
assert self.unwrap(space, w_res) == [0.0, 1.0, -1.0]
def test_erfc(self, space):
w_res = space.execute("return [Math.erfc(-1), Math.erfc(0), Math.erfc(1.5)]")
assert self.unwrap(space, w_res) == [math.erfc(-1), 1.0, math.erfc(1.5)]
def test_type_error(self, space):
for methodname in ['sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'sqrt', 'cbrt', 'erf', 'erfc', 'gamma', 'lgamma',
'exp', 'frexp', 'log', 'log10', 'log2']:
with self.raises(space, "TypeError",
"can't convert String into Float"):
space.execute("Math.%s('1.0')" %methodname)
        for methodname in ['hypot', 'atan2']:
            with self.raises(space, "TypeError",
                             "can't convert String into Float"):
                space.execute("Math.%s('1', 2)" % methodname)
            with self.raises(space, "TypeError",
                             "can't convert String into Float"):
                space.execute("Math.%s(1, '2')" % methodname)
| babelsberg/babelsberg-r | tests/modules/test_math.py | Python | bsd-3-clause | 9,799 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_action_reason(osv.osv):
_name = "hr.action.reason"
_description = "Action Reason"
_columns = {
'name': fields.char('Reason', required=True, help='Specifies the reason for Signing In/Signing Out.'),
'action_type': fields.selection([('sign_in', 'Sign in'), ('sign_out', 'Sign out')], "Action Type"),
}
_defaults = {
'action_type': 'sign_in',
}
def _employee_get(obj, cr, uid, context=None):
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
return ids and ids[0] or False
class hr_attendance(osv.osv):
_name = "hr.attendance"
_description = "Attendance"
def _worked_hours_compute(self, cr, uid, ids, fieldnames, args, context=None):
"""For each hr.attendance record of action sign-in: assign 0.
For each hr.attendance record of action sign-out: assign number of hours since last sign-in.
"""
res = {}
for obj in self.browse(cr, uid, ids, context=context):
if obj.action == 'sign_in':
res[obj.id] = 0
elif obj.action == 'sign_out':
# Get the associated sign-in
last_signin_id = self.search(cr, uid, [
('employee_id', '=', obj.employee_id.id),
('name', '<', obj.name), ('action', '=', 'sign_in')
], limit=1, order='name DESC')
if last_signin_id:
last_signin = self.browse(cr, uid, last_signin_id, context=context)[0]
# Compute time elapsed between sign-in and sign-out
last_signin_datetime = datetime.strptime(last_signin.name, '%Y-%m-%d %H:%M:%S')
signout_datetime = datetime.strptime(obj.name, '%Y-%m-%d %H:%M:%S')
workedhours_datetime = (signout_datetime - last_signin_datetime)
                    res[obj.id] = workedhours_datetime.days * 24 + (workedhours_datetime.seconds / 60) / 60.0
else:
res[obj.id] = False
return res
_columns = {
'name': fields.datetime('Date', required=True, select=1),
'action': fields.selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'), ('action','Action')], 'Action', required=True),
'action_desc': fields.many2one("hr.action.reason", "Action Reason", domain="[('action_type', '=', action)]", help='Specifies the reason for Signing In/Signing Out in case of extra hours.'),
'employee_id': fields.many2one('hr.employee', "Employee", required=True, select=True),
'worked_hours': fields.function(_worked_hours_compute, type='float', string='Worked Hours', store=True),
}
_defaults = {
        'name': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),  # please don't remove the lambda; without it the default would be computed once at import time and the current time would never change
'employee_id': _employee_get,
}
def _altern_si_so(self, cr, uid, ids, context=None):
""" Alternance sign_in/sign_out check.
Previous (if exists) must be of opposite action.
Next (if exists) must be of opposite action.
"""
for att in self.browse(cr, uid, ids, context=context):
# search and browse for first previous and first next records
prev_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '<', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name DESC')
next_add_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '>', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name ASC')
prev_atts = self.browse(cr, uid, prev_att_ids, context=context)
next_atts = self.browse(cr, uid, next_add_ids, context=context)
# check for alternance, return False if at least one condition is not satisfied
if prev_atts and prev_atts[0].action == att.action: # previous exists and is same action
return False
if next_atts and next_atts[0].action == att.action: # next exists and is same action
return False
if (not prev_atts) and (not next_atts) and att.action != 'sign_in': # first attendance must be sign_in
return False
return True
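    # Illustrative sequence (not from the original source): for one employee,
    # sign_in -> sign_out -> sign_in is accepted, while two consecutive sign_in
    # records are rejected, as is a first record that is not a sign_in.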
_constraints = [(_altern_si_so, 'Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]
_order = 'name desc'
class hr_employee(osv.osv):
_inherit = "hr.employee"
_description = "Employee"
def _state(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = 'absent'
cr.execute('SELECT hr_attendance.action, hr_attendance.employee_id \
FROM ( \
SELECT MAX(name) AS name, employee_id \
FROM hr_attendance \
WHERE action in (\'sign_in\', \'sign_out\') \
GROUP BY employee_id \
) AS foo \
LEFT JOIN hr_attendance \
ON (hr_attendance.employee_id = foo.employee_id \
AND hr_attendance.name = foo.name) \
WHERE hr_attendance.employee_id IN %s',(tuple(ids),))
for res in cr.fetchall():
result[res[1]] = res[0] == 'sign_in' and 'present' or 'absent'
return result
def _last_sign(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = False
cr.execute("""select max(name) as name
from hr_attendance
where action in ('sign_in', 'sign_out') and employee_id = %s""",(id,))
for res in cr.fetchall():
result[id] = res[0]
return result
def _attendance_access(self, cr, uid, ids, name, args, context=None):
        # This function field is used to hide the Sign In/Sign Out attendance button from the menu.
group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_hr_attendance')
visible = False
if uid in [user.id for user in group.users]:
visible = True
return dict([(x, visible) for x in ids])
_columns = {
'state': fields.function(_state, type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Attendance'),
'last_sign': fields.function(_last_sign, type='datetime', string='Last Sign'),
'attendance_access': fields.function(_attendance_access, string='Attendance Access', type='boolean'),
}
def _action_check(self, cr, uid, emp_id, dt=False, context=None):
cr.execute('SELECT MAX(name) FROM hr_attendance WHERE employee_id=%s', (emp_id,))
res = cr.fetchone()
return not (res and (res[0]>=(dt or time.strftime('%Y-%m-%d %H:%M:%S'))))
def attendance_action_change(self, cr, uid, ids, context=None):
if context is None:
context = {}
action_date = context.get('action_date', False)
action = context.get('action', False)
hr_attendance = self.pool.get('hr.attendance')
warning_sign = {'sign_in': _('Sign In'), 'sign_out': _('Sign Out')}
for employee in self.browse(cr, uid, ids, context=context):
if not action:
if employee.state == 'present': action = 'sign_out'
if employee.state == 'absent': action = 'sign_in'
if not self._action_check(cr, uid, employee.id, action_date, context):
                raise osv.except_osv(_('Warning'), _('You tried to %s with a date earlier than another event!\nPlease contact the HR Manager to correct attendances.') % (warning_sign[action],))
vals = {'action': action, 'employee_id': employee.id}
if action_date:
vals['name'] = action_date
hr_attendance.create(cr, uid, vals, context=context)
return True
| OpusVL/odoo | addons/hr_attendance/hr_attendance.py | Python | agpl-3.0 | 9,132 |
import socket
from os.path import basename
from math import floor
from i3pystatus import IntervalModule, formatp
from i3pystatus.core.util import TimeWrapper
class MPD(IntervalModule):
"""
Displays various information from MPD (the music player daemon)
.. rubric:: Available formatters (uses :ref:`formatp`)
* `{title}` — (the title of the current song)
* `{album}` — (the album of the current song, can be an empty string (e.g. for online streams))
* `{artist}` — (can be empty, too)
    * `{filename}` — (file name without extension or path; empty unless the title is empty)
* `{song_elapsed}` — (Position in the currently playing song, uses :ref:`TimeWrapper`, default is `%m:%S`)
* `{song_length}` — (Length of the current song, same as song_elapsed)
* `{pos}` — (Position of current song in playlist, one-based)
* `{len}` — (Songs in playlist)
* `{status}` — (play, pause, stop mapped through the `status` dictionary)
* `{bitrate}` — (Current bitrate in kilobit/s)
* `{volume}` — (Volume set in MPD)
    Left click on the module play/pauses; right click or scroll up plays the next song, scroll down the previous one.
"""
interval = 1
settings = (
("host"),
("port", "MPD port"),
("format", "formatp string"),
("status", "Dictionary mapping pause, play and stop to output"),
("color", "The color of the text"),
("max_field_len", "Defines max length for in truncate_fields defined fields, if truncated, ellipsis are appended as indicator. It's applied *before* max_len. Value of 0 disables this."),
("max_len", "Defines max length for the hole string, if exceeding fields specefied in truncate_fields are truncated equaly. If truncated, ellipsis are appended as indicator. It's applied *after* max_field_len. Value of 0 disables this."),
("truncate_fields", "fields that will be truncated if exceeding max_field_len or max_len."),
)
host = "localhost"
port = 6600
s = None
format = "{title} {status}"
status = {
"pause": "▷",
"play": "▶",
"stop": "◾",
}
color = "#FFFFFF"
max_field_len = 25
max_len = 100
truncate_fields = ("title", "album", "artist")
on_leftclick = "switch_playpause"
on_rightclick = "next_song"
on_upscroll = on_rightclick
on_downscroll = "previous_song"
def _mpd_command(self, sock, command):
try:
sock.send((command + "\n").encode("utf-8"))
except Exception as e:
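            # The connection is gone (or was never opened): reconnect, swallow
            # MPD's greeting banner, then retry the command once.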
self.s = socket.create_connection((self.host, self.port))
sock = self.s
sock.recv(8192)
sock.send((command + "\n").encode("utf-8"))
try:
reply = sock.recv(16384).decode("utf-8")
replylines = reply.split("\n")[:-2]
return dict(
(line.split(": ", 1)) for line in replylines
)
except Exception as e:
return None
def run(self):
status = self._mpd_command(self.s, "status")
currentsong = self._mpd_command(self.s, "currentsong")
fdict = {
"pos": int(status.get("song", 0)) + 1,
"len": int(status["playlistlength"]),
"status": self.status[status["state"]],
"volume": int(status["volume"]),
"title": currentsong.get("Title", ""),
"album": currentsong.get("Album", ""),
"artist": currentsong.get("Artist", ""),
"song_length": TimeWrapper(currentsong.get("Time", 0)),
"song_elapsed": TimeWrapper(float(status.get("elapsed", 0))),
"bitrate": int(status.get("bitrate", 0)),
}
if not fdict["title"] and "filename" in fdict:
fdict["filename"] = '.'.join(
basename(currentsong["file"]).split('.')[:-1])
else:
fdict["filename"] = ""
if self.max_field_len > 0:
for key in self.truncate_fields:
if len(fdict[key]) > self.max_field_len:
fdict[key] = fdict[key][:self.max_field_len - 1] + "…"
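        # Illustrative: with max_field_len = 25, a 30-character title is cut
        # down to its first 24 characters plus a trailing ellipsis.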
full_text = formatp(self.format, **fdict).strip()
full_text_len = len(full_text)
if full_text_len > self.max_len and self.max_len > 0:
shrink = floor((self.max_len - full_text_len) /
len(self.truncate_fields)) - 1
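            # Note (descriptive, not from the original comments): shrink is
            # negative here, so fdict[key][:shrink] drops characters from the
            # end of each field.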
for key in self.truncate_fields:
fdict[key] = fdict[key][:shrink] + "…"
full_text = formatp(self.format, **fdict).strip()
self.output = {
"full_text": full_text,
"color": self.color,
}
def switch_playpause(self):
try:
self._mpd_command(self.s, "%s" %
("play" if self._mpd_command(self.s, "status")["state"] in ["pause", "stop"] else "pause"))
except Exception as e:
pass
def next_song(self):
try:
self._mpd_command(self.s, "next")
except Exception as e:
pass
def previous_song(self):
try:
self._mpd_command(self.s, "previous")
except Exception as e:
pass
| plumps/i3pystatus | i3pystatus/mpd.py | Python | mit | 5,190 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import random
import re
from datetime import datetime, timedelta
from odoo import api, fields, models, modules, tools
class ImLivechatChannel(models.Model):
""" Livechat Channel
        Define a communication channel, which can be accessed with 'script_external' (a script tag to put on an
        external website), 'script_internal' (code to be integrated with an odoo website) or via the 'web_page' link.
        It provides rating tools and access rules for anonymous people.
"""
_name = 'im_livechat.channel'
_description = 'Livechat Channel'
def _default_image(self):
image_path = modules.get_module_resource('im_livechat', 'static/src/img', 'default.png')
return tools.image_resize_image_big(base64.b64encode(open(image_path, 'rb').read()))
def _default_user_ids(self):
return [(6, 0, [self._uid])]
# attribute fields
name = fields.Char('Name', required=True, help="The name of the channel")
button_text = fields.Char('Text of the Button', default='Have a Question? Chat with us.',
help="Default text displayed on the Livechat Support Button")
default_message = fields.Char('Welcome Message', default='How may I help you?',
help="This is an automated 'welcome' message that your visitor will see when they initiate a new conversation.")
input_placeholder = fields.Char('Chat Input Placeholder', help='Text that prompts the user to initiate the chat.')
# computed fields
web_page = fields.Char('Web Page', compute='_compute_web_page_link', store=False, readonly=True,
help="URL to a static page where you client can discuss with the operator of the channel.")
are_you_inside = fields.Boolean(string='Are you inside the matrix?',
compute='_are_you_inside', store=False, readonly=True)
script_external = fields.Text('Script (external)', compute='_compute_script_external', store=False, readonly=True)
nbr_channel = fields.Integer('Number of conversation', compute='_compute_nbr_channel', store=False, readonly=True)
rating_percentage_satisfaction = fields.Integer(
'% Happy', compute='_compute_percentage_satisfaction', store=False, default=-1,
help="Percentage of happy ratings over the past 7 days")
# images fields
image = fields.Binary('Image', default=_default_image, attachment=True,
help="This field holds the image used as photo for the group, limited to 1024x1024px.")
image_medium = fields.Binary('Medium', attachment=True,
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views.")
image_small = fields.Binary('Thumbnail', attachment=True,
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required.")
# relationnal fields
user_ids = fields.Many2many('res.users', 'im_livechat_channel_im_user', 'channel_id', 'user_id', string='Operators', default=_default_user_ids)
channel_ids = fields.One2many('mail.channel', 'livechat_channel_id', 'Sessions')
rule_ids = fields.One2many('im_livechat.channel.rule', 'channel_id', 'Rules')
@api.one
def _are_you_inside(self):
self.are_you_inside = bool(self.env.uid in [u.id for u in self.user_ids])
@api.multi
def _compute_script_external(self):
view = self.env['ir.model.data'].get_object('im_livechat', 'external_loader')
values = {
"url": self.env['ir.config_parameter'].sudo().get_param('web.base.url'),
"dbname": self._cr.dbname,
}
for record in self:
values["channel_id"] = record.id
record.script_external = view.render(values)
@api.multi
def _compute_web_page_link(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for record in self:
record.web_page = "%s/im_livechat/support/%i" % (base_url, record.id)
@api.multi
@api.depends('channel_ids')
def _compute_nbr_channel(self):
for record in self:
record.nbr_channel = len(record.channel_ids)
@api.multi
@api.depends('channel_ids.rating_ids')
def _compute_percentage_satisfaction(self):
for record in self:
dt = fields.Datetime.to_string(datetime.utcnow() - timedelta(days=7))
repartition = record.channel_ids.rating_get_grades([('create_date', '>=', dt)])
total = sum(repartition.values())
if total > 0:
happy = repartition['great']
record.rating_percentage_satisfaction = ((happy*100) / total) if happy > 0 else 0
else:
record.rating_percentage_satisfaction = -1
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(ImLivechatChannel, self).create(vals)
@api.multi
def write(self, vals):
tools.image_resize_images(vals)
return super(ImLivechatChannel, self).write(vals)
# --------------------------
# Action Methods
# --------------------------
@api.multi
def action_join(self):
self.ensure_one()
return self.write({'user_ids': [(4, self._uid)]})
@api.multi
def action_quit(self):
self.ensure_one()
return self.write({'user_ids': [(3, self._uid)]})
@api.multi
def action_view_rating(self):
""" Action to display the rating relative to the channel, so all rating of the
sessions of the current channel
:returns : the ir.action 'action_view_rating' with the correct domain
"""
self.ensure_one()
action = self.env['ir.actions.act_window'].for_xml_id('im_livechat', 'rating_rating_action_view_livechat_rating')
action['domain'] = [('parent_res_id', '=', self.id), ('parent_res_model', '=', 'im_livechat.channel')]
return action
# --------------------------
# Channel Methods
# --------------------------
@api.multi
def get_available_users(self):
""" get available user of a given channel
:retuns : return the res.users having their im_status online
"""
self.ensure_one()
return self.sudo().user_ids.filtered(lambda user: user.im_status == 'online')
@api.model
def get_mail_channel(self, livechat_channel_id, anonymous_name):
""" Return a mail.channel given a livechat channel. It creates one with a connected operator, or return false otherwise
:param livechat_channel_id : the identifier if the im_livechat.channel
:param anonymous_name : the name of the anonymous person of the channel
:type livechat_channel_id : int
:type anonymous_name : str
:return : channel header
:rtype : dict
"""
        # get the available users of the channel
users = self.sudo().browse(livechat_channel_id).get_available_users()
if len(users) == 0:
return False
# choose the res.users operator and get its partner id
user = random.choice(users)
operator_partner_id = user.partner_id.id
# partner to add to the mail.channel
channel_partner_to_add = [(4, operator_partner_id)]
if self.env.user and self.env.user.active: # valid session user (not public)
channel_partner_to_add.append((4, self.env.user.partner_id.id))
# create the session, and add the link with the given channel
mail_channel = self.env["mail.channel"].with_context(mail_create_nosubscribe=False).sudo().create({
'channel_partner_ids': channel_partner_to_add,
'livechat_channel_id': livechat_channel_id,
'anonymous_name': anonymous_name,
'channel_type': 'livechat',
'name': ', '.join([anonymous_name, user.name]),
'public': 'private',
'email_send': False,
})
return mail_channel.sudo().with_context(im_livechat_operator_partner_id=operator_partner_id).channel_info()[0]
@api.model
def get_channel_infos(self, channel_id):
channel = self.browse(channel_id)
return {
'button_text': channel.button_text,
'input_placeholder': channel.input_placeholder,
'default_message': channel.default_message,
"channel_name": channel.name,
"channel_id": channel.id,
}
@api.model
def get_livechat_info(self, channel_id, username='Visitor'):
info = {}
info['available'] = len(self.browse(channel_id).get_available_users()) > 0
info['server_url'] = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
if info['available']:
info['options'] = self.sudo().get_channel_infos(channel_id)
info['options']["default_username"] = username
return info
class ImLivechatChannelRule(models.Model):
""" Channel Rules
        Rules defining access to the channel (country and URL matching). They also provide the 'auto popup'
        option to automatically open the conversation.
"""
_name = 'im_livechat.channel.rule'
_description = 'Channel Rules'
_order = 'sequence asc'
regex_url = fields.Char('URL Regex',
help="Regular expression specifying the web pages this rule will be applied on.")
action = fields.Selection([('display_button', 'Display the button'), ('auto_popup', 'Auto popup'), ('hide_button', 'Hide the button')],
string='Action', required=True, default='display_button',
help="* 'Display the button' displays the chat button on the pages.\n"\
"* 'Auto popup' displays the button and automatically open the conversation pane.\n"\
"* 'Hide the button' hides the chat button on the pages.")
auto_popup_timer = fields.Integer('Auto popup timer', default=0,
help="Delay (in seconds) to automatically open the conversation window. Note: the selected action must be 'Auto popup' otherwise this parameter will not be taken into account.")
channel_id = fields.Many2one('im_livechat.channel', 'Channel',
help="The channel of the rule")
country_ids = fields.Many2many('res.country', 'im_livechat_channel_country_rel', 'channel_id', 'country_id', 'Country',
help="The rule will only be applied for these countries. Example: if you select 'Belgium' and 'United States' and that you set the action to 'Hide Button', the chat button will be hidden on the specified URL from the visitors located in these 2 countries. This feature requires GeoIP installed on your server.")
sequence = fields.Integer('Matching order', default=10,
help="Given the order to find a matching rule. If 2 rules are matching for the given url/country, the one with the lowest sequence will be chosen.")
def match_rule(self, channel_id, url, country_id=False):
""" determine if a rule of the given channel matches with the given url
:param channel_id : the identifier of the channel_id
:param url : the url to match with a rule
:param country_id : the identifier of the country
:returns the rule that matches the given condition. False otherwise.
:rtype : im_livechat.channel.rule
"""
def _match(rules):
for rule in rules:
if re.search(rule.regex_url or '', url):
return rule
return False
# first, search the country specific rules (the first match is returned)
        if country_id:  # don't include the country in the search if GeoIP is not installed
domain = [('country_ids', 'in', [country_id]), ('channel_id', '=', channel_id)]
rule = _match(self.search(domain))
if rule:
return rule
# second, fallback on the rules without country
domain = [('country_ids', '=', False), ('channel_id', '=', channel_id)]
return _match(self.search(domain))
| maxive/erp | addons/im_livechat/models/im_livechat_channel.py | Python | agpl-3.0 | 12,297 |
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
from _inherit3 import *
class testInherit3(unittest.TestCase):
def testIt(self):
def testInst(c):
self.assertEqual(c.x, 0)
self.assertEqual(c.foo(3), 3)
x = c.X()
self.assertEqual(x.y, 0)
self.assertEqual(c.E.i, 0)
self.assertEqual(c.E.j, 1)
b = B()
c = C()
testInst(b)
testInst(c)
self.assertEqual(b.foo(), 1)
self.assertEqual(c.foo(), 0)
if __name__ == '__main__':
unittest.main()
| NixaSoftware/CVis | venv/bin/libs/python/pyste/tests/inherit3UT.py | Python | apache-2.0 | 770 |
#! /usr/bin/env python
import unittest
import sys
import os
import re
import decimal
import shutil
from subprocess import check_call, CalledProcessError
# Ensure that Python can find and load the GSLab libraries
#os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append('../..')
from gslab_fill import tablefill
from gslab_make.tests import nostderrout
class testTablefill(unittest.TestCase):
def setUp(self):
if not os.path.exists('./build/'):
os.mkdir('./build/')
def testInput(self):
for ext in ['lyx', 'tex']:
with nostderrout():
message = tablefill(input = '../../gslab_fill/tests/input/tables_appendix.txt ' + \
'../../gslab_fill/tests/input/tables_appendix_two.txt',
template = '../../gslab_fill/tests/input/tablefill_template.%s' % ext,
output = './build/tablefill_template_filled.%s' % ext)
self.assertIn('filled successfully', message)
tag_data = open('../../gslab_fill/tests/input/tablefill_template.%s' % ext, 'rU').readlines()
filled_data = open('./build/tablefill_template_filled.%s' % ext, 'rU').readlines()
self.assertEqual(len(tag_data), len(filled_data))
for n in range(len(tag_data)):
if ext == 'tex':
self.tag_compare_latex(tag_data[n], filled_data[n])
elif ext == 'lyx':
self.tag_compare_lyx(tag_data[n], filled_data[n])
def tag_compare_latex(self, tag_line, filled_line):
tag_line = tag_line.split('&')
filled_line = filled_line.split('&')
for col in range(len(tag_line)):
if re.match('^.*#\d+#', tag_line[col]) or re.match('^.*#\d+,#', tag_line[col]):
entry_tag = re.split('#', tag_line[col])[1]
decimal_places = int(entry_tag.replace(',', ''))
if decimal_places > 0:
self.assertTrue(re.search('\.', filled_line[col]))
decimal_part = re.split('\.', filled_line[col])[1]
non_decimal = re.compile(r'[^\d.]+')
decimal_part = non_decimal.sub('', decimal_part)
self.assertEqual(len(decimal_part), decimal_places)
else:
self.assertFalse(re.search('\.', filled_line[col]))
if re.match('^.*#\d+,#', tag_line[col]):
integer_part = re.split('\.', filled_line[col])[0]
if len(integer_part) > 3:
self.assertEqual(integer_part[-4], ',')
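    # Tag semantics as exercised above (inferred from the regexes, not from a
    # spec): a template entry "#3#" requests 3 decimal places, and "#3,#"
    # additionally requests thousands separators, e.g. 1234.5678 -> 1,234.568.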
def tag_compare_lyx(self, tag_line, filled_line):
if re.match('^.*#\d+#', tag_line) or re.match('^.*#\d+,#', tag_line):
entry_tag = re.split('#', tag_line)[1]
decimal_places = int(entry_tag.replace(',', ''))
if decimal_places > 0:
self.assertTrue(re.search('\.', filled_line))
decimal_part = re.split('\.', filled_line)[1]
non_decimal = re.compile(r'[^\d.]+')
decimal_part = non_decimal.sub('', decimal_part)
self.assertEqual(len(decimal_part), decimal_places)
else:
self.assertFalse(re.search('\.', filled_line))
if re.match('^.*#\d+,#', tag_line):
integer_part = re.split('\.', filled_line)[0]
if len(integer_part) > 3:
self.assertEqual(integer_part[-4], ',')
def testBreaksRoundingString(self):
for ext in ['lyx', 'tex']:
with nostderrout():
error = tablefill(input = '../../gslab_fill/tests/input/tables_appendix.txt ' +
'../../gslab_fill/tests/input/tables_appendix_two.txt',
template = '../../gslab_fill/tests/input/tablefill_template_breaks.%s' % ext,
output = './build/tablefill_template_filled.%s' % ext)
self.assertIn('InvalidOperation', error)
def testIllegalSyntax(self):
# missing arguments
for ext in ['lyx', 'tex']:
with nostderrout():
error = tablefill(input = '../../gslab_fill/tests/input/tables_appendix.txt ' + \
'../../gslab_fill/tests/input/tables_appendix_two.txt',
template = '../../gslab_fill/tests/input/tablefill_template.%s' % ext)
self.assertIn('KeyError', error)
# non-existent input 1
for ext in ['lyx', 'tex']:
with nostderrout():
error = tablefill(input = '../../gslab_fill/tests/input/fake_file.txt ' + \
'../../gslab_fill/tests/input/tables_appendix_two.txt',
template = '../../gslab_fill/tests/input/tablefill_template_breaks.%s' % ext,
output = './build/tablefill_template_filled.%s' % ext)
self.assertIn('IOError', error)
# non-existent input 2
for ext in ['lyx', 'tex']:
with nostderrout():
error = tablefill(input = '../../gslab_fill/tests/input/tables_appendix.txt ' + \
'../../gslab_fill/tests/input/fake_file.txt',
template = '../../gslab_fill/tests/input/tablefill_template_breaks.%s' % ext,
output = './build/tablefill_template_filled.%s' % ext)
self.assertIn('IOError', error)
def testArgumentOrder(self):
for ext in ['lyx', 'tex']:
with nostderrout():
message = tablefill(input = '../../gslab_fill/tests/input/tables_appendix.txt ' + \
'../../gslab_fill/tests/input/tables_appendix_two.txt',
output = './build/tablefill_template_filled.%s' % ext,
template = '../../gslab_fill/tests/input/tablefill_template.%s' % ext)
self.assertIn('filled successfully', message)
with open('./build/tablefill_template_filled.%s' % ext, 'rU') as filled_file:
filled_data_args1 = filled_file.readlines()
with nostderrout():
message = tablefill(output = './build/tablefill_template_filled.%s' % ext,
template = '../../gslab_fill/tests/input/tablefill_template.%s' % ext,
input = '../../gslab_fill/tests/input/tables_appendix.txt ' + \
'../../gslab_fill/tests/input/tables_appendix_two.txt')
self.assertIn('filled successfully', message)
with open('./build/tablefill_template_filled.%s' % ext, 'rU') as filled_file:
filled_data_args2 = filled_file.readlines()
self.assertEqual(filled_data_args1, filled_data_args2)
def tearDown(self):
if os.path.exists('./build/'):
shutil.rmtree('./build/')
if __name__ == '__main__':
os.getcwd()
unittest.main()
| gslab-econ/gslab_python | gslab_fill/tests/test_tablefill.py | Python | mit | 7,464 |
#!/usr/local/bin/python
#
# BitKeeper hook script.
#
# svn_buildbot.py was used as a base for this file, if you find any bugs or
# errors please email me.
#
# Amar Takhar <amar@ntp.org>
'''
/path/to/bk_buildbot.py --repository "$REPOS" --revision "$REV" --branch \
"<branch>" --bbserver localhost --bbport 9989
'''
import commands
import sys
# We have hackish "-d" handling here rather than in the Options
# subclass below because a common error will be to not have twisted in
# PYTHONPATH; we want to be able to print that error to the log if
# debug mode is on, so we set it up before the imports.
DEBUG = None
if '-d' in sys.argv:
i = sys.argv.index('-d')
DEBUG = sys.argv[i + 1]
del sys.argv[i]
del sys.argv[i]
if DEBUG:
f = open(DEBUG, 'a')
sys.stderr = f
sys.stdout = f
from twisted.cred import credentials
from twisted.internet import reactor
from twisted.python import usage
from twisted.spread import pb
class Options(usage.Options):
optParameters = [
['repository', 'r', None,
"The repository that was changed."],
['revision', 'v', None,
"The revision that we want to examine (default: latest)"],
['branch', 'b', None,
"Name of the branch to insert into the branch field. (REQUIRED)"],
['category', 'c', None,
"Schedular category."],
['bbserver', 's', 'localhost',
"The hostname of the server that buildbot is running on"],
['bbport', 'p', 8007,
"The port that buildbot is listening on"]
]
optFlags = [
['dryrun', 'n', "Do not actually send changes"],
]
def __init__(self):
usage.Options.__init__(self)
def postOptions(self):
if self['repository'] is None:
raise usage.error("You must pass --repository")
class ChangeSender:
def getChanges(self, opts):
"""Generate and stash a list of Change dictionaries, ready to be sent
to the buildmaster's PBChangeSource."""
# first we extract information about the files that were changed
repo = opts['repository']
print "Repo:", repo
rev_arg = ''
if opts['revision']:
rev_arg = '-r"%s"' % (opts['revision'], )
changed = commands.getoutput("bk changes -v %s -d':GFILE:\\n' '%s'" % (
rev_arg, repo)).split('\n')
# Remove the first line, it's an info message you can't remove (annoying)
del changed[0]
change_info = commands.getoutput("bk changes %s -d':USER:\\n$each(:C:){(:C:)\\n}' '%s'" % (
rev_arg, repo)).split('\n')
# Remove the first line, it's an info message you can't remove (annoying)
del change_info[0]
who = change_info.pop(0)
branch = opts['branch']
message = '\n'.join(change_info)
revision = opts.get('revision')
changes = {'who': who,
'branch': branch,
'files': changed,
'comments': message,
'revision': revision}
if opts.get('category'):
changes['category'] = opts.get('category')
return changes
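    # Shape of the returned dictionary (illustrative values only):
    # {'who': 'amar', 'branch': 'dev', 'files': ['ChangeLog'],
    #  'comments': 'fix build', 'revision': '1.2345'}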
def sendChanges(self, opts, changes):
pbcf = pb.PBClientFactory()
reactor.connectTCP(opts['bbserver'], int(opts['bbport']), pbcf)
d = pbcf.login(credentials.UsernamePassword('change', 'changepw'))
d.addCallback(self.sendAllChanges, changes)
return d
def sendAllChanges(self, remote, changes):
dl = remote.callRemote('addChange', changes)
return dl
def run(self):
opts = Options()
try:
opts.parseOptions()
if not opts['branch']:
print "You must supply a branch with -b or --branch."
sys.exit(1)
except usage.error, ue:
print opts
print "%s: %s" % (sys.argv[0], ue)
sys.exit()
changes = self.getChanges(opts)
if opts['dryrun']:
for k in changes.keys():
print "[%10s]: %s" % (k, changes[k])
print "*NOT* sending any changes"
return
d = self.sendChanges(opts, changes)
def quit(*why):
print "quitting! because", why
reactor.stop()
def failed(f):
print "FAILURE: %s" % f
reactor.stop()
d.addErrback(failed)
d.addCallback(quit, "SUCCESS")
reactor.callLater(60, quit, "TIMEOUT")
reactor.run()
if __name__ == '__main__':
s = ChangeSender()
s.run()
| zozo123/buildbot | master/contrib/bk_buildbot.py | Python | gpl-3.0 | 4,579 |
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from functools import lru_cache
from itertools import chain
from pathlib import Path
import enum
import json
import os
import pickle
import re
import typing as T
import hashlib
from .. import build
from .. import dependencies
from .. import programs
from .. import mesonlib
from .. import mlog
from ..compilers import LANGUAGES_USING_LDFLAGS
from ..mesonlib import (
File, MachineChoice, MesonException, OptionType, OrderedSet, OptionOverrideProxy,
classify_unity_sources, unholder, OptionKey, join_args
)
if T.TYPE_CHECKING:
from ..arglist import CompilerArgs
from ..compilers import Compiler
from ..interpreter import Interpreter, Test
from ..mesonlib import FileMode
InstallType = T.List[T.Tuple[str, str, T.Optional['FileMode']]]
InstallSubdirsType = T.List[T.Tuple[str, str, T.Optional['FileMode'], T.Tuple[T.Set[str], T.Set[str]]]]
# Languages that can mix with C or C++ but don't support unity builds yet
# because the syntax we use for unity builds is specific to C/++/ObjC/++.
# Assembly files cannot be unitified and neither can LLVM IR files
LANGS_CANT_UNITY = ('d', 'fortran', 'vala')
class RegenInfo:
def __init__(self, source_dir, build_dir, depfiles):
self.source_dir = source_dir
self.build_dir = build_dir
self.depfiles = depfiles
class TestProtocol(enum.Enum):
EXITCODE = 0
TAP = 1
GTEST = 2
RUST = 3
@classmethod
def from_str(cls, string: str) -> 'TestProtocol':
if string == 'exitcode':
return cls.EXITCODE
elif string == 'tap':
return cls.TAP
elif string == 'gtest':
return cls.GTEST
elif string == 'rust':
return cls.RUST
raise MesonException(f'unknown test format {string}')
def __str__(self) -> str:
cls = type(self)
if self is cls.EXITCODE:
return 'exitcode'
elif self is cls.GTEST:
return 'gtest'
elif self is cls.RUST:
return 'rust'
return 'tap'
class CleanTrees:
'''
Directories outputted by custom targets that have to be manually cleaned
because on Linux `ninja clean` only deletes empty directories.
'''
def __init__(self, build_dir, trees):
self.build_dir = build_dir
self.trees = trees
class InstallData:
def __init__(self, source_dir: str, build_dir: str, prefix: str,
strip_bin: T.List[str], install_umask: T.Union[str, int],
mesonintrospect: T.List[str], version: str):
        # TODO: in python 3.8 or with typing_extensions install_umask could be:
# `T.Union[T.Literal['preserve'], int]`, which would be more accurate.
self.source_dir = source_dir
self.build_dir = build_dir
self.prefix = prefix
self.strip_bin = strip_bin
self.install_umask = install_umask
self.targets: T.List[TargetInstallData] = []
self.headers: T.List[InstallDataBase] = []
self.man: T.List[InstallDataBase] = []
self.data: T.List[InstallDataBase] = []
self.install_scripts: T.List[ExecutableSerialisation] = []
self.install_subdirs: T.List[SubdirInstallData] = []
self.mesonintrospect = mesonintrospect
self.version = version
class TargetInstallData:
def __init__(self, fname: str, outdir: str, aliases: T.Dict[str, str], strip: bool,
install_name_mappings: T.Dict, rpath_dirs_to_remove: T.Set[bytes],
install_rpath: str, install_mode: 'FileMode', subproject: str, optional: bool = False):
self.fname = fname
self.outdir = outdir
self.aliases = aliases
self.strip = strip
self.install_name_mappings = install_name_mappings
self.rpath_dirs_to_remove = rpath_dirs_to_remove
self.install_rpath = install_rpath
self.install_mode = install_mode
self.subproject = subproject
self.optional = optional
class InstallDataBase:
def __init__(self, path: str, install_path: str, install_mode: 'FileMode', subproject: str):
self.path = path
self.install_path = install_path
self.install_mode = install_mode
self.subproject = subproject
class SubdirInstallData(InstallDataBase):
def __init__(self, path: str, install_path: str, install_mode: 'FileMode', exclude, subproject: str):
super().__init__(path, install_path, install_mode, subproject)
self.exclude = exclude
class ExecutableSerialisation:
def __init__(self, cmd_args, env: T.Optional[build.EnvironmentVariables] = None, exe_wrapper=None,
workdir=None, extra_paths=None, capture=None) -> None:
self.cmd_args = cmd_args
self.env = env
if exe_wrapper is not None:
assert(isinstance(exe_wrapper, programs.ExternalProgram))
self.exe_runner = exe_wrapper
self.workdir = workdir
self.extra_paths = extra_paths
self.capture = capture
self.pickled = False
self.skip_if_destdir = False
self.verbose = False
self.subproject = ''
class TestSerialisation:
def __init__(self, name: str, project: str, suite: str, fname: T.List[str],
is_cross_built: bool, exe_wrapper: T.Optional[programs.ExternalProgram],
needs_exe_wrapper: bool, is_parallel: bool, cmd_args: T.List[str],
env: build.EnvironmentVariables, should_fail: bool,
timeout: T.Optional[int], workdir: T.Optional[str],
extra_paths: T.List[str], protocol: TestProtocol, priority: int,
cmd_is_built: bool, depends: T.List[str], version: str):
self.name = name
self.project_name = project
self.suite = suite
self.fname = fname
self.is_cross_built = is_cross_built
if exe_wrapper is not None:
            assert isinstance(exe_wrapper, programs.ExternalProgram)
self.exe_runner = exe_wrapper
self.is_parallel = is_parallel
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.timeout = timeout
self.workdir = workdir
self.extra_paths = extra_paths
self.protocol = protocol
self.priority = priority
self.needs_exe_wrapper = needs_exe_wrapper
self.cmd_is_built = cmd_is_built
self.depends = depends
self.version = version
def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:
if backend == 'ninja':
from . import ninjabackend
return ninjabackend.NinjaBackend(build, interpreter)
elif backend == 'vs':
from . import vs2010backend
return vs2010backend.autodetect_vs_version(build, interpreter)
elif backend == 'vs2010':
from . import vs2010backend
return vs2010backend.Vs2010Backend(build, interpreter)
elif backend == 'vs2015':
from . import vs2015backend
return vs2015backend.Vs2015Backend(build, interpreter)
elif backend == 'vs2017':
from . import vs2017backend
return vs2017backend.Vs2017Backend(build, interpreter)
elif backend == 'vs2019':
from . import vs2019backend
return vs2019backend.Vs2019Backend(build, interpreter)
elif backend == 'xcode':
from . import xcodebackend
return xcodebackend.XCodeBackend(build, interpreter)
return None
# This class contains the basic functionality that is needed by all backends.
# Feel free to move stuff in and out of it as you see fit.
class Backend:
def __init__(self, build: T.Optional[build.Build], interpreter: T.Optional['Interpreter']):
# Make it possible to construct a dummy backend
# This is used for introspection without a build directory
if build is None:
self.environment = None
return
self.build = build
self.interpreter = interpreter
self.environment = build.environment
self.processed_targets: T.Set[str] = set()
self.name = '<UNKNOWN>'
self.build_dir = self.environment.get_build_dir()
self.source_dir = self.environment.get_source_dir()
self.build_to_src = mesonlib.relpath(self.environment.get_source_dir(),
self.environment.get_build_dir())
self.src_to_build = mesonlib.relpath(self.environment.get_build_dir(),
self.environment.get_source_dir())
def generate(self) -> None:
raise RuntimeError('generate is not implemented in {}'.format(type(self).__name__))
def get_target_filename(self, t, *, warn_multi_output: bool = True):
if isinstance(t, build.CustomTarget):
if warn_multi_output and len(t.get_outputs()) != 1:
mlog.warning('custom_target {!r} has more than one output! '
'Using the first one.'.format(t.name))
filename = t.get_outputs()[0]
elif isinstance(t, build.CustomTargetIndex):
filename = t.get_outputs()[0]
else:
            assert isinstance(t, build.BuildTarget)
filename = t.get_filename()
return os.path.join(self.get_target_dir(t), filename)
def get_target_filename_abs(self, target):
return os.path.join(self.environment.get_build_dir(), self.get_target_filename(target))
def get_base_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
return OptionOverrideProxy(target.option_overrides_base,
{k: v for k, v in self.environment.coredata.options.items()
if k.type in {OptionType.BASE, OptionType.BUILTIN}})
def get_compiler_options_for_target(self, target: build.BuildTarget) -> OptionOverrideProxy:
comp_reg = {k: v for k, v in self.environment.coredata.options.items() if k.is_compiler()}
comp_override = target.option_overrides_compiler
return OptionOverrideProxy(comp_override, comp_reg)
def get_option_for_target(self, option_name: 'OptionKey', target: build.BuildTarget):
if option_name in target.option_overrides_base:
override = target.option_overrides_base[option_name]
return self.environment.coredata.validate_option_value(option_name, override)
return self.environment.coredata.get_option(option_name.evolve(subproject=target.subproject))
def get_source_dir_include_args(self, target, compiler, *, absolute_path=False):
curdir = target.get_subdir()
if absolute_path:
lead = self.source_dir
else:
lead = self.build_to_src
tmppath = os.path.normpath(os.path.join(lead, curdir))
return compiler.get_include_args(tmppath, False)
def get_build_dir_include_args(self, target, compiler, *, absolute_path=False):
if absolute_path:
curdir = os.path.join(self.build_dir, target.get_subdir())
else:
curdir = target.get_subdir()
if curdir == '':
curdir = '.'
return compiler.get_include_args(curdir, False)
def get_target_filename_for_linking(self, target):
# On some platforms (msvc for instance), the file that is used for
# dynamic linking is not the same as the dynamic library itself. This
# file is called an import library, and we want to link against that.
# On all other platforms, we link to the library directly.
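        # For example, MSVC pairs foo.dll with an import library foo.lib,
        # and MinGW produces libfoo.dll.a; that is the file linked against.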
if isinstance(target, build.SharedLibrary):
link_lib = target.get_import_filename() or target.get_filename()
return os.path.join(self.get_target_dir(target), link_lib)
elif isinstance(target, build.StaticLibrary):
return os.path.join(self.get_target_dir(target), target.get_filename())
elif isinstance(target, (build.CustomTarget, build.CustomTargetIndex)):
if not target.is_linkable_target():
raise MesonException(f'Tried to link against custom target "{target.name}", which is not linkable.')
return os.path.join(self.get_target_dir(target), target.get_filename())
elif isinstance(target, build.Executable):
if target.import_filename:
return os.path.join(self.get_target_dir(target), target.get_import_filename())
else:
return None
raise AssertionError(f'BUG: Tried to link to {target!r} which is not linkable')
@lru_cache(maxsize=None)
def get_target_dir(self, target):
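        # With the default 'mirror' layout a target defined in src/lib ends
        # up under ${builddir}/src/lib; otherwise everything is collected in
        # a single 'meson-out' directory.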
if self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
dirname = target.get_subdir()
else:
dirname = 'meson-out'
return dirname
def get_target_dir_relative_to(self, t, o):
'''Get a target dir relative to another target's directory'''
target_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
othert_dir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(o))
return os.path.relpath(target_dir, othert_dir)
def get_target_source_dir(self, target):
# if target dir is empty, avoid extraneous trailing / from os.path.join()
target_dir = self.get_target_dir(target)
if target_dir:
return os.path.join(self.build_to_src, target_dir)
return self.build_to_src
def get_target_private_dir(self, target):
        return self.get_target_filename(target, warn_multi_output=False) + '.p'
def get_target_private_dir_abs(self, target):
return os.path.join(self.environment.get_build_dir(), self.get_target_private_dir(target))
@lru_cache(maxsize=None)
def get_target_generated_dir(self, target, gensrc, src):
"""
Takes a BuildTarget, a generator source (CustomTarget or GeneratedList),
and a generated source filename.
Returns the full path of the generated source relative to the build root
"""
# CustomTarget generators output to the build dir of the CustomTarget
if isinstance(gensrc, (build.CustomTarget, build.CustomTargetIndex)):
return os.path.join(self.get_target_dir(gensrc), src)
# GeneratedList generators output to the private build directory of the
# target that the GeneratedList is used in
return os.path.join(self.get_target_private_dir(target), src)
def get_unity_source_file(self, target, suffix, number):
# There is a potential conflict here, but it is unlikely that
# anyone both enables unity builds and has a file called foo-unity.cpp.
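        # For example, target 'foo' with C++ sources yields foo-unity0.cpp,
        # foo-unity1.cpp, ... in the target's private directory.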
osrc = f'{target.name}-unity{number}.{suffix}'
return mesonlib.File.from_built_file(self.get_target_private_dir(target), osrc)
def generate_unity_files(self, target, unity_src):
abs_files = []
result = []
compsrcs = classify_unity_sources(target.compilers.values(), unity_src)
unity_size = self.get_option_for_target(OptionKey('unity_size'), target)
def init_language_file(suffix, unity_file_number):
unity_src = self.get_unity_source_file(target, suffix, unity_file_number)
outfileabs = unity_src.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir())
outfileabs_tmp = outfileabs + '.tmp'
abs_files.append(outfileabs)
outfileabs_tmp_dir = os.path.dirname(outfileabs_tmp)
if not os.path.exists(outfileabs_tmp_dir):
os.makedirs(outfileabs_tmp_dir)
result.append(unity_src)
return open(outfileabs_tmp, 'w')
# For each language, generate unity source files and return the list
for comp, srcs in compsrcs.items():
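            # Start above the limit so that the first source always opens a
            # new unity file.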
files_in_current = unity_size + 1
unity_file_number = 0
ofile = None
for src in srcs:
if files_in_current >= unity_size:
if ofile:
ofile.close()
ofile = init_language_file(comp.get_default_suffix(), unity_file_number)
unity_file_number += 1
files_in_current = 0
ofile.write(f'#include<{src}>\n')
files_in_current += 1
if ofile:
ofile.close()
        for x in abs_files:
            mesonlib.replace_if_different(x, x + '.tmp')
return result
def relpath(self, todir, fromdir):
return os.path.relpath(os.path.join('dummyprefixdir', todir),
os.path.join('dummyprefixdir', fromdir))
def flatten_object_list(self, target, proj_dir_to_build_root=''):
obj_list = self._flatten_object_list(target, target.get_objects(), proj_dir_to_build_root)
return list(dict.fromkeys(obj_list))
def _flatten_object_list(self, target, objects, proj_dir_to_build_root):
obj_list = []
for obj in objects:
if isinstance(obj, str):
o = os.path.join(proj_dir_to_build_root,
self.build_to_src, target.get_subdir(), obj)
obj_list.append(o)
elif isinstance(obj, mesonlib.File):
obj_list.append(obj.rel_to_builddir(self.build_to_src))
elif isinstance(obj, build.ExtractedObjects):
if obj.recursive:
obj_list += self._flatten_object_list(obj.target, obj.objlist, proj_dir_to_build_root)
obj_list += self.determine_ext_objs(obj, proj_dir_to_build_root)
else:
raise MesonException('Unknown data type in object list.')
return obj_list
def is_swift_target(self, target):
for s in target.sources:
if s.endswith('swift'):
return True
return False
def determine_swift_dep_dirs(self, target):
result = []
for l in target.link_targets:
result.append(self.get_target_private_dir_abs(l))
return result
def get_executable_serialisation(self, cmd, workdir=None,
extra_bdeps=None, capture=None,
env: T.Optional[build.EnvironmentVariables] = None):
exe = cmd[0]
cmd_args = cmd[1:]
if isinstance(exe, programs.ExternalProgram):
exe_cmd = exe.get_command()
exe_for_machine = exe.for_machine
elif isinstance(exe, build.BuildTarget):
exe_cmd = [self.get_target_filename_abs(exe)]
exe_for_machine = exe.for_machine
elif isinstance(exe, build.CustomTarget):
# The output of a custom target can either be directly runnable
# or not, that is, a script, a native binary or a cross compiled
# binary when exe wrapper is available and when it is not.
# This implementation is not exhaustive but it works in the
# common cases.
exe_cmd = [self.get_target_filename_abs(exe)]
exe_for_machine = MachineChoice.BUILD
elif isinstance(exe, mesonlib.File):
exe_cmd = [exe.rel_to_builddir(self.environment.source_dir)]
exe_for_machine = MachineChoice.BUILD
else:
exe_cmd = [exe]
exe_for_machine = MachineChoice.BUILD
machine = self.environment.machines[exe_for_machine]
if machine.is_windows() or machine.is_cygwin():
extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps or [])
else:
extra_paths = []
is_cross_built = not self.environment.machines.matches_build_machine(exe_for_machine)
if is_cross_built and self.environment.need_exe_wrapper():
exe_wrapper = self.environment.get_exe_wrapper()
if not exe_wrapper or not exe_wrapper.found():
msg = 'An exe_wrapper is needed but was not found. Please define one ' \
'in cross file and check the command and/or add it to PATH.'
raise MesonException(msg)
else:
if exe_cmd[0].endswith('.jar'):
exe_cmd = ['java', '-jar'] + exe_cmd
elif exe_cmd[0].endswith('.exe') and not (mesonlib.is_windows() or mesonlib.is_cygwin() or mesonlib.is_wsl()):
exe_cmd = ['mono'] + exe_cmd
exe_wrapper = None
workdir = workdir or self.environment.get_build_dir()
return ExecutableSerialisation(exe_cmd + cmd_args, env,
exe_wrapper, workdir,
extra_paths, capture)
def as_meson_exe_cmdline(self, tname, exe, cmd_args, workdir=None,
extra_bdeps=None, capture=None, force_serialize=False,
env: T.Optional[build.EnvironmentVariables] = None,
verbose: bool = False):
'''
Serialize an executable for running with a generator or a custom target
'''
cmd = [exe] + cmd_args
es = self.get_executable_serialisation(cmd, workdir, extra_bdeps, capture, env)
es.verbose = verbose
reasons = []
if es.extra_paths:
reasons.append('to set PATH')
if es.exe_runner:
reasons.append('to use exe_wrapper')
if workdir:
reasons.append('to set workdir')
if any('\n' in c for c in es.cmd_args):
reasons.append('because command contains newlines')
if es.env and es.env.varnames:
reasons.append('to set env')
force_serialize = force_serialize or bool(reasons)
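        # Note that capture alone does not force full serialization; it is
        # handled below via the lighter '--capture' wrapper, but still gets
        # reported as a reason.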
if capture:
reasons.append('to capture output')
if not force_serialize:
if not capture:
return es.cmd_args, ''
return ((self.environment.get_build_command() +
['--internal', 'exe', '--capture', capture, '--'] + es.cmd_args),
', '.join(reasons))
if isinstance(exe, (programs.ExternalProgram,
build.BuildTarget, build.CustomTarget)):
basename = exe.name
elif isinstance(exe, mesonlib.File):
basename = os.path.basename(exe.fname)
else:
basename = os.path.basename(exe)
# Can't just use exe.name here; it will likely be run more than once
# Take a digest of the cmd args, env, workdir, and capture. This avoids
# collisions and also makes the name deterministic over regenerations
# which avoids a rebuild by Ninja because the cmdline stays the same.
data = bytes(str(es.env) + str(es.cmd_args) + str(es.workdir) + str(capture),
encoding='utf-8')
digest = hashlib.sha1(data).hexdigest()
scratch_file = f'meson_exe_{basename}_{digest}.dat'
exe_data = os.path.join(self.environment.get_scratch_dir(), scratch_file)
with open(exe_data, 'wb') as f:
pickle.dump(es, f)
return (self.environment.get_build_command() + ['--internal', 'exe', '--unpickle', exe_data],
', '.join(reasons))
def serialize_tests(self):
test_data = os.path.join(self.environment.get_scratch_dir(), 'meson_test_setup.dat')
with open(test_data, 'wb') as datafile:
self.write_test_file(datafile)
benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
with open(benchmark_data, 'wb') as datafile:
self.write_benchmark_file(datafile)
return test_data, benchmark_data
def determine_linker_and_stdlib_args(self, target):
'''
If we're building a static library, there is only one static linker.
Otherwise, we query the target for the dynamic linker.
'''
if isinstance(target, build.StaticLibrary):
return self.build.static_linker[target.for_machine], []
l, stdlib_args = target.get_clink_dynamic_linker_and_stdlibs()
return l, stdlib_args
@staticmethod
def _libdir_is_system(libdir, compilers, env):
libdir = os.path.normpath(libdir)
for cc in compilers.values():
if libdir in cc.get_library_dirs(env):
return True
return False
def get_external_rpath_dirs(self, target):
dirs = set()
args = []
for lang in LANGUAGES_USING_LDFLAGS:
try:
args.extend(self.environment.coredata.get_external_link_args(target.for_machine, lang))
except Exception:
pass
# Match rpath formats:
# -Wl,-rpath=
# -Wl,-rpath,
rpath_regex = re.compile(r'-Wl,-rpath[=,]([^,]+)')
# Match solaris style compat runpath formats:
# -Wl,-R
# -Wl,-R,
runpath_regex = re.compile(r'-Wl,-R[,]?([^,]+)')
# Match symbols formats:
# -Wl,--just-symbols=
# -Wl,--just-symbols,
symbols_regex = re.compile(r'-Wl,--just-symbols[=,]([^,]+)')
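        # For example, '-Wl,-rpath,/opt/foo/lib:/opt/bar/lib' contributes
        # both /opt/foo/lib and /opt/bar/lib to the returned set.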
for arg in args:
rpath_match = rpath_regex.match(arg)
if rpath_match:
for dir in rpath_match.group(1).split(':'):
dirs.add(dir)
runpath_match = runpath_regex.match(arg)
if runpath_match:
for dir in runpath_match.group(1).split(':'):
                    # For GNU ld, -R means --just-symbols when given a file;
                    # it is an rpath only when the path is a directory
if Path(dir).is_dir():
dirs.add(dir)
symbols_match = symbols_regex.match(arg)
if symbols_match:
for dir in symbols_match.group(1).split(':'):
# Prevent usage of --just-symbols to specify rpath
if Path(dir).is_dir():
raise MesonException(f'Invalid arg for --just-symbols, {dir} is a directory.')
return dirs
def rpaths_for_bundled_shared_libraries(self, target, exclude_system=True):
paths = []
for dep in target.external_deps:
if not isinstance(dep, (dependencies.ExternalLibrary, dependencies.PkgConfigDependency)):
continue
la = dep.link_args
if len(la) != 1 or not os.path.isabs(la[0]):
continue
# The only link argument is an absolute path to a library file.
libpath = la[0]
libdir = os.path.dirname(libpath)
if exclude_system and self._libdir_is_system(libdir, target.compilers, self.environment):
# No point in adding system paths.
continue
# Don't remove rpaths specified in LDFLAGS.
if libdir in self.get_external_rpath_dirs(target):
continue
# Windows doesn't support rpaths, but we use this function to
# emulate rpaths by setting PATH, so also accept DLLs here
if os.path.splitext(libpath)[1] not in ['.dll', '.lib', '.so', '.dylib']:
continue
if libdir.startswith(self.environment.get_source_dir()):
rel_to_src = libdir[len(self.environment.get_source_dir()) + 1:]
assert not os.path.isabs(rel_to_src), f'rel_to_src: {rel_to_src} is absolute'
paths.append(os.path.join(self.build_to_src, rel_to_src))
else:
paths.append(libdir)
return paths
def determine_rpath_dirs(self, target: build.BuildTarget) -> T.Tuple[str, ...]:
if self.environment.coredata.get_option(OptionKey('layout')) == 'mirror':
result: OrderedSet[str] = target.get_link_dep_subdirs()
else:
result = OrderedSet()
result.add('meson-out')
result.update(self.rpaths_for_bundled_shared_libraries(target))
target.rpath_dirs_to_remove.update([d.encode('utf-8') for d in result])
return tuple(result)
@staticmethod
def canonicalize_filename(fname):
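        # e.g. 'sub/dir\file.c' -> 'sub_dir_file.c' and 'c:\x.c' ->
        # 'c__x.c', keeping object file names flat and free of separators.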
for ch in ('/', '\\', ':'):
fname = fname.replace(ch, '_')
return fname
def object_filename_from_source(self, target, source):
assert isinstance(source, mesonlib.File)
build_dir = self.environment.get_build_dir()
rel_src = source.rel_to_builddir(self.build_to_src)
# foo.vala files compile down to foo.c and then foo.c.o, not foo.vala.o
if rel_src.endswith(('.vala', '.gs')):
# See description in generate_vala_compile for this logic.
if source.is_built:
if os.path.isabs(rel_src):
rel_src = rel_src[len(build_dir) + 1:]
rel_src = os.path.relpath(rel_src, self.get_target_private_dir(target))
else:
rel_src = os.path.basename(rel_src)
# A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
source = 'meson-generated_' + rel_src[:-5] + '.c'
elif source.is_built:
if os.path.isabs(rel_src):
rel_src = rel_src[len(build_dir) + 1:]
targetdir = self.get_target_private_dir(target)
# A meson- prefixed directory is reserved; hopefully no-one creates a file name with such a weird prefix.
source = 'meson-generated_' + os.path.relpath(rel_src, targetdir)
else:
if os.path.isabs(rel_src):
# Use the absolute path directly to avoid file name conflicts
source = rel_src
else:
source = os.path.relpath(os.path.join(build_dir, rel_src),
os.path.join(self.environment.get_source_dir(), target.get_subdir()))
machine = self.environment.machines[target.for_machine]
return self.canonicalize_filename(source) + '.' + machine.get_object_suffix()
def determine_ext_objs(self, extobj, proj_dir_to_build_root):
result = []
# Merge sources and generated sources
sources = list(extobj.srclist)
for gensrc in extobj.genlist:
for s in gensrc.get_outputs():
path = self.get_target_generated_dir(extobj.target, gensrc, s)
dirpart, fnamepart = os.path.split(path)
sources.append(File(True, dirpart, fnamepart))
# Filter out headers and all non-source files
filtered_sources = []
for s in sources:
if self.environment.is_source(s) and not self.environment.is_header(s):
filtered_sources.append(s)
elif self.environment.is_object(s):
result.append(s.relative_name())
sources = filtered_sources
# extobj could contain only objects and no sources
if not sources:
return result
targetdir = self.get_target_private_dir(extobj.target)
# With unity builds, sources don't map directly to objects,
# we only support extracting all the objects in this mode,
# so just return all object files.
if self.is_unity(extobj.target):
compsrcs = classify_unity_sources(extobj.target.compilers.values(), sources)
sources = []
unity_size = self.get_option_for_target(OptionKey('unity_size'), extobj.target)
for comp, srcs in compsrcs.items():
if comp.language in LANGS_CANT_UNITY:
sources += srcs
continue
for i in range(len(srcs) // unity_size + 1):
osrc = self.get_unity_source_file(extobj.target,
comp.get_default_suffix(), i)
sources.append(osrc)
for osrc in sources:
objname = self.object_filename_from_source(extobj.target, osrc)
objpath = os.path.join(proj_dir_to_build_root, targetdir, objname)
result.append(objpath)
return result
def get_pch_include_args(self, compiler, target):
args = []
pchpath = self.get_target_private_dir(target)
includeargs = compiler.get_include_args(pchpath, False)
p = target.get_pch(compiler.get_language())
if p:
args += compiler.get_pch_use_args(pchpath, p[0])
return includeargs + args
def create_msvc_pch_implementation(self, target, lang, pch_header):
# We have to include the language in the file name, otherwise
# pch.c and pch.cpp will both end up as pch.obj in VS backends.
impl_name = f'meson_pch-{lang}.{lang}'
pch_rel_to_build = os.path.join(self.get_target_private_dir(target), impl_name)
# Make sure to prepend the build dir, since the working directory is
# not defined. Otherwise, we might create the file in the wrong path.
pch_file = os.path.join(self.build_dir, pch_rel_to_build)
os.makedirs(os.path.dirname(pch_file), exist_ok=True)
content = '#include "{}"'.format(os.path.basename(pch_header))
pch_file_tmp = pch_file + '.tmp'
with open(pch_file_tmp, 'w') as f:
f.write(content)
mesonlib.replace_if_different(pch_file, pch_file_tmp)
return pch_rel_to_build
@staticmethod
def escape_extra_args(compiler, args):
# all backslashes in defines are doubly-escaped
extra_args = []
for arg in args:
if arg.startswith('-D') or arg.startswith('/D'):
arg = arg.replace('\\', '\\\\')
extra_args.append(arg)
return extra_args
def generate_basic_compiler_args(self, target: build.BuildTarget, compiler: 'Compiler', no_warn_args: bool = False) -> 'CompilerArgs':
# Create an empty commands list, and start adding arguments from
# various sources in the order in which they must override each other
# starting from hard-coded defaults followed by build options and so on.
commands = compiler.compiler_args()
copt_proxy = self.get_compiler_options_for_target(target)
# First, the trivial ones that are impossible to override.
#
# Add -nostdinc/-nostdinc++ if needed; can't be overridden
commands += self.get_no_stdlib_args(target, compiler)
# Add things like /NOLOGO or -pipe; usually can't be overridden
commands += compiler.get_always_args()
# Only add warning-flags by default if the buildtype enables it, and if
# we weren't explicitly asked to not emit warnings (for Vala, f.ex)
if no_warn_args:
commands += compiler.get_no_warn_args()
else:
commands += compiler.get_warn_args(self.get_option_for_target(OptionKey('warning_level'), target))
# Add -Werror if werror=true is set in the build options set on the
# command-line or default_options inside project(). This only sets the
# action to be done for warnings if/when they are emitted, so it's ok
# to set it after get_no_warn_args() or get_warn_args().
if self.get_option_for_target(OptionKey('werror'), target):
commands += compiler.get_werror_args()
# Add compile args for c_* or cpp_* build options set on the
# command-line or default_options inside project().
commands += compiler.get_option_compile_args(copt_proxy)
# Add buildtype args: optimization level, debugging, etc.
commands += compiler.get_buildtype_args(self.get_option_for_target(OptionKey('buildtype'), target))
commands += compiler.get_optimization_args(self.get_option_for_target(OptionKey('optimization'), target))
commands += compiler.get_debug_args(self.get_option_for_target(OptionKey('debug'), target))
# Add compile args added using add_project_arguments()
commands += self.build.get_project_args(compiler, target.subproject, target.for_machine)
# Add compile args added using add_global_arguments()
# These override per-project arguments
commands += self.build.get_global_args(compiler, target.for_machine)
# Using both /ZI and /Zi at the same times produces a compiler warning.
# We do not add /ZI by default. If it is being used it is because the user has explicitly enabled it.
# /ZI needs to be removed in that case to avoid cl's warning to that effect (D9025 : overriding '/ZI' with '/Zi')
if ('/ZI' in commands) and ('/Zi' in commands):
commands.remove('/Zi')
# Compile args added from the env: CFLAGS/CXXFLAGS, etc, or the cross
# file. We want these to override all the defaults, but not the
# per-target compile args.
commands += self.environment.coredata.get_external_args(target.for_machine, compiler.get_language())
# Always set -fPIC for shared libraries
if isinstance(target, build.SharedLibrary):
commands += compiler.get_pic_args()
# Set -fPIC for static libraries by default unless explicitly disabled
if isinstance(target, build.StaticLibrary) and target.pic:
commands += compiler.get_pic_args()
elif isinstance(target, (build.StaticLibrary, build.Executable)) and target.pie:
commands += compiler.get_pie_args()
# Add compile args needed to find external dependencies. Link args are
# added while generating the link command.
# NOTE: We must preserve the order in which external deps are
# specified, so we reverse the list before iterating over it.
for dep in reversed(target.get_external_deps()):
if not dep.found():
continue
if compiler.language == 'vala':
if isinstance(dep, dependencies.PkgConfigDependency):
if dep.name == 'glib-2.0' and dep.version_reqs is not None:
for req in dep.version_reqs:
if req.startswith(('>=', '==')):
commands += ['--target-glib', req[2:]]
break
commands += ['--pkg', dep.name]
elif isinstance(dep, dependencies.ExternalLibrary):
commands += dep.get_link_args('vala')
else:
commands += compiler.get_dependency_compile_args(dep)
# Qt needs -fPIC for executables
# XXX: We should move to -fPIC for all executables
if isinstance(target, build.Executable):
commands += dep.get_exe_args(compiler)
# For 'automagic' deps: Boost and GTest. Also dependency('threads').
# pkg-config puts the thread flags itself via `Cflags:`
# Fortran requires extra include directives.
if compiler.language == 'fortran':
for lt in chain(target.link_targets, target.link_whole_targets):
priv_dir = self.get_target_private_dir(lt)
commands += compiler.get_include_args(priv_dir, False)
return commands
def build_target_link_arguments(self, compiler, deps):
args = []
for d in deps:
            if not d.is_linkable_target():
raise RuntimeError(f'Tried to link with a non-library target "{d.get_basename()}".')
arg = self.get_target_filename_for_linking(d)
if not arg:
continue
if compiler.get_language() == 'd':
arg = '-Wl,' + arg
else:
arg = compiler.get_linker_lib_prefix() + arg
args.append(arg)
return args
def get_mingw_extra_paths(self, target):
paths = OrderedSet()
# The cross bindir
root = self.environment.properties[target.for_machine].get_root()
if root:
paths.add(os.path.join(root, 'bin'))
# The toolchain bindir
sys_root = self.environment.properties[target.for_machine].get_sys_root()
if sys_root:
paths.add(os.path.join(sys_root, 'bin'))
# Get program and library dirs from all target compilers
if isinstance(target, build.BuildTarget):
for cc in target.compilers.values():
paths.update(cc.get_program_dirs(self.environment))
paths.update(cc.get_library_dirs(self.environment))
return list(paths)
def determine_windows_extra_paths(self, target: T.Union[build.BuildTarget, str], extra_bdeps):
'''On Windows there is no such thing as an rpath.
We must determine all locations of DLLs that this exe
links to and return them so they can be used in unit
tests.'''
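        # For example, a test executable linking against a DLL built in a
        # sibling subdir needs that subdir's build directory on PATH.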
result = set()
prospectives = set()
if isinstance(target, build.BuildTarget):
prospectives.update(target.get_transitive_link_deps())
# External deps
for deppath in self.rpaths_for_bundled_shared_libraries(target, exclude_system=False):
result.add(os.path.normpath(os.path.join(self.environment.get_build_dir(), deppath)))
for bdep in extra_bdeps:
prospectives.add(bdep)
prospectives.update(bdep.get_transitive_link_deps())
# Internal deps
for ld in prospectives:
if ld == '' or ld == '.':
continue
dirseg = os.path.join(self.environment.get_build_dir(), self.get_target_dir(ld))
result.add(dirseg)
if (isinstance(target, build.BuildTarget) and
not self.environment.machines.matches_build_machine(target.for_machine)):
result.update(self.get_mingw_extra_paths(target))
return list(result)
def write_benchmark_file(self, datafile):
self.write_test_serialisation(self.build.get_benchmarks(), datafile)
def write_test_file(self, datafile):
self.write_test_serialisation(self.build.get_tests(), datafile)
def create_test_serialisation(self, tests: T.List['Test']) -> T.List[TestSerialisation]:
arr = []
for t in sorted(tests, key=lambda tst: -1 * tst.priority):
exe = t.get_exe()
if isinstance(exe, programs.ExternalProgram):
cmd = exe.get_command()
else:
cmd = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(t.get_exe()))]
if isinstance(exe, (build.BuildTarget, programs.ExternalProgram)):
test_for_machine = exe.for_machine
else:
# E.g. an external verifier or simulator program run on a generated executable.
# Can always be run without a wrapper.
test_for_machine = MachineChoice.BUILD
# we allow passing compiled executables to tests, which may be cross built.
# We need to consider these as well when considering whether the target is cross or not.
for a in t.cmd_args:
if isinstance(a, build.BuildTarget):
if a.for_machine is MachineChoice.HOST:
test_for_machine = MachineChoice.HOST
break
is_cross = self.environment.is_cross_build(test_for_machine)
if is_cross and self.environment.need_exe_wrapper():
exe_wrapper = self.environment.get_exe_wrapper()
else:
exe_wrapper = None
machine = self.environment.machines[exe.for_machine]
if machine.is_windows() or machine.is_cygwin():
extra_bdeps = []
if isinstance(exe, build.CustomTarget):
extra_bdeps = exe.get_transitive_build_target_deps()
extra_paths = self.determine_windows_extra_paths(exe, extra_bdeps)
else:
extra_paths = []
cmd_args = []
depends = set(t.depends)
if isinstance(exe, build.Target):
depends.add(exe)
for a in unholder(t.cmd_args):
if isinstance(a, build.Target):
depends.add(a)
if isinstance(a, build.BuildTarget):
extra_paths += self.determine_windows_extra_paths(a, [])
if isinstance(a, mesonlib.File):
a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
cmd_args.append(a)
elif isinstance(a, str):
cmd_args.append(a)
elif isinstance(a, build.Executable):
p = self.construct_target_rel_path(a, t.workdir)
if p == a.get_filename():
p = './' + p
cmd_args.append(p)
elif isinstance(a, build.Target):
cmd_args.append(self.construct_target_rel_path(a, t.workdir))
else:
raise MesonException('Bad object in test command.')
ts = TestSerialisation(t.get_name(), t.project_name, t.suite, cmd, is_cross,
exe_wrapper, self.environment.need_exe_wrapper(),
t.is_parallel, cmd_args, t.env,
t.should_fail, t.timeout, t.workdir,
extra_paths, t.protocol, t.priority,
isinstance(exe, build.Executable),
[x.get_id() for x in depends],
self.environment.coredata.version)
arr.append(ts)
return arr
def write_test_serialisation(self, tests: T.List['Test'], datafile: str):
pickle.dump(self.create_test_serialisation(tests), datafile)
def construct_target_rel_path(self, a, workdir):
if workdir is None:
return self.get_target_filename(a)
        assert os.path.isabs(workdir)
abs_path = self.get_target_filename_abs(a)
return os.path.relpath(abs_path, workdir)
def generate_depmf_install(self, d: InstallData) -> None:
if self.build.dep_manifest_name is None:
return
ifilename = os.path.join(self.environment.get_build_dir(), 'depmf.json')
ofilename = os.path.join(self.environment.get_prefix(), self.build.dep_manifest_name)
mfobj = {'type': 'dependency manifest', 'version': '1.0', 'projects': self.build.dep_manifest}
with open(ifilename, 'w') as f:
f.write(json.dumps(mfobj))
# Copy file from, to, and with mode unchanged
d.data.append(InstallDataBase(ifilename, ofilename, None, ''))
def get_regen_filelist(self):
'''List of all files whose alteration means that the build
definition needs to be regenerated.'''
deps = [str(Path(self.build_to_src) / df)
for df in self.interpreter.get_build_def_files()]
if self.environment.is_cross_build():
deps.extend(self.environment.coredata.cross_files)
deps.extend(self.environment.coredata.config_files)
deps.append('meson-private/coredata.dat')
self.check_clock_skew(deps)
return deps
def generate_regen_info(self):
deps = self.get_regen_filelist()
regeninfo = RegenInfo(self.environment.get_source_dir(),
self.environment.get_build_dir(),
deps)
filename = os.path.join(self.environment.get_scratch_dir(),
'regeninfo.dump')
with open(filename, 'wb') as f:
pickle.dump(regeninfo, f)
def check_clock_skew(self, file_list):
# If a file that leads to reconfiguration has a time
# stamp in the future, it will trigger an eternal reconfigure
# loop.
import time
now = time.time()
for f in file_list:
absf = os.path.join(self.environment.get_build_dir(), f)
ftime = os.path.getmtime(absf)
delta = ftime - now
# On Windows disk time stamps sometimes point
# to the future by a minuscule amount, less than
# 0.001 seconds. I don't know why.
if delta > 0.001:
raise MesonException(f'Clock skew detected. File {absf} has a time stamp {delta:.4f}s in the future.')
def build_target_to_cmd_array(self, bt):
if isinstance(bt, build.BuildTarget):
arr = [os.path.join(self.environment.get_build_dir(), self.get_target_filename(bt))]
else:
arr = bt.get_command()
return arr
def replace_extra_args(self, args, genlist):
final_args = []
for a in args:
if a == '@EXTRA_ARGS@':
final_args += genlist.get_extra_args()
else:
final_args.append(a)
return final_args
def replace_outputs(self, args, private_dir, output_list):
newargs = []
regex = re.compile(r'@OUTPUT(\d+)@')
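        # e.g. an argument '@OUTPUT0@' is replaced with
        # os.path.join(private_dir, output_list[0])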
for arg in args:
m = regex.search(arg)
while m is not None:
index = int(m.group(1))
src = f'@OUTPUT{index}@'
arg = arg.replace(src, os.path.join(private_dir, output_list[index]))
m = regex.search(arg)
newargs.append(arg)
return newargs
def get_build_by_default_targets(self):
result = OrderedDict()
# Get all build and custom targets that must be built by default
for name, t in self.build.get_targets().items():
if t.build_by_default:
result[name] = t
# Get all targets used as test executables and arguments. These must
# also be built by default. XXX: Sometime in the future these should be
# built only before running tests.
for t in self.build.get_tests():
exe = unholder(t.exe)
if isinstance(exe, (build.CustomTarget, build.BuildTarget)):
result[exe.get_id()] = exe
for arg in unholder(t.cmd_args):
if not isinstance(arg, (build.CustomTarget, build.BuildTarget)):
continue
result[arg.get_id()] = arg
for dep in t.depends:
assert isinstance(dep, (build.CustomTarget, build.BuildTarget))
result[dep.get_id()] = dep
return result
@lru_cache(maxsize=None)
def get_custom_target_provided_by_generated_source(self, generated_source):
libs = []
for f in generated_source.get_outputs():
if self.environment.is_library(f):
libs.append(os.path.join(self.get_target_dir(generated_source), f))
return libs
@lru_cache(maxsize=None)
def get_custom_target_provided_libraries(self, target):
libs = []
for t in target.get_generated_sources():
if not isinstance(t, build.CustomTarget):
continue
l = self.get_custom_target_provided_by_generated_source(t)
libs = libs + l
return libs
def is_unity(self, target):
optval = self.get_option_for_target(OptionKey('unity'), target)
if optval == 'on' or (optval == 'subprojects' and target.subproject != ''):
return True
return False
def get_custom_target_sources(self, target):
'''
Custom target sources can be of various object types; strings, File,
BuildTarget, even other CustomTargets.
Returns the path to them relative to the build root directory.
'''
srcs = []
for i in unholder(target.get_sources()):
if isinstance(i, str):
fname = [os.path.join(self.build_to_src, target.subdir, i)]
elif isinstance(i, build.BuildTarget):
fname = [self.get_target_filename(i)]
elif isinstance(i, (build.CustomTarget, build.CustomTargetIndex)):
fname = [os.path.join(self.get_custom_target_output_dir(i), p) for p in i.get_outputs()]
elif isinstance(i, build.GeneratedList):
fname = [os.path.join(self.get_target_private_dir(target), p) for p in i.get_outputs()]
elif isinstance(i, build.ExtractedObjects):
fname = [os.path.join(self.get_target_private_dir(i.target), p) for p in i.get_outputs(self)]
else:
fname = [i.rel_to_builddir(self.build_to_src)]
if target.absolute_paths:
fname = [os.path.join(self.environment.get_build_dir(), f) for f in fname]
srcs += fname
return srcs
def get_custom_target_depend_files(self, target, absolute_paths=False):
deps = []
for i in target.depend_files:
if isinstance(i, mesonlib.File):
if absolute_paths:
deps.append(i.absolute_path(self.environment.get_source_dir(),
self.environment.get_build_dir()))
else:
deps.append(i.rel_to_builddir(self.build_to_src))
else:
if absolute_paths:
deps.append(os.path.join(self.environment.get_source_dir(), target.subdir, i))
else:
deps.append(os.path.join(self.build_to_src, target.subdir, i))
return deps
def get_custom_target_output_dir(self, target):
# The XCode backend is special. A target foo/bar does
# not go to ${BUILDDIR}/foo/bar but instead to
# ${BUILDDIR}/${BUILDTYPE}/foo/bar.
# Currently we set the include dir to be the former,
# and not the latter. Thus we need this extra customisation
# point. If in the future we make include dirs et al match
# ${BUILDDIR}/${BUILDTYPE} instead, this becomes unnecessary.
return self.get_target_dir(target)
@lru_cache(maxsize=None)
def get_normpath_target(self, source) -> str:
return os.path.normpath(source)
def get_custom_target_dirs(self, target, compiler, *, absolute_path=False):
custom_target_include_dirs = []
for i in target.get_generated_sources():
# Generator output goes into the target private dir which is
# already in the include paths list. Only custom targets have their
# own target build dir.
if not isinstance(i, (build.CustomTarget, build.CustomTargetIndex)):
continue
idir = self.get_normpath_target(self.get_custom_target_output_dir(i))
if not idir:
idir = '.'
if absolute_path:
idir = os.path.join(self.environment.get_build_dir(), idir)
if idir not in custom_target_include_dirs:
custom_target_include_dirs.append(idir)
return custom_target_include_dirs
def get_custom_target_dir_include_args(self, target, compiler, *, absolute_path=False):
incs = []
for i in self.get_custom_target_dirs(target, compiler, absolute_path=absolute_path):
incs += compiler.get_include_args(i, False)
return incs
def eval_custom_target_command(self, target, absolute_outputs=False):
# We want the outputs to be absolute only when using the VS backend
# XXX: Maybe allow the vs backend to use relative paths too?
source_root = self.build_to_src
build_root = '.'
outdir = self.get_custom_target_output_dir(target)
if absolute_outputs:
source_root = self.environment.get_source_dir()
build_root = self.environment.get_build_dir()
outdir = os.path.join(self.environment.get_build_dir(), outdir)
outputs = []
for i in target.get_outputs():
outputs.append(os.path.join(outdir, i))
inputs = self.get_custom_target_sources(target)
# Evaluate the command list
cmd = []
for i in target.command:
if isinstance(i, build.BuildTarget):
cmd += self.build_target_to_cmd_array(i)
continue
elif isinstance(i, build.CustomTarget):
# GIR scanner will attempt to execute this binary but
# it assumes that it is in path, so always give it a full path.
tmp = i.get_outputs()[0]
i = os.path.join(self.get_custom_target_output_dir(i), tmp)
elif isinstance(i, mesonlib.File):
i = i.rel_to_builddir(self.build_to_src)
if target.absolute_paths or absolute_outputs:
i = os.path.join(self.environment.get_build_dir(), i)
# FIXME: str types are blindly added ignoring 'target.absolute_paths'
# because we can't know if they refer to a file or just a string
elif isinstance(i, str):
if '@SOURCE_ROOT@' in i:
i = i.replace('@SOURCE_ROOT@', source_root)
if '@BUILD_ROOT@' in i:
i = i.replace('@BUILD_ROOT@', build_root)
if '@CURRENT_SOURCE_DIR@' in i:
i = i.replace('@CURRENT_SOURCE_DIR@', os.path.join(source_root, target.subdir))
if '@DEPFILE@' in i:
if target.depfile is None:
msg = 'Custom target {!r} has @DEPFILE@ but no depfile ' \
'keyword argument.'.format(target.name)
raise MesonException(msg)
dfilename = os.path.join(outdir, target.depfile)
i = i.replace('@DEPFILE@', dfilename)
if '@PRIVATE_DIR@' in i:
if target.absolute_paths:
pdir = self.get_target_private_dir_abs(target)
else:
pdir = self.get_target_private_dir(target)
i = i.replace('@PRIVATE_DIR@', pdir)
else:
err_msg = 'Argument {0} is of unknown type {1}'
raise RuntimeError(err_msg.format(str(i), str(type(i))))
cmd.append(i)
# Substitute the rest of the template strings
values = mesonlib.get_filenames_templates_dict(inputs, outputs)
cmd = mesonlib.substitute_values(cmd, values)
# This should not be necessary but removing it breaks
# building GStreamer on Windows. The underlying issue
# is problems with quoting backslashes on Windows
# which is the seventh circle of hell. The downside is
# that this breaks custom targets whose command lines
# have backslashes. If you try to fix this be sure to
# check that it does not break GST.
#
# The bug causes file paths such as c:\foo to get escaped
# into c:\\foo.
#
# Unfortunately we have not been able to come up with an
# isolated test case for this so unless you manage to come up
# with one, the only way is to test the building with Gst's
# setup. Note this in your MR or ping us and we will get it
# fixed.
#
# https://github.com/mesonbuild/meson/pull/737
cmd = [i.replace('\\', '/') for i in cmd]
return inputs, outputs, cmd
def get_run_target_env(self, target: build.RunTarget) -> build.EnvironmentVariables:
env = target.env if target.env else build.EnvironmentVariables()
introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
env.set('MESON_SOURCE_ROOT', [self.environment.get_source_dir()])
env.set('MESON_BUILD_ROOT', [self.environment.get_build_dir()])
env.set('MESON_SUBDIR', [target.subdir])
env.set('MESONINTROSPECT', [introspect_cmd])
return env
def run_postconf_scripts(self) -> None:
from ..scripts.meson_exe import run_exe
introspect_cmd = join_args(self.environment.get_build_command() + ['introspect'])
env = {'MESON_SOURCE_ROOT': self.environment.get_source_dir(),
'MESON_BUILD_ROOT': self.environment.get_build_dir(),
'MESONINTROSPECT': introspect_cmd,
}
for s in self.build.postconf_scripts:
name = ' '.join(s.cmd_args)
mlog.log(f'Running postconf script {name!r}')
run_exe(s, env)
def create_install_data(self) -> InstallData:
strip_bin = self.environment.lookup_binary_entry(MachineChoice.HOST, 'strip')
if strip_bin is None:
if self.environment.is_cross_build():
mlog.warning('Cross file does not specify strip binary, result will not be stripped.')
else:
# TODO go through all candidates, like others
strip_bin = [self.environment.default_strip[0]]
d = InstallData(self.environment.get_source_dir(),
self.environment.get_build_dir(),
self.environment.get_prefix(),
strip_bin,
self.environment.coredata.get_option(OptionKey('install_umask')),
self.environment.get_build_command() + ['introspect'],
self.environment.coredata.version)
self.generate_depmf_install(d)
self.generate_target_install(d)
self.generate_header_install(d)
self.generate_man_install(d)
self.generate_data_install(d)
self.generate_custom_install_script(d)
self.generate_subdir_install(d)
return d
def create_install_data_files(self):
install_data_file = os.path.join(self.environment.get_scratch_dir(), 'install.dat')
with open(install_data_file, 'wb') as ofile:
pickle.dump(self.create_install_data(), ofile)
def generate_target_install(self, d: InstallData) -> None:
for t in self.build.get_targets().values():
if not t.should_install():
continue
outdirs, custom_install_dir = t.get_install_dir(self.environment)
# Sanity-check the outputs and install_dirs
num_outdirs, num_out = len(outdirs), len(t.get_outputs())
if num_outdirs != 1 and num_outdirs != num_out:
m = 'Target {!r} has {} outputs: {!r}, but only {} "install_dir"s were found.\n' \
"Pass 'false' for outputs that should not be installed and 'true' for\n" \
'using the default installation directory for an output.'
raise MesonException(m.format(t.name, num_out, t.get_outputs(), num_outdirs))
install_mode = t.get_custom_install_mode()
# Install the target output(s)
if isinstance(t, build.BuildTarget):
# In general, stripping static archives is tricky and full of pitfalls.
# Wholesale stripping of static archives with a command such as
#
# strip libfoo.a
#
# is broken, as GNU's strip will remove *every* symbol in a static
# archive. One solution to this nonintuitive behaviour would be
# to only strip local/debug symbols. Unfortunately, strip arguments
# are not specified by POSIX and therefore not portable. GNU's `-g`
# option (i.e. remove debug symbols) is equivalent to Apple's `-S`.
#
# TODO: Create GNUStrip/AppleStrip/etc. hierarchy for more
# fine-grained stripping of static archives.
should_strip = not isinstance(t, build.StaticLibrary) and self.get_option_for_target(OptionKey('strip'), t)
assert isinstance(should_strip, bool), 'for mypy'
# Install primary build output (library/executable/jar, etc)
# Done separately because of strip/aliases/rpath
if outdirs[0] is not False:
mappings = t.get_link_deps_mapping(d.prefix, self.environment)
i = TargetInstallData(self.get_target_filename(t), outdirs[0],
t.get_aliases(), should_strip, mappings,
t.rpath_dirs_to_remove,
t.install_rpath, install_mode, t.subproject)
d.targets.append(i)
if isinstance(t, (build.SharedLibrary, build.SharedModule, build.Executable)):
# On toolchains/platforms that use an import library for
# linking (separate from the shared library with all the
# code), we need to install that too (dll.a/.lib).
if t.get_import_filename():
if custom_install_dir:
# If the DLL is installed into a custom directory,
# install the import library into the same place so
# it doesn't go into a surprising place
implib_install_dir = outdirs[0]
else:
implib_install_dir = self.environment.get_import_lib_dir()
# Install the import library; may not exist for shared modules
i = TargetInstallData(self.get_target_filename_for_linking(t),
implib_install_dir, {}, False, {}, set(), '', install_mode,
t.subproject, optional=isinstance(t, build.SharedModule))
d.targets.append(i)
if not should_strip and t.get_debug_filename():
debug_file = os.path.join(self.get_target_dir(t), t.get_debug_filename())
i = TargetInstallData(debug_file, outdirs[0],
{}, False, {}, set(), '',
install_mode, t.subproject,
optional=True)
d.targets.append(i)
# Install secondary outputs. Only used for Vala right now.
if num_outdirs > 1:
for output, outdir in zip(t.get_outputs()[1:], outdirs[1:]):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
i = TargetInstallData(f, outdir, {}, False, {}, set(), None,
install_mode, t.subproject)
d.targets.append(i)
elif isinstance(t, build.CustomTarget):
# If only one install_dir is specified, assume that all
# outputs will be installed into it. This is for
# backwards-compatibility and because it makes sense to
# avoid repetition since this is a common use-case.
#
# To selectively install only some outputs, pass `false` as
# the install_dir for the corresponding output by index
if num_outdirs == 1 and num_out > 1:
for output in t.get_outputs():
f = os.path.join(self.get_target_dir(t), output)
i = TargetInstallData(f, outdirs[0], {}, False, {}, set(), None, install_mode,
t.subproject, optional=not t.build_by_default)
d.targets.append(i)
else:
for output, outdir in zip(t.get_outputs(), outdirs):
# User requested that we not install this output
if outdir is False:
continue
f = os.path.join(self.get_target_dir(t), output)
i = TargetInstallData(f, outdir, {}, False, {}, set(), None, install_mode,
t.subproject, optional=not t.build_by_default)
d.targets.append(i)
def generate_custom_install_script(self, d: InstallData) -> None:
d.install_scripts = self.build.install_scripts
def generate_header_install(self, d: InstallData) -> None:
incroot = self.environment.get_includedir()
headers = self.build.get_headers()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for h in headers:
outdir = h.get_custom_install_dir()
if outdir is None:
outdir = os.path.join(incroot, h.get_install_subdir())
for f in h.get_sources():
if not isinstance(f, File):
msg = 'Invalid header type {!r} can\'t be installed'
raise MesonException(msg.format(f))
abspath = f.absolute_path(srcdir, builddir)
i = InstallDataBase(abspath, outdir, h.get_custom_install_mode(), h.subproject)
d.headers.append(i)
def generate_man_install(self, d: InstallData) -> None:
manroot = self.environment.get_mandir()
man = self.build.get_man()
for m in man:
for f in m.get_sources():
num = f.split('.')[-1]
subdir = m.get_custom_install_dir()
if subdir is None:
if m.locale:
subdir = os.path.join(manroot, m.locale, 'man' + num)
else:
subdir = os.path.join(manroot, 'man' + num)
fname = f.fname
if m.locale: # strip locale from file name
fname = fname.replace(f'.{m.locale}', '')
srcabs = f.absolute_path(self.environment.get_source_dir(), self.environment.get_build_dir())
dstabs = os.path.join(subdir, os.path.basename(fname))
i = InstallDataBase(srcabs, dstabs, m.get_custom_install_mode(), m.subproject)
d.man.append(i)
def generate_data_install(self, d: InstallData):
data = self.build.get_data()
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
for de in data:
            assert isinstance(de, build.Data)
subdir = de.install_dir
if not subdir:
subdir = os.path.join(self.environment.get_datadir(), self.interpreter.build.project_name)
for src_file, dst_name in zip(de.sources, de.rename):
                assert isinstance(src_file, mesonlib.File)
dst_abs = os.path.join(subdir, dst_name)
i = InstallDataBase(src_file.absolute_path(srcdir, builddir), dst_abs, de.install_mode, de.subproject)
d.data.append(i)
def generate_subdir_install(self, d: InstallData) -> None:
for sd in self.build.get_install_subdirs():
if sd.from_source_dir:
from_dir = self.environment.get_source_dir()
else:
from_dir = self.environment.get_build_dir()
src_dir = os.path.join(from_dir,
sd.source_subdir,
sd.installable_subdir).rstrip('/')
dst_dir = os.path.join(self.environment.get_prefix(),
sd.install_dir)
if not sd.strip_directory:
dst_dir = os.path.join(dst_dir, os.path.basename(src_dir))
i = SubdirInstallData(src_dir, dst_dir, sd.install_mode, sd.exclude, sd.subproject)
d.install_subdirs.append(i)
def get_introspection_data(self, target_id: str, target: build.Target) -> T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]:
'''
Returns a list of source dicts with the following format for a given target:
[
{
"language": "<LANG>",
"compiler": ["result", "of", "comp.get_exelist()"],
"parameters": ["list", "of", "compiler", "parameters],
"sources": ["list", "of", "all", "<LANG>", "source", "files"],
"generated_sources": ["list", "of", "generated", "source", "files"]
}
]
This is a limited fallback / reference implementation. The backend should override this method.
'''
if isinstance(target, (build.CustomTarget, build.BuildTarget)):
source_list_raw = target.sources
source_list = []
for j in source_list_raw:
if isinstance(j, mesonlib.File):
source_list += [j.absolute_path(self.source_dir, self.build_dir)]
elif isinstance(j, str):
source_list += [os.path.join(self.source_dir, j)]
elif isinstance(j, (build.CustomTarget, build.BuildTarget)):
source_list += [os.path.join(self.build_dir, j.get_subdir(), o) for o in j.get_outputs()]
            source_list = [os.path.normpath(x) for x in source_list]
compiler = []
if isinstance(target, build.CustomTarget):
tmp_compiler = target.command
                if not isinstance(tmp_compiler, list):
                    tmp_compiler = [tmp_compiler]
for j in tmp_compiler:
if isinstance(j, mesonlib.File):
compiler += [j.absolute_path(self.source_dir, self.build_dir)]
elif isinstance(j, str):
compiler += [j]
elif isinstance(j, (build.BuildTarget, build.CustomTarget)):
compiler += j.get_outputs()
else:
raise RuntimeError('Type "{}" is not supported in get_introspection_data. This is a bug'.format(type(j).__name__))
return [{
'language': 'unknown',
'compiler': compiler,
'parameters': [],
'sources': source_list,
'generated_sources': []
}]
return []
def get_devenv(self) -> build.EnvironmentVariables:
env = build.EnvironmentVariables()
extra_paths = set()
library_paths = set()
for t in self.build.get_targets().values():
cross_built = not self.environment.machines.matches_build_machine(t.for_machine)
can_run = not cross_built or not self.environment.need_exe_wrapper()
in_default_dir = t.should_install() and not t.get_install_dir(self.environment)[1]
if not can_run or not in_default_dir:
continue
tdir = os.path.join(self.environment.get_build_dir(), self.get_target_dir(t))
if isinstance(t, build.Executable):
# Add binaries that are going to be installed in bindir into PATH
# so they get used by default instead of searching on system when
# in developer environment.
extra_paths.add(tdir)
if mesonlib.is_windows() or mesonlib.is_cygwin():
# On windows we cannot rely on rpath to run executables from build
# directory. We have to add in PATH the location of every DLL needed.
extra_paths.update(self.determine_windows_extra_paths(t, []))
elif isinstance(t, build.SharedLibrary):
# Add libraries that are going to be installed in libdir into
# LD_LIBRARY_PATH. This allows running system applications using
# that library.
library_paths.add(tdir)
if mesonlib.is_windows() or mesonlib.is_cygwin():
extra_paths.update(library_paths)
elif mesonlib.is_osx():
env.prepend('DYLD_LIBRARY_PATH', list(library_paths))
else:
env.prepend('LD_LIBRARY_PATH', list(library_paths))
env.prepend('PATH', list(extra_paths))
return env
| QuLogic/meson | mesonbuild/backend/backends.py | Python | apache-2.0 | 77,063 |
"""
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from cobbler import item
from cobbler import utils
from cobbler import validate
from cobbler.cexceptions import CX
from cobbler.utils import _
# this data structure is described in item.py
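# Each entry appears to follow the layout documented in item.py:
# [name, default, subobject default, display name, editable in UI,
#  tooltip, valid values, type]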
FIELDS = [
# non-editable in UI (internal)
["ctime", 0, 0, "", False, "", 0, "float"],
["depth", 2, 0, "", False, "", 0, "float"],
["mtime", 0, 0, "", False, "", 0, "float"],
["parent", None, 0, "", False, "", 0, "str"],
["uid", None, 0, "", False, "", 0, "str"],
# editable in UI
["apt_components", "", 0, "Apt Components (apt only)", True, "ex: main restricted universe", [], "list"],
["apt_dists", "", 0, "Apt Dist Names (apt only)", True, "ex: precise precise-updates", [], "list"],
["arch", "", 0, "Arch", True, "ex: i386, x86_64", ['i386', 'x86_64', 'ppc', 'ppc64', 'ppc64le', 'ppc64el', "arm", 'noarch', 'src'], "str"],
["breed", "", 0, "Breed", True, "", validate.REPO_BREEDS, "str"],
["comment", "", 0, "Comment", True, "Free form text description", 0, "str"],
["createrepo_flags", '<<inherit>>', 0, "Createrepo Flags", True, "Flags to use with createrepo", 0, "dict"],
["environment", {}, 0, "Environment Variables", True, "Use these environment variables during commands (key=value, space delimited)", 0, "dict"],
["keep_updated", True, 0, "Keep Updated", True, "Update this repo on next 'cobbler reposync'?", 0, "bool"],
["mirror", None, 0, "Mirror", True, "Address of yum or rsync repo to mirror", 0, "str"],
["mirror_locally", True, 0, "Mirror locally", True, "Copy files or just reference the repo externally?", 0, "bool"],
["name", "", 0, "Name", True, "Ex: f10-i386-updates", 0, "str"],
["owners", "SETTINGS:default_ownership", 0, "Owners", True, "Owners list for authz_ownership (space delimited)", [], "list"],
["priority", 99, 0, "Priority", True, "Value for yum priorities plugin, if installed", 0, "int"],
["proxy", "SETTINGS:proxy_url_ext", "<<inherit>>", "Proxy information", True, "ex: http://example.com:8080", [], "str"],
["rpm_list", [], 0, "RPM List", True, "Mirror just these RPMs (yum only)", 0, "list"],
["yumopts", {}, 0, "Yum Options", True, "Options to write to yum config file", 0, "dict"],
]
class Repo(item.Item):
"""
A Cobbler repo object.
"""
TYPE_NAME = _("repo")
COLLECTION_TYPE = "repo"
def __init__(self, *args, **kwargs):
super(Repo, self).__init__(*args, **kwargs)
self.breed = None
self.arch = None
self.environment = None
self.yumopts = None
#
# override some base class methods first (item.Item)
#
def make_clone(self):
_dict = self.to_dict()
cloned = Repo(self.collection_mgr)
cloned.from_dict(_dict)
return cloned
def get_fields(self):
return FIELDS
def get_parent(self):
"""
currently the Cobbler object space does not support subobjects of this object
as it is conceptually not useful.
"""
return None
def check_if_valid(self):
if self.name is None:
raise CX("name is required")
if self.mirror is None:
raise CX("Error with repo %s - mirror is required" % (self.name))
#
# specific methods for item.File
#
def _guess_breed(self):
# backwards compatibility
if (self.breed == "" or self.breed is None):
if self.mirror.startswith("http://") or self.mirror.startswith("ftp://"):
self.set_breed("yum")
elif self.mirror.startswith("rhn://"):
self.set_breed("rhn")
else:
self.set_breed("rsync")
def set_mirror(self, mirror):
"""
        A repo (initially, as in right now) is something that can be rsynced.
reposync/repotrack integration over HTTP might come later.
"""
self.mirror = mirror
if self.arch is None or self.arch == "":
if mirror.find("x86_64") != -1:
self.set_arch("x86_64")
elif mirror.find("x86") != -1 or mirror.find("i386") != -1:
self.set_arch("i386")
self._guess_breed()
def set_keep_updated(self, keep_updated):
"""
This allows the user to disable updates to a particular repo for whatever reason.
"""
self.keep_updated = utils.input_boolean(keep_updated)
def set_yumopts(self, options):
"""
        Yum options are a space delimited list,
like 'a=b c=d e=f g h i=j' or a dictionary.
"""
(success, value) = utils.input_string_or_dict(options, allow_multiples=False)
if not success:
raise CX(_("invalid yum options"))
else:
self.yumopts = value
def set_environment(self, options):
"""
Yum can take options from the environment. This puts them there before
each reposync.
"""
(success, value) = utils.input_string_or_dict(options, allow_multiples=False)
if not success:
raise CX(_("invalid environment options"))
else:
self.environment = value
def set_priority(self, priority):
"""
Set the priority of the repository. 1= highest, 99=default
Only works if host is using priorities plugin for yum.
"""
try:
priority = int(str(priority))
        except (ValueError, TypeError):
raise CX(_("invalid priority level: %s") % priority)
self.priority = priority
def set_rpm_list(self, rpms):
"""
Rather than mirroring the entire contents of a repository (Fedora Extras, for instance,
contains games, and we probably don't want those), make it possible to list the packages
one wants out of those repos, so only those packages + deps can be mirrored.
"""
self.rpm_list = utils.input_string_or_list(rpms)
def set_createrepo_flags(self, createrepo_flags):
"""
Flags passed to createrepo when it is called. Common flags to use would be
-c cache or -g comps.xml to generate group information.
"""
if createrepo_flags is None:
createrepo_flags = ""
self.createrepo_flags = createrepo_flags
def set_breed(self, breed):
if breed:
return utils.set_repo_breed(self, breed)
def set_os_version(self, os_version):
if os_version:
return utils.set_repo_os_version(self, os_version)
def set_arch(self, arch):
"""
Override the arch used for reposync
"""
return utils.set_arch(self, arch, repo=True)
def set_mirror_locally(self, value):
self.mirror_locally = utils.input_boolean(value)
def set_apt_components(self, value):
self.apt_components = utils.input_string_or_list(value)
def set_apt_dists(self, value):
self.apt_dists = utils.input_string_or_list(value)
return True
def set_proxy(self, value):
self.proxy = value
return True
# EOF
| sjmh/cobbler | cobbler/item_repo.py | Python | gpl-2.0 | 7,839 |
import re
import sys
import json
import tensorflow as tf
import optimizees as optim
import util
import util.paths as paths
import util.tf_utils as tf_utils
from opts import model_trainer, distributed
def will_overwrite_snapshots(snapshots_path, eid):
if not snapshots_path.exists():
return False
    snapshot_regex = re.compile(r'epoch-(?P<eid>\d+)\.index')
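    # e.g. matches checkpoint index files named like "epoch-42.index"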
files = [str(s).split('/')[-1] for s in snapshots_path.iterdir()]
eids = [snapshot_regex.match(p).group('eid') for p in files if snapshot_regex.match(p)]
    if eids:
        max_eid = max(int(e) for e in eids)
        if eid < max_eid:
            return True
    return False
def setup_experiment(flags):
model_path = paths.model_path(flags.name)
print("Training model: ", flags.name)
print("Model path: ", model_path)
print("Snapshots path: ", model_path / 'snapshots')
if not flags.force and will_overwrite_snapshots(model_path / 'snapshots', flags.eid):
print("You will overwrite existing checkpoints. Add -f to force it.")
sys.exit(1)
if flags.eid >= flags.n_epochs:
print("Error: eid >= n_epochs")
sys.exit(1)
open_mode = 'w' if flags.force else 'a'
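    # note: without --force the config is appended, so the file accumulates one JSON document per run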
with (model_path / 'train_config.json').open(open_mode) as conf:
bad_kws = {'name', 'experiment_name', 'gpu', 'cpu', 'command_name', 'debug', 'force', 'verbose'}
training_options = {k: v for k, v in vars(flags).items() if k not in bad_kws}
json.dump(training_options, conf, sort_keys=True, indent=4)
opt = util.load_opt(model_path)
opt.debug = flags.debug
return opt, model_path
@tf_utils.with_tf_graph
def training(flags, opt):
optimizees = optim.get_optimizees(flags.optimizee,
clip_by_value=True,
random_scale=flags.enable_random_scaling,
noisy_grad=flags.noisy_grad)
for optimizee in optimizees.values():
optimizee.build()
opt = distributed.distribute(opt, tf_utils.get_devices(flags))
kwargs = util.get_kwargs(opt.build, flags)
opt.build(optimizees, **kwargs)
feed_dict = {
opt.train_lr: flags.train_lr,
opt.momentum: flags.momentum
}
session = tf.get_default_session()
session.run(tf.global_variables_initializer(), feed_dict=feed_dict)
trainer = model_trainer.Trainer()
try:
kwargs = util.get_kwargs(trainer.train, flags)
rets = trainer.setup_and_run(opt, 'train', session=session, **kwargs)
except tf.errors.InvalidArgumentError as error:
print("Op: ", error.op)
print("Input: ", error.op.inputs)
print(error)
raise
return rets
def run_train(flags):
opt, model_path = setup_experiment(flags)
rets = training(flags, opt)
util.dump_results(model_path, rets)
| justanothercoder/LSTM-Optimizer-TF | training.py | Python | mit | 2,884 |
##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Trace code generation for Windows DLLs."""
import ntpath
from trace import Tracer
from dispatch import Dispatcher
from specs.stdapi import API
class DllDispatcher(Dispatcher):
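    # Emits C code that lazily loads the real system DLL (from the Windows
    # system directory) and resolves each entry point via GetProcAddress.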
def dispatchModule(self, module):
tag = module.name.upper()
print r'HMODULE g_h%sModule = NULL;' % (tag,)
print r''
print r'static PROC'
print r'_get%sProcAddress(LPCSTR lpProcName) {' % tag
print r' if (!g_h%sModule) {' % tag
print r' char szDll[MAX_PATH] = {0};'
print r' if (!GetSystemDirectoryA(szDll, MAX_PATH)) {'
print r' return NULL;'
print r' }'
print r' strcat(szDll, "\\%s.dll");' % module.name
print r' g_h%sModule = LoadLibraryA(szDll);' % tag
print r' if (!g_h%sModule) {' % tag
print r' return NULL;'
print r' }'
print r' }'
print r' return GetProcAddress(g_h%sModule, lpProcName);' % tag
print r'}'
print r''
Dispatcher.dispatchModule(self, module)
def getProcAddressName(self, module, function):
return '_get%sProcAddress' % (module.name.upper())
class DllTracer(Tracer):
def header(self, api):
for module in api.modules:
dispatcher = DllDispatcher()
dispatcher.dispatchModule(module)
Tracer.header(self, api)
| tuanthng/apitrace | wrappers/dlltrace.py | Python | mit | 2,690 |
"""
Main functions for generating ABED results
"""
from .cache import update_result_cache
from .cv_tt import cvtt_tables
from .assess import assess_tables
from .export import export_tables
from ..conf import settings
from ..html.main import generate_html
def make_results(task_dict, skip_cache=False):
""" This is the main function for result generation. """
abed_cache = update_result_cache(task_dict, skip_cache=skip_cache)
if settings.TYPE == "ASSESS":
tables = assess_tables(abed_cache)
elif settings.TYPE == "CV_TT":
tables = cvtt_tables(abed_cache)
else:
        raise NotImplementedError(
            "Result generation for RAW mode is not implemented yet."
        )
summary_tables = export_tables(tables)
tables.extend(summary_tables)
generate_html(task_dict, tables)
| GjjvdBurg/ABED | abed/results/main.py | Python | gpl-2.0 | 832 |
import chatterbot
from chatterbot import ChatBot # importing the chatbot
from chatterbot.trainers import ListTrainer # importing the training list for the chatbot
name = "PYSHA"
chatbot = ChatBot(name, read_only=False)  # create the chatbot by name, keeping it trainable
conversations = ["Hello", "Hi there!", "How are you doing?", "I'm doing great.", "That is good to hear", "Thank you.",
                 "You're welcome."]
chatbot.set_trainer(ListTrainer)  # train on the basis of a list of statements
chatbot.train(conversations)  # train on the conversation list
response = chatbot.get_response(input("SAY:"))
print(response) | shafaypro/PYSHA | Chatstuff/_sample.py | Python | gpl-3.0 | 651 |
try:
from . import version
except ImportError: # pragma: no cover
from . import _version as version
__version__ = version.version
from .contact_map import (
ContactMap, ContactFrequency, ContactDifference,
AtomMismatchedContactDifference, ResidueMismatchedContactDifference,
OverrideTopologyContactDifference
)
from .contact_count import ContactCount
from .contact_trajectory import ContactTrajectory, RollingContactFrequency
from .min_dist import NearestAtoms, MinimumDistanceCounter
from .concurrence import (
Concurrence, AtomContactConcurrence, ResidueContactConcurrence,
ConcurrencePlotter, plot_concurrence
)
from .dask_runner import DaskContactFrequency, DaskContactTrajectory
from . import plot_utils
| dwhswenson/contact_map | contact_map/__init__.py | Python | lgpl-2.1 | 746 |
from django.apps import AppConfig
class CoursesConfig(AppConfig):
name = 'courses'
verbose_name = "Course Administration"
| gitsimon/tq_website | courses/apps.py | Python | gpl-2.0 | 132 |
"""Fetch NASA GPM data.
IMERG-Early has at most 4 hour latency.
IMERG-Late has about 14 hours.
Replace HHE with HHL
IMERG-Final is many months delayed
Drop L in the above.
2001-today
RUN from RUN_20AFTER.sh for 5 hours ago.
"""
import subprocess
import json
import datetime
import os
import sys
import tempfile
from PIL import Image
import numpy as np
import requests
from pyiem import mrms
from pyiem.util import utc, ncopen, logger, exponential_backoff
LOG = logger()
def compute_source(valid):
"""Which source to use."""
utcnow = utc()
if (utcnow - valid) < datetime.timedelta(hours=24):
return "E"
if (utcnow - valid) < datetime.timedelta(days=120):
return "L"
return ""
def main(argv):
"""Go Main Go."""
valid = utc(*[int(a) for a in argv[1:6]])
source = compute_source(valid)
routes = "ac" if len(argv) > 6 else "a"
LOG.debug("Using source: `%s` for valid: %s[%s]", source, valid, routes)
url = valid.strftime(
"https://gpm1.gesdisc.eosdis.nasa.gov/thredds/ncss/aggregation/"
f"GPM_3IMERGHH{source}.06/%Y/GPM_3IMERGHH{source}"
".06_Aggregation_%Y%03j.ncml.ncml?"
"var=precipitationCal&time=%Y-%m-%dT%H%%3A%M%%3A00Z&accept=netcdf4"
)
req = exponential_backoff(requests.get, url, timeout=120)
ct = req.headers.get("content-type", "")
# Sometimes, the service returns a 200 that is an error webpage :(
if req.status_code != 200 or not ct.startswith("application/x-netcdf4"):
LOG.info(
"failed to fetch %s [%s, %s] using source %s",
valid,
req.status_code,
ct,
source,
)
LOG.debug(url)
return
with tempfile.NamedTemporaryFile(delete=False) as tmp:
tmp.write(req.content)
with ncopen(tmp.name) as nc:
# x, y
        pmm = nc.variables["precipitationCal"][0, :, :] / 2.0  # mm/hr -> mm per 30 min
pmm = np.flipud(pmm.T)
os.unlink(tmp.name)
if np.max(pmm) > 102:
LOG.warning("overflow with max(%s) value > 102", np.max(pmm))
# idx: 0-200 0.25mm -> 50 mm
# idx: 201-253 1mm -> 50-102 mm
img = np.where(pmm >= 102, 254, 0)
img = np.where(
np.logical_and(pmm >= 50, pmm < 102),
201 + (pmm - 50) / 1.0,
img,
)
img = np.where(np.logical_and(pmm > 0, pmm < 50), pmm / 0.25, img)
img = np.where(pmm < 0, 255, img)
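    # resulting palette indices (inverse of the mapping above): 0-200 -> idx * 0.25 mm,
    # 201-253 -> 50 + (idx - 201) mm, 254 -> >= 102 mm, 255 -> missing/negative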
png = Image.fromarray(img.astype("u1"))
png.putpalette(mrms.make_colorramp())
png.save(f"{tmp.name}.png")
ISO = "%Y-%m-%dT%H:%M:%SZ"
metadata = {
"start_valid": (valid - datetime.timedelta(minutes=15)).strftime(ISO),
"end_valid": (valid + datetime.timedelta(minutes=15)).strftime(ISO),
"units": "mm",
"source": "F" if source == "" else source, # E, L, F
"generation_time": utc().strftime(ISO),
}
with open(f"{tmp.name}.json", "w", encoding="utf8") as fp:
fp.write(json.dumps(metadata))
pqstr = (
f"pqinsert -i -p 'plot {routes} {valid:%Y%m%d%H%M} "
"gis/images/4326/imerg/p30m.json "
f"GIS/imerg/p30m_{valid:%Y%m%d%H%M}.json json' {tmp.name}.json"
)
subprocess.call(pqstr, shell=True)
os.unlink(f"{tmp.name}.json")
with open(f"{tmp.name}.wld", "w", encoding="utf8") as fp:
fp.write("\n".join(["0.1", "0.0", "0.0", "-0.1", "-179.95", "89.95"]))
pqstr = (
f"pqinsert -i -p 'plot {routes} {valid:%Y%m%d%H%M} "
"gis/images/4326/imerg/p30m.wld "
f"GIS/imerg/p30m_{valid:%Y%m%d%H%M}.wld wld' {tmp.name}.wld"
)
subprocess.call(pqstr, shell=True)
os.unlink(f"{tmp.name}.wld")
pqstr = (
f"pqinsert -i -p 'plot {routes} {valid:%Y%m%d%H%M} "
"gis/images/4326/imerg/p30m.png "
f"GIS/imerg/p30m_{valid:%Y%m%d%H%M}.png png' {tmp.name}.png"
)
subprocess.call(pqstr, shell=True)
os.unlink(f"{tmp.name}.png")
if __name__ == "__main__":
main(sys.argv)
| akrherz/iem | scripts/dl/download_imerg.py | Python | mit | 3,970 |
from ftplib import FTP, error_perm
import os
from os.path import dirname
import re
from transmat.util import do_it
LIST_RE = re.compile('^(\S+).*\s+(20\d\d|\d\d\:\d\d)\s(.*?)$')
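# e.g. parses '-rw-r--r-- 1 ftp ftp 1024 Jan 01 12:34 index.html' into (mode, time, filename)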
class FTPSession(object):
"""An object representing a session with an FTP server.
This attempts to implement a robust session. If the FTP server
disconnects us during an upload or download, we try to reconnect.
"""
client = None
dir_known_to_exist = {'/': True}
def __init__(self, options=None, host=None, username=None, password=None,
root='/', index=None):
self.options = options
self.host = host
self.username = username
self.password = password
self.root = root
self.index = index
def connect(self):
print "Connecting to %s..." % self.host
self.client = FTP(self.host)
self.client.login(self.username, self.password)
self.client.cwd(self.root)
print self.client.getwelcome()
def disconnect(self):
self.client.quit()
def reconnect(self):
print "! Resetting connection to server..."
self.disconnect()
self.connect()
def mkdir(self, dir):
if self.options.dry_run:
return
if dir not in self.dir_known_to_exist:
print "Creating directory %s..." % dir
try:
self.client.mkd(dir)
except error_perm, e:
if "No such file" in str(e):
# assume this means parent doesn't exist yet
print "(Trying to create %s now...)" % dirname(dir)
self.mkdir(dirname(dir))
self.mkdir(dir)
# almost... :)
if "File exists" not in str(e):
raise
self.dir_known_to_exist[dir] = True
def delete(self, filename):
self.client.delete(filename)
def upload(self, filename):
return self.upload_to(filename, os.path.join(self.root, filename))
def upload_to(self, local_filename, remote_filename):
if self.options.dry_run:
print "WOULD UPLOAD %s -> %s" % (local_filename, remote_filename)
if self.options.compare and remote_filename != '/filehash.txt':
temp_filename = "/tmp/file"
try:
self.download_to(remote_filename, temp_filename)
except Exception as e:
print str(e)
do_it("echo -n '' >%s" % temp_filename)
do_it("diff -u '%s' '%s' || echo" %
(temp_filename, local_filename))
return False
print "UPLOAD %s -> %s" % (local_filename, remote_filename)
dir = dirname(remote_filename)
self.mkdir(dir)
tries = 0
local_file = open(local_filename)
done = False
while not done:
try:
stor = "STOR %s" % remote_filename.encode('utf-8')
self.client.storbinary(stor, local_file)
done = True
except Exception, e:
print "FAILURE: FTP STOR failed: " + str(e)
if tries < 10:
self.reconnect()
tries += 1
print "RETRY UPLOAD %s -> %s" % (local_filename,
remote_filename)
else:
msg = "Can't connect to server, tried %s times" % tries
raise Exception(msg)
local_file.close()
return True
def upload_if_newer(self, filename):
"""Uses the file index.
"""
if self.index.has_been_touched(filename):
if self.upload(filename):
self.index.update(filename)
else:
dir = "/" + dirname(filename)
self.dir_known_to_exist[dir] = True
def download_to(self, remote_filename, local_filename):
local_dir = os.path.dirname(local_filename)
if local_dir and not os.path.exists(local_dir):
print "Dir '%s' does not exist, creating first..." % local_dir
os.makedirs(local_dir)
file = open(local_filename, "w")
def receive(data):
file.write(data)
self.client.retrbinary("RETR %s" % remote_filename, receive)
file.close()
# XXX ignored_paths support is weak
# XXX ignored_dir support is nonexistent
def traverse_ftp_dir(self, process, dir):
remote_dir = os.path.normpath(os.path.join(self.root, dir))
if remote_dir in ['/' + x for x in self.options.ignored_paths]:
return
self.client.cwd(remote_dir)
entries = []
subdirs = []
def gather(line):
entries.append(line)
try:
self.client.retrlines('LIST', gather)
except EOFError as e:
print "Hit EOF while listing remote directory %s" % remote_dir
print "Gathered entries: %s" % entries
# XXX try to recover?
raise
for entry in entries:
match = LIST_RE.match(entry)
if match is not None:
filename = match.group(3)
is_dir = False
if match.group(1).startswith('d') and \
filename not in ['', '.', '..']:
is_dir = True
if self.options.verbose_traversal:
print "%s%s" % (
os.path.join(remote_dir, filename),
"/" if is_dir else ""
)
if is_dir:
subdirs.append(filename)
else:
if filename in self.options.ignored_files:
continue
process(dir, remote_dir, filename)
else:
# XXX raise or ignore?
if self.options.verbose_traversal:
print "No match on '%s'" % entry
for subdir in subdirs:
self.traverse_ftp_dir(process, os.path.join(dir, subdir))
| cpressey/transmat | src/transmat/remote.py | Python | unlicense | 6,155 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The SMS content contains sensitive words. Please contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
FAILEDOPERATION_CONTAINSENSITIVEWORD = 'FailedOperation.ContainSensitiveWord'
# Failed to parse the request packet, usually because the API specification was not followed. See [details on request body parsing error 1004](https://cloud.tencent.com/document/product/382/9558#.E8.BF.94.E5.9B.9E1004.E9.94.99.E8.AF.AF.E5.A6.82.E4.BD.95.E5.A4.84.E7.90.86.EF.BC.9F).
FAILEDOPERATION_FAILRESOLVEPACKET = 'FailedOperation.FailResolvePacket'
# Insufficient balance in the SMS package. Please [purchase a package](https://buy.cloud.tencent.com/sms).
FAILEDOPERATION_INSUFFICIENTBALANCEINSMSPACKAGE = 'FailedOperation.InsufficientBalanceInSmsPackage'
# Failed to parse the request body.
FAILEDOPERATION_JSONPARSEFAIL = 'FailedOperation.JsonParseFail'
# Marketing SMS sending time restriction: to avoid disturbing users, marketing SMS may only be sent between 8:00 and 22:00.
FAILEDOPERATION_MARKETINGSENDTIMECONSTRAINT = 'FailedOperation.MarketingSendTimeConstraint'
# A template cannot be requested before a signature has been applied for. Complete [creating a signature](https://cloud.tencent.com/document/product/382/37794#.E5.88.9B.E5.BB.BA.E7.AD.BE.E5.90.8D) first, then apply again.
FAILEDOPERATION_MISSINGSIGNATURE = 'FailedOperation.MissingSignature'
# Signature not recognized. Confirm that a signature has passed review; usually this error means it has not. See [signature review](https://cloud.tencent.com/document/product/382/37745).
FAILEDOPERATION_MISSINGSIGNATURELIST = 'FailedOperation.MissingSignatureList'
# This signature ID has not been submitted for review or does not exist, so it cannot be modified. Check that your SignId is correct.
FAILEDOPERATION_MISSINGSIGNATURETOMODIFY = 'FailedOperation.MissingSignatureToModify'
# Template not recognized. Confirm that a template has passed review; usually this error means it has not. See [template review](https://cloud.tencent.com/document/product/382/37745).
FAILEDOPERATION_MISSINGTEMPLATELIST = 'FailedOperation.MissingTemplateList'
# This template ID has not been submitted for review or does not exist, so it cannot be modified. Check that your TemplateId is correct.
FAILEDOPERATION_MISSINGTEMPLATETOMODIFY = 'FailedOperation.MissingTemplateToModify'
# Signature and template APIs are unavailable without enterprise verification. You can [change your identity verification mode](https://cloud.tencent.com/document/product/378/34075); after switching to enterprise verification, the change takes effect in about 1 hour.
FAILEDOPERATION_NOTENTERPRISECERTIFICATION = 'FailedOperation.NotEnterpriseCertification'
# Other error, usually caused by parameters that do not meet the requirements. See the API documentation, or contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
FAILEDOPERATION_OTHERERROR = 'FailedOperation.OtherError'
# Unknown error. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
FAILEDOPERATION_PARAMETERSOTHERERROR = 'FailedOperation.ParametersOtherError'
# The phone number is blacklisted, usually because the user unsubscribed or the number is on a carrier blacklist. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) to resolve this.
FAILEDOPERATION_PHONENUMBERINBLACKLIST = 'FailedOperation.PhoneNumberInBlacklist'
# The number of signatures has reached the maximum.
FAILEDOPERATION_SIGNNUMBERLIMIT = 'FailedOperation.SignNumberLimit'
# The signature has not been approved or is malformed. (1) Log in to the [SMS console](https://console.cloud.tencent.com/smsv2) and check whether the signature has been approved. (2) Check that it meets the format requirements: a signature may only contain Chinese characters, letters, and digits, 2-12 characters long. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if in doubt.
FAILEDOPERATION_SIGNATUREINCORRECTORUNAPPROVED = 'FailedOperation.SignatureIncorrectOrUnapproved'
# This template has already passed review and can no longer be modified.
FAILEDOPERATION_TEMPLATEALREADYPASSEDCHECK = 'FailedOperation.TemplateAlreadyPassedCheck'
# The template ID or signature ID does not exist.
FAILEDOPERATION_TEMPLATEIDNOTEXIST = 'FailedOperation.TemplateIdNotExist'
# The template has not been approved or the content does not match. (1) Log in to the [SMS console](https://console.cloud.tencent.com/smsv2) and check whether the template has been approved. (2) Check that it follows the [format specification](https://cloud.tencent.com/document/product/382/9558#.E8.BF.94.E5.9B.9E1014.E9.94.99.E8.AF.AF.E5.A6.82.E4.BD.95.E5.A4.84.E7.90.86.EF.BC.9F). Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if in doubt.
FAILEDOPERATION_TEMPLATEINCORRECTORUNAPPROVED = 'FailedOperation.TemplateIncorrectOrUnapproved'
# The number of templates has reached the maximum.
FAILEDOPERATION_TEMPLATENUMBERLIMIT = 'FailedOperation.TemplateNumberLimit'
# The backend failed to construct user parameters. Contact the [sms helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
INTERNALERROR_CONSTRUCTUSERDATAFAIL = 'InternalError.ConstructUserDataFail'
# Failed to parse user parameters. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
INTERNALERROR_JSONPARSEFAIL = 'InternalError.JsonParseFail'
# Other error. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) and provide the failed phone number.
INTERNALERROR_OTHERERROR = 'InternalError.OtherError'
# Failed to parse the carrier response body. Contact the [sms helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
INTERNALERROR_PARSEBACKENDRESPONSEFAIL = 'InternalError.ParseBackendResponseFail'
# The request initiation time is abnormal, usually because your server time differs from Tencent Cloud server time by more than 10 minutes. Check the server time and the time field in the API request.
INTERNALERROR_REQUESTTIMEEXCEPTION = 'InternalError.RequestTimeException'
# This REST API endpoint does not exist. Check the REST API documentation.
INTERNALERROR_RESTAPIINTERFACENOTEXIST = 'InternalError.RestApiInterfaceNotExist'
# API timeout or SMS packet send/receive timeout. Check whether your network is unstable, or contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) to resolve this.
INTERNALERROR_SENDANDRECVFAIL = 'InternalError.SendAndRecvFail'
# The request body in the backend packet has no Sig field, or Sig is empty.
INTERNALERROR_SIGFIELDMISSING = 'InternalError.SigFieldMissing'
# Backend verification of Sig failed.
INTERNALERROR_SIGVERIFICATIONFAIL = 'InternalError.SigVerificationFail'
# The SMS delivery request timed out. See [details on error 60008](https://cloud.tencent.com/document/product/382/9558#.E8.BF.94.E5.9B.9E60008.E9.94.99.E8.AF.AF.E5.A6.82.E4.BD.95.E5.A4.84.E7.90.86.EF.BC.9F).
INTERNALERROR_TIMEOUT = 'InternalError.Timeout'
# Unknown error type.
INTERNALERROR_UNKNOWNERROR = 'InternalError.UnknownError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# The account does not match the application ID.
INVALIDPARAMETER_APPIDANDBIZID = 'InvalidParameter.AppidAndBizId'
# The International or SmsType parameter is incorrect. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
INVALIDPARAMETER_INVALIDPARAMETERS = 'InvalidParameter.InvalidParameters'
# The requested SMS content is too long. For length rules see [content length calculation for Mainland China SMS](https://cloud.tencent.com/document/product/382/18058).
INVALIDPARAMETERVALUE_CONTENTLENGTHLIMIT = 'InvalidParameterValue.ContentLengthLimit'
# The uploaded transcoded image is in the wrong format. Refer to the description of this field in the API documentation; contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
INVALIDPARAMETERVALUE_IMAGEINVALID = 'InvalidParameterValue.ImageInvalid'
# Incorrect phone number format. See [details on error 1016](https://cloud.tencent.com/document/product/382/9558#.E8.BF.94.E5.9B.9E1016.E9.94.99.E8.AF.AF.E5.A6.82.E4.BD.95.E5.A4.84.E7.90.86.EF.BC.9F).
INVALIDPARAMETERVALUE_INCORRECTPHONENUMBER = 'InvalidParameterValue.IncorrectPhoneNumber'
# Validation of the DocumentType field failed. Refer to the description of this field in the API documentation; contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
INVALIDPARAMETERVALUE_INVALIDDOCUMENTTYPE = 'InvalidParameterValue.InvalidDocumentType'
# Validation of the International field failed. Refer to the description of this field in the API documentation; contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
INVALIDPARAMETERVALUE_INVALIDINTERNATIONAL = 'InvalidParameterValue.InvalidInternational'
# Invalid pull start/end time, possibly because the requested SendDateTime is later than EndDateTime.
INVALIDPARAMETERVALUE_INVALIDSTARTTIME = 'InvalidParameterValue.InvalidStartTime'
# Validation of the UsedMethod field failed. Refer to the description of this field in the API documentation; contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
INVALIDPARAMETERVALUE_INVALIDUSEDMETHOD = 'InvalidParameterValue.InvalidUsedMethod'
# Signature not recognized. Confirm that a signature has passed review; usually this error means it has not. See [signature review](https://cloud.tencent.com/document/product/382/37745#.E6.AD.A5.E9.AA.A43.EF.BC.9A.E7.AD.89.E5.BE.85.E5.AE.A1.E6.A0.B8).
INVALIDPARAMETERVALUE_MISSINGSIGNATURELIST = 'InvalidParameterValue.MissingSignatureList'
# URLs are not allowed in template variables.
INVALIDPARAMETERVALUE_PROHIBITEDUSEURLINTEMPLATEPARAMETER = 'InvalidParameterValue.ProhibitedUseUrlInTemplateParameter'
# SdkAppId does not exist.
INVALIDPARAMETERVALUE_SDKAPPIDNOTEXIST = 'InvalidParameterValue.SdkAppIdNotExist'
# This signature has already passed review and can no longer be modified.
INVALIDPARAMETERVALUE_SIGNALREADYPASSEDCHECK = 'InvalidParameterValue.SignAlreadyPassedCheck'
# An identical signature pending review already exists.
INVALIDPARAMETERVALUE_SIGNEXISTANDUNAPPROVED = 'InvalidParameterValue.SignExistAndUnapproved'
# Verification code template parameter format error: for verification code templates, a template variable may only contain 0-6 digits (6 included).
INVALIDPARAMETERVALUE_TEMPLATEPARAMETERFORMATERROR = 'InvalidParameterValue.TemplateParameterFormatError'
# A single template variable exceeds 12 characters. Enterprise-verified users have no per-variable length limit; you can [change your identity verification mode](https://cloud.tencent.com/document/product/378/34075), and after switching to enterprise verification the change takes effect in about 1 hour.
INVALIDPARAMETERVALUE_TEMPLATEPARAMETERLENGTHLIMIT = 'InvalidParameterValue.TemplateParameterLengthLimit'
# The daily SMS volume for the destination country/region exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_APPCOUNTRYORREGIONDAILYLIMIT = 'LimitExceeded.AppCountryOrRegionDailyLimit'
# The destination country/region is on the blacklist. You can adjust the SMS limit policy in the console.
LIMITEXCEEDED_APPCOUNTRYORREGIONINBLACKLIST = 'LimitExceeded.AppCountryOrRegionInBlacklist'
# The daily SMS volume exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_APPDAILYLIMIT = 'LimitExceeded.AppDailyLimit'
# The daily international/Hong Kong/Macao/Taiwan SMS volume exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_APPGLOBALDAILYLIMIT = 'LimitExceeded.AppGlobalDailyLimit'
# The daily Mainland China SMS volume exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_APPMAINLANDCHINADAILYLIMIT = 'LimitExceeded.AppMainlandChinaDailyLimit'
# The daily SMS volume exceeded the configured limit (international/Hong Kong/Macao/Taiwan). To adjust the limit, contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773).
LIMITEXCEEDED_DAILYLIMIT = 'LimitExceeded.DailyLimit'
# The SMS delivery hit a frequency limit policy. You can adjust the SMS frequency limit policy in the console; for other needs contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
LIMITEXCEEDED_DELIVERYFREQUENCYLIMIT = 'LimitExceeded.DeliveryFrequencyLimit'
# More than 200 phone numbers were submitted in a single API call. Follow the description of the PhoneNumberSet input parameter.
LIMITEXCEEDED_PHONENUMBERCOUNTLIMIT = 'LimitExceeded.PhoneNumberCountLimit'
# The daily SMS volume for a single phone number exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_PHONENUMBERDAILYLIMIT = 'LimitExceeded.PhoneNumberDailyLimit'
# The SMS volume for a single phone number within 1 hour exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_PHONENUMBERONEHOURLIMIT = 'LimitExceeded.PhoneNumberOneHourLimit'
# The volume of identical content sent to a single phone number exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_PHONENUMBERSAMECONTENTDAILYLIMIT = 'LimitExceeded.PhoneNumberSameContentDailyLimit'
# The SMS volume for a single phone number within 30 seconds exceeded the configured limit. You can adjust the SMS frequency limit policy in the console.
LIMITEXCEEDED_PHONENUMBERTHIRTYSECONDLIMIT = 'LimitExceeded.PhoneNumberThirtySecondLimit'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# The phone number list passed in is empty. Confirm that your parameters include phone numbers.
MISSINGPARAMETER_EMPTYPHONENUMBERSET = 'MissingParameter.EmptyPhoneNumberSet'
# No such signature or template under this account. Log in to the console to check.
MISSINGPARAMETER_MISSINGSIGNATURELIST = 'MissingParameter.MissingSignatureList'
# Individual users do not have permission to send marketing SMS. See [differences in benefits](https://cloud.tencent.com/document/product/382/13444).
UNAUTHORIZEDOPERATION_INDIVIDUALUSERMARKETINGSMSPERMISSIONDENY = 'UnauthorizedOperation.IndividualUserMarketingSmsPermissionDeny'
# The request IP is not on the whitelist. You configured source IP validation, but the current request IP is not in the configured list. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
UNAUTHORIZEDOPERATION_REQUESTIPNOTINWHITELIST = 'UnauthorizedOperation.RequestIpNotInWhitelist'
# The request is not authorized. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81).
UNAUTHORIZEDOPERATION_REQUESTPERMISSIONDENY = 'UnauthorizedOperation.RequestPermissionDeny'
# This SdkAppId is forbidden from providing services. Contact the [Tencent Cloud SMS helper](https://cloud.tencent.com/document/product/382/3773#.E6.8A.80.E6.9C.AF.E4.BA.A4.E6.B5.81) if needed.
UNAUTHORIZEDOPERATION_SDKAPPIDISDISABLED = 'UnauthorizedOperation.SdkAppIdIsDisabled'
# Service suspended due to arrears. Log in to Tencent Cloud and top up to pay off the outstanding balance.
UNAUTHORIZEDOPERATION_SERIVCESUSPENDDUETOARREARS = 'UnauthorizedOperation.SerivceSuspendDueToArrears'
# SmsSdkAppId verification failed. Check whether the [SmsSdkAppId](https://console.cloud.tencent.com/smsv2/app-manage) belongs to the account associated with the [cloud API key](https://console.cloud.tencent.com/cam/capi).
UNAUTHORIZEDOPERATION_SMSSDKAPPIDVERIFYFAIL = 'UnauthorizedOperation.SmsSdkAppIdVerifyFail'
# This request is not supported.
UNSUPPORTEDOPERATION_ = 'UnsupportedOperation.'
# The batch request contains both Mainland China and international phone numbers. Check whether (1) a domestic signature or template was used to send to international numbers, or (2) an international signature or template was used to send to domestic numbers.
UNSUPPORTEDOPERATION_CONTAINDOMESTICANDINTERNATIONALPHONENUMBER = 'UnsupportedOperation.ContainDomesticAndInternationalPhoneNumber'
# SMS delivery to this region is not supported.
UNSUPPORTEDOPERATION_UNSUPORTEDREGION = 'UnsupportedOperation.UnsuportedRegion'
| tzpBingo/github-trending | codespace/python/tencentcloud/sms/v20190711/errorcodes.py | Python | mit | 16,804 |
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import mc_unittest
from rogerthat.bizz import roles
from rogerthat.bizz.friend_helper import FriendHelper
from rogerthat.bizz.friends import makeFriends, ORIGIN_USER_INVITE
from rogerthat.bizz.profile import create_user_profile, create_service_profile
from rogerthat.bizz.roles import has_role, create_service_role, grant_role
from rogerthat.bizz.service import create_menu_item
from rogerthat.dal.friend import get_friends_map
from rogerthat.dal.profile import get_user_profile
from rogerthat.dal.service import get_service_identity
from rogerthat.models import ServiceRole
from rogerthat.rpc import users
from rogerthat.to.friends import FriendTO, FRIEND_TYPE_SERVICE
class Test(mc_unittest.TestCase):
def setUp(self):
super(Test, self).setUp(1)
self.service_user = users.User(u'monitoring@rogerth.at')
_, service_identity = create_service_profile(self.service_user, u"Monitoring service")
self.service_identity_user = service_identity.user
self.john = users.User(u'john_doe@foo.com')
up = create_user_profile(self.john, u"John Doe")
up.grant_role(service_identity.user, roles.ROLE_ADMIN)
up.put()
self.jane = users.User(u'jane_doe@foo.com')
create_user_profile(self.jane, u"Jane Doe")
def test_grant_role(self):
john_profile = get_user_profile(self.john, True)
jane_profile = get_user_profile(self.jane, True)
si = get_service_identity(self.service_identity_user)
self.assertTrue(john_profile.has_role(self.service_identity_user, roles.ROLE_ADMIN))
self.assertTrue(has_role(si, john_profile, roles.ROLE_ADMIN))
self.assertFalse(jane_profile.has_role(self.service_identity_user, roles.ROLE_ADMIN))
self.assertFalse(has_role(si, jane_profile, roles.ROLE_ADMIN))
def test_remove_role(self):
john_profile = get_user_profile(self.john, True)
john_profile.revoke_role(self.service_identity_user, roles.ROLE_ADMIN)
john_profile.put()
jane_profile = get_user_profile(self.jane, True)
self.assertFalse(john_profile.has_role(self.service_identity_user, roles.ROLE_ADMIN))
self.assertFalse(jane_profile.has_role(self.service_identity_user, roles.ROLE_ADMIN))
def create_roles_and_menu_items(self):
role1 = create_service_role(self.service_user, u'role1', ServiceRole.TYPE_MANAGED)
role2 = create_service_role(self.service_user, u'role2', ServiceRole.TYPE_MANAGED)
create_menu_item(self.service_user, u'gear', u'#e6e6e6', u'label1', u'tag1', [1, 1, 1], screen_branding=None,
static_flow_name=None, requires_wifi=False, run_in_background=False, roles=[role1.role_id],
is_broadcast_settings=False, broadcast_branding=None)
create_menu_item(self.service_user, u'gear', u'#e6e6e6', u'label2', u'tag2', [1, 2, 1], screen_branding=None,
static_flow_name=None, requires_wifi=False, run_in_background=False, roles=[role2.role_id],
is_broadcast_settings=False, broadcast_branding=None)
return role1, role2
def test_service_menu_item_roles(self):
role1, _ = self.create_roles_and_menu_items()
grant_role(self.service_identity_user, self.john, role1)
makeFriends(self.john, self.service_identity_user, None, None, ORIGIN_USER_INVITE)
makeFriends(self.jane, self.service_identity_user, None, None, ORIGIN_USER_INVITE)
helper = FriendHelper.from_data_store(self.service_identity_user, FRIEND_TYPE_SERVICE)
john_friendTO = FriendTO.fromDBFriendMap(helper, get_friends_map(self.john), self.service_identity_user,
includeAvatarHash=False, includeServiceDetails=True,
targetUser=self.john)
self.assertEqual(1, len(john_friendTO.actionMenu.items))
self.assertListEqual([1, 1, 1], john_friendTO.actionMenu.items[0].coords)
jane_friendTO = FriendTO.fromDBFriendMap(helper, get_friends_map(self.jane), self.service_identity_user,
includeAvatarHash=False, includeServiceDetails=True,
targetUser=self.jane)
self.assertEqual(0, len(jane_friendTO.actionMenu.items))
| rogerthat-platform/rogerthat-backend | src-test/rogerthat_tests/mobicage/bizz/test_roles.py | Python | apache-2.0 | 5,001 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <trond@hindenes.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey
version_added: "1.9"
short_description: Manage packages using chocolatey
description:
- Manage packages using Chocolatey (U(http://chocolatey.org/)).
- If Chocolatey is missing from the system, the module will install it.
- List of packages can be found at U(http://chocolatey.org/packages).
requirements:
- chocolatey >= 0.10.5 (will be upgraded if older)
options:
name:
description:
- Name of the package to be installed.
- This must be a single package name.
required: yes
state:
description:
- State of the package on the system.
choices:
- absent
- downgrade
- latest
- present
- reinstalled
default: present
force:
description:
- Forces install of the package (even if it already exists).
- Using C(force) will cause ansible to always report that a change was made.
type: bool
default: 'no'
upgrade:
description:
      - If the package is already installed, try to upgrade to the latest version or to the specified version.
- As of Ansible v2.3 this is deprecated, set parameter C(state) to C(latest) for the same result.
type: bool
default: 'no'
version:
description:
- Specific version of the package to be installed.
- Ignored when C(state) is set to C(absent).
source:
description:
- Specify source rather than using default chocolatey repository.
install_args:
description:
- Arguments to pass to the native installer.
version_added: '2.1'
params:
description:
- Parameters to pass to the package
version_added: '2.1'
allow_empty_checksums:
description:
- Allow empty checksums to be used.
type: bool
default: 'no'
version_added: '2.2'
ignore_checksums:
description:
- Ignore checksums altogether.
type: bool
default: 'no'
version_added: '2.2'
ignore_dependencies:
description:
- Ignore dependencies, only install/upgrade the package itself.
type: bool
default: 'no'
version_added: '2.1'
timeout:
description:
- The time to allow chocolatey to finish before timing out.
default: 2700
version_added: '2.3'
aliases: [ execution_timeout ]
skip_scripts:
description:
- Do not run I(chocolateyInstall.ps1) or I(chocolateyUninstall.ps1) scripts.
type: bool
default: 'no'
version_added: '2.4'
proxy_url:
description:
- Proxy url used to install chocolatey and the package.
version_added: '2.4'
proxy_username:
description:
- Proxy username used to install chocolatey and the package.
- When dealing with a username with double quote characters C("), they
need to be escaped with C(\) beforehand. See examples for more details.
version_added: '2.4'
proxy_password:
description:
- Proxy password used to install chocolatey and the package.
- See notes in C(proxy_username) when dealing with double quotes in a
password.
version_added: '2.4'
notes:
- Provide the C(version) parameter value as a string (e.g. C('6.1')), otherwise it
is considered to be a floating-point number and depending on the locale could
become C(6,1), which will cause a failure.
- When using verbosity 2 or less (C(-vv)) the C(stdout) output will be restricted.
- When using verbosity 4 (C(-vvvv)) the C(stdout) output will be more verbose.
- When using verbosity 5 (C(-vvvvv)) the C(stdout) output will include debug output.
- This module will install or upgrade Chocolatey when needed.
author:
- Trond Hindenes (@trondhindenes)
- Peter Mounce (@petemounce)
- Pepe Barbe (@elventear)
- Adam Keech (@smadam813)
- Pierre Templier (@ptemplier)
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
# * Version provided not as string might be translated to 6,6 depending on Locale (results in errors)
EXAMPLES = r'''
- name: Install git
win_chocolatey:
name: git
state: present
- name: Upgrade installed packages
win_chocolatey:
name: all
state: latest
- name: Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
version: '6.6'
- name: Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
- name: Uninstall git
win_chocolatey:
name: git
state: absent
- name: install multiple packages
win_chocolatey:
name: '{{ item }}'
    state: present
with_items:
- pscx
- windirstat
- name: uninstall multiple packages
win_chocolatey:
name: '{{ item }}'
state: absent
with_items:
- pscx
- windirstat
- name: Install curl using proxy
win_chocolatey:
name: curl
proxy_url: http://proxy-server:8080/
proxy_username: joe
proxy_password: p@ssw0rd
- name: Install curl with proxy credentials that contain quotes
win_chocolatey:
name: curl
proxy_url: http://proxy-server:8080/
proxy_username: user with \"escaped\" double quotes
proxy_password: pass with \"escaped\" double quotes
'''
RETURN = r'''
choco_bootstrap_output:
description: DEPRECATED, will be removed in 2.6, use stdout instead.
returned: changed, choco task returned a failure
type: str
sample: Chocolatey upgraded 1/1 packages.
choco_error_cmd:
description: DEPRECATED, will be removed in 2.6, use command instead.
returned: changed, choco task returned a failure
type: str
sample: choco.exe install -r --no-progress -y sysinternals --timeout 2700 --failonunfound
choco_error_log:
description: DEPRECATED, will be removed in 2.6, use stdout instead.
returned: changed, choco task returned a failure
type: str
sample: sysinternals not installed. The package was not found with the source(s) listed
command:
description: The full command used in the chocolatey task.
returned: changed
type: str
sample: choco.exe install -r --no-progress -y sysinternals --timeout 2700 --failonunfound
rc:
description: The return code from the chocolatey task.
returned: changed
type: int
sample: 0
stdout:
description: The stdout from the chocolatey task. The verbosity level of the
        messages is affected by the Ansible verbosity setting; see notes for more
details.
returned: changed
type: str
sample: Chocolatey upgraded 1/1 packages.
'''
| Nicop06/ansible | lib/ansible/modules/windows/win_chocolatey.py | Python | gpl-3.0 | 7,511 |
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
# view imports
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
from django.contrib.auth import get_user_model
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from django.views.generic.edit import FormMixin
from django.contrib import messages
from rest_framework.decorators import api_view, renderer_classes
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from avatar.forms import UploadAvatarForm, DeleteAvatarForm, PrimaryAvatarForm
from .forms import UserForm, UserRegisterForm, UploadAvatarFormNu, DeleteAvatarFormNu, PrimaryAvatarFormNu
# Import the customized User model
User = get_user_model() # use this function for swapping user model
from django.contrib.auth.models import Group
import urllib2, urllib
#from django.contrib.sites.models import Site
CAPTION = str('OneGreek is redefining Greek recruitment. Review fraternity profiles, register for rush, and manage upcoming events. All for free, entirely through Facebook.')
def post_to_facebook(
user,
link="http://arizona.onegreek.org",
name="Onegreek.org",
picture="https://djangonu-onegreek.s3.amazonaws.com/img/logos/800x600logo.jpg",
caption=CAPTION,
chapter="",
message="",
adverb="rushing"
):
post_url = "https://graph.facebook.com/{}/feed".format(user.fb_uid)
#site = Site.objects.get(id=1)
post_data = [
("message", "{} is {} {} on Onegreek.org".format(user.get_full_name(), adverb, chapter)),
#("link", site.domain),
#("name", site.name),
("link", link),
("name", name),
("picture", picture),
("caption", caption),
('access_token', user.get_fb_access_token().token)
]
result = urllib2.urlopen(post_url, urllib.urlencode(post_data))
class UserDetailView(LoginRequiredMixin, FormMixin, DetailView):
model = User
form_class = UserForm
# These next two lines tell the view to index lookups by username
slug_field = "pk"
slug_url_kwarg = "pk"
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
avatars = self.get_object().avatar_set.all()
context['avatars'] = avatars
context['profile_form'] = self.form_class(instance=self.get_object())
context['avatar_upload_form'] = UploadAvatarFormNu(user=self.get_object())
context['avatar_primary_form'] = PrimaryAvatarForm(user=self.get_object(), avatars=avatars)
context['avatar_delete_form'] = DeleteAvatarForm(user=self.get_object(), avatars=avatars)
return context
def get_success_url(self):
messages.success(self.request, 'Successfully updated your profile', 'success')
if 'next' in self.request.GET:
return self.request.GET['next']
else:
return reverse("users:edit")
class UserRedirectView(RedirectView):
permanent = False
def get_redirect_url(self):
user = self.request.user
redirect_url = reverse('account_login')
# is user anon?
if user.is_authenticated():
# is user an active of a chapter?
if user.chapter_id:
# yes
redirect_url = reverse("chapters:detail", kwargs={"pk": user.chapter_id})
else:
# no
redirect_url = reverse("chapters:list")
return redirect_url
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
messages.success(self.request, 'Successfully updated your profile', 'success')
if 'next' in self.request.GET:
return self.request.GET['next']
else:
return reverse("users:redirect")
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(pk=self.request.user.pk)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "pk"
slug_url_kwarg = "pk"
def get_context_data(self, **kwargs):
context = super(UserListView, self).get_context_data(**kwargs)
context.update(user_form=UserForm())
return context
from rest_framework import viewsets
from .serializers import UserSerializer, GroupSerializer, GroupUpdateSerializer, GroupCreateSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
def get_queryset(self):
q = super(UserViewSet, self).get_queryset()
if 'group' in self.request.GET:
return q.filter(groups__id=self.request.GET['group'])
elif 'chapter' in self.request.GET:
chapter_id = self.request.GET['chapter']
if 'rush' in self.request.GET:
return q.filter(groups__name__istartswith='chapter_%s' % chapter_id).distinct()
else:
return q.filter(chapter_id=chapter_id).exclude(status="active_pending")
else:
chapter_id = self.request.user.chapter_id
if chapter_id:
return q.filter(groups__name__istartswith='chapter_%d' % chapter_id).distinct()
else:
return q
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
def create(self, request, *args, **kwargs):
self.serializer_class = GroupCreateSerializer
return super(GroupViewSet, self).create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
self.serializer_class = GroupUpdateSerializer
return super(GroupViewSet, self).update(request, *args, **kwargs)
def get_queryset(self):
q = super(GroupViewSet, self).get_queryset()
return q.filter(chapter__id=self.request.user.chapter_id)
@api_view(['POST'])
@renderer_classes((JSONRenderer,))
def mod_group(request, format=None):
if 'group_id' in request.DATA and 'action' in request.DATA \
and 'chapter_id' in request.DATA and 'user_set' in request.DATA:
group = get_object_or_404(Group, id=request.DATA['group_id'])
action = request.DATA['action']
chapter_id = request.DATA['chapter_id']
user_set = request.DATA['user_set']
for user_id in user_set:
user = get_object_or_404(User, id=user_id)
user.groups.clear()
user.status = 'rush'
user.chapter = None
user.save()
response = {'success': True}
else:
response = {'success': False}
return Response(response)
@api_view(['GET', 'POST'])
@renderer_classes((JSONRenderer,))
def call_list(request, format=None):
user_id = request.DATA['user_id']
group_id = request.DATA['group_id']
action = request.DATA['action']
user = get_object_or_404(User, id=user_id)
if action == "add":
user.groups.add(group_id)
elif action == "remove":
user.groups.remove(group_id)
user.save()
response = {'success': True}
return Response(response)
from chapters.models import Chapter
@api_view(['GET', 'POST'])
@renderer_classes((JSONRenderer,))
def mod_user_groups(request, format=None):
user_id = request.DATA['user_id']
chapter_id = request.DATA['chapter_id']
status = request.DATA['status']
new_status = request.DATA['new_status']
action = request.DATA['action']
user = get_object_or_404(User, id=user_id)
chapter = get_object_or_404(Chapter, id=chapter_id)
group = None
new_group = None
response = {
'success': False,
'user_full_name': user.get_full_name(),
'action': action,
'status': status,
'new_status': new_status
}
if status == 'active':
group = chapter.linked_active_group
elif status == 'active_pending':
group = chapter.linked_pending_group
elif status == 'rush':
group = chapter.linked_rush_group
elif status == 'pledge':
group = chapter.linked_pledge_group
elif status == 'admin':
group = chapter.linked_admin_group
if new_status == 'active':
new_group = chapter.linked_active_group
elif new_status == 'active_pending':
new_group = chapter.linked_pending_group
elif new_status == 'rush':
new_group = chapter.linked_rush_group
elif new_status == 'pledge':
new_group = chapter.linked_pledge_group
elif new_status == 'admin':
new_group = chapter.linked_admin_group
elif new_status == 'call':
new_group = chapter.linked_call_group
if group:
if action == "add":
user.groups.add(new_group.id)
# admin is not a status
if status != 'admin':
# rush uses a different 'add' action than other status
if status == 'rush':
if new_status != 'call':
# If action: add and status: rush and not new status: call the user is being added to chapter
user.groups.remove(group.id, chapter.linked_call_group.id)
# remove from rush group here because 'add' action won't remove groups otherwise
user.chapter = chapter
# set chapter for new member
user.status = new_status
#send facebook post for active chapter
#post_to_facebook(
# user,
# chapter=chapter.fraternity_title,
# adverb="a registered active of"
#)
else:
user.groups.add(new_group.id)
else:
user.status = status
response['status'] = user.status
elif action == "remove":
if new_status != 'call':
# not removing from call list
user.groups.remove(group.id)
user.groups.add(new_group.id)
if new_status == 'rush':
user.chapter = None
# If action: remove and status: active_pending the user is being removed permanently
if status == 'active_pending':
call_group = chapter.linked_call_group
admin_group = chapter.linked_admin_group
user.groups.remove(new_group.id)
user.groups.remove(call_group.id)
user.groups.remove(admin_group.id)
user.status = new_status
else:
# remove from call list
user.groups.remove(new_group.id)
user.save()
response['success'] = True
return Response(response)
@api_view(['GET'])
@renderer_classes((JSONRenderer,))
def user_in_chapter_group(request, format=None):
user_id = request.GET['check_user_id']
chapter_id = request.GET['chapter_id']
status = request.GET['status']
# Not user.status as includes call and admin
user = get_object_or_404(User, id=user_id)
chapter = get_object_or_404(Chapter, id=chapter_id)
group = None
if status == 'active':
group = chapter.linked_active_group
elif status == 'active_pending':
group = chapter.linked_pending_group
elif status == 'rush':
group = chapter.linked_rush_group
elif status == 'pledge':
group = chapter.linked_pledge_group
elif status == 'admin':
group = chapter.linked_admin_group
elif status == 'call':
group = chapter.linked_call_group
_response = {
'success': False,
'status': status,
'chapter': chapter.title,
'user': user.get_full_name(),
'in_group': False
}
if group:
_response['success'] = True
if group in user.groups.all():
_response['in_group'] = True
return Response(_response)
| goldhand/onegreek | onegreek/users/views.py | Python | bsd-3-clause | 12,581 |
#!/usr/bin/python
"""Test of push button output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("<Control>f"))
sequence.append(TypeAction("Button Boxes"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Return"))
sequence.append(utils.AssertPresentationAction(
"1. OK push button",
["BRAILLE LINE: 'gtk3-demo application Button Boxes frame'",
" VISIBLE: 'Button Boxes frame', cursor=1",
"BRAILLE LINE: 'gtk3-demo application Button Boxes frame Horizontal Button Boxes panel Spread panel OK push button'",
" VISIBLE: 'OK push button', cursor=1",
"SPEECH OUTPUT: 'Button Boxes frame'",
"SPEECH OUTPUT: 'Horizontal Button Boxes panel.'",
"SPEECH OUTPUT: 'Spread panel.'",
"SPEECH OUTPUT: 'OK push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"2. OK push button Where Am I",
["BRAILLE LINE: 'gtk3-demo application Button Boxes frame Horizontal Button Boxes panel Spread panel OK push button'",
" VISIBLE: 'OK push button', cursor=1",
"SPEECH OUTPUT: 'OK push button"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"3. Cancel push button",
["BRAILLE LINE: 'gtk3-demo application Button Boxes frame Horizontal Button Boxes panel Spread panel Cancel push button'",
" VISIBLE: 'Cancel push button', cursor=1",
"SPEECH OUTPUT: 'Cancel push button'"]))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"4. OK Edge button",
["BRAILLE LINE: 'gtk3-demo application Button Boxes frame Horizontal Button Boxes panel Edge panel OK push button'",
" VISIBLE: 'OK push button', cursor=1",
"SPEECH OUTPUT: 'Edge panel.'",
"SPEECH OUTPUT: 'OK push button'"]))
sequence.append(KeyComboAction("<Alt>F4", 500))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| GNOME/orca | test/keystrokes/gtk3-demo/role_push_button.py | Python | lgpl-2.1 | 2,278 |
"""
Support for the Meraki CMX location service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.meraki/
"""
import asyncio
import logging
import json
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (HTTP_BAD_REQUEST, HTTP_UNPROCESSABLE_ENTITY)
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER)
CONF_VALIDATOR = 'validator'
CONF_SECRET = 'secret'
DEPENDENCIES = ['http']
URL = '/api/meraki'
VERSION = '2.0'
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_VALIDATOR): cv.string,
vol.Required(CONF_SECRET): cv.string
})
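# A minimal configuration.yaml sketch for this platform (the two values are
# placeholders and must match what is configured in the Meraki dashboard):
#
#   device_tracker:
#     - platform: meraki
#       validator: MERAKI_VALIDATOR_STRING
#       secret: MERAKI_SECRET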
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up an endpoint for the Meraki tracker."""
hass.http.register_view(
MerakiView(config, async_see))
return True
class MerakiView(HomeAssistantView):
"""View to handle Meraki requests."""
url = URL
name = 'api:meraki'
def __init__(self, config, async_see):
"""Initialize Meraki URL endpoints."""
self.async_see = async_see
self.validator = config[CONF_VALIDATOR]
self.secret = config[CONF_SECRET]
@asyncio.coroutine
def get(self, request):
"""Meraki message received as GET."""
return self.validator
@asyncio.coroutine
def post(self, request):
"""Meraki CMX message received."""
try:
data = yield from request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
_LOGGER.debug("Meraki Data from Post: %s", json.dumps(data))
if not data.get('secret', False):
_LOGGER.error("secret invalid")
return self.json_message('No secret', HTTP_UNPROCESSABLE_ENTITY)
if data['secret'] != self.secret:
_LOGGER.error("Invalid Secret received from Meraki")
return self.json_message('Invalid secret',
HTTP_UNPROCESSABLE_ENTITY)
elif data['version'] != VERSION:
_LOGGER.error("Invalid API version: %s", data['version'])
return self.json_message('Invalid version',
HTTP_UNPROCESSABLE_ENTITY)
else:
_LOGGER.debug('Valid Secret')
if data['type'] not in ('DevicesSeen', 'BluetoothDevicesSeen'):
_LOGGER.error("Unknown Device %s", data['type'])
return self.json_message('Invalid device type',
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.debug("Processing %s", data['type'])
if len(data["data"]["observations"]) == 0:
_LOGGER.debug("No observations found")
return
self._handle(request.app['hass'], data)
@callback
def _handle(self, hass, data):
for i in data["data"]["observations"]:
data["data"]["secret"] = "hidden"
lat = i["location"]["lat"]
lng = i["location"]["lng"]
try:
accuracy = int(float(i["location"]["unc"]))
except ValueError:
accuracy = 0
mac = i["clientMac"]
_LOGGER.debug("clientMac: %s", mac)
if lat == "NaN" or lng == "NaN":
_LOGGER.debug(
"No coordinates received, skipping location for: " + mac
)
gps_location = None
accuracy = None
else:
gps_location = (lat, lng)
attrs = {}
if i.get('os', False):
attrs['os'] = i['os']
if i.get('manufacturer', False):
attrs['manufacturer'] = i['manufacturer']
if i.get('ipv4', False):
attrs['ipv4'] = i['ipv4']
if i.get('ipv6', False):
attrs['ipv6'] = i['ipv6']
if i.get('seenTime', False):
attrs['seenTime'] = i['seenTime']
if i.get('ssid', False):
attrs['ssid'] = i['ssid']
hass.async_add_job(self.async_see(
gps=gps_location,
mac=mac,
source_type=SOURCE_TYPE_ROUTER,
gps_accuracy=accuracy,
attributes=attrs
))
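# For reference, a trimmed CMX POST body accepted by this handler could look
# like the following (field names are the ones read above; the concrete
# values are invented):
#
#   {"secret": "MERAKI_SECRET", "version": "2.0", "type": "DevicesSeen",
#    "data": {"observations": [
#        {"clientMac": "aa:bb:cc:dd:ee:ff",
#         "location": {"lat": 47.37, "lng": 8.54, "unc": 1.5},
#         "seenTime": "2017-01-01T00:00:00Z"}]}}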
| ewandor/home-assistant | homeassistant/components/device_tracker/meraki.py | Python | apache-2.0 | 4,571 |
import unittest, time, sys, random, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_util, h2o_import as h2i
import h2o_browse as h2b
import h2o_jobs
OVERWRITE_RF_MODEL = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1, java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_big1_nopoll_fvec(self):
csvFilename = 'hhp_107_01.data.gz'
hex_key = csvFilename + ".hex"
print "\n" + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvFilename,
hex_key=hex_key, timeoutSecs=15, schema='put')
rfViewInitial = []
# dispatch multiple jobs back to back
for jobDispatch in range(3):
start = time.time()
kwargs = {}
model_key = ""
if OVERWRITE_RF_MODEL:
print "Since we're overwriting here, we have to wait for each to complete noPoll=False"
model_key = 'SRF_model'
else:
model_key = 'SRF_model' + str(jobDispatch)
kwargs['ntrees'] = 1
if OVERWRITE_RF_MODEL:
print "Change the number of trees, while keeping the rf model key name the same"
print "Checks that we correctly overwrite previous rf model"
kwargs['ntrees'] += 1
kwargs['seed'] = random.randint(0, sys.maxint)
kwargs['response'] = "C107"
# FIX! what model keys do these get?
randomNode = h2o.nodes[random.randint(0,len(h2o.nodes)-1)]
h2o_cmd.runSpeeDRF(node=randomNode, parseResult=parseResult, destination_key=model_key,
timeoutSecs=300, noPoll=False, **kwargs)
print "rf job dispatch end on ", csvFilename, 'took', time.time() - start, 'seconds'
print "\njobDispatch #", jobDispatch
print "\n MODEL KEY: ", model_key
rfViewInitial.append(h2o_cmd.runSpeeDRFView(None, model_key, timeoutSecs=60))
# h2o_jobs.pollWaitJobs(pattern='SRF_model', timeoutSecs=300, pollTimeoutSecs=10, retryDelaySecs=5)
# we saved the initial response?
        # if we do another poll they should be done now, and better to get it that
        # way rather than via inspect (to match what simpleCheckGLM expects)
first = None
print "rfViewInitial", rfViewInitial
for rfView in rfViewInitial:
print "Checking completed job:", rfView
print "rfView", h2o.dump_json(rfView)
model_key = rfView["speedrf_model"]['_key']
ntree = rfView["speedrf_model"]["parameters"]['ntrees']
print "Temporary hack: need to do two rf views minimum, to complete a RF (confusion matrix creation)"
# allow it to poll to complete
rfViewResult = h2o_cmd.runSpeeDRFView(None, model_key, timeoutSecs=60)
if first is None: # we'll use this to compare the others
first = rfViewResult.copy()
firstModelKey = model_key
print "first", h2o.dump_json(first)
else:
print "Comparing", model_key, "to", firstModelKey
df = h2o_util.JsonDiff(rfViewResult, first, vice_versa=True, with_values=True)
print "df.difference:", h2o.dump_json(df.difference)
if __name__ == '__main__':
h2o.unit_main()
| rowhit/h2o-2 | py/testdir_single_jvm/test_speedrf_big1_nopoll.py | Python | apache-2.0 | 3,630 |
# -*- coding: utf-8 -*-
## Comments and reviews for records.
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for commenting features """
__revision__ = "$Id$"
import cgi
# Invenio imports
from invenio.urlutils import create_html_link, create_url
from invenio.webuser import get_user_info, collect_user_info, isGuestUser, get_email
from invenio.dateutils import convert_datetext_to_dategui
from invenio.webmessage_mailutils import email_quoted_txt2html
from invenio.config import CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL,\
CFG_SITE_SUPPORT_EMAIL,\
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR, \
CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN, \
CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION, \
CFG_CERN_SITE, \
CFG_SITE_RECORD, \
CFG_WEBCOMMENT_MAX_ATTACHED_FILES, \
CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE
from invenio.htmlutils import get_html_text_editor, create_html_select
from invenio.messages import gettext_set_language
from invenio.bibformat import format_record
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import acc_get_user_roles_from_user_info, acc_get_role_id
from invenio.search_engine_utils import get_fieldvalues
class Template:
"""templating class, refer to webcomment.py for examples of call"""
def tmpl_get_first_comments_without_ranking(self, recID, ln, comments, nb_comments_total, warnings):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param warnings: list of warning tuples (warning_text, warning_color)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_id = 6
warnings = self.tmpl_warnings(warnings, ln)
# write button
write_button_label = _("Write a comment")
write_button_link = '%s/%s/%s/comments/add' % (CFG_SITE_URL, CFG_SITE_RECORD, recID)
write_button_form = '<input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=write_button_label)
# comments
comment_rows = ''
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%s" class="cmtRound">' % (comment_round_name)
if comment_round_name:
comment_rows += '<div class="webcomment_comment_round_header">' + \
_('%(x_nb)i Comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "</div>"
else:
comment_rows += '<div class="webcomment_comment_round_header">' + \
_('%(x_nb)i Comments') % {'x_nb': len(comments_list),} + "</div>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += """
<tr>
<td>"""
report_link = '%s/%s/%s/comments/report?ln=%s&comid=%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
reply_link = '%s/%s/%s/comments/add?ln=%s&comid=%s&action=REPLY' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_without_ranking(req=None, ln=ln, nickname=messaging_link, comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body], status='', nb_reports=0,
report_link=report_link, reply_link=reply_link, recID=recID)
comment_rows += """
<br />
<br />
</td>
</tr>"""
# Close comment round
comment_rows += '</div>'
# output
if nb_comments_total > 0:
out = warnings
comments_label = len(comments) > 1 and _("Showing the latest %i comments:") % len(comments) \
or ""
out += """
<div class="video_content_clear"></div>
<table class="webcomment_header_comments">
<tr>
<td class="blocknote">%(comment_title)s</td>
</tr>
</table>
<div class="websomment_header_comments_label">%(comments_label)s</div>
%(comment_rows)s
%(view_all_comments_link)s
%(write_button_form)s<br />""" % \
{'comment_title': _("Discuss this document"),
'comments_label': comments_label,
'nb_comments_total' : nb_comments_total,
'recID': recID,
'comment_rows': comment_rows,
'tab': ' '*4,
'siteurl': CFG_SITE_URL,
's': nb_comments_total>1 and 's' or "",
'view_all_comments_link': nb_comments_total>0 and '''<a class="webcomment_view_all_comments" href="%s/%s/%s/comments/display">View all %s comments</a>''' \
% (CFG_SITE_URL, CFG_SITE_RECORD, recID, nb_comments_total) or "",
'write_button_form': write_button_form,
'nb_comments': len(comments)
}
if not comments:
out = """
<!-- comments title table -->
<table class="webcomment_header_comments">
<tr>
<td class="blocknote">%(discuss_label)s:</td>
</tr>
</table>
<div class="webcomment_header_details">%(detailed_info)s
<br />
</div>
%(form)s
""" % {'form': write_button_form,
'discuss_label': _("Discuss this document"),
'detailed_info': _("Start a discussion about any aspect of this document.")
}
return out
def tmpl_record_not_found(self, status='missing', recID="", ln=CFG_SITE_LANG):
"""
Displays a page when bad or missing record ID was given.
@param status: 'missing' : no recID was given
'inexistant': recID doesn't have an entry in the database
'nan' : recID is not a number
'invalid' : recID is an error code, i.e. in the interval [-99,-1]
        @return: body of the page
"""
_ = gettext_set_language(ln)
if status == 'inexistant':
body = _("Sorry, the record %s does not seem to exist.") % (recID,)
elif status in ('nan', 'invalid'):
body = _("Sorry, %s is not a valid ID value.") % (recID,)
else:
body = _("Sorry, no record ID was provided.")
body += "<br /><br />"
link = "<a href=\"%s?ln=%s\">%s</a>." % (CFG_SITE_URL, ln, CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME))
body += _("You may want to start browsing from %s") % link
return body
def tmpl_get_first_comments_with_ranking(self, recID, ln, comments=None, nb_comments_total=None, avg_score=None, warnings=[]):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param avg_score: average score of all reviews
@param warnings: list of warning tuples (warning_text, warning_color)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_nb_votes_yes = 4
c_nb_votes_total = 5
c_star_score = 6
c_title = 7
c_id = 8
warnings = self.tmpl_warnings(warnings, ln)
#stars
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'recID' : recID,
'ln' : ln,
'yes_img' : 'smchk_gr.gif', #'yes.gif',
'no_img' : 'iconcross.gif' #'no.gif'
}
link = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/vote?ln=%(ln)s&comid=%%(comid)s' % useful_dict
useful_yes = link + '&com_value=1">' + _("Yes") + '</a>'
useful_no = link + '&com_value=-1">' + _("No") + '</a>'
#comment row
comment_rows = ' '
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%s" class="cmtRound">' % (comment_round_name)
comment_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "<br/>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += '''
<tr>
<td>'''
report_link = '%s/%s/%s/reviews/report?ln=%s&comid=%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_with_ranking(None, ln=ln, nickname=messaging_link,
comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body],
status='', nb_reports=0,
nb_votes_total=comment[c_nb_votes_total],
nb_votes_yes=comment[c_nb_votes_yes],
star_score=comment[c_star_score],
title=comment[c_title], report_link=report_link, recID=recID)
comment_rows += '''
%s %s / %s<br />''' % (_("Was this review helpful?"), useful_yes % {'comid':comment[c_id]}, useful_no % {'comid':comment[c_id]})
comment_rows += '''
<br />
</td>
</tr>'''
# Close comment round
comment_rows += '</div>'
# write button
write_button_link = '''%s/%s/%s/reviews/add''' % (CFG_SITE_URL, CFG_SITE_RECORD, recID)
write_button_form = ' <input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=_("Write a review"))
if nb_comments_total > 0:
avg_score_img = str(avg_score_img)
avg_score = str(avg_score)
nb_comments_total = str(nb_comments_total)
score = '<b>'
score += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + avg_score + '" />',
'x_nb_reviews': nb_comments_total}
useful_label = _("Readers found the following %s reviews to be most helpful.")
useful_label %= len(comments) > 1 and len(comments) or ""
            view_all_comments_link = '<a class="webcomment_view_all_reviews" href="%s/%s/%s/reviews/display?ln=%s&amp;do=hh">' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln)
view_all_comments_link += _("View all %s reviews") % nb_comments_total
view_all_comments_link += '</a><br />'
out = warnings + """
<!-- review title table -->
<table class="webcomment_header_ratings">
<tr>
<td class="blocknote">%(comment_title)s:</td>
</tr>
</table>
%(score_label)s<br />
%(useful_label)s
<!-- review table -->
<table class="webcomment_review_title_table">
%(comment_rows)s
</table>
%(view_all_comments_link)s
%(write_button_form)s<br />
""" % \
{ 'comment_title' : _("Rate this document"),
'score_label' : score,
'useful_label' : useful_label,
'recID' : recID,
'view_all_comments' : _("View all %s reviews") % (nb_comments_total,),
'write_comment' : _("Write a review"),
'comment_rows' : comment_rows,
'tab' : ' '*4,
'siteurl' : CFG_SITE_URL,
'view_all_comments_link': nb_comments_total>0 and view_all_comments_link or "",
'write_button_form' : write_button_form
}
else:
out = '''
<!-- review title table -->
<table class="webcomment_header_ratings">
<tr>
<td class="blocknote"><div class="webcomment_review_first_introduction">%s:</td>
</tr>
</table>
%s<br />
%s
<br />''' % (_("Rate this document"),
_('Be the first to review this document.</div>'),
write_button_form)
return out
def tmpl_get_comment_without_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, reply_link=None, report_link=None, undelete_link=None, delete_links=None, unreport_link=None, recID=-1, com_id='', attached_files=None, collapsed_p=False):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment:
da: deleted by author
dm: deleted by moderator
ok: active
@param nb_reports: number of reports the comment has
@param reply_link: if want reply and report, give the http links
@param report_link: if want reply and report, give the http links
        @param undelete_link: http link to undelete the message
@param delete_links: http links to delete the message
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@param com_id: ID of the comment displayed
@param attached_files: list of attached files
@param collapsed_p: if the comment should be collapsed or not
@return: html table of comment
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
user_info = collect_user_info(req)
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
if attached_files is None:
attached_files = []
out = ''
final_body = email_quoted_txt2html(body)
title = nickname
title += '<a name="C%s" id="C%s"></a>' % (com_id, com_id)
links = ''
if not isGuestUser(user_info['uid']):
# Add link to toggle comment visibility
links += create_html_link(CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/' + str(recID) + '/comments/toggle',
{'comid': com_id, 'ln': ln, 'collapse': collapsed_p and '0' or '1', 'referer': user_info['uri']},
_("Close"),
{'onclick': "return toggle_visibility(this, %s, 'fast');" % com_id},
escape_linkattrd=False)
moderator_links = ''
if reply_link:
links += '<a class="webcomment_comment_reply" href="' + reply_link +'">' + _("Reply") +'</a>'
if report_link and status != 'ap':
links += '<a class="webcomment_comment_report" href="' + report_link +'">' + _("Report abuse") + '</a>'
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
final_body = '<div class="webcomment_deleted_comment_message">(Comment deleted by the moderator) - not visible for users<br /><br />' +\
final_body + '</div>'
else:
final_body = '<div class="webcomment_deleted_comment_message">(Comment deleted by the author) - not visible for users<br /><br />' +\
final_body + '</div>'
links = ''
moderator_links += '<a class="webcomment_deleted_comment_undelete" href="' + undelete_link + '">' + _("Undelete comment") + '</a>'
else:
if status == 'dm':
final_body = '<div class="webcomment_deleted_comment_message">Comment deleted by the moderator</div>'
else:
final_body = '<div class="webcomment_deleted_comment_message">Comment deleted by the author</div>'
links = ''
else:
if not auth_code:
moderator_links += '<a class="webcomment_comment_delete" href="' + delete_links['mod'] +'">' + _("Delete comment") + '</a>'
elif (user_info['uid'] == comment_uid) and CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION:
moderator_links += '<a class="webcomment_comment_delete" href="' + delete_links['auth'] +'">' + _("Delete comment") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
final_body = '<div class="webcomment_reported_comment_message">(Comment reported. Pending approval) - not visible for users<br /><br />' + final_body + '</div>'
links = ''
moderator_links += '<a class="webcomment_reported_comment_unreport" href="' + unreport_link +'">' + _("Unreport comment") + '</a>'
else:
final_body = '<div class="webcomment_comment_pending_approval_message">This comment is pending approval due to user reports</div>'
links = ''
if links and moderator_links:
links = links + moderator_links
elif not links:
links = moderator_links
attached_files_html = ''
if attached_files:
attached_files_html = '<div class="cmtfilesblock"><b>%s:</b><br/>' % (len(attached_files) == 1 and _("Attached file") or _("Attached files"))
for (filename, filepath, fileurl) in attached_files:
attached_files_html += create_html_link(urlbase=fileurl, urlargd={},
link_label=cgi.escape(filename)) + '<br />'
attached_files_html += '</div>'
toggle_visibility_block = ''
if not isGuestUser(user_info['uid']):
toggle_visibility_block = """<div class="webcomment_toggle_visibility"><a id="collapsible_ctr_%(comid)s" class="%(collapse_ctr_class)s" href="%(toggle_url)s" onclick="return toggle_visibility(this, %(comid)i);" title="%(collapse_label)s"><span style="display:none">%(collapse_label)s</span></a></div>""" % \
{'comid': com_id,
'toggle_url': create_url(CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/' + str(recID) + '/comments/toggle', {'comid': com_id, 'ln': ln, 'collapse': collapsed_p and '0' or '1', 'referer': user_info['uri']}),
'collapse_ctr_class': collapsed_p and 'webcomment_collapse_ctr_right' or 'webcomment_collapse_ctr_down',
'collapse_label': collapsed_p and _("Open") or _("Close")}
out += """
<div class="webcomment_comment_box">
%(toggle_visibility_block)s
<div class="webcomment_comment_avatar"><img class="webcomment_comment_avatar_default" src="%(site_url)s/img/user-icon-1-24x24.gif" alt="avatar" /></div>
<div class="webcomment_comment_content">
<div class="webcomment_comment_title">
%(title)s
<div class="webcomment_comment_date">%(date)s</div>
<a class="webcomment_permalink" title="Permalink to this comment" href="#C%(comid)i">¶</a>
</div>
<div class="collapsible_content" id="collapsible_content_%(comid)i" style="%(collapsible_content_style)s">
<blockquote>
%(body)s
</blockquote>
%(attached_files_html)s
<div class="webcomment_comment_options">%(links)s</div>
</div>
<div class="clearer"></div>
</div>
<div class="clearer"></div>
</div>""" % \
{'title' : title,
'body' : final_body,
'links' : links,
'attached_files_html': attached_files_html,
'date': date_creation,
'site_url': CFG_SITE_URL,
'comid': com_id,
'collapsible_content_style': collapsed_p and 'display:none' or '',
'toggle_visibility_block': toggle_visibility_block,
}
return out
def tmpl_get_comment_with_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, nb_votes_total, nb_votes_yes, star_score, title, report_link=None, delete_links=None, undelete_link=None, unreport_link=None, recID=-1):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment
@param nb_reports: number of reports the comment has
@param nb_votes_total: total number of votes for this review
        @param nb_votes_yes: number of positive votes for this review
        @param star_score: star score for this review
        @param title: title of review
        @param report_link: if want reply and report, give the http links
        @param undelete_link: http link to undelete the message
        @param delete_links: http links to delete the message
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@return: html table of review
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
if star_score > 0:
star_score_img = 'stars-' + str(star_score) + '-0.png'
else:
star_score_img = 'stars-0-0.png'
out = ""
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
reviewed_label = _("Reviewed by %(x_nickname)s on %(x_date)s") % {'x_nickname': nickname, 'x_date':date_creation}
## FIX
nb_votes_yes = str(nb_votes_yes)
nb_votes_total = str(nb_votes_total)
useful_label = _("%(x_nb_people)s out of %(x_nb_total)s people found this review useful") % {'x_nb_people': nb_votes_yes,
'x_nb_total': nb_votes_total}
links = ''
_body = ''
if body != '':
_body = '''
<blockquote>
%s
</blockquote>''' % email_quoted_txt2html(body, linebreak_html='')
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
user_info = collect_user_info(req)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
_body = '<div class="webcomment_deleted_review_message">(Review deleted by moderator) - not visible for users<br /><br />' +\
_body + '</div>'
else:
_body = '<div class="webcomment_deleted_review_message">(Review deleted by author) - not visible for users<br /><br />' +\
_body + '</div>'
links = '<a class="webcomment_deleted_review_undelete" href="' + undelete_link + '">' + _("Undelete review") + '</a>'
else:
if status == 'dm':
_body = '<div class="webcomment_deleted_review_message">Review deleted by moderator</div>'
else:
_body = '<div class="webcomment_deleted_review_message">Review deleted by author</div>'
links = ''
else:
if not auth_code:
links += '<a class="webcomment_review_delete" href="' + delete_links['mod'] +'">' + _("Delete review") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
_body = '<div class="webcomment_review_pending_approval_message">(Review reported. Pending approval) - not visible for users<br /><br />' + _body + '</div>'
links += ' | '
links += '<a class="webcomment_reported_review_unreport" href="' + unreport_link +'">' + _("Unreport review") + '</a>'
else:
_body = '<div class="webcomment_review_pending_approval_message">This review is pending approval due to user reports.</div>'
links = ''
out += '''
<div class="webcomment_review_box">
<div class="webcomment_review_box_inner">
<img src="%(siteurl)s/img/%(star_score_img)s" alt="%(star_score)s/>
<div class="webcomment_review_title">%(title)s</div>
<div class="webcomment_review_label_reviewed">%(reviewed_label)s</div>
<div class="webcomment_review_label_useful">%(useful_label)s</div>
%(body)s
</div>
</div>
%(abuse)s''' % {'siteurl' : CFG_SITE_URL,
'star_score_img': star_score_img,
'star_score' : star_score,
'title' : cgi.escape(title),
'reviewed_label': reviewed_label,
'useful_label' : useful_label,
'body' : _body,
'abuse' : links
}
return out
def tmpl_get_comments(self, req, recID, ln,
nb_per_page, page, nb_pages,
display_order, display_since,
CFG_WEBCOMMENT_ALLOW_REVIEWS,
comments, total_nb_comments,
avg_score,
warnings,
border=0, reviews=0,
total_nb_reviews=0,
nickname='', uid=-1, note='',score=5,
can_send_comments=False,
can_attach_files=False,
user_is_subscribed_to_discussion=False,
user_can_unsubscribe_from_discussion=False,
display_comment_rounds=None):
"""
Get table of all comments
@param recID: record id
@param ln: language
@param nb_per_page: number of results per page
@param page: page number
@param display_order: hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param display_since: all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
        @param CFG_WEBCOMMENT_ALLOW_REVIEWS: is ranking enabled; taken from config.py/CFG_WEBCOMMENT_ALLOW_REVIEWS
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param total_nb_comments: total number of comments for this record
@param avg_score: average score of reviews for this record
@param warnings: list of warning tuples (warning_text, warning_color)
@param border: boolean, active if want to show border around each comment/review
@param reviews: boolean, enabled for reviews, disabled for comments
@param can_send_comments: boolean, if user can send comments or not
@param can_attach_files: boolean, if user can attach file to comment or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
        @param user_can_unsubscribe_from_discussion: True if user is allowed to unsubscribe from the discussion
"""
# load the right message language
_ = gettext_set_language(ln)
# CERN hack begins: display full ATLAS user name. Check further below too.
current_user_fullname = ""
override_nickname_p = False
if CFG_CERN_SITE:
from invenio.search_engine import get_all_collections_of_a_record
user_info = collect_user_info(uid)
if 'atlas-readaccess-active-members [CERN]' in user_info['group']:
# An ATLAS member is never anonymous to its colleagues
# when commenting inside ATLAS collections
recid_collections = get_all_collections_of_a_record(recID)
if 'ATLAS' in str(recid_collections):
override_nickname_p = True
current_user_fullname = user_info.get('external_fullname', '')
# CERN hack ends
# naming data fields of comments
if reviews:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_nb_votes_yes = 6
c_nb_votes_total = 7
c_star_score = 8
c_title = 9
c_id = 10
c_round_name = 11
c_restriction = 12
reply_to = 13
c_visibility = 14
discussion = 'reviews'
comments_link = '<a href="%s/%s/%s/comments/">%s</a> (%i)' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, _('Comments'), total_nb_comments)
reviews_link = '<b>%s (%i)</b>' % (_('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form_with_ranking(recID, uid, current_user_fullname or nickname, ln, '', score, note, warnings, show_title_p=True, can_attach_files=can_attach_files)
else:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_id = 6
c_round_name = 7
c_restriction = 8
reply_to = 9
c_visibility = 10
discussion = 'comments'
comments_link = '<b>%s (%i)</b>' % (_('Comments'), total_nb_comments)
reviews_link = '<a href="%s/%s/%s/reviews/">%s</a> (%i)' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, _('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form(recID, uid, nickname, ln, note, warnings, can_attach_files=can_attach_files, user_is_subscribed_to_discussion=user_is_subscribed_to_discussion)
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'recID' : recID,
'ln' : ln,
'do' : display_order,
'ds' : display_since,
'nb' : nb_per_page,
'p' : page,
'reviews' : reviews,
'discussion' : discussion
}
useful_yes = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/display">' + _("Yes") + '</a>'
useful_yes %= useful_dict
useful_no = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=-1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/display">' + _("No") + '</a>'
useful_no %= useful_dict
warnings = self.tmpl_warnings(warnings, ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'index',
'discussion': discussion,
'arguments' : 'do=%s&ds=%s&nb=%s' % (display_order, display_since, nb_per_page),
'arg_page' : '&p=%s' % page,
'page' : page,
'rec_id' : recID}
if not req:
req = None
## comments table
comments_rows = ''
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_round_style = "display:none;"
comment_round_is_open = False
if comment_round_name in display_comment_rounds:
comment_round_is_open = True
comment_round_style = ""
comments_rows += '<div id="cmtRound%s" class="cmtround">' % (comment_round_name)
if not comment_round_is_open and \
(comment_round_name or len(comment_round_names) > 1):
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.append(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" style="display:none" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + '&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "</a><br/>"
elif comment_round_name or len(comment_round_names) > 1:
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.remove(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" style="display:none" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + ('&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) or 'cmtgrp=none' ) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name}+ "</a><br/>"
comments_rows += '<div id="cmtSubRound%s" class="cmtsubround" style="%s">' % (comment_round_name,
comment_round_style)
comments_rows += '''
<script type='text/javascript'>//<![CDATA[
function toggle_visibility(this_link, comid, duration) {
if (duration == null) duration = 0;
var isVisible = $('#collapsible_content_' + comid).is(':visible');
$('#collapsible_content_' + comid).toggle(duration);
$('#collapsible_ctr_' + comid).toggleClass('webcomment_collapse_ctr_down');
$('#collapsible_ctr_' + comid).toggleClass('webcomment_collapse_ctr_right');
if (isVisible){
$('#collapsible_ctr_' + comid).attr('title', '%(open_label)s');
$('#collapsible_ctr_' + comid + ' > span').html('%(open_label)s');
} else {
$('#collapsible_ctr_' + comid).attr('title', '%(close_label)s');
$('#collapsible_ctr_' + comid + ' > span').html('%(close_label)s');
}
$.ajax({
type: 'POST',
url: '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/toggle',
data: {'comid': comid, 'ln': '%(ln)s', 'collapse': isVisible && 1 || 0}
});
/* Replace our link with a jump to the adequate, in case needed
(default link is for non-Javascript user) */
this_link.href = "#C" + comid
/* Find out if after closing comment we shall scroll a bit to the top,
i.e. go back to main anchor of the comment that we have just set */
var top = $(window).scrollTop();
if ($(window).scrollTop() >= $("#C" + comid).offset().top) {
// Our comment is now above the window: scroll to it
return true;
}
return false;
}
//]]></script>
''' % {'siteurl': CFG_SITE_URL,
'recID': recID,
'ln': ln,
'CFG_SITE_RECORD': CFG_SITE_RECORD,
'open_label': _("Open"),
'close_label': _("Close")}
thread_history = [0]
previous_depth = 0
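            # thread_history holds the chain of ancestor comment ids for the
            # comment currently being rendered; the index of a comment's
            # parent in that list is its nesting depth, and the
            # webcomment_thread_block divs opened/closed below indent replies
            # by the change in depth.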
for comment in comments_list:
if comment[reply_to] not in thread_history:
# Going one level down in the thread
thread_history.append(comment[reply_to])
depth = thread_history.index(comment[reply_to])
else:
depth = thread_history.index(comment[reply_to])
thread_history = thread_history[:depth + 1]
if previous_depth > depth:
comments_rows += ("""</div>""" * (previous_depth-depth))
if previous_depth < depth:
comments_rows += ("""<div class="webcomment_thread_block">""" * (depth-previous_depth))
previous_depth = depth
# CERN hack begins: display full ATLAS user name.
comment_user_fullname = ""
if CFG_CERN_SITE and override_nickname_p:
comment_user_fullname = get_email(comment[c_user_id])
# CERN hack ends
if comment[c_nickname]:
_nickname = comment[c_nickname]
display = _nickname
else:
(uid, _nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(_nickname, comment_user_fullname or display, ln)
from invenio.webcomment import get_attached_files # FIXME
files = get_attached_files(recID, comment[c_id])
# do NOT delete the HTML comment below. It is used for parsing... (I plead unguilty!)
comments_rows += """
<!-- start comment row -->
<div>"""
delete_links = {}
if not reviews:
report_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/display' % useful_dict % {'comid':comment[c_id]}
reply_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/add?ln=%(ln)s&action=REPLY&comid=%%(comid)s' % useful_dict % {'comid':comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_without_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], reply_link, report_link, undelete_link, delete_links, unreport_link, recID, comment[c_id], files, comment[c_visibility])
else:
report_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/display' % useful_dict % {'comid': comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_with_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], comment[c_nb_votes_total], comment[c_nb_votes_yes], comment[c_star_score], comment[c_title], report_link, delete_links, undelete_link, unreport_link, recID)
helpful_label = _("Was this review helpful?")
report_abuse_label = "(" + _("Report abuse") + ")"
yes_no_separator = '<td> / </td>'
if comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN or comment[c_status] in ['dm', 'da']:
report_abuse_label = ""
helpful_label = ""
useful_yes = ""
useful_no = ""
yes_no_separator = ""
comments_rows += """
<table>
<tr>
<td>%(helpful_label)s %(tab)s</td>
<td> %(yes)s </td>
%(yes_no_separator)s
<td> %(no)s </td>
<td class="reportabuse">%(tab)s%(tab)s<a href="%(report)s">%(report_abuse_label)s</a></td>
</tr>
</table>""" \
% {'helpful_label': helpful_label,
'yes' : useful_yes % {'comid':comment[c_id]},
'yes_no_separator': yes_no_separator,
'no' : useful_no % {'comid':comment[c_id]},
'report' : report_link % {'comid':comment[c_id]},
'report_abuse_label': comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN and '' or report_abuse_label,
'tab' : ' '*2}
# do NOT remove HTML comment below. It is used for parsing...
comments_rows += """
</div>
<!-- end comment row -->"""
comments_rows += '</div></div>'
## page links
page_links = ''
# Previous
if page != 1:
link_dic['arg_page'] = 'p=%s' % (page - 1)
            page_links += '<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">&lt;&lt;</a> ' % link_dic
else:
page_links += ' %s ' % (' '*(len(_('Previous'))+7))
# Page Numbers
for i in range(1, nb_pages+1):
link_dic['arg_page'] = 'p=%s' % i
link_dic['page'] = '%s' % i
if i != page:
page_links += '''
<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">%(page)s</a> ''' % link_dic
else:
page_links += ''' <b>%s</b> ''' % i
# Next
if page != nb_pages:
link_dic['arg_page'] = 'p=%s' % (page + 1)
page_links += '''
<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">>></a> ''' % link_dic
else:
page_links += '%s' % (' '*(len(_('Next'))+7))
## stuff for ranking if enabled
if reviews:
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
ranking_average = '<br /><b>'
ranking_average += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + str(avg_score) + '" />',
'x_nb_reviews': str(total_nb_reviews)}
ranking_average += '<br />'
else:
ranking_average = ""
write_button_link = '''%s/%s/%s/%s/add''' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, discussion)
        write_button_form = '<input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link,
method="get",
text=write_button_form,
button = reviews and _('Write a review') or _('Write a comment'))
if reviews:
total_label = _("There is a total of %s reviews")
else:
total_label = _("There is a total of %s comments")
total_label %= total_nb_comments
review_or_comment_first = ''
if reviews == 0 and total_nb_comments == 0 and can_send_comments:
review_or_comment_first = _("Start a discussion about any aspect of this document.") + '<br />'
elif reviews == 1 and total_nb_reviews == 0 and can_send_comments:
review_or_comment_first = _("Be the first to review this document.") + '<br />'
# do NOT remove the HTML comments below. Used for parsing
body = '''
%(comments_and_review_tabs)s
<!-- start comments table -->
<div class="webcomment_comment_table">
%(comments_rows)s
</div>
<!-- end comments table -->
%(review_or_comment_first)s
<br />''' % \
{ 'record_label': _("Record"),
'back_label': _("Back to search results"),
'total_label': total_label,
'write_button_form' : write_button_form,
'write_button_form_again' : total_nb_comments>3 and write_button_form or "",
'comments_rows' : comments_rows,
'total_nb_comments' : total_nb_comments,
'comments_or_reviews' : reviews and _('review') or _('comment'),
'comments_or_reviews_title' : reviews and _('Review') or _('Comment'),
'siteurl' : CFG_SITE_URL,
'module' : "comments",
'recid' : recID,
'ln' : ln,
#'border' : border,
'ranking_avg' : ranking_average,
'comments_and_review_tabs' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
CFG_WEBCOMMENT_ALLOW_COMMENTS and \
'%s | %s <br />' % \
(comments_link, reviews_link) or '',
'review_or_comment_first' : review_or_comment_first
}
# form is not currently used. reserved for an eventual purpose
#form = """
# Display <select name="nb" size="1"> per page
# <option value="all">All</option>
# <option value="10">10</option>
# <option value="25">20</option>
# <option value="50">50</option>
# <option value="100" selected="selected">100</option>
# </select>
# comments per page that are <select name="ds" size="1">
# <option value="all" selected="selected">Any age</option>
# <option value="1d">1 day old</option>
# <option value="3d">3 days old</option>
# <option value="1w">1 week old</option>
# <option value="2w">2 weeks old</option>
# <option value="1m">1 month old</option>
# <option value="3m">3 months old</option>
# <option value="6m">6 months old</option>
# <option value="1y">1 year old</option>
# </select>
# and sorted by <select name="do" size="1">
# <option value="od" selected="selected">Oldest first</option>
# <option value="nd">Newest first</option>
# %s
# </select>
# """ % \
# (reviews==1 and '''
# <option value=\"hh\">most helpful</option>
# <option value=\"lh\">least helpful</option>
# <option value=\"hs\">highest star ranking</option>
# <option value=\"ls\">lowest star ranking</option>
# </select>''' or '''
# </select>''')
#
#form_link = "%(siteurl)s/%(module)s/%(function)s" % link_dic
#form = self.createhiddenform(action=form_link, method="get", text=form, button='Go', recid=recID, p=1)
pages = """
<div>
%(v_label)s %(comments_or_reviews)s %(results_nb_lower)s-%(results_nb_higher)s <br />
%(page_links)s
</div>
""" % \
{'v_label': _("Viewing"),
'page_links': _("Page:") + page_links ,
'comments_or_reviews': reviews and _('review') or _('comment'),
'results_nb_lower': len(comments)>0 and ((page-1) * nb_per_page)+1 or 0,
'results_nb_higher': page == nb_pages and (((page-1) * nb_per_page) + len(comments)) or (page * nb_per_page)}
if nb_pages > 1:
#body = warnings + body + form + pages
body = warnings + body + pages
else:
body = warnings + body
if reviews == 0:
if not user_is_subscribed_to_discussion:
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(recID) + '/comments/subscribe',
urlargd={},
link_label=_('Subscribe')) + \
'</b>' + ' to this discussion. You will then receive all new comments by email.' + '</div>'
body += '<br />'
elif user_can_unsubscribe_from_discussion:
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(recID) + '/comments/unsubscribe',
urlargd={},
link_label=_('Unsubscribe')) + \
'</b>' + ' from this discussion. You will no longer receive emails about new comments.' + '</div>'
body += '<br />'
if can_send_comments:
body += add_comment_or_review
else:
body += '<br/><em>' + _("You are not authorized to comment or review.") + '</em>'
return '<div class="webcomment_container">' + body + '</div>'
def create_messaging_link(self, to, display_name, ln=CFG_SITE_LANG):
"""prints a link to the messaging system"""
link = "%s/yourmessages/write?msg_to=%s&ln=%s" % (CFG_SITE_URL, to, ln)
if to:
return '<a href="%s" class="maillink">%s</a>' % (link, display_name)
else:
return display_name
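    # Illustrative call (nickname and display name are placeholders):
    #   create_messaging_link('jdoe', 'John Doe', 'en')
    # returns an anchor of the form
    #   <a href="<CFG_SITE_URL>/yourmessages/write?msg_to=jdoe&ln=en" class="maillink">John Doe</a>
    # while an empty 'to' falls back to the plain display name.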
def createhiddenform(self, action="", method="get", text="", button="confirm", cnfrm='', **hidden):
"""
create select with hidden values and submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
@param text: additional text, can also be used to add non hidden input
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
output = """
<form action="%s" method="%s">""" % (action, method.lower().strip() in ['get', 'post'] and method or 'get')
output += """
<table style="width:90%">
<tr>
<td style="vertical-align: top">
"""
output += text + '\n'
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td>"""
output += """
<input class="adminbutton" type="submit" value="%s" />""" % (button, )
output += """
</td>
</tr>
</table>
</form>"""
return output
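    # Example usage sketch (argument values invented): each extra keyword
    # argument becomes one hidden input, so
    #   self.createhiddenform(action='/record/1/comments/add', method='post',
    #                         text='<b>Proceed?</b>', button='Confirm',
    #                         ln='en', comid=42)
    # renders a form that carries 'ln' and 'comid' along as hidden fields.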
def create_write_comment_hiddenform(self, action="", method="get", text="", button="confirm", cnfrm='',
enctype='', form_id=None, form_name=None, **hidden):
"""
create select with hidden values and submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
@param text: additional text, can also be used to add non hidden input
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param form_id: HTML 'id' attribute of the form tag
@param form_name: HTML 'name' attribute of the form tag
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
enctype_attr = ''
if enctype:
enctype_attr = 'enctype="%s"' % enctype
output = """
<form action="%s" method="%s" %s%s%s>""" % \
(action, method.lower().strip() in ['get', 'post'] and method or 'get',
enctype_attr, form_name and ' name="%s"' % form_name or '',
form_id and ' id="%s"' % form_id or '')
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += text + '\n'
output += """
</form>"""
return output
def tmpl_warnings(self, warnings=[], ln=CFG_SITE_LANG):
"""
Display len(warnings) warning fields
@param warnings: list of warning tuples (warning_text, warning_color)
@param ln=language
@return: html output
"""
if type(warnings) is not list:
warnings = [warnings]
warningbox = ""
if warnings:
for i in range(len(warnings)):
warning_text = warnings[i][0]
warning_color = warnings[i][1]
if warning_color == 'green':
span_class = 'exampleleader'
else:
span_class = 'important'
warningbox += '''
<span class="%(span_class)s">%(warning)s</span><br />''' % \
{ 'span_class' : span_class,
'warning' : warning_text }
return warningbox
else:
return ""
def tmpl_error(self, error, ln=CFG_SITE_LANG):
"""
Display error
@param error: string
@param ln=language
@return: html output
"""
_ = gettext_set_language(ln)
errorbox = ""
if error != "":
errorbox = "<div class=\"errorbox\">\n <b>Error:</b>\n"
errorbox += " <p>"
errorbox += error + " </p>"
errorbox += "</div><br />\n"
return errorbox
def tmpl_add_comment_form(self, recID, uid, nickname, ln, msg,
warnings, textual_msg=None, can_attach_files=False,
user_is_subscribed_to_discussion=False, reply_to=None):
"""
Add form for comments
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to
warning, or when replying to a comment
@param textual_msg: same as 'msg', but contains the textual
version in case user cannot display CKeditor
@param warnings: list of warning tuples (warning_text, warning_color)
        @param can_attach_files: if user can attach files to the record or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
@param reply_to: the ID of the comment we are replying to. None if not replying
@return html add comment form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
if textual_msg is None:
textual_msg = msg
# FIXME a cleaner handling of nicknames is needed.
if not nickname:
(uid, nickname, display) = get_user_info(uid)
if nickname:
note = _("Note: Your nickname, %s, will be displayed as author of this comment.") % ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
if not CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR:
note += '<br />' + ' '*10 + cgi.escape('You can use some HTML tags: <a href>, <strong>, <blockquote>, <br />, <p>, <em>, <ul>, <li>, <b>, <i>')
#from invenio.search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
warnings = self.tmpl_warnings(warnings, ln)
# Prepare file upload settings. We must enable file upload in
# the ckeditor + a simple file upload interface (independant from editor)
file_upload_url = None
simple_attach_file_interface = ''
if isGuestUser(uid):
simple_attach_file_interface = "<small><em>%s</em></small><br/>" % _("Once logged in, authorized users can also attach files.")
if can_attach_files:
# Note that files can be uploaded only when user is logged in
#file_upload_url = '%s/%s/%i/comments/attachments/put' % \
# (CFG_SITE_URL, CFG_SITE_RECORD, recID)
simple_attach_file_interface = '''
<div id="uploadcommentattachmentsinterface">
<small>%(attach_msg)s: <em>(%(nb_files_limit_msg)s. %(file_size_limit_msg)s)</em></small><br />
<input class="multi max-%(CFG_WEBCOMMENT_MAX_ATTACHED_FILES)s" type="file" name="commentattachment[]"/><br />
<noscript>
<input type="file" name="commentattachment[]" /><br />
</noscript>
</div>
''' % \
{'CFG_WEBCOMMENT_MAX_ATTACHED_FILES': CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'attach_msg': CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 and _("Optionally, attach a file to this comment") or \
_("Optionally, attach files to this comment"),
'nb_files_limit_msg': _("Max one file") and CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 or \
_("Max %i files") % CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'file_size_limit_msg': CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE > 0 and _("Max %(x_nb_bytes)s per file") % {'x_nb_bytes': (CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE < 1024*1024 and (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB') or (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/(1024*1024)) + 'MB'))} or ''}
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=textual_msg,
width='100%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
file_upload_url=file_upload_url,
toolbar_set = "WebComment",
ln=ln)
subscribe_to_discussion = ''
if not user_is_subscribed_to_discussion:
# Offer to subscribe to discussion
subscribe_to_discussion = '<small><input type="checkbox" name="subscribe" id="subscribe"/><label for="subscribe">%s</label></small>' % _("Send me an email when a new comment is posted")
form = """<div id="comment-write"><h2>%(add_comment)s</h2>
%(editor)s
<br />
%(simple_attach_file_interface)s
<span class="reportabuse">%(note)s</span>
<div class="submit-area">
%(subscribe_to_discussion)s<br />
<input class="adminbutton" type="submit" value="Add comment" onclick="user_must_confirm_before_leaving_page = false;return true;"/>
%(reply_to)s
</div>
</div>
""" % {'note': note,
'record_label': _("Article") + ":",
'comment_label': _("Comment") + ":",
'add_comment': _('Add comment'),
'editor': editor,
'subscribe_to_discussion': subscribe_to_discussion,
'reply_to': reply_to and '<input type="hidden" name="comid" value="%s"/>' % reply_to or '',
'simple_attach_file_interface': simple_attach_file_interface}
form_link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/%(function)s?%(arguments)s" % link_dic
form = self.create_write_comment_hiddenform(action=form_link, method="post", text=form, button='Add comment',
enctype='multipart/form-data', form_id='cmtForm',
form_name='cmtForm')
return warnings + form + self.tmpl_page_do_not_leave_comment_page_js(ln=ln)
def tmpl_add_comment_form_with_ranking(self, recID, uid, nickname, ln, msg, score, note,
warnings, textual_msg=None, show_title_p=False,
can_attach_files=False):
"""
Add form for reviews
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to warning
@param textual_msg: the textual version of 'msg' when user cannot display Ckeditor
@param score: review score
@param note: review title
@param warnings: list of warning tuples (warning_text, warning_color)
@param show_title_p: if True, prefix the form with "Add Review" as title
@param can_attach_files: if user can upload attach file to record or not
@return: html add review form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
warnings = self.tmpl_warnings(warnings, ln)
if textual_msg is None:
textual_msg = msg
#from search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
if nickname:
note_label = _("Note: Your nickname, %s, will be displayed as the author of this review.")
note_label %= ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note_label = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
selected0 = ''
selected1 = ''
selected2 = ''
selected3 = ''
selected4 = ''
selected5 = ''
if score == 0:
selected0 = ' selected="selected"'
elif score == 1:
selected1 = ' selected="selected"'
elif score == 2:
selected2 = ' selected="selected"'
elif score == 3:
selected3 = ' selected="selected"'
elif score == 4:
selected4 = ' selected="selected"'
elif score == 5:
selected5 = ' selected="selected"'
## file_upload_url = None
## if can_attach_files:
## file_upload_url = '%s/%s/%i/comments/attachments/put' % \
## (CFG_SITE_URL, CFG_SITE_RECORD, recID)
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=msg,
width='90%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
# file_upload_url=file_upload_url,
toolbar_set = "WebComment",
ln=ln)
form = """%(add_review)s
<table style="width: 100%%">
<tr>
<td style="padding-bottom: 10px;">%(rate_label)s:
<select name=\"score\" size=\"1\">
<option value=\"0\"%(selected0)s>-%(select_label)s-</option>
<option value=\"5\"%(selected5)s>***** (best)</option>
<option value=\"4\"%(selected4)s>****</option>
<option value=\"3\"%(selected3)s>***</option>
<option value=\"2\"%(selected2)s>**</option>
<option value=\"1\"%(selected1)s>* (worst)</option>
</select>
</td>
</tr>
<tr>
<td>%(title_label)s:</td>
</tr>
<tr>
<td style="padding-bottom: 10px;">
<input type="text" name="note" maxlength="250" style="width:90%%" value="%(note)s" />
</td>
</tr>
<tr>
<td>%(write_label)s:</td>
</tr>
<tr>
<td>
%(editor)s
</td>
</tr>
<tr>
<td class="reportabuse">%(note_label)s</td></tr>
</table>
""" % {'article_label': _('Article'),
'rate_label': _("Rate this article"),
'select_label': _("Select a score"),
'title_label': _("Give a title to your review"),
'write_label': _("Write your review"),
'note_label': note_label,
'note' : note!='' and cgi.escape(note, quote=True) or "",
'msg' : msg!='' and msg or "",
#'record' : record_details
'add_review': show_title_p and ('<h2>'+_('Add review')+'</h2>') or '',
'selected0': selected0,
'selected1': selected1,
'selected2': selected2,
'selected3': selected3,
'selected4': selected4,
'selected5': selected5,
'editor': editor,
}
form_link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/%(function)s?%(arguments)s" % link_dic
form = self.createhiddenform(action=form_link, method="post", text=form, button=_('Add Review'))
return warnings + form
def tmpl_add_comment_successful(self, recID, ln, reviews, warnings, success):
"""
@param recID: record id
@param ln: language
@return: html page of successfully added comment/review
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'display',
'arguments' : 'ln=%s&do=od' % ln,
'recID' : recID,
'discussion': reviews == 1 and 'reviews' or 'comments'}
link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/%(function)s?%(arguments)s" % link_dic
if warnings:
out = self.tmpl_warnings(warnings, ln) + '<br /><br />'
else:
if reviews:
out = _("Your review was successfully added.") + '<br /><br />'
else:
out = _("Your comment was successfully added.") + '<br /><br />'
link += "#C%s" % success
out += '<a href="%s">' % link
out += _('Back to record') + '</a>'
out += '<br/><br/>' \
+ _('You can also view all the comments you have submitted so far on "%(x_url_open)sYour Comments%(x_url_close)s" page.') % \
{'x_url_open': '<a target="_blank" href="%(CFG_SITE_URL)s/yourcomments?ln=%(ln)s">' % {'CFG_SITE_URL': CFG_SITE_URL, 'ln': ln},
'x_url_close': '</a>'}
return out
def tmpl_create_multiple_actions_form(self,
form_name="",
form_action="",
method="get",
action_display={},
action_field_name="",
button_label="",
button_name="",
content="",
**hidden):
""" Creates an HTML form with a multiple choice of actions and a button to select it.
        @param form_action: link to the receiver of the form
        @param form_name: name of the HTML form
@param method: either 'GET' or 'POST'
        @param action_display: dictionary of actions:
                               the key is the HTML value of the action,
                               the value is the label displayed in the drop-down list
@param action_field_name: html name of action field
@param button_label: what's written on the button
@param button_name: html name of the button
        @param content: what's inside the form
@param **hidden: dictionary of name/value pairs of hidden fields.
"""
output = """
<form action="%s" method="%s">""" % (form_action, method)
output += """
<table>
<tr>
<td style="vertical-align: top" colspan="2">
"""
output += content + '\n'
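        # Render the hidden fields passed via **hidden; a list value expands
        # into one hidden <input> per item, so repeated parameters survive.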
for key in hidden.keys():
            if isinstance(hidden[key], list):
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td style="text-align:right;">"""
        if isinstance(action_display, dict) and action_display:
output += """
<select name="%s">""" % action_field_name
for (key, value) in action_display.items():
output += """
<option value="%s">%s</option>""" % (key, value)
output += """
</select>"""
output += """
</td>
<td style="text-align:left;">
<input class="adminbutton" type="submit" value="%s" name="%s"/>""" % (button_label, button_name)
output += """
</td>
</tr>
</table>
</form>"""
return output
def tmpl_admin_index(self, ln):
"""
Index page
"""
# load the right message language
_ = gettext_set_language(ln)
out = '<ol>'
if CFG_WEBCOMMENT_ALLOW_COMMENTS or CFG_WEBCOMMENT_ALLOW_REVIEWS:
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
out += '<h3>Comments status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=1">%(hot_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_cmt_label': _("View most commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=1">%(latest_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_cmt_label': _("View latest commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=0">%(reported_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_cmt_label': _("View all comments reported as abuse")}
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
out += '<h3>Reviews status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=0">%(hot_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_rev_label': _("View most reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=0">%(latest_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_rev_label': _("View latest reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=1">%(reported_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_rev_label': _("View all reviews reported as abuse")}
#<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/delete?ln=%(ln)s&comid=-1">%(delete_label)s</a></li>
out +="""
<h3>General</h3>
<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/users?ln=%(ln)s">%(view_users)s</a></li>
<li><a href="%(siteurl)s/help/admin/webcomment-admin-guide">%(guide)s</a></li>
""" % {'siteurl' : CFG_SITE_URL,
#'delete_label': _("Delete/Undelete comment(s) or suppress abuse report(s)"),
'view_users': _("View all users who have been reported"),
'ln' : ln,
'guide' : _("Guide")}
else:
out += _("Comments and reviews are disabled") + '<br />'
out += '</ol>'
from invenio.bibrankadminlib import addadminbox
        return addadminbox('<b>%s</b>' % _("Menu"), [out])
def tmpl_admin_delete_form(self, ln, warnings):
"""
Display admin interface to fetch list of records to delete
@param warnings: list of warning tuples (warning_text, warning_color)
see tmpl_warnings, warning_color is optional
"""
# load the right message language
_ = gettext_set_language(ln)
warnings = self.tmpl_warnings(warnings, ln)
out = '''
<br />
%s<br />
<br />'''% _("Please enter the ID of the comment/review so that you can view it before deciding whether to delete it or not")
form = '''
<table>
<tr>
<td>%s</td>
<td><input type=text name="comid" size="10" maxlength="10" value="" /></td>
</tr>
              <tr>
                <td><br /></td>
              </tr>
</table>
<br />
%s <br/>
<br />
<table>
<tr>
<td>%s</td>
<td><input type=text name="recid" size="10" maxlength="10" value="" /></td>
</tr>
              <tr>
                <td><br /></td>
              </tr>
</table>
<br />
''' % (_("Comment ID:"),
_("Or enter a record ID to list all the associated comments/reviews:"),
_("Record ID:"))
form_link = "%s/admin/webcomment/webcommentadmin.py/delete?ln=%s" % (CFG_SITE_URL, ln)
form = self.createhiddenform(action=form_link, method="get", text=form, button=_('View Comment'))
return warnings + out + form
def tmpl_admin_users(self, ln, users_data):
"""
        @param users_data: tuple of user tuples, i.e. (ct, ct, ...)
                          where ct is a tuple (total_number_reported, total_comments_reported, total_reviews_reported, total_nb_votes_yes_of_reported,
                          total_nb_votes_total_of_reported, user_id, user_email, user_nickname)
                          sorted in decreasing order of total_number_reported
"""
_ = gettext_set_language(ln)
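        # Index positions of the fields inside each tuple of users_data
        # (see the docstring above).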
u_reports = 0
u_comment_reports = 1
u_reviews_reports = 2
u_nb_votes_yes = 3
u_nb_votes_total = 4
u_uid = 5
u_email = 6
u_nickname = 7
if not users_data:
return self.tmpl_warnings([(_("There have been no reports so far."), 'green')])
user_rows = ""
for utuple in users_data:
com_label = _("View all %s reported comments") % utuple[u_comment_reports]
com_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=0">%s</a><br />''' % \
(CFG_SITE_URL, ln, utuple[u_uid], com_label)
rev_label = _("View all %s reported reviews") % utuple[u_reviews_reports]
rev_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=1">%s</a>''' % \
(CFG_SITE_URL, ln, utuple[u_uid], rev_label)
if not utuple[u_nickname]:
user_info = get_user_info(utuple[u_uid])
nickname = user_info[2]
else:
nickname = utuple[u_nickname]
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
review_row = """
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>"""
review_row %= (utuple[u_nb_votes_yes],
utuple[u_nb_votes_total] - utuple[u_nb_votes_yes],
utuple[u_nb_votes_total])
else:
review_row = ''
user_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(nickname)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(email)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(uid)s</td>%(review_row)s
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray; font-weight: bold;">%(reports)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(com_link)s%(rev_link)s</td>
</tr>""" % { 'nickname' : nickname,
'email' : utuple[u_email],
'uid' : utuple[u_uid],
'reports' : utuple[u_reports],
'review_row': review_row,
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'com_link' : CFG_WEBCOMMENT_ALLOW_COMMENTS and com_link or "",
'rev_link' : CFG_WEBCOMMENT_ALLOW_REVIEWS and rev_link or ""
}
out = "<br />"
out += _("Here is a list, sorted by total number of reports, of all users who have had a comment reported at least once.")
out += """
<br />
<br />
<table class="admin_wvar" style="width: 100%%;">
<thead>
<tr class="adminheaderleft">
<th>"""
out += _("Nickname") + '</th>\n'
out += '<th>' + _("Email") + '</th>\n'
out += '<th>' + _("User ID") + '</th>\n'
if CFG_WEBCOMMENT_ALLOW_REVIEWS > 0:
out += '<th>' + _("Number positive votes") + '</th>\n'
out += '<th>' + _("Number negative votes") + '</th>\n'
out += '<th>' + _("Total number votes") + '</th>\n'
out += '<th>' + _("Total number of reports") + '</th>\n'
out += '<th>' + _("View all user's reported comments/reviews") + '</th>\n'
out += """
</tr>
</thead>
<tbody>%s
</tbody>
</table>
""" % user_rows
return out
def tmpl_admin_select_comment_checkbox(self, cmt_id):
""" outputs a checkbox named "comidXX" where XX is cmt_id """
return '<input type="checkbox" name="comid%i" />' % int(cmt_id)
def tmpl_admin_user_info(self, ln, nickname, uid, email):
""" prepares informations about a user"""
_ = gettext_set_language(ln)
out = """
%(nickname_label)s: %(messaging)s<br />
%(uid_label)s: %(uid)i<br />
%(email_label)s: <a href="mailto:%(email)s">%(email)s</a>"""
out %= {'nickname_label': _("Nickname"),
'messaging': self.create_messaging_link(uid, nickname, ln),
'uid_label': _("User ID"),
'uid': int(uid),
'email_label': _("Email"),
'email': email}
return out
def tmpl_admin_review_info(self, ln, reviews, nb_reports, cmt_id, rec_id, status):
""" outputs information about a review """
_ = gettext_set_language(ln)
if reviews:
reported_label = _("This review has been reported %i times")
else:
reported_label = _("This comment has been reported %i times")
reported_label %= int(nb_reports)
out = """
%(reported_label)s<br />
<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)i?ln=%(ln)s">%(rec_id_label)s</a><br />
%(cmt_id_label)s"""
out %= {'reported_label': reported_label,
'rec_id_label': _("Record") + ' #' + str(rec_id),
'siteurl': CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'rec_id': int(rec_id),
'cmt_id_label': _("Comment") + ' #' + str(cmt_id),
'ln': ln}
if status in ['dm', 'da']:
out += '<br /><div style="color:red;">Marked as deleted</div>'
return out
def tmpl_admin_latest(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
            which is returned by webcommentadminlib.py/query_get_latest i.e.
tuple (nickname, uid, date_creation, body, id) if latest comments or
tuple (nickname, uid, date_creation, body, star_score, id) if latest reviews
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/latest?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
out += """
<ol>
"""
for (cmt_tuple, meta_data) in comment_data:
bibrec_id = meta_data[3]
content = format_record(bibrec_id, "hs")
if not comments:
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> reviewed by %(user)s</a>
(%(stars)s) \"%(body)s\" on <i> %(date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/reviews',
'user':cmt_tuple[0] ,
'stars': '*' * int(cmt_tuple[4]) ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
else:
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> commented by %(user)s</a>,
\"%(body)s\" on <i> %(date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/comments',
'user':cmt_tuple[0] ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
out += """</ol>"""
return out
def tmpl_admin_hot(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
            which is returned by webcommentadminlib.py/query_get_hot i.e.
tuple (id_bibrec, date_last_comment, users, count)
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/hot?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
        out += """
          <ol>
          """
        for cmt_tuple in comment_data:
bibrec_id = cmt_tuple[0]
content = format_record(bibrec_id, "hs")
last_comment_date = cmt_tuple[1]
total_users = cmt_tuple[2]
total_comments = cmt_tuple[3]
if comments:
comment_url = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/comments'
str_comment = int(total_comments) > 1 and 'comments' or 'comment'
else:
comment_url = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/reviews'
str_comment = int(total_comments) > 1 and 'reviews' or 'review'
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> %(total_comments)s
%(str_comment)s</a>
(%(total_users)s %(user)s), latest on <i> %(last_comment_date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': comment_url ,
'total_comments': total_comments,
'str_comment': str_comment,
'total_users': total_users,
'user': int(total_users) > 1 and 'users' or 'user',
'last_comment_date': last_comment_date}
out += """</ol>"""
return out
def tmpl_admin_comments(self, ln, uid, comID, recID, comment_data, reviews, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
which is returned by webcomment.py/query_retrieve_comments_or_remarks i.e.
tuple of comment where comment is
tuple (nickname,
date_creation,
body,
id) if ranking disabled or
tuple (nickname,
date_creation,
body,
nb_votes_yes,
nb_votes_total,
star_score,
title,
id)
"""
_ = gettext_set_language(ln)
coll_form = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
coll_form += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&reviews=%s">' % (CFG_SITE_URL, ln, reviews)
coll_form += '<input type="hidden" name="ln" value=%s>' % ln
coll_form += '<input type="hidden" name="reviews" value=%s>' % reviews
coll_form += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
coll_form += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
coll_form += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
coll_form += '</select></div></form><br />'
if error == 1:
coll_form += "<i>User is not authorized to view such collection.</i><br />"
return coll_form
elif error == 2:
coll_form += "<i>There are no %s for this collection.</i><br />" % (reviews and 'reviews' or 'comments')
return coll_form
comments = []
comments_info = []
checkboxes = []
users = []
for (cmt_tuple, meta_data) in comment_data:
if reviews:
comments.append(self.tmpl_get_comment_with_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[9],#status
0,
cmt_tuple[5],#nb_votes_total
cmt_tuple[4],#nb_votes_yes
cmt_tuple[6],#star_score
cmt_tuple[7]))#title
else:
comments.append(self.tmpl_get_comment_without_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[5],#status
0,
None, #reply_link
None, #report_link
None, #undelete_link
None, #delete_links
None, #unreport_link
-1, # recid
cmt_tuple[4] # com_id
))
users.append(self.tmpl_admin_user_info(ln,
meta_data[0], #nickname
meta_data[1], #uid
meta_data[2]))#email
if reviews:
status = cmt_tuple[9]
else:
status = cmt_tuple[5]
comments_info.append(self.tmpl_admin_review_info(ln,
reviews,
meta_data[5], # nb abuse reports
meta_data[3], # cmt_id
meta_data[4], # rec_id
status)) # status
checkboxes.append(self.tmpl_admin_select_comment_checkbox(meta_data[3]))
form_link = "%s/admin/webcomment/webcommentadmin.py/del_com?ln=%s" % (CFG_SITE_URL, ln)
out = """
<table class="admin_wvar" style="width:100%%;">
<thead>
<tr class="adminheaderleft">
<th>%(review_label)s</th>
<th>%(written_by_label)s</th>
<th>%(review_info_label)s</th>
<th>%(select_label)s</th>
</tr>
</thead>
<tbody>""" % {'review_label': reviews and _("Review") or _("Comment"),
'written_by_label': _("Written by"),
'review_info_label': _("General informations"),
'select_label': _("Select")}
        for i in range(0, len(comments)):
out += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintd" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (comments[i], users[i], comments_info[i], checkboxes[i])
out += """
</tbody>
</table>"""
if reviews:
action_display = {
'delete': _('Delete selected reviews'),
'unreport': _('Suppress selected abuse report'),
'undelete': _('Undelete selected reviews')
}
else:
action_display = {
'undelete': _('Undelete selected comments'),
'delete': _('Delete selected comments'),
'unreport': _('Suppress selected abuse report')
}
form = self.tmpl_create_multiple_actions_form(form_name="admin_comment",
form_action=form_link,
method="post",
action_display=action_display,
action_field_name='action',
button_label=_("OK"),
button_name="okbutton",
content=out)
if uid > 0:
header = '<br />'
if reviews:
header += _("Here are the reported reviews of user %s") % uid
else:
header += _("Here are the reported comments of user %s") % uid
header += '<br /><br />'
if comID > 0 and recID <= 0 and uid <= 0:
if reviews:
header = '<br />' +_("Here is review %s")% comID + '<br /><br />'
else:
header = '<br />' +_("Here is comment %s")% comID + '<br /><br />'
if uid > 0 and comID > 0 and recID <= 0:
if reviews:
header = '<br />' + _("Here is review %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
else:
header = '<br />' + _("Here is comment %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
            header += '<br /><br />'
if comID <= 0 and recID <= 0 and uid <= 0:
header = '<br />'
if reviews:
header += _("Here are all reported reviews sorted by the most reported")
else:
header += _("Here are all reported comments sorted by the most reported")
header += "<br /><br />"
elif recID > 0:
header = '<br />'
if reviews:
header += _("Here are all reviews for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=0">%s</a>' % (CFG_SITE_URL, recID, _("Show comments"))
else:
header += _("Here are all comments for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=1">%s</a>' % (CFG_SITE_URL, recID, _("Show reviews"))
header += "<br /><br />"
return coll_form + header + form
def tmpl_admin_del_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_deleted),
                        was_successfully_deleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully deleted"), table_rows)
return out
def tmpl_admin_undel_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_undeleted),
                        was_successfully_undeleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully undeleted"), table_rows)
return out
def tmpl_admin_suppress_abuse_report(self, del_res, ln=CFG_SITE_LANG):
"""
        @param del_res: list of the following tuple (comment_id, was_successfully_suppressed),
                        was_successfully_suppressed is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style ="padding-right: 10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully suppressed abuse report"), table_rows)
return out
def tmpl_mini_review(self, recID, ln=CFG_SITE_LANG, action='SUBMIT',
avg_score=0, nb_comments_total=0):
"""Display the mini version of reviews (only the grading part)"""
_ = gettext_set_language(ln)
url = '%s/%s/%s/reviews/add?ln=%s&action=%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, action)
if avg_score > 0:
score = _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '<b>%.1f</b>' % avg_score,
'x_nb_reviews': nb_comments_total}
else:
score = '(' +_("Not yet reviewed") + ')'
if avg_score == 5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'full'
elif avg_score >= 4.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'half'
elif avg_score >= 4:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', ''
elif avg_score >= 3.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'half', ''
elif avg_score >= 3:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', '', ''
elif avg_score >= 2.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'half', '', ''
elif avg_score >= 2:
s1, s2, s3, s4, s5 = 'full', 'full', '', '', ''
elif avg_score >= 1.5:
s1, s2, s3, s4, s5 = 'full', 'half', '', '', ''
        elif avg_score >= 1:
s1, s2, s3, s4, s5 = 'full', '', '', '', ''
else:
s1, s2, s3, s4, s5 = '', '', '', '', ''
out = '''
<small class="detailedRecordActions">%(rate)s:</small><br /><br />
<div style="margin:auto;width:160px;">
<span style="display:none;">Rate this document:</span>
<div class="star %(s1)s" ><a href="%(url)s&score=1">1</a>
<div class="star %(s2)s" ><a href="%(url)s&score=2">2</a>
<div class="star %(s3)s" ><a href="%(url)s&score=3">3</a>
<div class="star %(s4)s" ><a href="%(url)s&score=4">4</a>
<div class="star %(s5)s" ><a href="%(url)s&score=5">5</a></div></div></div></div></div>
<div style="clear:both"> </div>
</div>
<small>%(score)s</small>
''' % {'url': url,
'score': score,
'rate': _("Rate this document"),
's1': s1,
's2': s2,
's3': s3,
's4': s4,
's5': s5
}
return out
def tmpl_email_new_comment_header(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG, uid=-1):
"""
Prints the email header used to notify subscribers that a new
comment/review was added.
        @param recID: the ID of the commented/reviewed record
        @param title: the title of the commented/reviewed record
        @param reviews: True if it is a review, False if it is a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
user_info = collect_user_info(uid)
out = _("Hello:") + '\n\n' + \
(reviews and _("The following review was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:") or \
_("The following comment was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:")) % \
{'CFG_SITE_NAME': CFG_SITE_NAME,
'user_nickname': user_info['nickname']}
out += '\n(<%s>)' % (CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID))
out += '\n\n\n'
return out
def tmpl_email_new_comment_footer(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG):
"""
Prints the email footer used to notify subscribers that a new
comment/review was added.
        @param recID: the ID of the commented/reviewed record
        @param title: the title of the commented/reviewed record
        @param reviews: True if it is a review, False if it is a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
out = '\n\n-- \n'
out += _("This is an automatic message, please don't reply to it.")
out += '\n'
out += _("To post another comment, go to <%(x_url)s> instead.") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
(reviews and '/reviews' or '/comments') + '/add'}
out += '\n'
if not reviews:
out += _("To specifically reply to this comment, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
'/comments/add?action=REPLY&comid=' + str(comID)}
out += '\n'
if can_unsubscribe:
out += _("To unsubscribe from this discussion, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
'/comments/unsubscribe'}
out += '\n'
out += _("For any question, please use <%(CFG_SITE_SUPPORT_EMAIL)s>") % \
{'CFG_SITE_SUPPORT_EMAIL': CFG_SITE_SUPPORT_EMAIL}
return out
def tmpl_email_new_comment_admin(self, recID):
"""
Prints the record information used in the email to notify the
system administrator that a new comment has been posted.
@param recID: the ID of the commented/reviewed record
"""
out = ""
title = get_fieldvalues(recID, "245__a")
authors = ', '.join(get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a"))
#res_author = ""
#res_rep_num = ""
#for author in authors:
# res_author = res_author + ' ' + author
dates = get_fieldvalues(recID, "260__c")
report_nums = get_fieldvalues(recID, "037__a")
report_nums += get_fieldvalues(recID, "088__a")
report_nums = ', '.join(report_nums)
#for rep_num in report_nums:
# res_rep_num = res_rep_num + ', ' + rep_num
out += " Title = %s \n" % (title and title[0] or "No Title")
out += " Authors = %s \n" % authors
if dates:
out += " Date = %s \n" % dates[0]
out += " Report number = %s" % report_nums
return out
def tmpl_page_do_not_leave_comment_page_js(self, ln):
"""
Code to ask user confirmation when leaving the page, so that the
comment is not lost if clicking by mistake on links.
@param ln: the user language
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''
<script type="text/javascript" language="JavaScript">//<![CDATA[
var initial_comment_value = document.forms.cmtForm.msg.value;
var user_must_confirm_before_leaving_page = true;
window.onbeforeunload = confirmExit;
function confirmExit() {
var editor_type_field = document.getElementById('%(name)seditortype');
if (editor_type_field && editor_type_field.value == 'ckeditor') {
var oEditor = CKEDITOR.instances.%(name)s;
if (user_must_confirm_before_leaving_page && oEditor.checkDirty()) {
/* Might give false positives, when editor pre-loaded
with content. But is better than the opposite */
return "%(message)s";
}
} else {
if (user_must_confirm_before_leaving_page && document.forms.cmtForm.msg.value != initial_comment_value){
return "%(message)s";
}
}
}
//]]></script>
''' % {'message': _('Your comment will be lost.').replace('"', '\\"'),
'name': 'msg'}
return out
def tmpl_your_comments(self, user_info, comments, page_number=1, selected_order_by_option="lcf", selected_display_number_option="all", selected_display_format_option="rc", nb_total_results=0, nb_total_pages=0, ln=CFG_SITE_LANG):
"""
Display all submitted comments by the user
@param user_info: standard user info object.
@param comments: ordered list of tuples (id_bibrec, comid, date_creation, body, status, in_reply_to_id_cmtRECORDCOMMENT)
@param page_number: page on which the user is.
@type page_number: integer
        @param selected_order_by_option: selected ordering option. Can be one of:
- ocf: Oldest comment first
- lcf: Latest comment first
- grof: Group by record, oldest commented first
- grlf: Group by record, latest commented first
@type selected_order_by_option: string
@param selected_display_number_option: number of results to show per page. Can be a string-digit or 'all'.
@type selected_display_number_option: string
@param selected_display_format_option: how to show records. Can be one of:
- rc: Records and comments
- ro: Records only
- co: Comments only
@type selected_display_format_option: string
@param nb_total_results: total number of items to display.
@type nb_total_results: integer
@param nb_total_pages: total number of pages.
@type nb_total_pages: integer
        @param ln: language
@type ln: string
"""
# load the right message language
_ = gettext_set_language(ln)
your_comments_order_by_options = (('ocf', _("Oldest comment first")),
('lcf', _("Latest comment first")),
('grof', _("Group by record, oldest commented first")),
('grlf', _("Group by record, latest commented first")),
)
your_comments_display_format_options = (('rc', _("Records and comments")),
('ro', _('Records only')),
('co', _('Comments only')),
)
your_comments_display_number_options = (('20', _("%s items") % 20),
('50', _("%s items") % 50),
('100', _("%s items") % 100),
('500',_("%s items") % 500),
('all', _('All items')),
)
out = ""
out += _("Below is the list of the comments you have submitted so far.") + "<br/>"
if CFG_CERN_SITE:
if nb_total_results == 0:
out = _('You have not yet submitted any comment in the document "discussion" tab.') + "<br/>"
user_roles = acc_get_user_roles_from_user_info(user_info)
if acc_get_role_id('ATLASDraftPublication') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': 'ATLAS Publication Drafts Comments',
'p': user_info['email'],
'f': '859__f'},
link_label='ATLAS Publication Drafts Comments')
elif acc_get_role_id('cmsphysicsmembers') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': '',
'p': user_info['email'],
'f': '859__f'},
link_label='CMS Publication Drafts Comments')
elif acc_get_role_id('LHCbDraftPublication') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': '',
'p': user_info['email'],
'f': '859__f'},
link_label='LHCb Publication Drafts Comments')
out += '<br/>'
if nb_total_results == 0:
return out
else:
if nb_total_results == 0:
return _("You have not yet submitted any comment. Browse documents from the search interface and take part to discussions!")
# Show controls
format_selection = create_html_select(your_comments_display_format_options,
name="format", selected=selected_display_format_option,
attrs={'id': 'format',
'onchange': 'this.form.submit();'})
order_by_selection = create_html_select(your_comments_order_by_options,
name="order_by", selected=selected_order_by_option,
attrs={'id': 'order_by',
'onchange': 'this.form.submit();'})
nb_per_page_selection = create_html_select(your_comments_display_number_options,
name="per_page", selected=selected_display_number_option,
attrs={'id': 'per_page',
'onchange': 'this.form.submit();'})
out += '''
<form method="get" class="yourcommentsdisplayoptionsform">
<fieldset id="yourcommentsdisplayoptions">
<legend>%(display_option_label)s:</legend>
<label for="format">%(format_selection_label)s :</label> %(format_selection)s
<label for="order_by">%(order_selection_label)s :</label> %(order_by_selection)s
<label for="per_page">%(per_page_selection_label)s :</label> %(nb_per_page_selection)s
<noscript><input type="submit" value="%(refresh_label)s" class="formbutton"/></noscript>
</fieldset>
</form>
''' % {'format_selection_label': _("Display"),
'order_selection_label': _("Order by"),
'per_page_selection_label': _("Per page"),
'format_selection': format_selection,
'order_by_selection': order_by_selection,
'nb_per_page_selection': nb_per_page_selection,
'display_option_label': _("Display options"),
'refresh_label': _("Refresh"),
}
# Show comments
last_id_bibrec = None
nb_record_groups = 0
out += '<div id="yourcommentsmaincontent">'
for id_bibrec, comid, date_creation, body, status, in_reply_to_id_cmtRECORDCOMMENT in comments:
if last_id_bibrec != id_bibrec and selected_display_format_option in ('rc', 'ro'):
# We moved to another record. Show some info about
# current record.
if last_id_bibrec:
# Close previous group
out += "</div></div>"
nb_record_groups += 1
            # You might want to hide this information if the user does
            # not have access, though it would make sense that he can
            # at least know on which page his comment appears.
out += '''<div class="yourcommentsrecordgroup" id="yourcomments-record-group-%(recid)s">
<div class="yourcommentsrecordgroup%(recid)sheader">• ''' % {'recid': id_bibrec} + \
format_record(id_bibrec, of="HS") + '</div><div style="padding-left: 20px;">'
if selected_display_format_option != 'ro':
final_body = email_quoted_txt2html(body)
title = '<a name="C%s" id="C%s"></a>' % (comid, comid)
if status == "dm":
final_body = '<div class="webcomment_deleted_comment_message">%s</div>' % _("Comment deleted by the moderator")
elif status == "da":
final_body = ('<div class="webcomment_deleted_comment_message">%s<br /><br />' % _("You have deleted this comment: it is not visible by other users")) +\
final_body + '</div>'
links = []
if in_reply_to_id_cmtRECORDCOMMENT:
links.append(create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(id_bibrec) + '/comments/',
urlargd={'ln': ln},
link_label=_('(in reply to a comment)'),
urlhash=str(in_reply_to_id_cmtRECORDCOMMENT)))
links.append(create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(id_bibrec) + '/comments/',
urlargd={'ln': ln},
link_label=_('See comment on discussion page'),
urlhash='C' + str(comid)))
out += '''
<div class="webcomment_comment_box">
<div class="webcomment_comment_avatar"><img class="webcomment_comment_avatar_default" src="%(site_url)s/img/user-icon-1-24x24.gif" alt="avatar" /></div>
<div class="webcomment_comment_content">
<div class="webcomment_comment_title">
%(title)s
<div class="webcomment_comment_date">%(date)s</div>
<a class="webcomment_permalink" title="Permalink to this comment" href="#C%(comid)i">¶</a>
</div>
<div class="collapsible_content">
<blockquote>
%(body)s
</blockquote>
<div class="webcomment_comment_options">%(links)s</div>
</div>
<div class="clearer"></div>
</div>
<div class="clearer"></div>
</div>''' % \
{'title' : title,
'body' : final_body,
'links' : " ".join(links),
'date' : date_creation,
'site_url' : CFG_SITE_URL,
'comid' : comid,
}
last_id_bibrec = id_bibrec
out += '</div>' # end 'yourcommentsmaincontent'
# Show page navigation
page_links = ''
if selected_display_format_option == 'ro' and \
selected_order_by_option in ('ocf', 'lcf'):
# We just have an approximation here (we count by
            # comments, not records)
page_links += (_("%i comments found in total (not shown on this page)") % nb_total_results) + ' '
else:
page_links += (_("%i items found in total") % nb_total_results) + ' '
if selected_display_number_option != 'all':
# Previous
if page_number != 1:
page_links += create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': page_number - 1,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
_("Previous"))
# Page Numbers
for i in range(1, nb_total_pages + 1):
if i != page_number:
page_links += ' ' + \
create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': i,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
str(i)) + \
' '
elif nb_total_pages > 1:
page_links += ''' <b>%s</b> ''' % i
# Next
if page_number != nb_total_pages:
page_links += create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': page_number + 1,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
_("Next"))
out += '<br/><div id="yourcommentsnavigationlinks">' + page_links + '</div>'
return out
| EUDAT-B2SHARE/invenio-old | modules/webcomment/lib/webcomment_templates.py | Python | gpl-2.0 | 133,333 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
from nova.compute import power_state
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi
LOG = logging.getLogger(__name__)
_FAKE_NODES = ['fake-mini']
def set_nodes(nodes):
"""Sets FakeDriver's node.list.
It has effect on the following methods:
get_available_nodes()
get_available_resource
get_host_stats()
To restore the change, call restore_nodes()
"""
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = ['fake-mini']
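# Illustrative usage (not part of the original module): a test case might
# override the fake node list and restore it on teardown. The test class
# name below is hypothetical.
#
#     from nova.virt import fake
#
#     class MultiNodeTestCase(test.TestCase):
#         def setUp(self):
#             super(MultiNodeTestCase, self).setUp()
#             fake.set_nodes(['node-a', 'node-b'])
#
#         def tearDown(self):
#             fake.restore_nodes()
#             super(MultiNodeTestCase, self).tearDown()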
class FakeInstance(object):
def __init__(self, name, state):
self.name = name
self.state = state
def __getitem__(self, key):
return getattr(self, key)
class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver."""
    capabilities = {
        "has_imagecache": True,
        }
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-mini',
'hypervisor_hostname': 'fake-mini',
}
self._mounts = {}
def init_host(self, host):
return
def list_instances(self):
return self.instances.keys()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
name = instance['name']
state = power_state.RUNNING
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
def snapshot(self, context, instance, name):
        if instance['name'] not in self.instances:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
pass
@staticmethod
def get_host_ip_addr():
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def poll_rescued_instances(self, timeout):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
pass
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
pass
def power_off(self, instance):
pass
def power_on(self, instance):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, instance):
pass
def resume(self, instance, network_info, block_device_info=None):
pass
def destroy(self, instance, network_info, block_device_info=None):
key = instance['name']
if key in self.instances:
del self.instances[key]
else:
LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
{'key': key,
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach the disk to the instance at mountpoint using info"""
        if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach the disk attached to the instance"""
try:
del self._mounts[instance_name][mountpoint]
except KeyError:
pass
return True
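    # Illustrative round-trip for the two methods above (not part of the
    # original file); the connection_info dict is a placeholder:
    #
    #     drv.attach_volume({'driver_volume_type': 'fake'}, 'vm1', '/dev/vdb')
    #     assert drv._mounts['vm1']['/dev/vdb']
    #     drv.detach_volume({'driver_volume_type': 'fake'}, 'vm1', '/dev/vdb')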
def get_info(self, instance):
if instance['name'] not in self.instances:
raise exception.InstanceNotFound(instance_id=instance['name'])
i = self.instances[instance['name']]
return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def get_diagnostics(self, instance_name):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
bw = []
return bw
def get_all_volume_usage(self, context, instances, start_time,
stop_time=None):
"""Return usage info for volumes attached to vms on
a given host"""
volusage = []
return volusage
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakevncconsole.com',
'port': 6969}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_security_group_members(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def refresh_provider_fw_rules(self):
pass
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
if nodename not in _FAKE_NODES:
raise exception.NovaException("node %s is not found" % nodename)
dic = {'vcpus': 1,
'memory_mb': 8192,
'local_gb': 1028,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def get_instance_disk_info(self, instance_name):
return
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
return
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
return
def confirm_migration(self, migration, instance, network_info):
return
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info):
return
def unfilter_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
""" Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
stats = []
for nodename in _FAKE_NODES:
host_status = self.host_status_base.copy()
host_status['hypervisor_hostname'] = nodename
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
stats.append(host_status)
if len(stats) == 0:
raise exception.NovaException("FakeDriver has no node")
elif len(stats) == 1:
return stats[0]
else:
return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
return action
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
if not mode:
return 'off_maintenance'
return 'on_maintenance'
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
if enabled:
return 'enabled'
return 'disabled'
def get_disk_available_least(self):
""" """
pass
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
def get_available_nodes(self):
return _FAKE_NODES
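# Illustrative driver round-trip (not part of the original file); the
# instance dicts below are minimal stand-ins for real nova instance mappings:
#
#     drv = FakeDriver(FakeVirtAPI())
#     drv.init_host('fake-host')
#     drv.spawn(None, {'name': 'vm1'}, image_meta=None,
#               injected_files=[], admin_password=None)
#     assert drv.list_instances() == ['vm1']
#     drv.destroy({'name': 'vm1'}, network_info=None)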
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
return db.instance_update_and_get_original(context,
instance_uuid,
updates)
def instance_get_by_uuid(self, context, instance_uuid):
return db.instance_get_by_uuid(context, instance_uuid)
def instance_get_all_by_host(self, context, host):
return db.instance_get_all_by_host(context, host)
def aggregate_get_by_host(self, context, host, key=None):
return db.aggregate_get_by_host(context, host, key)
def aggregate_metadata_add(self, context, aggregate_id, metadata,
set_delete=False):
return db.aggregate_metadata_add(context, aggregate_id, metadata,
set_delete)
def aggregate_metadata_delete(self, context, aggregate_id, key):
return db.aggregate_metadata_delete(context, aggregate_id, key)
def security_group_get_by_instance(self, context, instance_uuid):
return db.security_group_get_by_instance(context, instance_uuid)
def security_group_rule_get_by_security_group(self, context,
security_group_id):
return db.security_group_rule_get_by_security_group(context,
security_group_id)
def provider_fw_rule_get_all(self, context):
return db.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return db.agent_build_get_by_triple(context,
hypervisor, os, architecture)
| aristanetworks/arista-ovs-nova | nova/virt/fake.py | Python | apache-2.0 | 14,277 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PropertyBatchDescriptionList(Model):
"""Describes a list of property batch operations to be executed. Either all or
none of the operations will be committed.
:param operations: A list of the property batch operations to be executed.
:type operations: list of :class:`PropertyBatchOperation
<azure.servicefabric.models.PropertyBatchOperation>`
"""
_attribute_map = {
'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'},
}
def __init__(self, operations=None):
self.operations = operations
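# Illustrative usage (not part of the generated file): batching two
# operations. The concrete operation classes and their signatures are
# assumed to follow the same generated models package.
#
#     from azure.servicefabric.models import (
#         GetPropertyBatchOperation, PutPropertyBatchOperation)
#
#     batch = PropertyBatchDescriptionList(operations=[
#         PutPropertyBatchOperation(property_name='Counter', value=...),
#         GetPropertyBatchOperation(property_name='Counter'),
#     ])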
| AutorestCI/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/property_batch_description_list.py | Python | mit | 1,084 |
# Copyright 2017 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard import api
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class CGroups(horizon.Panel):
name = _("Consistency Groups")
slug = 'cgroups'
permissions = (
('openstack.services.volume', 'openstack.services.volumev2',
'openstack.services.volumev3'),
)
policy_rules = (("volume", "consistencygroup:get_all"),)
def allowed(self, context):
request = context['request']
try:
return (
super(CGroups, self).allowed(context) and
request.user.has_perms(self.permissions) and
policy.check(self.policy_rules, request) and
api.cinder.get_microversion(request, 'consistency_groups') and
not api.cinder.get_microversion(request, 'groups')
)
except Exception:
LOG.error("Call to list enabled services failed. This is likely "
"due to a problem communicating with the Cinder "
"endpoint. Consistency Group panel will not be "
"displayed.")
return False
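# Illustrative note (not part of the original file): Horizon discovers this
# panel through an "enabled" plugin file; a minimal sketch, with the file
# name and panel group assumed:
#
#     # openstack_dashboard/enabled/_XXXX_project_cgroups_panel.py
#     PANEL = 'cgroups'
#     PANEL_DASHBOARD = 'project'
#     PANEL_GROUP = 'volumes'
#     ADD_PANEL = 'openstack_dashboard.dashboards.project.cgroups.panel.CGroups'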
| NeCTAR-RC/horizon | openstack_dashboard/dashboards/project/cgroups/panel.py | Python | apache-2.0 | 1,844 |