code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
# coding=utf-8
import unittest
"""889. Construct Binary Tree from Preorder and Postorder Traversal
https://leetcode.com/problems/construct-binary-tree-from-preorder-and-postorder-traversal/description/
Return any binary tree that matches the given preorder and postorder
traversals.
Values in the traversals `pre` and `post` are distinct positive integers.
**Example 1:**
**Input:** pre = [1,2,4,5,3,6,7], post = [4,5,2,6,7,3,1]
**Output:** [1,2,3,4,5,6,7]
**Note:**
* `1 <= pre.length == post.length <= 30`
* `pre[]` and `post[]` are both permutations of `1, 2, ..., pre.length`.
* It is guaranteed an answer exists. If there exist multiple answers, you can return any of them.
Similar Questions:
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructFromPrePost(self, pre, post):
"""
:type pre: List[int]
:type post: List[int]
:rtype: TreeNode
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
| openqt/algorithms | leetcode/python/lc889-construct-binary-tree-from-preorder-and-postorder-traversal.py | Python | gpl-3.0 | 1,207 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 21 10:23:46 2016
"""
import argparse
import cPickle
import seaborn as sns
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
from matplotlib.lines import Line2D
import re
sns.set_style("darkgrid")
lscol_ptn = re.compile("LockingState([0-9]+)")
def determine_num_joints(df, _=None):
# The number of joints equals the number of LockingState<N> columns.
return len([lscol_ptn.match(c).group(1) for c in df.columns if lscol_ptn.match(c) is not None])
def plot_locking_states(df, meta, num_joints=None):
marker_style = dict(linestyle=':', marker='o', s=100,)
def format_axes(ax):
ax.margins(0.2)
ax.set_axis_off()
if num_joints is None:
num_joints = determine_num_joints(df)
points = np.ones(num_joints)
fig, ax = plt.subplots()
for j in range(num_joints):
ax.text(-1.5, j, "%d" % j)
ax.text(0, -1.5, "time")
for t in df.index:
lock_states = df.loc[t][ [ "LockingState%d" % k for k in range(num_joints) ] ].tolist()
c = ["orange" if l else "k" for l in lock_states]
ax.scatter((t+0.1) * points, range(num_joints), color=c, **marker_style)
format_axes(ax)
ax.set_title('Locking state evolution')
ax.set_xlabel("t")
plt.plot()
def plot_entropy(df, meta, num_joints=None):
if num_joints is None:
num_joints = determine_num_joints(df)
plt.figure()
for j in range(num_joints):
var_name="Entropy%d"%j
plt.plot(df[var_name], label=var_name)
plt.legend()
def plot_dependency_posterior(df, meta, t, num_joints=None):
if num_joints is None:
num_joints = determine_num_joints(df)
plt.figure()
posterior=np.array([df["Posterior%d"%j].iloc[t] for j in range(num_joints)])
plt.matshow(posterior, interpolation='nearest')
plt.show()
def print_actions(df, num_joints=None):
pd.options.display.float_format = '{:,.2f}'.format
pd.set_option('expand_frame_repr', False)
if num_joints is None:
num_joints = determine_num_joints(df, None)
print(df[[u'CheckedJoint'] +
['DesiredPos{}'.format(j) for j in range(num_joints)] +
['LockingState{}'.format(j) for j in range(num_joints)]
])
#Index([u'DesiredPos0', u'DesiredPos1', u'DesiredPos2', u'DesiredPos3',
# u'DesiredPos4', u'CheckedJoint', u'RealPos0', u'RealPos1', u'RealPos2',
# u'RealPos3', u'RealPos4', u'LockingState0', u'LockingState1',
# u'LockingState2', u'LockingState3', u'LockingState4', u'Posterior0',
# u'Entropy0', u'Posterior1', u'Entropy1', u'Posterior2', u'Entropy2',
# u'Posterior3', u'Entropy3', u'Posterior4', u'Entropy4'],
# dtype='object')
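# A tiny synthetic example (illustrative only, not experiment data): it builds
# a DataFrame with the column layout documented above for two joints and three
# time steps, so print_actions() can be tried without a pickle file. The name
# demo_print_actions and the values are made up for illustration.
def demo_print_actions():
demo_df = pd.DataFrame({
'CheckedJoint': [0, 1, 0],
'DesiredPos0': [0.0, 0.1, 0.2],
'DesiredPos1': [0.0, 0.0, 0.3],
'LockingState0': [True, True, False],
'LockingState1': [False, True, True],
})
print_actions(demo_df)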
def open_pickle_file(pkl_file):
with open(pkl_file, 'rb') as f:
df, meta = cPickle.load(f)
return df, meta
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True,
help="pickle file")
args = parser.parse_args()
df, meta = open_pickle_file(args.file)
print_actions(df)
plot_locking_states(df, meta, num_joints=determine_num_joints(df, meta))
plot_entropy(df,meta, num_joints=determine_num_joints(df, meta))
plot_dependency_posterior(df,meta,-1, num_joints=determine_num_joints(df, meta))
plt.show()
| hildensia/joint_dependency | joint_dependency/interpret_results.py | Python | mit | 3,372 |
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('coapOption')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
from . import coapUtils as u
from . import coapException as e
from . import coapDefines as d
#============================ classes =========================================
class coapOption(object):
def __init__(self,optionNumber):
# store params
self.optionNumber = optionNumber
self.length = 0
#======================== abstract methods ================================
def getPayloadBytes(self):
raise NotImplementedError()
#======================== public ==========================================
def toBytes(self,lastOptionNum):
payload = self.getPayloadBytes()
delta = self.optionNumber-lastOptionNum
# optionDelta and optionDeltaExt fields
if delta<=12:
optionDelta = delta
optionDeltaExt = u.int2buf( delta,0)
elif delta<=(0xff+13):
optionDelta = 13
optionDeltaExt = u.int2buf( delta-13,1)
elif delta<=(0xffff+269):
optionDelta = 14
optionDeltaExt = u.int2buf(delta-269,2)
else:
raise ValueError('delta is too large: {0}'.format(delta))
# optionLength and optionLengthExt fields
if len(payload)<=12:
optionLength = len(payload)
optionLengthExt = u.int2buf( len(payload),0)
elif len(payload)<=(0xff+13):
optionLength = 13
optionLengthExt = u.int2buf( len(payload)-13,1)
elif len(payload)<=(0xffff+269):
optionLength = 14
optionLengthExt = u.int2buf(len(payload)-269,2)
else:
raise ValueError('payload is too long, {0} bytes'.format(len(payload)))
returnVal = []
returnVal += [optionDelta<<4 | optionLength]
returnVal += optionDeltaExt
returnVal += optionLengthExt
returnVal += payload
return returnVal
#=== OPTION_NUM_IFMATCH
#=== OPTION_NUM_URIHOST
#=== OPTION_NUM_ETAG
#=== OPTION_NUM_IFNONEMATCH
#=== OPTION_NUM_URIPORT
#=== OPTION_NUM_LOCATIONPATH
#=== OPTION_NUM_URIPATH
class UriPath(coapOption):
def __init__(self,path):
# initialize parent
coapOption.__init__(self,d.OPTION_NUM_URIPATH)
# store params
self.path = path
def __repr__(self):
return 'UriPath(path={0})'.format(self.path)
def getPayloadBytes(self):
return [ord(b) for b in self.path]
#=== OPTION_NUM_CONTENTFORMAT
class ContentFormat(coapOption):
def __init__(self,cformat):
assert len(cformat)==1
assert cformat[0] in d.FORMAT_ALL
# initialize parent
coapOption.__init__(self,d.OPTION_NUM_CONTENTFORMAT)
# store params
self.format = cformat[0]
def __repr__(self):
return 'ContentFormat(format={0})'.format(self.format)
def getPayloadBytes(self):
return [self.format]
#=== OPTION_NUM_MAXAGE
#=== OPTION_NUM_URIQUERY
#=== OPTION_NUM_ACCEPT
#=== OPTION_NUM_LOCATIONQUERY
#=== OPTION_NUM_BLOCK2
class Block2(coapOption):
def __init__(self,num=None,m=None,szx=None,rawbytes=[]):
if rawbytes:
assert num==None
assert m==None
assert szx==None
else:
assert num!=None
assert m!=None
assert szx!=None
# initialize parent
coapOption.__init__(self,d.OPTION_NUM_BLOCK2)
# store params
if num is not None:
# values of num, m, szx specified explicitly
self.num = num
self.m = m
self.szx = szx
else:
# values of num, m, szx need to be extracted
if len(rawbytes)==1:
self.num = (rawbytes[0]>>4)&0x0f
self.m = (rawbytes[0]>>3)&0x01
self.szx = (rawbytes[0]>>0)&0x07
elif len(rawbytes)==2:
self.num = rawbytes[0]<<8 | (rawbytes[1]>>4)&0x0f
self.m = (rawbytes[1]>>3)&0x01
self.szx = (rawbytes[1]>>0)&0x07
elif len(rawbytes)==3:
self.num = rawbytes[0]<<16 | rawbytes[1]<<8 | (rawbytes[2]>>4)&0x0f
self.m = (rawbytes[2]>>3)&0x01
self.szx = (rawbytes[2]>>0)&0x07
else:
raise ValueError('unexpected Block2 len={0}'.format(len(rawbytes)))
def __repr__(self):
return 'Block2(num={0},m={1},szx={2})'.format(self.num,self.m,self.szx)
def getPayloadBytes(self):
raise NotImplementedError()
#=== OPTION_NUM_BLOCK1
#=== OPTION_NUM_PROXYURI
#=== OPTION_NUM_PROXYSCHEME
#============================ functions =======================================
def parseOption(message,previousOptionNumber):
'''
\brief Extract an option from the beginning of a message.
\param[in] message A list of bytes.
\param[in] previousOptionNumber The option number from the previous option
in the message; set to 0 if this is the first option.
\return A tuple with the following elements:
- element 0 is the option that was extracted. If no option was found
(end of the options or end of the packet), None is returned.
- element 1 is the message without the option.
'''
log.debug(
'parseOption message={0} previousOptionNumber={1}'.format(
u.formatBuf(message),
previousOptionNumber,
)
)
#==== detect end of packet
if len(message)==0:
message = message[1:]
return (None,message)
#==== detect payload marker
if message[0]==d.COAP_PAYLOAD_MARKER:
message = message[1:]
return (None,message)
#==== parse option
# header
optionDelta = (message[0]>>4)&0x0f
optionLength = (message[0]>>0)&0x0f
message = message[1:]
# optionDelta
if optionDelta<=12:
pass
elif optionDelta==13:
if len(message)<1:
raise e.messageFormatError('message too short, {0} bytes: no space for 1B optionDelta'.format(len(message)))
optionDelta = u.buf2int(message[0:1])+13
message = message[1:]
elif optionDelta==14:
if len(message)<2:
raise e.messageFormatError('message too short, {0} bytes: no space for 2B optionDelta'.format(len(message)))
optionDelta = u.buf2int(message[0:2])+269
message = message[2:]
else:
raise e.messageFormatError('invalid optionDelta={0}'.format(optionDelta))
log.debug('optionDelta = {0}'.format(optionDelta))
# optionLength
if optionLength<=12:
pass
elif optionLength==13:
if len(message)<1:
raise e.messageFormatError('message too short, {0} bytes: no space for 1B optionLength'.format(len(message)))
optionLength = u.buf2int(message[0:1])+13
message = message[1:]
elif optionLength==14:
if len(message)<2:
raise e.messageFormatError('message too short, {0} bytes: no space for 2B optionLength'.format(len(message)))
optionLength = u.buf2int(message[0:2])+269
message = message[2:]
else:
raise e.messageFormatError('invalid optionLength={0}'.format(optionLength))
log.debug('optionLength = {0}'.format(optionLength))
# optionValue
if len(message)<optionLength:
raise e.messageFormatError('message too short, {0} bytes: no space for optionValue'.format(len(message)))
optionValue = message[:optionLength]
message = message[optionLength:]
log.debug('optionValue = {0}'.format(u.formatBuf(optionValue)))
#===== create option
optionNumber = previousOptionNumber+optionDelta
log.debug('optionNumber = {0}'.format(optionNumber))
if optionNumber not in d.OPTION_NUM_ALL:
raise e.messageFormatError('invalid option number {0} (0x{0:x})'.format(optionNumber))
if optionNumber==d.OPTION_NUM_URIPATH:
option = UriPath(path=''.join([chr(b) for b in optionValue]))
elif optionNumber==d.OPTION_NUM_CONTENTFORMAT:
option = ContentFormat(cformat=optionValue)
elif optionNumber==d.OPTION_NUM_BLOCK2:
option = Block2(rawbytes=optionValue)
else:
raise NotImplementedError('option {0} not implemented'.format(optionNumber))
return (option,message)
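#============================ illustrative example ============================
# A small round-trip sketch (not part of the original module): encode a
# Uri-Path option with toBytes(), parse it back with parseOption(), and decode
# a raw Block2 option byte. Run with "python -m coap.coapOption" so the
# relative imports above resolve; the package name "coap" is taken from the
# repository layout and is otherwise an assumption.
if __name__ == '__main__':
uripath = UriPath(path='temperature')
rawbytes = uripath.toBytes(lastOptionNum=0)
print('encoded Uri-Path bytes: {0}'.format(rawbytes))
(option, remainder) = parseOption(rawbytes, previousOptionNumber=0)
print('parsed back: {0}, remaining bytes: {1}'.format(option, remainder))
# 0x3d == 0b00111101 -> num=3, m=1, szx=5
print('decoded {0}'.format(Block2(rawbytes=[0x3d])))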
| ccarrizosa/coap | coap/coapOption.py | Python | bsd-3-clause | 9,071 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbican.plugin.crypto import manager
from barbican.tests import utils
class WhenTestingManager(utils.BaseTestCase):
def test_can_override_enabled_plugins(self):
"""Verify can override default configuration for plugin selection."""
# Reset manager singleton otherwise we have test execution
# order problems
manager._PLUGIN_MANAGER = None
manager.CONF.set_override(
"enabled_crypto_plugins",
['foo_plugin'],
group='crypto')
manager_to_test = manager.get_manager()
self.assertIsInstance(
manager_to_test, manager._CryptoPluginManager)
self.assertListEqual(['foo_plugin'],
manager_to_test._names)
| cneill/barbican | barbican/tests/plugin/crypto/test_manager.py | Python | apache-2.0 | 1,298 |
#!/usr/bin/env python3
import argparse
import glob
import os
import struct
import sys
def clamp_to_min_max(value, min, max):
if value > max:
value = max
elif value < min:
value = min
return value
def clamp_to_u8(value):
return clamp_to_min_max(value, 0, 255)
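# Illustrative helper (not used by the script): shows how an RGB triple is
# clamped to 0-255 and packed into the 3-byte big-endian payload that run()
# writes to the driver's mode_static sysfs file.
def example_payload():
values = [clamp_to_u8(v) for v in (300, -20, 128)] # clamps to [255, 0, 128]
return struct.pack(">BBB", *values) # b'\xff\x00\x80'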
def parse_args():
parser = argparse.ArgumentParser(description="Set the static effect")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
parser.add_argument('--colour', required=True, nargs=3, metavar=("R", "G", "B"), type=int, help="Static colour")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
values = map(clamp_to_u8, args.colour)
byte_string = struct.pack(">BBB", *values)
static_mode_filepath = os.path.join(mouse_dir, "mode_static")
with open(static_mode_filepath, 'wb') as static_mode_file:
static_mode_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run() | z3ntu/razer-drivers | scripts/razer_mouse/driver/static_effect.py | Python | gpl-2.0 | 1,698 |
#
# Copyright 2013 IBM Corp.
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Middleware to replace the plain text message body of an error
response with one formatted so the client can parse it.
Based on pecan.middleware.errordocument
"""
import json
from lxml import etree
import webob
from ceilometer.api import hooks
from ceilometer.openstack.common import gettextutils
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class ParsableErrorMiddleware(object):
"""Replace error body with something the client can parse."""
@staticmethod
def best_match_language(accept_language):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not accept_language:
return None
all_languages = gettextutils.get_available_languages('ceilometer')
return accept_language.best_match(all_languages)
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# Request for this state, modified by replace_start_response()
# and used when an error is being reported.
state = {}
def replacement_start_response(status, headers, exc_info=None):
"""Overrides the default response to make errors parsable."""
try:
status_code = int(status.split(' ')[0])
state['status_code'] = status_code
except (ValueError, TypeError): # pragma: nocover
raise Exception((
'ErrorDocumentMiddleware received an invalid '
'status %s' % status
))
else:
if (state['status_code'] / 100) not in (2, 3):
# Remove some headers so we can replace them later
# when we have the full error message and can
# compute the length.
headers = [(h, v)
for (h, v) in headers
if h not in ('Content-Length', 'Content-Type')
]
# Save the headers in case we need to modify them.
state['headers'] = headers
return start_response(status, headers, exc_info)
app_iter = self.app(environ, replacement_start_response)
if (state['status_code'] / 100) not in (2, 3):
req = webob.Request(environ)
# Find the first TranslationHook in the array of hooks and use the
# translatable_error object from it
error = None
for hook in self.app.hooks:
if isinstance(hook, hooks.TranslationHook):
error = hook.local_error.translatable_error
break
user_locale = self.best_match_language(req.accept_language)
if (req.accept.best_match(['application/json', 'application/xml'])
== 'application/xml'):
try:
# simple check xml is valid
fault = etree.fromstring('\n'.join(app_iter))
# Add the translated error to the xml data
if error is not None:
for fault_string in fault.findall('faultstring'):
fault_string.text = (
gettextutils.translate(
error, user_locale))
body = ['<error_message>' + etree.tostring(fault)
+ '</error_message>']
except etree.XMLSyntaxError as err:
LOG.error(_('Error parsing HTTP response: %s') % err)
body = ['<error_message>%s' % state['status_code']
+ '</error_message>']
state['headers'].append(('Content-Type', 'application/xml'))
else:
try:
fault = json.loads('\n'.join(app_iter))
if error is not None and 'faultstring' in fault:
fault['faultstring'] = (
gettextutils.translate(
error, user_locale))
body = [json.dumps({'error_message': fault})]
except ValueError as err:
body = [json.dumps({'error_message': '\n'.join(app_iter)})]
state['headers'].append(('Content-Type', 'application/json'))
state['headers'].append(('Content-Length', str(len(body[0]))))
else:
body = app_iter
return body
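# A minimal usage sketch for best_match_language() (illustrative only, not
# part of ceilometer; it assumes ceilometer's translation catalogs are
# installed): build a request carrying an Accept-Language header and let the
# middleware pick the best available locale.
def _best_match_language_example():
req = webob.Request.blank('/', headers={'Accept-Language': 'en-US, en;q=0.8'})
return ParsableErrorMiddleware.best_match_language(req.accept_language)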
| ChinaMassClouds/copenstack-server | openstack/src/ceilometer-2014.2.2/ceilometer/api/middleware.py | Python | gpl-2.0 | 5,417 |
import os
import re
import gettext
import locale
import threading # libsearchfilter_toggle starts thread libsearchfilter_loop
import operator
import gtk
import gobject
import pango
import ui
import misc
import formatting
import mpdhelper as mpdh
from consts import consts
import breadcrumbs
def library_set_data(album=None, artist=None, genre=None, year=None,
path=None):
if album is not None:
album = unicode(album)
if artist is not None:
artist = unicode(artist)
if genre is not None:
genre = unicode(genre)
if year is not None:
year = unicode(year)
if path is not None:
path = unicode(path)
return (album, artist, genre, year, path)
def library_get_data(data, *args):
name_to_index = {'album': 0, 'artist': 1, 'genre': 2, 'year': 3, 'path': 4}
# Data retrieved from the gtktreeview model is not in
# unicode anymore, so convert it.
retlist = [unicode(data[name_to_index[arg]]) if data[name_to_index[arg]] \
else None for arg in args]
if len(retlist) == 1:
return retlist[0]
else:
return retlist
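# A minimal illustration (not used by sonata itself): library_set_data packs
# fields into the (album, artist, genre, year, path) tuple stored in the tree
# model, and library_get_data pulls them back out by name.
def _library_data_example():
data = library_set_data(artist='Foo', album='Bar')
assert library_get_data(data, 'artist') == u'Foo'
assert library_get_data(data, 'album', 'genre') == [u'Bar', None]
return data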
class Library(object):
def __init__(self, config, mpd, artwork, TAB_LIBRARY, album_filename,
settings_save, filtering_entry_make_red,
filtering_entry_revert_color, filter_key_pressed,
on_add_item, connected, on_library_button_press, new_tab,
get_multicd_album_root_dir):
self.artwork = artwork
self.config = config
self.mpd = mpd
self.librarymenu = None # cyclic dependency, set later
self.album_filename = album_filename
self.settings_save = settings_save
self.filtering_entry_make_red = filtering_entry_make_red
self.filtering_entry_revert_color = filtering_entry_revert_color
self.filter_key_pressed = filter_key_pressed
self.on_add_item = on_add_item
self.connected = connected
self.on_library_button_press = on_library_button_press
self.get_multicd_album_root_dir = get_multicd_album_root_dir
self.NOTAG = _("Untagged")
self.VAstr = _("Various Artists")
self.search_terms = [_('Artist'), _('Title'), _('Album'), _('Genre'),
_('Filename'), _('Everything')]
self.search_terms_mpd = ['artist', 'title', 'album', 'genre', 'file',
'any']
self.libfilterbox_cmd_buf = None
self.libfilterbox_cond = None
self.libfilterbox_source = None
self.prevlibtodo_base = None
self.prevlibtodo_base_results = None
self.prevlibtodo = None
self.save_timeout = None
self.libsearch_last_tooltip = None
self.lib_view_filesystem_cache = None
self.lib_view_artist_cache = None
self.lib_view_genre_cache = None
self.lib_view_album_cache = None
self.lib_list_genres = None
self.lib_list_artists = None
self.lib_list_albums = None
self.lib_list_years = None
self.view_caches_reset()
self.libraryvbox = gtk.VBox()
self.library = ui.treeview()
self.library_selection = self.library.get_selection()
self.breadcrumbs = breadcrumbs.CrumbBox()
self.breadcrumbs.props.spacing = 2
expanderwindow2 = ui.scrollwindow(add=self.library)
self.searchbox = gtk.HBox()
self.searchcombo = ui.combo(items=self.search_terms)
self.searchcombo.set_tooltip_text(_("Search terms"))
self.searchtext = ui.entry()
self.searchtext.set_tooltip_text(_("Search library"))
self.searchbutton = ui.button(img=ui.image(stock=gtk.STOCK_CANCEL),
h=self.searchcombo.size_request()[1])
self.searchbutton.set_no_show_all(True)
self.searchbutton.hide()
self.searchbutton.set_tooltip_text(_("End Search"))
self.libraryview = ui.button(relief=gtk.RELIEF_NONE)
self.libraryview.set_tooltip_text(_("Library browsing view"))
# disabled as breadcrumbs replace this:
# self.searchbox.pack_start(self.libraryview, False, False, 1)
# self.searchbox.pack_start(gtk.VSeparator(), False, False, 2)
self.searchbox.pack_start(ui.label(_("Search:")), False, False, 3)
self.searchbox.pack_start(self.searchtext, True, True, 2)
self.searchbox.pack_start(self.searchcombo, False, False, 2)
self.searchbox.pack_start(self.searchbutton, False, False, 2)
self.libraryvbox.pack_start(self.breadcrumbs, False, False, 2)
self.libraryvbox.pack_start(expanderwindow2, True, True)
self.libraryvbox.pack_start(self.searchbox, False, False, 2)
self.tab = new_tab(self.libraryvbox, gtk.STOCK_HARDDISK, TAB_LIBRARY,
self.library)
# Assign some pixbufs for use in self.library
self.openpb2 = self.library.render_icon(gtk.STOCK_OPEN,
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.harddiskpb2 = self.library.render_icon(gtk.STOCK_HARDDISK,
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.openpb = self.library.render_icon(gtk.STOCK_OPEN,
gtk.ICON_SIZE_MENU)
self.harddiskpb = self.library.render_icon(gtk.STOCK_HARDDISK,
gtk.ICON_SIZE_MENU)
self.albumpb = gtk.gdk.pixbuf_new_from_file_at_size(
album_filename, consts.LIB_COVER_SIZE, consts.LIB_COVER_SIZE)
self.genrepb = self.library.render_icon('gtk-orientation-portrait',
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.artistpb = self.library.render_icon('artist',
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.sonatapb = self.library.render_icon('sonata', gtk.ICON_SIZE_MENU)
# list of the library views: (id, name, icon name, label)
self.VIEWS = [
(consts.VIEW_FILESYSTEM, 'filesystem',
gtk.STOCK_HARDDISK, _("Filesystem")),
(consts.VIEW_ALBUM, 'album',
'album', _("Albums")),
(consts.VIEW_ARTIST, 'artist',
'artist', _("Artists")),
(consts.VIEW_GENRE, 'genre',
gtk.STOCK_ORIENTATION_PORTRAIT, _("Genres")),
]
self.library_view_assign_image()
self.library.connect('row_activated', self.on_library_row_activated)
self.library.connect('button_press_event',
self.on_library_button_press)
self.library.connect('key-press-event', self.on_library_key_press)
self.library.connect('query-tooltip', self.on_library_query_tooltip)
expanderwindow2.connect('scroll-event', self.on_library_scrolled)
self.libraryview.connect('clicked', self.library_view_popup)
self.searchtext.connect('key-press-event',
self.libsearchfilter_key_pressed)
self.searchtext.connect('activate', self.libsearchfilter_on_enter)
self.searchbutton.connect('clicked', self.on_search_end)
self.libfilter_changed_handler = self.searchtext.connect(
'changed', self.libsearchfilter_feed_loop)
searchcombo_changed_handler = self.searchcombo.connect(
'changed', self.on_library_search_combo_change)
# Initialize library data and widget
self.libraryposition = {}
self.libraryselectedpath = {}
self.searchcombo.handler_block(searchcombo_changed_handler)
self.searchcombo.set_active(self.config.last_search_num)
self.searchcombo.handler_unblock(searchcombo_changed_handler)
self.librarydata = gtk.ListStore(gtk.gdk.Pixbuf, gobject.TYPE_PYOBJECT,
str)
self.library.set_model(self.librarydata)
self.library.set_search_column(2)
self.librarycell = gtk.CellRendererText()
self.librarycell.set_property("ellipsize", pango.ELLIPSIZE_END)
self.libraryimg = gtk.CellRendererPixbuf()
self.librarycolumn = gtk.TreeViewColumn()
self.librarycolumn.pack_start(self.libraryimg, False)
self.librarycolumn.pack_start(self.librarycell, True)
self.librarycolumn.set_attributes(self.libraryimg, pixbuf=0)
self.librarycolumn.set_attributes(self.librarycell, markup=2)
self.librarycolumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.library.append_column(self.librarycolumn)
self.library_selection.set_mode(gtk.SELECTION_MULTIPLE)
def get_libraryactions(self):
return [(name + 'view', icon, label,
None, None, self.on_libraryview_chosen)
for _view, name, icon, label in self.VIEWS]
def get_model(self):
return self.librarydata
def get_widgets(self):
return self.libraryvbox
def get_treeview(self):
return self.library
def get_selection(self):
return self.library_selection
def set_librarymenu(self, librarymenu):
self.librarymenu = librarymenu
self.librarymenu.attach_to_widget(self.libraryview, None)
def library_view_popup(self, button):
self.librarymenu.popup(None, None, self.library_view_position_menu, 1,
0, button)
def library_view_position_menu(self, _menu, button):
x, y, _width, height = button.get_allocation()
return (self.config.x + x, self.config.y + y + height, True)
def on_libraryview_chosen(self, action):
if self.search_visible():
self.on_search_end(None)
if action.get_name() == 'filesystemview':
self.config.lib_view = consts.VIEW_FILESYSTEM
elif action.get_name() == 'artistview':
self.config.lib_view = consts.VIEW_ARTIST
elif action.get_name() == 'genreview':
self.config.lib_view = consts.VIEW_GENRE
elif action.get_name() == 'albumview':
self.config.lib_view = consts.VIEW_ALBUM
self.library.grab_focus()
self.library_view_assign_image()
self.libraryposition = {}
self.libraryselectedpath = {}
self.library_browse(self.library_set_data(path="/"))
try:
if len(self.librarydata) > 0:
self.library_selection.unselect_range((0,),
(len(self.librarydata)-1,))
except:
pass
gobject.idle_add(self.library.scroll_to_point, 0, 0)
def library_view_assign_image(self):
_view, _name, icon, label = [v for v in self.VIEWS
if v[0] == self.config.lib_view][0]
self.libraryview.set_image(ui.image(stock=icon))
self.libraryview.set_label(" " + label)
def view_caches_reset(self):
# We should call this on first load and whenever mpd is
# updated.
self.lib_view_filesystem_cache = None
self.lib_view_artist_cache = None
self.lib_view_genre_cache = None
self.lib_view_album_cache = None
self.lib_list_genres = None
self.lib_list_artists = None
self.lib_list_albums = None
self.lib_list_years = None
def on_library_scrolled(self, _widget, _event):
try:
# Use gobject.idle_add so that we can get the visible
# state of the treeview
gobject.idle_add(self._on_library_scrolled)
except:
pass
def _on_library_scrolled(self):
if not self.config.show_covers:
return
# This avoids a warning about a NULL node in get_visible_range
if not self.library.props.visible:
return
vis_range = self.library.get_visible_range()
if vis_range is None:
return
try:
start_row = int(vis_range[0][0])
end_row = int(vis_range[1][0])
except IndexError:
# get_visible_range failed
return
self.artwork.library_artwork_update(self.librarydata, start_row,
end_row, self.albumpb)
def library_browse(self, _widget=None, root=None):
# Populates the library list with entries
if not self.connected():
return
if root is None or (self.config.lib_view == consts.VIEW_FILESYSTEM \
and self.library_get_data(root, 'path') is None):
root = self.library_set_data(path="/")
if self.config.wd is None or (self.config.lib_view == \
consts.VIEW_FILESYSTEM and \
self.library_get_data(self.config.wd,
'path') is None):
self.config.wd = self.library_set_data(path="/")
prev_selection = []
prev_selection_root = False
prev_selection_parent = False
if root == self.config.wd:
# This will happen when the database is updated. So, lets save
# the current selection in order to try to re-select it after
# the update is over.
model, selected = self.library_selection.get_selected_rows()
for path in selected:
if model.get_value(model.get_iter(path), 2) == "/":
prev_selection_root = True
elif model.get_value(model.get_iter(path), 2) == "..":
prev_selection_parent = True
else:
prev_selection.append(model.get_value(model.get_iter(path),
1))
self.libraryposition[self.config.wd] = \
self.library.get_visible_rect()[1]
path_updated = True
else:
path_updated = False
new_level = self.library_get_data_level(root)
curr_level = self.library_get_data_level(self.config.wd)
# The logic below is more consistent with, e.g., thunar.
if new_level > curr_level:
# Save position and row for where we just were if we've
# navigated into a sub-directory:
self.libraryposition[self.config.wd] = \
self.library.get_visible_rect()[1]
model, rows = self.library_selection.get_selected_rows()
if len(rows) > 0:
data = self.librarydata.get_value(
self.librarydata.get_iter(rows[0]), 2)
if not data in ("..", "/"):
self.libraryselectedpath[self.config.wd] = rows[0]
elif (self.config.lib_view == consts.VIEW_FILESYSTEM and \
root != self.config.wd) \
or (self.config.lib_view != consts.VIEW_FILESYSTEM and new_level != \
curr_level):
# If we've navigated to a parent directory, don't save
# anything so that the user will enter that subdirectory
# again at the top position with nothing selected
self.libraryposition[self.config.wd] = 0
self.libraryselectedpath[self.config.wd] = None
# In case sonata is killed or crashes, we'll save the library state
# in 5 seconds (first removing any current settings_save timeouts)
if self.config.wd != root:
try:
gobject.source_remove(self.save_timeout)
except:
pass
self.save_timeout = gobject.timeout_add(5000, self.settings_save)
self.config.wd = root
self.library.freeze_child_notify()
self.librarydata.clear()
# Populate treeview with data:
bd = []
while len(bd) == 0:
if self.config.lib_view == consts.VIEW_FILESYSTEM:
bd = self.library_populate_filesystem_data(
self.library_get_data(self.config.wd, 'path'))
elif self.config.lib_view == consts.VIEW_ALBUM:
album, artist, year = self.library_get_data(self.config.wd,
'album', 'artist',
'year')
if album is not None:
bd = self.library_populate_data(artist=artist, album=album,
year=year)
else:
bd = self.library_populate_toplevel_data(albumview=True)
elif self.config.lib_view == consts.VIEW_ARTIST:
artist, album, year = self.library_get_data(self.config.wd,
'artist', 'album',
'year')
if artist is not None and album is not None:
bd = self.library_populate_data(artist=artist, album=album,
year=year)
elif artist is not None:
bd = self.library_populate_data(artist=artist)
else:
bd = self.library_populate_toplevel_data(artistview=True)
elif self.config.lib_view == consts.VIEW_GENRE:
genre, artist, album, year = self.library_get_data(
self.config.wd, 'genre', 'artist', 'album', 'year')
if genre is not None and artist is not None and album is \
not None:
bd = self.library_populate_data(genre=genre, artist=artist,
album=album, year=year)
elif genre is not None and artist is not None:
bd = self.library_populate_data(genre=genre, artist=artist)
elif genre is not None:
bd = self.library_populate_data(genre=genre)
else:
bd = self.library_populate_toplevel_data(genreview=True)
if len(bd) == 0:
# Nothing found; go up a level until we reach the top level
# or results are found
last_wd = self.config.wd
self.config.wd = self.library_get_parent()
if self.config.wd == last_wd:
break
for _sort, path in bd:
self.librarydata.append(path)
self.library.thaw_child_notify()
# Scroll back to set view for current dir:
self.library.realize()
gobject.idle_add(self.library_set_view, not path_updated)
if len(prev_selection) > 0 or prev_selection_root or \
prev_selection_parent:
# Retain pre-update selection:
self.library_retain_selection(prev_selection, prev_selection_root,
prev_selection_parent)
# Update library artwork as necessary
self.on_library_scrolled(None, None)
self.update_breadcrumbs()
def update_breadcrumbs(self):
# remove previous buttons
for b in self.breadcrumbs:
self.breadcrumbs.remove(b)
# add the views button first
b = ui.button(text=_(" v "), can_focus=False, relief=gtk.RELIEF_NONE)
b.connect('clicked', self.library_view_popup)
self.breadcrumbs.pack_start(b, False, False)
b.show()
# add the ellipsis explicitly XXX make this unnecessary
b = ui.label("...")
self.breadcrumbs.pack_start(b, False, False)
b.show()
# find info for current view
view, _name, icon, label = [v for v in self.VIEWS
if v[0] == self.config.lib_view][0]
# the first crumb is the root of the current view
crumbs = [(label, icon, None, self.library_set_data(path='/'))]
# rest of the crumbs are specific to the view
if view == consts.VIEW_FILESYSTEM:
path = self.library_get_data(self.config.wd, 'path')
if path and path != '/':
parts = path.split('/')
else:
parts = [] # no crumbs for /
# append a crumb for each part
for i, part in enumerate(parts):
partpath = '/'.join(parts[:i + 1])
target = self.library_set_data(path=partpath)
crumbs.append((part, gtk.STOCK_OPEN, None, target))
else:
if view == consts.VIEW_ALBUM:
# We don't want to show an artist button in album view
keys = 'genre', 'album'
nkeys = 2
else:
keys = 'genre', 'artist', 'album'
nkeys = 3
parts = self.library_get_data(self.config.wd, *keys)
# append a crumb for each part
for i, key, part in zip(range(nkeys), keys, parts):
if part is None:
continue
partdata = dict(zip(keys, parts)[:i + 1])
target = self.library_set_data(**partdata)
pb, icon = None, None
if key == 'album':
# Album artwork, with self.albumpb as a backup:
artist, album, path = self.library_get_data(self.config.wd,
'artist', 'album', 'path')
cache_data = self.library_set_data(artist=artist,
album=album, path=path)
pb = self.artwork.get_library_artwork_cached_pb(cache_data,
None)
if pb is None:
icon = 'album'
elif key == 'artist':
icon = 'artist'
else:
icon = gtk.STOCK_ORIENTATION_PORTRAIT
crumbs.append((part, icon, pb, target))
# add a button for each crumb
for crumb in crumbs:
text, icon, pb, target = crumb
text = misc.escape_html(text)
if crumb is crumbs[-1]:
text = "<b>%s</b>" % text
label = ui.label(markup=text)
if icon:
image = ui.image(stock=icon)
elif pb:
pb = pb.scale_simple(16, 16, gtk.gdk.INTERP_HYPER)
image = ui.image(pb=pb)
b = breadcrumbs.CrumbButton(image, label)
if crumb is crumbs[-1]:
# FIXME makes the button request minimal space:
# label.props.ellipsize = pango.ELLIPSIZE_END
b.props.active = True
# FIXME why doesn't the tooltip show?
b.set_tooltip_text(label.get_label())
b.connect('toggled', self.library_browse, target)
self.breadcrumbs.pack_start(b, False, False)
b.show_all()
def library_populate_add_parent_rows(self):
return [] # disabled as breadcrumbs replace these
if self.config.lib_view == consts.VIEW_FILESYSTEM:
bd = [('0', [self.harddiskpb, self.library_set_data(path='/'),
'/'])]
bd += [('1', [self.openpb, self.library_set_data(path='..'),
'..'])]
else:
bd = [('0', [self.harddiskpb2, self.library_set_data(path='/'),
'/'])]
bd += [('1', [self.openpb2, self.library_set_data(path='..'),
'..'])]
return bd
def library_populate_filesystem_data(self, path):
# List all dirs/files at path
bd = []
if path == '/' and self.lib_view_filesystem_cache is not None:
# Use cache if possible...
bd = self.lib_view_filesystem_cache
else:
for item in self.mpd.lsinfo(path):
if 'directory' in item:
name = mpdh.get(item, 'directory').split('/')[-1]
data = self.library_set_data(path=mpdh.get(item,
'directory'))
bd += [('d' + unicode(name).lower(), [self.openpb, data,
misc.escape_html(name)])]
elif 'file' in item:
data = self.library_set_data(path=mpdh.get(item, 'file'))
bd += [('f' + unicode(mpdh.get(item, 'file')).lower(),
[self.sonatapb, data,
formatting.parse(self.config.libraryformat, item,
True)])]
bd.sort(key=operator.itemgetter(0))
if path != '/' and len(bd) > 0:
bd = self.library_populate_add_parent_rows() + bd
if path == '/':
self.lib_view_filesystem_cache = bd
return bd
def library_get_toplevel_cache(self, genreview=False, artistview=False,
albumview=False):
if genreview and self.lib_view_genre_cache is not None:
bd = self.lib_view_genre_cache
elif artistview and self.lib_view_artist_cache is not None:
bd = self.lib_view_artist_cache
elif albumview and self.lib_view_album_cache is not None:
bd = self.lib_view_album_cache
else:
return None
# Check if we can update any artwork:
for _sort, info in bd:
pb = info[0]
if pb == self.albumpb:
artist, album, path = self.library_get_data(info[1], 'artist',
'album', 'path')
key = self.library_set_data(path=path, artist=artist,
album=album)
pb2 = self.artwork.get_library_artwork_cached_pb(key, None)
if pb2 is not None:
info[0] = pb2
return bd
def library_populate_toplevel_data(self, genreview=False, artistview=False,
albumview=False):
bd = self.library_get_toplevel_cache(genreview, artistview, albumview)
if bd is not None:
# We have our cached data, woot.
return bd
bd = []
if genreview or artistview:
# Only for artist/genre views, album view is handled differently
# since multiple artists can have the same album name
if genreview:
items = self.library_return_list_items('genre')
pb = self.genrepb
else:
items = self.library_return_list_items('artist')
pb = self.artistpb
if not (self.NOTAG in items):
items.append(self.NOTAG)
for item in items:
if genreview:
playtime, num_songs = self.library_return_count(genre=item)
data = self.library_set_data(genre=item)
else:
playtime, num_songs = self.library_return_count(
artist=item)
data = self.library_set_data(artist=item)
if num_songs > 0:
display = misc.escape_html(item)
display += self.add_display_info(num_songs,
int(playtime) / 60)
bd += [(misc.lower_no_the(item), [pb, data, display])]
elif albumview:
albums = []
untagged_found = False
for item in self.mpd.listallinfo('/'):
if 'file' in item and 'album' in item:
album = mpdh.get(item, 'album')
artist = mpdh.get(item, 'artist', self.NOTAG)
year = mpdh.get(item, 'date', self.NOTAG)
path = self.get_multicd_album_root_dir(
os.path.dirname(mpdh.get(item, 'file')))
data = self.library_set_data(album=album, artist=artist,
year=year, path=path)
albums.append(data)
if album == self.NOTAG:
untagged_found = True
if not untagged_found:
albums.append(self.library_set_data(album=self.NOTAG))
albums = misc.remove_list_duplicates(albums, case=False)
albums = self.list_identify_VA_albums(albums)
for item in albums:
album, artist, year, path = self.library_get_data(item,
'album',
'artist',
'year',
'path')
playtime, num_songs = self.library_return_count(artist=artist,
album=album,
year=year)
if num_songs > 0:
data = self.library_set_data(artist=artist, album=album,
year=year, path=path)
display = misc.escape_html(album)
if artist and year and len(artist) > 0 and len(year) > 0 \
and artist != self.NOTAG and year != self.NOTAG:
display += " <span weight='light'>(%s, %s)</span>" \
% (misc.escape_html(artist),
misc.escape_html(year))
elif artist and len(artist) > 0 and artist != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(artist)
elif year and len(year) > 0 and year != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(year)
display += self.add_display_info(num_songs,
int(playtime) / 60)
bd += [(misc.lower_no_the(album), [self.albumpb, data,
display])]
bd.sort(locale.strcoll, key=operator.itemgetter(0))
if genreview:
self.lib_view_genre_cache = bd
elif artistview:
self.lib_view_artist_cache = bd
elif albumview:
self.lib_view_album_cache = bd
return bd
def list_identify_VA_albums(self, albums):
for i in range(len(albums)):
if i + consts.NUM_ARTISTS_FOR_VA - 1 > len(albums)-1:
break
VA = False
for j in range(1, consts.NUM_ARTISTS_FOR_VA):
if unicode(self.library_get_data(albums[i], 'album')).lower() \
!= unicode(self.library_get_data(albums[i + j],
'album')).lower() or \
self.library_get_data(albums[i], 'year') != \
self.library_get_data(albums[i + j], 'year') or \
self.library_get_data(albums[i], 'path') != \
self.library_get_data(albums[i + j], 'path'):
break
if unicode(self.library_get_data(albums[i], 'artist')) == \
unicode(self.library_get_data(albums[i + j], 'artist')):
albums.pop(i + j)
break
if j == consts.NUM_ARTISTS_FOR_VA - 1:
VA = True
if VA:
album, year, path = self.library_get_data(albums[i], 'album',
'year', 'path')
artist = self.VAstr
albums[i] = self.library_set_data(album=album, artist=artist,
year=year, path=path)
j = 1
while i + j <= len(albums) - 1:
if unicode(self.library_get_data(albums[i],
'album')).lower() == \
unicode(self.library_get_data(albums[i + j],
'album')).lower() \
and self.library_get_data(albums[i], 'year') == \
self.library_get_data(albums[i + j], 'year'):
albums.pop(i + j)
else:
break
return albums
def get_VAstr(self):
return self.VAstr
def library_populate_data(self, genre=None, artist=None, album=None,
year=None):
# Create treeview model info
bd = []
if genre is not None and artist is None and album is None:
# Artists within a genre
artists = self.library_return_list_items('artist', genre=genre)
if len(artists) > 0:
if not self.NOTAG in artists:
artists.append(self.NOTAG)
for artist in artists:
playtime, num_songs = self.library_return_count(
genre=genre, artist=artist)
if num_songs > 0:
display = misc.escape_html(artist)
display += self.add_display_info(num_songs,
int(playtime) / 60)
data = self.library_set_data(genre=genre,
artist=artist)
bd += [(misc.lower_no_the(artist),
[self.artistpb, data, display])]
elif artist is not None and album is None:
# Albums/songs within an artist and possibly genre
# Albums first:
if genre is not None:
albums = self.library_return_list_items('album', genre=genre,
artist=artist)
else:
albums = self.library_return_list_items('album', artist=artist)
for album in albums:
if genre is not None:
years = self.library_return_list_items('date', genre=genre,
artist=artist,
album=album)
else:
years = self.library_return_list_items('date',
artist=artist,
album=album)
if not self.NOTAG in years:
years.append(self.NOTAG)
for year in years:
if genre is not None:
playtime, num_songs = self.library_return_count(
genre=genre, artist=artist, album=album, year=year)
if num_songs > 0:
files = self.library_return_list_items(
'file', genre=genre, artist=artist,
album=album, year=year)
path = os.path.dirname(files[0])
data = self.library_set_data(genre=genre,
artist=artist,
album=album,
year=year, path=path)
else:
playtime, num_songs = self.library_return_count(
artist=artist, album=album, year=year)
if num_songs > 0:
files = self.library_return_list_items(
'file', artist=artist, album=album, year=year)
path = os.path.dirname(files[0])
data = self.library_set_data(artist=artist,
album=album,
year=year, path=path)
if num_songs > 0:
cache_data = self.library_set_data(artist=artist,
album=album,
path=path)
display = misc.escape_html(album)
if year and len(year) > 0 and year != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(year)
display += self.add_display_info(num_songs,
int(playtime) / 60)
ordered_year = year
if ordered_year == self.NOTAG:
ordered_year = '9999'
pb = self.artwork.get_library_artwork_cached_pb(
cache_data, self.albumpb)
bd += [(ordered_year + misc.lower_no_the(album),
[pb, data, display])]
# Now, songs not in albums:
bd += self.library_populate_data_songs(genre, artist, self.NOTAG,
None)
else:
# Songs within an album, artist, year, and possibly genre
bd += self.library_populate_data_songs(genre, artist, album, year)
if len(bd) > 0:
bd = self.library_populate_add_parent_rows() + bd
bd.sort(locale.strcoll, key=operator.itemgetter(0))
return bd
def library_populate_data_songs(self, genre, artist, album, year):
bd = []
if genre is not None:
songs, _playtime, _num_songs = \
self.library_return_search_items(genre=genre, artist=artist,
album=album, year=year)
else:
songs, _playtime, _num_songs = self.library_return_search_items(
artist=artist, album=album, year=year)
for song in songs:
data = self.library_set_data(path=mpdh.get(song, 'file'))
track = mpdh.get(song, 'track', '99', False, 2)
disc = mpdh.get(song, 'disc', '99', False, 2)
try:
bd += [('f' + disc + track + misc.lower_no_the(
mpdh.get(song, 'title')), [self.sonatapb, data,
formatting.parse(
self.config.libraryformat,
song, True)])]
except:
bd += [('f' + disc + track + \
unicode(mpdh.get(song, 'file')).lower(),
[self.sonatapb, data,
formatting.parse(self.config.libraryformat, song,
True)])]
return bd
def library_return_list_items(self, itemtype, genre=None, artist=None,
album=None, year=None, ignore_case=True):
# Returns all items of tag 'itemtype', in alphabetical order,
# using mpd's 'list'. If searchtype is passed, use
# a case insensitive search, via additional 'list'
# queries, since using a single 'list' call will be
# case sensitive.
results = []
searches = self.library_compose_list_count_searchlist(genre, artist,
album, year)
if len(searches) > 0:
for s in searches:
# If we have untagged tags (''), use search instead
# of list because list will not return anything.
if '' in s:
items = []
songs, playtime, num_songs = \
self.library_return_search_items(genre, artist,
album, year)
for song in songs:
items.append(mpdh.get(song, itemtype))
else:
items = self.mpd.list(itemtype, *s)
for item in items:
if len(item) > 0:
results.append(item)
else:
if genre is None and artist is None and album is None and year \
is None:
for item in self.mpd.list(itemtype):
if len(item) > 0:
results.append(item)
if ignore_case:
results = misc.remove_list_duplicates(results, case=False)
results.sort(locale.strcoll)
return results
def library_return_count(self, genre=None, artist=None, album=None,
year=None):
# Because mpd's 'count' is case sensitive, we have to
# determine all equivalent items (case insensitive) and
# call 'count' for each of them. Using 'list' + 'count'
# involves much less data to be transferred back and
# forth than to use 'search' and count manually.
searches = self.library_compose_list_count_searchlist(genre, artist,
album, year)
playtime = 0
num_songs = 0
for s in searches:
if '' in s and self.mpd.version <= (0, 13):
# Can't return count for empty tags, use search instead:
_results, playtime, num_songs = \
self.library_return_search_items(
genre=genre, artist=artist, album=album, year=year)
else:
count = self.mpd.count(*s)
playtime += mpdh.get(count, 'playtime', 0, True)
num_songs += mpdh.get(count, 'songs', 0, True)
return (playtime, num_songs)
def library_compose_list_count_searchlist_single(self, search, typename,
cached_list, searchlist):
s = []
skip_type = (typename == 'artist' and search == self.VAstr)
if search is not None and not skip_type:
if search == self.NOTAG:
itemlist = [search, '']
else:
itemlist = []
if cached_list is None:
cached_list = self.library_return_list_items(typename,
ignore_case=False)
# This allows us to match untagged items
cached_list.append('')
for item in cached_list:
if unicode(item).lower() == unicode(search).lower():
itemlist.append(item)
if len(itemlist) == 0:
# There should be no results!
return None, cached_list
for item in itemlist:
if len(searchlist) > 0:
for item2 in searchlist:
s.append(item2 + (typename, item))
else:
s.append((typename, item))
else:
s = searchlist
return s, cached_list
def library_compose_list_count_searchlist(self, genre=None, artist=None,
album=None, year=None):
s = []
s, self.lib_list_genres = \
self.library_compose_list_count_searchlist_single(
genre, 'genre', self.lib_list_genres, s)
if s is None:
return []
s, self.lib_list_artists = \
self.library_compose_list_count_searchlist_single(
artist, 'artist', self.lib_list_artists, s)
if s is None:
return []
s, self.lib_list_albums = \
self.library_compose_list_count_searchlist_single(
album, 'album', self.lib_list_albums, s)
if s is None:
return []
s, self.lib_list_years = \
self.library_compose_list_count_searchlist_single(
year, 'date', self.lib_list_years, s)
if s is None:
return []
return s
def library_compose_search_searchlist_single(self, search, typename,
searchlist):
s = []
skip_type = (typename == 'artist' and search == self.VAstr)
if search is not None and not skip_type:
if search == self.NOTAG:
itemlist = [search, '']
else:
itemlist = [search]
for item in itemlist:
if len(searchlist) > 0:
for item2 in searchlist:
s.append(item2 + (typename, item))
else:
s.append((typename, item))
else:
s = searchlist
return s
def library_compose_search_searchlist(self, genre=None, artist=None,
album=None, year=None):
s = []
s = self.library_compose_search_searchlist_single(genre, 'genre', s)
s = self.library_compose_search_searchlist_single(album, 'album', s)
s = self.library_compose_search_searchlist_single(artist, 'artist', s)
s = self.library_compose_search_searchlist_single(year, 'date', s)
return s
def library_return_search_items(self, genre=None, artist=None, album=None,
year=None):
# Returns all mpd items, using mpd's 'search', along with
# playtime and num_songs.
searches = self.library_compose_search_searchlist(genre, artist, album,
year)
for s in searches:
args_tuple = tuple(map(str, s))
playtime = 0
num_songs = 0
results = []
if '' in s and self.mpd.version <= (0, 13):
# Can't search for empty tags, search broader and
# filter instead:
# Strip empty tag args from tuple:
pos = list(args_tuple).index('')
strip_type = list(args_tuple)[pos-1]
new_lst = []
for i, item in enumerate(list(args_tuple)):
if i != pos and i != pos-1:
new_lst.append(item)
args_tuple = tuple(new_lst)
else:
strip_type = None
if len(args_tuple) == 0:
return None, 0, 0
items = self.mpd.search(*args_tuple)
if items is not None:
for item in items:
if strip_type is None or (strip_type is not None and not \
strip_type in item.keys()):
match = True
pos = 0
# Ensure that if, e.g., "foo" is searched,
# "foobar" isn't returned too
for arg in args_tuple[::2]:
if arg in item and \
unicode(mpdh.get(item, arg)).upper() != \
unicode(args_tuple[pos + 1]).upper():
match = False
break
pos += 2
if match:
results.append(item)
num_songs += 1
playtime += mpdh.get(item, 'time', 0, True)
return (results, int(playtime), num_songs)
def add_display_info(self, num_songs, playtime):
return "\n<small><span weight='light'>%s %s, %s %s</span></small>" \
% (num_songs, gettext.ngettext('song', 'songs', num_songs),
playtime, gettext.ngettext('minute', 'minutes', playtime))
def library_retain_selection(self, prev_selection, prev_selection_root,
prev_selection_parent):
# Unselect everything:
if len(self.librarydata) > 0:
self.library_selection.unselect_range((0,),
(len(self.librarydata) - 1,))
# Now attempt to retain the selection from before the update:
for value in prev_selection:
for row in self.librarydata:
if value == row[1]:
self.library_selection.select_path(row.path)
break
if prev_selection_root:
self.library_selection.select_path((0,))
if prev_selection_parent:
self.library_selection.select_path((1,))
def library_set_view(self, select_items=True):
# select_items should be false if the same directory has merely
# been refreshed (updated)
try:
if self.config.wd in self.libraryposition:
self.library.scroll_to_point(
-1, self.libraryposition[self.config.wd])
else:
self.library.scroll_to_point(0, 0)
except:
self.library.scroll_to_point(0, 0)
# Select and focus previously selected item
if select_items:
if self.config.wd in self.libraryselectedpath:
try:
if self.libraryselectedpath[self.config.wd]:
self.library_selection.select_path(
self.libraryselectedpath[self.config.wd])
self.library.grab_focus()
except:
pass
def library_set_data(self, *args, **kwargs):
return library_set_data(*args, **kwargs)
def library_get_data(self, data, *args):
return library_get_data(data, *args)
def library_get_data_level(self, data):
if self.config.lib_view == consts.VIEW_FILESYSTEM:
# Returns the number of directories down:
if library_get_data(data, 'path') == '/':
# Every other path doesn't start with "/", so
# start the level numbering at -1
return -1
else:
return library_get_data(data, 'path').count("/")
else:
# Returns the number of items stored in data, excluding
# the path:
level = 0
album, artist, genre, year = library_get_data(
data, 'album', 'artist', 'genre', 'year')
for item in [album, artist, genre, year]:
if item is not None:
level += 1
return level
def on_library_key_press(self, widget, event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
self.on_library_row_activated(widget, widget.get_cursor()[0])
return True
def on_library_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
if keyboard_mode or not self.search_visible():
widget.set_tooltip_text(None)
return False
bin_x, bin_y = widget.convert_widget_to_bin_window_coords(x, y)
pathinfo = widget.get_path_at_pos(bin_x, bin_y)
if not pathinfo:
widget.set_tooltip_text(None)
# If the user hovers over an empty row and then back to
# a row with a search result, this will ensure the tooltip
# shows up again:
gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
keyboard_mode, None)
return False
treepath, _col, _x2, _y2 = pathinfo
i = self.librarydata.get_iter(treepath[0])
path = misc.escape_html(self.library_get_data(
self.librarydata.get_value(i, 1), 'path'))
song = self.librarydata.get_value(i, 2)
new_tooltip = "<b>%s:</b> %s\n<b>%s:</b> %s" \
% (_("Song"), song, _("Path"), path)
if new_tooltip != self.libsearch_last_tooltip:
self.libsearch_last_tooltip = new_tooltip
self.library.set_property('has-tooltip', False)
gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
keyboard_mode, tooltip)
gobject.idle_add(widget.set_tooltip_markup, new_tooltip)
return
self.libsearch_last_tooltip = new_tooltip
return False #api says we should return True, but this doesn't work?
def library_search_tooltips_enable(self, widget, x, y, keyboard_mode,
tooltip):
self.library.set_property('has-tooltip', True)
if tooltip is not None:
self.on_library_query_tooltip(widget, x, y, keyboard_mode, tooltip)
def on_library_row_activated(self, _widget, path, _column=0):
if path is None:
# Default to last item in selection:
_model, selected = self.library_selection.get_selected_rows()
if len(selected) >= 1:
path = selected[0]
else:
return
value = self.librarydata.get_value(self.librarydata.get_iter(path), 1)
icon = self.librarydata.get_value(self.librarydata.get_iter(path), 0)
if icon == self.sonatapb:
# Song found, add item
self.on_add_item(self.library)
elif value == self.library_set_data(path=".."):
self.library_browse_parent(None)
else:
self.library_browse(None, value)
def library_get_parent(self):
if self.config.lib_view == consts.VIEW_ALBUM:
value = self.library_set_data(path="/")
elif self.config.lib_view == consts.VIEW_ARTIST:
album, artist = self.library_get_data(self.config.wd, 'album',
'artist')
if album is not None:
value = self.library_set_data(artist=artist)
else:
value = self.library_set_data(path="/")
elif self.config.lib_view == consts.VIEW_GENRE:
album, artist, genre = self.library_get_data(
self.config.wd, 'album', 'artist', 'genre')
if album is not None:
value = self.library_set_data(genre=genre, artist=artist)
elif artist is not None:
value = self.library_set_data(genre=genre)
else:
value = self.library_set_data(path="/")
else:
newvalue = '/'.join(
self.library_get_data(self.config.wd, 'path').split('/')[:-1])\
or '/'
value = self.library_set_data(path=newvalue)
return value
def library_browse_parent(self, _action):
if not self.search_visible():
if self.library.is_focus():
value = self.library_get_parent()
self.library_browse(None, value)
return True
def not_parent_is_selected(self):
# Returns True if something is selected and it's not
# ".." or "/":
model, rows = self.library_selection.get_selected_rows()
for path in rows:
i = model.get_iter(path)
value = model.get_value(i, 2)
if value != ".." and value != "/":
return True
return False
def get_path_child_filenames(self, return_root, selected_only=True):
# If return_root=True, return main directories whenever possible
# instead of individual songs in order to reduce the number of
# mpd calls we need to make. We won't want this behavior in some
# instances, like when we want all end files for editing tags
items = []
if selected_only:
model, rows = self.library_selection.get_selected_rows()
else:
model = self.librarydata
rows = [(i,) for i in range(len(model))]
for path in rows:
i = model.get_iter(path)
pb = model.get_value(i, 0)
data = model.get_value(i, 1)
value = model.get_value(i, 2)
if value != ".." and value != "/":
album, artist, year, genre, path = self.library_get_data(
data, 'album', 'artist', 'year', 'genre', 'path')
if path is not None and album is None and artist is None and \
year is None and genre is None:
if pb == self.sonatapb:
# File
items.append(path)
else:
# Directory
if not return_root:
items += self.library_get_path_files_recursive(
path)
else:
items.append(path)
else:
results, _playtime, _num_songs = \
self.library_return_search_items(
genre=genre, artist=artist, album=album,
year=year)
for item in results:
items.append(mpdh.get(item, 'file'))
# Make sure we don't have any EXACT duplicates:
items = misc.remove_list_duplicates(items, case=True)
return items
def library_get_path_files_recursive(self, path):
results = []
for item in self.mpd.lsinfo(path):
if 'directory' in item:
results = results + self.library_get_path_files_recursive(
mpdh.get(item, 'directory'))
elif 'file' in item:
results.append(mpdh.get(item, 'file'))
return results
def on_library_search_combo_change(self, _combo=None):
self.config.last_search_num = self.searchcombo.get_active()
if not self.search_visible():
return
self.prevlibtodo = ""
self.prevlibtodo_base = "__"
self.libsearchfilter_feed_loop(self.searchtext)
def on_search_end(self, _button, move_focus=True):
if self.search_visible():
self.libsearchfilter_toggle(move_focus)
def search_visible(self):
return self.searchbutton.get_property('visible')
def libsearchfilter_toggle(self, move_focus):
if not self.search_visible() and self.connected():
self.library.set_property('has-tooltip', True)
ui.show(self.searchbutton)
self.prevlibtodo = 'foo'
self.prevlibtodo_base = "__"
self.prevlibtodo_base_results = []
# extra thread for background search work,
# synchronized with a condition and its internal mutex
self.libfilterbox_cond = threading.Condition()
self.libfilterbox_cmd_buf = self.searchtext.get_text()
qsearch_thread = threading.Thread(target=self.libsearchfilter_loop)
qsearch_thread.setDaemon(True)
qsearch_thread.start()
elif self.search_visible():
ui.hide(self.searchbutton)
self.searchtext.handler_block(self.libfilter_changed_handler)
self.searchtext.set_text("")
self.searchtext.handler_unblock(self.libfilter_changed_handler)
self.libsearchfilter_stop_loop()
self.library_browse(root=self.config.wd)
if move_focus:
self.library.grab_focus()
def libsearchfilter_feed_loop(self, editable):
if not self.search_visible():
self.libsearchfilter_toggle(None)
        # Let's only trigger the searchfilter_loop if 300ms pass
        # without a change in the gtk.Entry
try:
gobject.source_remove(self.libfilterbox_source)
except:
pass
self.libfilterbox_source = gobject.timeout_add(
300, self.libsearchfilter_start_loop, editable)
def libsearchfilter_start_loop(self, editable):
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = editable.get_text()
self.libfilterbox_cond.notifyAll()
self.libfilterbox_cond.release()
def libsearchfilter_stop_loop(self):
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = '$$$QUIT###'
self.libfilterbox_cond.notifyAll()
self.libfilterbox_cond.release()
def libsearchfilter_loop(self):
while True:
# copy the last command or pattern safely
self.libfilterbox_cond.acquire()
try:
while(self.libfilterbox_cmd_buf == '$$$DONE###'):
self.libfilterbox_cond.wait()
todo = self.libfilterbox_cmd_buf
self.libfilterbox_cond.release()
except:
todo = self.libfilterbox_cmd_buf
searchby = self.search_terms_mpd[self.config.last_search_num]
if self.prevlibtodo != todo:
if todo == '$$$QUIT###':
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
return
elif len(todo) > 1:
gobject.idle_add(self.libsearchfilter_do_search,
searchby, todo)
elif len(todo) == 0:
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
self.libsearchfilter_toggle(False)
else:
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = '$$$DONE###'
try:
self.libfilterbox_cond.release()
except:
pass
self.prevlibtodo = todo
def libsearchfilter_do_search(self, searchby, todo):
if not self.prevlibtodo_base in todo:
# Do library search based on first two letters:
self.prevlibtodo_base = todo[:2]
self.prevlibtodo_base_results = self.mpd.search(searchby,
self.prevlibtodo_base)
subsearch = False
else:
subsearch = True
# Now, use filtering similar to playlist filtering:
        # this may take some seconds... and we'll escape the search text
# because we'll be searching for a match in items that are also escaped
#
# Note that the searching is not order specific. That is, "foo bar"
# will match on "fools bar" and "barstool foo".
todos = todo.split(" ")
regexps = []
for i in range(len(todos)):
todos[i] = misc.escape_html(todos[i])
todos[i] = re.escape(todos[i])
todos[i] = '.*' + todos[i].lower()
regexps.append(re.compile(todos[i]))
matches = []
if searchby != 'any':
for row in self.prevlibtodo_base_results:
is_match = True
for regexp in regexps:
if not regexp.match(unicode(mpdh.get(row,
searchby)).lower()):
is_match = False
break
if is_match:
matches.append(row)
else:
for row in self.prevlibtodo_base_results:
allstr = " ".join(mpdh.get(row, meta) for meta in row)
is_match = True
for regexp in regexps:
if not regexp.match(unicode(allstr).lower()):
is_match = False
break
if is_match:
matches.append(row)
if subsearch and len(matches) == len(self.librarydata):
# nothing changed..
return
self.library.freeze_child_notify()
currlen = len(self.librarydata)
bd = [[self.sonatapb,
self.library_set_data(path=mpdh.get(item, 'file')),
formatting.parse(self.config.libraryformat, item, True)]
for item in matches if 'file' in item]
bd.sort(locale.strcoll, key=operator.itemgetter(2))
for i, item in enumerate(bd):
if i < currlen:
j = self.librarydata.get_iter((i, ))
for index in range(len(item)):
if item[index] != self.librarydata.get_value(j, index):
self.librarydata.set_value(j, index, item[index])
else:
self.librarydata.append(item)
# Remove excess items...
newlen = len(bd)
if newlen == 0:
self.librarydata.clear()
else:
for i in range(currlen - newlen):
j = self.librarydata.get_iter((currlen - 1 - i,))
self.librarydata.remove(j)
self.library.thaw_child_notify()
if len(matches) == 0:
gobject.idle_add(self.filtering_entry_make_red, self.searchtext)
else:
gobject.idle_add(self.library.set_cursor, '0')
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
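    # --- Illustrative sketch (not part of the original class) ----------------
    # The order-insensitive, multi-term matching performed above in
    # libsearchfilter_do_search, reduced to a self-contained helper so the
    # technique is easier to see in isolation. The method name and the example
    # strings in the comments are hypothetical.
    @staticmethod
    def _sketch_terms_match(query, text):
        # True if every whitespace-separated term of query occurs somewhere in
        # text, regardless of order: "foo bar" matches "barstool foo" but not
        # "only foo here".
        import re
        regexps = [re.compile('.*' + re.escape(term.lower()))
                   for term in query.split()]
        return all(regexp.match(text.lower()) for regexp in regexps)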
def libsearchfilter_key_pressed(self, widget, event):
self.filter_key_pressed(widget, event, self.library)
def libsearchfilter_on_enter(self, _entry):
self.on_library_row_activated(None, None)
def libsearchfilter_set_focus(self):
gobject.idle_add(self.searchtext.grab_focus)
def libsearchfilter_get_style(self):
return self.searchtext.get_style()
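# --- Illustrative sketch (not part of the original module) -------------------
# The background-search pattern used by libsearchfilter_toggle() and
# libsearchfilter_loop() above, reduced to a self-contained producer/consumer:
# the GUI thread publishes the newest query under a threading.Condition and a
# daemon worker processes only the latest value, exiting on a sentinel (the
# original code uses the string markers '$$$DONE###' and '$$$QUIT###' for the
# same purpose). All names below are hypothetical.
import threading


class _SketchDebouncedWorker(object):

    _DONE = object()   # nothing new to process
    _QUIT = object()   # shut the worker down

    def __init__(self, handler):
        self._handler = handler   # e.g. a function running the MPD search
        self._cond = threading.Condition()
        self._pending = self._DONE
        worker = threading.Thread(target=self._loop)
        worker.setDaemon(True)
        worker.start()

    def feed(self, value):
        # Called from the GUI thread whenever the entry text changes
        # (in the original this happens via a gobject timeout, so rapid
        # keystrokes are coalesced before reaching the worker).
        self._cond.acquire()
        self._pending = value
        self._cond.notifyAll()
        self._cond.release()

    def stop(self):
        self.feed(self._QUIT)

    def _loop(self):
        while True:
            self._cond.acquire()
            while self._pending is self._DONE:
                self._cond.wait()
            value = self._pending
            self._pending = self._DONE
            self._cond.release()
            if value is self._QUIT:
                return
            self._handler(value)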
| onto/sonata | sonata/library.py | Python | gpl-3.0 | 66,200 |
#
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command, MirrorSafeCommand
from git_command import git
from project import HEAD
class Version(Command, MirrorSafeCommand):
common = False
helpSummary = "Display the version of xrepo"
helpUsage = """
%prog
"""
def Execute(self, opt, args):
rp = self.manifest.repoProject
rem = rp.GetRemote(rp.remote.name)
print 'xrepo version %s' % rp.work_git.describe(HEAD)
print ' (from %s)' % rem.url
print git.version().strip()
print 'Python %s' % sys.version
| lovesecho/xrepo | subcmds/version.py | Python | apache-2.0 | 1,123 |
from babel.dates import format_date
from babel.numbers import format_currency
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from bluebottle.utils.email_backend import send_mail
from tenant_extras.utils import TenantLanguage
def mail_monthly_donation_processed_notification(monthly_order):
cur_language = translation.get_language()
receiver = monthly_order.user
with TenantLanguage(receiver.primary_language):
subject = _("Thank you for your monthly support")
translation.activate(cur_language)
send_mail(
template_name='recurring_donations/mails/monthly_donation.mail',
subject=subject,
to=receiver,
order=monthly_order,
receiver_first_name=receiver.first_name.capitalize(),
link='/go/projects',
date=format_date(locale='nl_NL'),
amount=format_currency(monthly_order.amount, 'EUR', locale='nl_NL')
)
def mail_project_funded_monthly_donor_notification(receiver, project):
with TenantLanguage(receiver.primary_language):
subject = _("Congratulations: project completed!")
send_mail(
template_name='recurring_donations/mails/project_full_monthly_donor.mail',
subject=subject,
receiver_first_name=receiver.first_name.capitalize(),
to=receiver,
project=project,
link='/go/projects/{0}'.format(project.slug)
)
| jfterpstra/bluebottle | bluebottle/recurring_donations/mails.py | Python | bsd-3-clause | 1,426 |
# -*- coding: utf-8 -*-
import colander
from . import(
ResourceSchema,
BaseForm,
BaseSearchForm,
BaseAssignForm,
)
from ..resources.mails import MailsResource
from ..models.mail import Mail
from ..models.address import Address
from ..models.note import Note
from ..models.task import Task
from ..lib.qb.mails import MailsQueryBuilder
from ..lib.utils.common_utils import translate as _
from ..lib.utils.security_utils import get_auth_employee
@colander.deferred
def name_validator(node, kw):
request = kw.get('request')
def validator(node, value):
mail = Mail.by_name(value)
if (
mail
and str(mail.id) != request.params.get('id')
):
raise colander.Invalid(
node,
_(u'Mail with the same name exists'),
)
return colander.All(colander.Length(max=255), validator,)
class _MailSchema(ResourceSchema):
name = colander.SchemaNode(
colander.String(),
validator=name_validator,
)
subject = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
)
descr = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
)
html_content = colander.SchemaNode(
colander.String(),
missing=None
)
class MailForm(BaseForm):
_schema = _MailSchema
def submit(self, mail=None):
if not mail:
mail = Mail(
resource=MailsResource.create_resource(
get_auth_employee(self.request)
)
)
else:
mail.resource.notes = []
mail.resource.tasks = []
mail.name = self._controls.get('name')
mail.subject = self._controls.get('subject')
mail.html_content = self._controls.get('html_content')
mail.descr = self._controls.get('descr')
for id in self._controls.get('note_id'):
note = Note.get(id)
mail.resource.notes.append(note)
for id in self._controls.get('task_id'):
task = Task.get(id)
mail.resource.tasks.append(task)
return mail
class MailSearchForm(BaseSearchForm):
_qb = MailsQueryBuilder
class MailAssignForm(BaseAssignForm):
def submit(self, ids):
for id in ids:
mail = Mail.get(id)
mail.resource.maintainer_id = self._controls.get(
'maintainer_id'
)
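# --- Illustrative sketch (not part of the original module) -------------------
# How the deferred name_validator above receives the request: colander resolves
# @colander.deferred callables only when the schema is bound, so the form
# machinery (BaseForm in this project; the exact call site is assumed here)
# binds the schema with the current request before deserializing the submitted
# controls. The function name and the cstruct argument are illustrative only.
def _sketch_bind_and_validate(request, cstruct):
    schema = _MailSchema().bind(request=request)
    # deserialize() runs the validators, including the request-aware
    # name_validator produced by the binding above.
    return schema.deserialize(cstruct)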
| mazvv/travelcrm | travelcrm/forms/mails.py | Python | gpl-3.0 | 2,494 |
from twitterpandas import TwitterPandas
from examples.keys import TWITTER_OAUTH_SECRET, TWITTER_OAUTH_TOKEN, TWITTER_CONSUMER_SECRET, TWITTER_CONSUMER_KEY
__author__ = 'freddievargus'
if __name__ == '__main__':
tp = TwitterPandas(
TWITTER_OAUTH_TOKEN,
TWITTER_OAUTH_SECRET,
TWITTER_CONSUMER_KEY,
TWITTER_CONSUMER_SECRET
)
df = tp.saved_searches()
print(df.head(), '\n')
print(df.info(), '\n\n')
search_id = df[['id_str']].values[0][0]
df = tp.get_saved_search(search_id)
print(df, '\n')
print(df.info())
| wdm0006/twitter-pandas | examples/saved_search_methods.py | Python | bsd-3-clause | 576 |
import matplotlib.colors
import matplotlib.cm
import matplotlib.colorbar
import matplotlib.pyplot
import numpy
# colormaps: "viridis", "plasma_r","seismic"
def get_colormap(observable_name=None):
if "diff_minutes" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-30, vmax=30)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_number" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-4, vmax=4)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_relative" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-0.7, vmax=0.7)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_multiples" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-2, vmax=2)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_simpson" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-0.5, vmax=0.5)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_n_trips" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-10, vmax=10)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "diff_n_routes" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-5, vmax=5)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "simpson" in observable_name:
norm = matplotlib.colors.Normalize(vmin=0, vmax=1.0)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "n_trips" in observable_name:
norm = matplotlib.colors.Normalize(vmin=0, vmax=30)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "n_routes" in observable_name:
norm = matplotlib.colors.Normalize(vmin=0, vmax=15)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "delay_minutes" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-300, vmax=300)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "delay_seconds" in observable_name:
norm = matplotlib.colors.Normalize(vmin=-1500, vmax=1500)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
elif "n_locations" in observable_name:
norm = matplotlib.colors.Normalize(vmin=0, vmax=2000)
cmap = matplotlib.cm.get_cmap(name="viridis", lut=None)
else:
norm = matplotlib.colors.Normalize(vmin=-30, vmax=30)
cmap = matplotlib.cm.get_cmap(name="seismic", lut=None)
return cmap, norm
def get_colormap_with_params(vmin, vmax, name="seismic"):
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
cmap = matplotlib.cm.get_cmap(name=name, lut=None)
return cmap, norm
def get_list_of_colors(values, observable_name=None):
cmap, norm = get_colormap(observable_name)
scalarmap = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
colorvalues = []
for value in values:
colorvalue = scalarmap.to_rgba(value)
colorvalues.append(colorvalue)
return colorvalues, norm, cmap
def createcolorbar(cmap, norm):
"""Create a colourbar with limits of lwr and upr"""
cax, kw = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())
c = matplotlib.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
return c
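# --- Illustrative usage sketch (not part of the original module) -------------
# Typical use of the helpers above: map a handful of observable values to
# colors and attach the matching colorbar to the current figure. The
# observable name and the values are made up for illustration.
def _sketch_plot_colored_values():
    values = [-20, -5, 0, 5, 20]
    colorvalues, norm, cmap = get_list_of_colors(values, observable_name="diff_minutes")
    matplotlib.pyplot.scatter(range(len(values)), values, c=colorvalues)
    createcolorbar(cmap, norm)
    matplotlib.pyplot.show()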
| CxAalto/gtfspy | gtfspy/colormaps.py | Python | mit | 3,330 |
from pycp2k.inputsection import InputSection
class _each211(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
| SINGROUP/pycp2k | pycp2k/classes/_each211.py | Python | lgpl-3.0 | 1,114 |
# coding: utf-8
"""
39. Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
from django.conf import settings
from django.core import mail
from django.test import Client, TestCase, RequestFactory
from views import get_view
class ClientTest(TestCase):
fixtures = ['testdata.json']
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {'var': u'\xf2'}
response = self.client.get('/test_client/get_view/', data)
# Check some response details
self.assertContains(response, 'This is a test')
self.assertEqual(response.context['var'], u'\xf2')
self.assertEqual(response.templates[0].name, 'GET Template')
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get('/test_client/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty GET Template')
self.assertTemplateNotUsed(response, 'Empty POST Template')
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post('/test_client/post_view/', {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'Empty POST Template')
self.assertTemplateNotUsed(response, 'Empty GET Template')
self.assertTemplateUsed(response, 'Empty POST Template')
def test_post(self):
"POST some data to a view"
post_data = {
'value': 37
}
response = self.client.post('/test_client/post_view/', post_data)
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['data'], '37')
self.assertEqual(response.templates[0].name, 'POST Template')
self.failUnless('Data received' in response.content)
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/test_client/header_view/")
self.assertEquals(response['X-DJANGO-TEST'], 'Slartibartfast')
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?><library><book><title>Blink</title><author>Malcolm Gladwell</author></book></library>"""
response = self.client.post("/test_client/raw_post_view/", test_doc,
content_type="text/xml")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, "Blink - Malcolm Gladwell")
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get('/test_client/redirect_view/')
# Check that the response was a 302 (redirect) and that
# assertRedirect() understands to put an implicit http://testserver/ in
# front of non-absolute URLs.
self.assertRedirects(response, '/test_client/get_view/')
host = 'django.testserver'
client_providing_host = Client(HTTP_HOST=host)
response = client_providing_host.get('/test_client/redirect_view/')
# Check that the response was a 302 (redirect) with absolute URI
self.assertRedirects(response, '/test_client/get_view/', host=host)
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get('/test_client/redirect_view/', {'var': 'value'})
# Check if parameters are intact
self.assertRedirects(response, 'http://testserver/test_client/get_view/?var=value')
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get('/test_client/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=301)
client_providing_host = Client(HTTP_HOST='django.testserver')
response = client_providing_host.get('/test_client/permanent_redirect_view/')
# Check that the response was a 301 (permanent redirect) with absolute URI
self.assertRedirects(response, 'http://django.testserver/test_client/get_view/', status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get('/test_client/temporary_redirect_view/')
# Check that the response was a 302 (non-permanent redirect)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get('/test_client/double_redirect_view/')
# Check that the response was a 302, and that
# the attempt to get the redirection location returned 301 when retrieved
self.assertRedirects(response, 'http://testserver/test_client/permanent_redirect_view/', target_status_code=301)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get('/test_client/double_redirect_view/', follow=True)
self.assertRedirects(response, 'http://testserver/test_client/get_view/', status_code=302, target_status_code=200)
self.assertEquals(len(response.redirect_chain), 2)
def test_redirect_http(self):
"GET a URL that redirects to an http URI"
response = self.client.get('/test_client/http_redirect_view/',follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"GET a URL that redirects to an https URI"
response = self.client.get('/test_client/https_redirect_view/',follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get('/test_client/bad_view/')
# Check that the response was a 404, and that the content contains MAGIC
self.assertContains(response, 'MAGIC', status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {
'text': 'Hello World',
'multi': ('b','c','e')
}
response = self.client.get('/test_client/form_view/', data=hints)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Form GET Template")
# Check that the multi-value data has been rolled out ok
self.assertContains(response, 'Select a valid choice.', 0)
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertContains(response, 'This field is required.', 3)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view/', post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid e-mail address.')
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'foo@example.com',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data OK')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'value': 37
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, 'form_view.html')
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'This field is required.')
self.assertFormError(response, 'form', 'single', 'This field is required.')
self.assertFormError(response, 'form', 'multi', 'This field is required.')
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
'text': 'Hello World',
'email': 'not an email address',
'value': 37,
'single': 'b',
'multi': ('b','c','e')
}
response = self.client.post('/test_client/form_view_with_template/', post_data)
self.assertContains(response, 'POST data has errors')
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, 'base.html')
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(response, 'form', 'email', 'Enter a valid e-mail address.')
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get('/test_client/unknown_view/')
# Check that the response was a 404
self.assertEqual(response.status_code, 404)
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_method_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_login_and_custom_redirect(self):
"Request a page that is protected with @login_required(redirect_field_name='redirect_to')"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/login_protected_view_custom_redirect/')
self.assertRedirects(response, 'http://testserver/accounts/login/?redirect_to=/test_client/login_protected_view_custom_redirect/')
# Log in
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view_custom_redirect/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username='otheruser', password='nopassword')
self.failIf(login)
def test_view_with_inactive_login(self):
"Request a page that is protected with @login, but use an inactive login"
login = self.client.login(username='inactive', password='password')
self.failIf(login)
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username='testclient', password='password')
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'].username, 'testclient')
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get('/test_client/login_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/login_protected_view/')
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/test_client/permission_protected_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_view/')
# TODO: Log in with right permissions and request the page again
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get('/test_client/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
# Log in
login = self.client.login(username='testclient', password='password')
self.failUnless(login, 'Could not log in')
# Log in with wrong permissions. Should result in 302.
response = self.client.get('/test_client/permission_protected_method_view/')
self.assertRedirects(response, 'http://testserver/accounts/login/?next=/test_client/permission_protected_method_view/')
# TODO: Log in with right permissions and request the page again
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
try:
self.client.session['tobacconist']
self.fail("Shouldn't have a session value")
except KeyError:
pass
from django.contrib.sessions.models import Session
response = self.client.post('/test_client/session_view/')
# Check that the session was modified
self.assertEquals(self.client.session['tobacconist'], 'hovercraft')
def test_view_with_exception(self):
"Request a page that is known to throw an error"
self.assertRaises(KeyError, self.client.get, "/test_client/broken_view/")
#Try the same assertion, a different way
try:
self.client.get('/test_client/broken_view/')
self.fail('Should raise an error')
except KeyError:
pass
def test_mail_sending(self):
"Test that mail is redirected to a dummy outbox during test setup"
response = self.client.get('/test_client/mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Test message')
self.assertEqual(mail.outbox[0].body, 'This is a test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
def test_mass_mail_sending(self):
"Test that mass mail is redirected to a dummy outbox during test setup"
response = self.client.get('/test_client/mass_mail_sending_view/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, 'First Test message')
self.assertEqual(mail.outbox[0].body, 'This is the first test email')
self.assertEqual(mail.outbox[0].from_email, 'from@example.com')
self.assertEqual(mail.outbox[0].to[0], 'first@example.com')
self.assertEqual(mail.outbox[0].to[1], 'second@example.com')
self.assertEqual(mail.outbox[1].subject, 'Second Test message')
self.assertEqual(mail.outbox[1].body, 'This is the second test email')
self.assertEqual(mail.outbox[1].from_email, 'from@example.com')
self.assertEqual(mail.outbox[1].to[0], 'second@example.com')
self.assertEqual(mail.outbox[1].to[1], 'third@example.com')
class CSRFEnabledClientTests(TestCase):
def setUp(self):
# Enable the CSRF middleware for this test
self.old_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES
csrf_middleware_class = 'django.middleware.csrf.CsrfViewMiddleware'
if csrf_middleware_class not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += (csrf_middleware_class,)
def tearDown(self):
settings.MIDDLEWARE_CLASSES = self.old_MIDDLEWARE_CLASSES
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post('/test_client/post_view/', {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post('/test_client/post_view/', {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(TestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertEqual(hasattr(self.client, "i_am_customized"), True)
class RequestFactoryTest(TestCase):
def test_request_factory(self):
factory = RequestFactory()
request = factory.get('/somewhere/')
response = get_view(request)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is a test')
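class RequestFactorySketchTest(TestCase):
    # Illustrative sketch (not part of the original suite): RequestFactory
    # builds plain request objects without running any middleware, so a test
    # can inspect the request directly before handing it to a view. The URL
    # and POST data below are arbitrary.
    def test_request_factory_builds_post_requests(self):
        factory = RequestFactory()
        request = factory.post('/somewhere/', {'value': '37'})
        self.assertEqual(request.method, 'POST')
        self.assertEqual(request.POST['value'], '37')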
| faun/django_test | tests/modeltests/test_client/models.py | Python | bsd-3-clause | 21,290 |
import os.path
from django import forms
from django.contrib.admin.helpers import ActionForm
from django.utils.translation import gettext_lazy as _
class ImportForm(forms.Form):
import_file = forms.FileField(
label=_('File to import')
)
input_format = forms.ChoiceField(
label=_('Format'),
choices=(),
)
def __init__(self, import_formats, *args, **kwargs):
super().__init__(*args, **kwargs)
choices = []
for i, f in enumerate(import_formats):
choices.append((str(i), f().get_title(),))
if len(import_formats) > 1:
choices.insert(0, ('', '---'))
self.fields['input_format'].choices = choices
class ConfirmImportForm(forms.Form):
import_file_name = forms.CharField(widget=forms.HiddenInput())
original_file_name = forms.CharField(widget=forms.HiddenInput())
input_format = forms.CharField(widget=forms.HiddenInput())
def clean_import_file_name(self):
data = self.cleaned_data['import_file_name']
data = os.path.basename(data)
return data
class ExportForm(forms.Form):
file_format = forms.ChoiceField(
label=_('Format'),
choices=(),
)
def __init__(self, formats, *args, **kwargs):
super().__init__(*args, **kwargs)
choices = []
for i, f in enumerate(formats):
choices.append((str(i), f().get_title(),))
if len(formats) > 1:
choices.insert(0, ('', '---'))
self.fields['file_format'].choices = choices
def export_action_form_factory(formats):
"""
Returns an ActionForm subclass containing a ChoiceField populated with
the given formats.
"""
class _ExportActionForm(ActionForm):
"""
Action form with export format ChoiceField.
"""
file_format = forms.ChoiceField(
label=_('Format'), choices=formats, required=False)
_ExportActionForm.__name__ = str('ExportActionForm')
return _ExportActionForm
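# --- Illustrative usage sketch (not part of the original module) -------------
# How these forms are typically wired together by the admin integration. The
# format classes referenced here come from import_export.formats.base_formats
# and are assumed for illustration; any iterable of format classes works.
def _sketch_build_forms():
    from import_export.formats import base_formats
    formats = [base_formats.CSV, base_formats.JSON]
    import_form = ImportForm(formats)
    export_form = ExportForm(formats)
    action_form_cls = export_action_form_factory([('0', 'csv'), ('1', 'json')])
    return import_form, export_form, action_form_cls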
| django-import-export/django-import-export | import_export/forms.py | Python | bsd-2-clause | 2,026 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class FlattenParameterGroup(Model):
"""
Additional parameters for the putSimpleProductWithGrouping operation.
:param name: Product name with value 'groupproduct'
:type name: str
:param product_id: Unique identifier representing a specific product for
a given latitude & longitude. For example, uberX in San Francisco will
have a different product_id than uberX in Los Angeles.
:type product_id: str
:param description: Description of product.
:type description: str
:param max_product_display_name: Display name of product.
:type max_product_display_name: str
:param generic_value: Generic URL value.
:type generic_value: str
:param odatavalue: URL value.
:type odatavalue: str
"""
_validation = {
'name': {'required': True},
'product_id': {'required': True},
'max_product_display_name': {'required': True},
}
def __init__(self, name, product_id, max_product_display_name, description=None, generic_value=None, odatavalue=None):
self.name = name
self.product_id = product_id
self.description = description
self.max_product_display_name = max_product_display_name
self.generic_value = generic_value
self.odatavalue = odatavalue
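# --- Illustrative usage sketch (not part of the original module) -------------
# Constructing the parameter group: only the three fields marked required in
# _validation need to be supplied; the remaining ones default to None. The
# values below are made up.
def _sketch_build_group():
    return FlattenParameterGroup(
        name='groupproduct',
        product_id='123',
        max_product_display_name='max name',
        description='product description')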
| sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/ModelFlattening/autorestresourceflatteningtestservice/models/flatten_parameter_group.py | Python | mit | 1,800 |
{% if cookiecutter.use_celery == "y" %}
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # pragma: no cover
app = Celery('{{cookiecutter.repo_name}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.repo_name}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
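# --- Illustrative sketch (not part of the original template) -----------------
# A module that the autodiscover_tasks() call above would pick up: Celery scans
# each app in INSTALLED_APPS for a tasks.py. Shown as comments so the rendered
# template output is unchanged; the app path and task body are hypothetical.
#
# # {{cookiecutter.repo_name}}/users/tasks.py
# from celery import shared_task
#
# @shared_task
# def clear_expired_sessions():
#     from django.core.management import call_command
#     call_command("clearsessions")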
| stepanovsh/project_template | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/taskapp/celery.py | Python | bsd-3-clause | 1,059 |
import codecs
import platform
import subprocess
import tempfile
import paramiko
from pytest import raises
import mockssh
def test_ssh_session(server):
for uid in server.users:
with server.client(uid) as c:
_, stdout, _ = c.exec_command("ls /")
assert "etc" in (codecs.decode(bit, "utf8")
for bit in stdout.read().split())
_, stdout, _ = c.exec_command("hostname")
assert (codecs.decode(stdout.read().strip(), "utf8") ==
platform.node())
def test_ssh_failed_commands(server):
for uid in server.users:
with server.client(uid) as c:
_, _, stderr = c.exec_command("rm /")
stderr = codecs.decode(stderr.read(), "utf8")
assert (stderr.startswith("rm: cannot remove") or
stderr.startswith("rm: /: is a directory"))
def test_multiple_connections1(server):
_test_multiple_connections(server)
def test_multiple_connections2(server):
_test_multiple_connections(server)
def test_multiple_connections3(server):
_test_multiple_connections(server)
def test_multiple_connections4(server):
_test_multiple_connections(server)
def test_multiple_connections5(server):
_test_multiple_connections(server)
def _test_multiple_connections(server):
# This test will deadlock without ea1e0f80aac7253d2d346732eefd204c6627f4c8
fd, pkey_path = tempfile.mkstemp()
user, private_key = list(server._users.items())[0]
open(pkey_path, 'w').write(open(private_key[0]).read())
ssh_command = 'ssh -oStrictHostKeyChecking=no '
ssh_command += "-i %s -p %s %s@localhost " % (pkey_path, server.port, user)
ssh_command += 'echo hello'
p = subprocess.check_output(ssh_command, shell=True)
assert p.decode('utf-8').strip() == 'hello'
def test_invalid_user(server):
with raises(KeyError) as exc:
server.client("unknown-user")
assert exc.value.args[0] == "unknown-user"
def test_add_user(server, user_key_path):
with raises(KeyError):
server.client("new-user")
server.add_user("new-user", user_key_path)
with server.client("new-user") as c:
_, stdout, _ = c.exec_command("echo 42")
assert codecs.decode(stdout.read().strip(), "utf8") == "42"
def test_overwrite_handler(server, monkeypatch):
class MyHandler(mockssh.server.Handler):
def check_auth_password(self, username, password):
if username == "foo" and password == "bar":
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
monkeypatch.setattr(server, 'handler_cls', MyHandler)
with paramiko.SSHClient() as client:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
assert client.connect(server.host, server.port, "foo", "bar") is None
with raises(paramiko.ssh_exception.AuthenticationException):
client.connect(server.host, server.port, "fooooo", "barrrr")
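# --- Illustrative sketch (not part of the original suite) --------------------
# The `server` fixture used above is provided by mockssh; outside of pytest the
# same server can be driven directly as a context manager. The user id and the
# private-key path below are placeholders.
def _sketch_standalone_server(uid="sample-user", key_path="sample-user-key"):
    with mockssh.Server({uid: key_path}) as server:
        with server.client(uid) as c:
            _, stdout, _ = c.exec_command("echo ok")
            return codecs.decode(stdout.read().strip(), "utf8")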
| carletes/mock-ssh-server | mockssh/test_server.py | Python | mit | 2,991 |
#!/usr/bin/env python
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('pyALGENCAN', parent_package, top_path)
config.add_library('algencan',
sources=[os.path.join('source', '*.f')],
include_dirs=['source'])
config.add_extension('algencan',
sources=['source/f2py/algencan.pyf'],
libraries=['algencan'])
config.add_data_files('LICENSE', 'README')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| DailyActie/Surrogate-Model | 01-codes/pyOpt-1.2.0/pyOpt/pyALGENCAN/setup.py | Python | mit | 697 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'BootVersion.source'
db.delete_column(u'boots_bootversion', 'source')
# Deleting field 'BootVersion.append'
db.delete_column(u'boots_bootversion', 'append')
# Adding field 'BootVersion.command'
db.add_column(u'boots_bootversion', 'command',
self.gf('django.db.models.fields.TextField')(default='./django-admin.py start'),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'BootVersion.source'
raise RuntimeError("Cannot reverse this migration. 'BootVersion.source' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'BootVersion.source'
db.add_column(u'boots_bootversion', 'source',
self.gf('django.db.models.fields.URLField')(max_length=200),
keep_default=False)
# Adding field 'BootVersion.append'
db.add_column(u'boots_bootversion', 'append',
self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True),
keep_default=False)
# Deleting field 'BootVersion.command'
db.delete_column(u'boots_bootversion', 'command')
models = {
u'accounts.team': {
'Meta': {'object_name': 'Team'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'accounts.user': {
'Meta': {'object_name': 'User'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'default_users'", 'to': u"orm['accounts.Team']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': u"orm['accounts.Team']"}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'boots.boot': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Boot'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'flagged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'r_star_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'r_star_count_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'r_star_count_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'r_star_count_week': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Team']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'boots.bootversion': {
'Meta': {'ordering': "('-created',)", 'unique_together': "(('boot', 'slug'),)", 'object_name': 'BootVersion'},
'boot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['boots.Boot']"}),
'command': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('django.db.models.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'boots.star': {
'Meta': {'unique_together': "(('boot', 'user'),)", 'object_name': 'Star'},
'boot': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stars'", 'to': u"orm['boots.Boot']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['boots'] | sophilabs/djangoboot | app/boots/migrations/0009_auto__del_field_bootversion_source__del_field_bootversion_append__add_.py | Python | mit | 9,284 |
import sys, inspect, pdb
from . import ModelObjs
"""
This module contains definitions for function decorators that are used
to instantiate instances of objects such as Stochs.
"""
def stochastic( label=None, func=None, observed=False, dtype=float ):
"""
Decorator for the Stoch class.
CALLING
For an unobserved Stoch:
    @pyhm.stochastic( observed=False, dtype=float )
def A( value=xvalue, parents=parents ):
def logp( value, parents ):
...
return logp_value
def random( parents ):
...
return random_draw
    The above will instantiate an unobserved Stoch named A (this will be
    both the name of the variable and its identity key) with current value equal
    to xvalue, and with the parent variables provided in the parents dictionary.
Note that xvalue can be a single number or an array. However, it is optional
to provide a value for an unobserved Stoch - it can also be set to None.
    The entries of the parents dictionary can be Stochs, or else anything
    that will be accepted by the logp() function. The random() function is optional,
and defines how to generate random samples from the Stoch probability
distribution.
Alternatively, for an observed Stoch:
@pyhm.stochastic( observe=True, dtype=float )
def A( value=xdata, par1=par1, par2=par2 ):
def logp( value, par1=par1, par2=par2 ):
...
return logp_value
Unlike the unobserved Stoch case, it is necessary to provide an external
argument for the value. A random() function is not needed, as the value of an
observed Stoch is fixed to its 'observed' value.
"""
def instantiate_stochastic( func, label=label ):
dictionary = {}
# Extract the basic properties:
if label is not None:
dictionary['name'] = label
else:
dictionary['name'] = func.__name__
dictionary['observed'] = observed
dictionary['dtype'] = dtype
# Identify if logp and random functions
# have been passed in:
dictionary['logp'] = None
dictionary['random'] = None
keys = ['logp', 'random']
def probe_func( frame, event, arg ):
if event=='return':
l = frame.f_locals
for key in keys:
dictionary[key] = l.get( key )
sys.settrace( None )
return probe_func
sys.settrace( probe_func )
func()
if dictionary['logp'] is None:
err_str = '\nStochastic {0} logp not defined'\
.format( dictionary['name'] )
raise ValueError(err_str)
if ( dictionary['random'] is not None )*( dictionary['observed']==True ):
err_str = '\nCan\'t have random function defined for Stochastic {0}'\
.format( dictionary['name'] )
err_str += 'because \'observed\' is set to True'
raise ValueError(err_str)
# Unpack the value and parents inputs.
# Work out the parents of the stochastic, which are
# provided in the parents input dictionary:
parents = {}
( args, varargs, varkw, defaults ) = inspect.getargspec( func )
if defaults is None:
defaults = []
# Check if value has been provided:
if ( 'value' in args ):
value_included = True
else:
value_included = False
# Check if parents have been provided:
if ( 'parents' in args ):
parents_included = True
else:
parents_included = False
# Raise error if value or parents haven't been provided:
if ( value_included==False )+( parents_included==False ):
err_str = 'Stochastic {0} value and/or parents not defined properly'\
.format( dictionary['name'] )
raise ValueError( err_str )
else:
for i in range( len( args ) ):
if args[i]=='parents':
for key in defaults[i].keys():
parents[key] = defaults[i][key]
elif args[i]=='value':
dictionary['value'] = defaults[i]
dictionary['parents'] = parents
return ModelObjs.Stoch( dictionary )
if func:
return instantiate_stochastic( func )
else:
return instantiate_stochastic
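# --- Illustrative sketch (not part of the original module) -------------------
# Filling in the docstring skeleton above with a concrete Gaussian log-density.
# Nothing here runs at import time; numpy is assumed to be available, and the
# variable names are made up.
def _sketch_gaussian_stochastic():
    import numpy as np

    @stochastic( observed=False, dtype=float )
    def mu( value=0.0, parents={ 'mean':0.0, 'sigma':1.0 } ):
        def logp( value, parents ):
            # Gaussian log-density, up to an additive constant.
            z = ( value - parents['mean'] ) / parents['sigma']
            return -0.5*z**2 - np.log( parents['sigma'] )
        def random( parents ):
            return np.random.normal( parents['mean'], parents['sigma'] )
    return mu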
| tomevans/pyhm | pyhm/InstantiationDecorators.py | Python | gpl-2.0 | 4,592 |
##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for Intel MKL as toolchain linear algebra library.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from distutils.version import LooseVersion
from easybuild.toolchains.compiler.gcc import TC_CONSTANT_GCC
from easybuild.toolchains.compiler.inteliccifort import TC_CONSTANT_INTELCOMP
from easybuild.toolchains.compiler.pgi import TC_CONSTANT_PGI
from easybuild.toolchains.mpi.intelmpi import TC_CONSTANT_INTELMPI
from easybuild.toolchains.mpi.mpich import TC_CONSTANT_MPICH
from easybuild.toolchains.mpi.mpich2 import TC_CONSTANT_MPICH2
from easybuild.toolchains.mpi.mvapich2 import TC_CONSTANT_MVAPICH2
from easybuild.toolchains.mpi.openmpi import TC_CONSTANT_OPENMPI
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.linalg import LinAlg
TC_CONSTANT_INTELMKL = 'IntelMKL'
class IntelMKL(LinAlg):
"""Support for Intel MKL."""
# library settings are inspired by http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor
BLAS_MODULE_NAME = ['imkl']
BLAS_LIB_MAP = {
"lp64": '_lp64',
"interface": None,
"interface_mt": None,
}
BLAS_LIB = ["mkl_%(interface)s%(lp64)s" , "mkl_sequential", "mkl_core"]
BLAS_LIB_MT = ["mkl_%(interface)s%(lp64)s" , "mkl_%(interface_mt)s_thread", "mkl_core"]
BLAS_LIB_GROUP = True
BLAS_LIB_STATIC = True
BLAS_FAMILY = TC_CONSTANT_INTELMKL
LAPACK_MODULE_NAME = ['imkl']
LAPACK_IS_BLAS = True
LAPACK_FAMILY = TC_CONSTANT_INTELMKL
BLACS_MODULE_NAME = ['imkl']
BLACS_LIB = ["mkl_blacs%(mpi)s%(lp64)s"]
BLACS_LIB_MAP = {'mpi':None}
BLACS_LIB_GROUP = True
BLACS_LIB_STATIC = True
SCALAPACK_MODULE_NAME = ['imkl']
SCALAPACK_LIB = ["mkl_scalapack%(lp64_sc)s"]
SCALAPACK_LIB_MT = ["mkl_scalapack%(lp64_sc)s"]
SCALAPACK_LIB_MAP = {'lp64_sc': '_lp64'}
SCALAPACK_REQUIRES = ['LIBBLACS', 'LIBBLAS']
SCALAPACK_LIB_GROUP = True
SCALAPACK_LIB_STATIC = True
def __init__(self, *args, **kwargs):
"""Toolchain constructor."""
class_constants = kwargs.setdefault('class_constants', [])
class_constants.extend(['BLAS_LIB_MAP', 'SCALAPACK_LIB', 'SCALAPACK_LIB_MT', 'SCALAPACK_LIB_MAP'])
super(IntelMKL, self).__init__(*args, **kwargs)
def set_variables(self):
"""Set the variables"""
# for recent versions of Intel MKL, -ldl should be used for linking;
# the Intel MKL Link Advisor specifies to always do this,
# but it is only needed when statically linked with Intel MKL,
# and only strictly needed for some compilers (e.g. PGI)
mkl_version = self.get_software_version(self.BLAS_MODULE_NAME)[0]
if LooseVersion(mkl_version) >= LooseVersion('11') and self.COMPILER_FAMILY in [TC_CONSTANT_PGI]:
self.log.info("Adding -ldl as extra library when linking with Intel MKL libraries (for v11.x and newer)")
if self.LIB_EXTRA is None:
self.LIB_EXTRA = ['dl']
elif 'dl' not in self.LIB_EXTRA:
self.LIB_EXTRA.append('dl')
super(IntelMKL, self).set_variables()
def _set_blas_variables(self):
"""Fix the map a bit"""
interfacemap = {
TC_CONSTANT_INTELCOMP: 'intel',
TC_CONSTANT_GCC: 'gf',
# Taken from https://www.pgroup.com/support/link.htm#mkl
TC_CONSTANT_PGI: 'intel',
}
try:
self.BLAS_LIB_MAP.update({
"interface": interfacemap[self.COMPILER_FAMILY],
})
        except KeyError:
            raise EasyBuildError("_set_blas_variables: interface unsupported combination with compiler family %s",
                                 self.COMPILER_FAMILY)
interfacemap_mt = {
TC_CONSTANT_INTELCOMP: 'intel',
TC_CONSTANT_GCC: 'gnu',
TC_CONSTANT_PGI: 'pgi',
}
try:
self.BLAS_LIB_MAP.update({"interface_mt":interfacemap_mt[self.COMPILER_FAMILY]})
        except KeyError:
raise EasyBuildError("_set_blas_variables: interface_mt unsupported combination with compiler family %s",
self.COMPILER_FAMILY)
if self.options.get('32bit', None):
# 32bit
self.BLAS_LIB_MAP.update({"lp64":''})
if self.options.get('i8', None):
# ilp64/i8
self.BLAS_LIB_MAP.update({"lp64":'_ilp64'})
# CPP / CFLAGS
self.variables.nappend_el('CFLAGS', 'DMKL_ILP64')
# exact paths/linking statements depend on imkl version
found_version = self.get_software_version(self.BLAS_MODULE_NAME)[0]
if LooseVersion(found_version) < LooseVersion('10.3'):
if self.options.get('32bit', None):
self.BLAS_LIB_DIR = ['lib/32']
else:
self.BLAS_LIB_DIR = ['lib/em64t']
self.BLAS_INCLUDE_DIR = ['include']
else:
if self.options.get('32bit', None):
raise EasyBuildError("_set_blas_variables: 32-bit libraries not supported yet for IMKL v%s (> v10.3)",
found_version)
else:
self.BLAS_LIB_DIR = ['mkl/lib/intel64', 'compiler/lib/intel64' ]
self.BLAS_INCLUDE_DIR = ['mkl/include']
super(IntelMKL, self)._set_blas_variables()
def _set_blacs_variables(self):
mpimap = {
TC_CONSTANT_OPENMPI: '_openmpi',
TC_CONSTANT_INTELMPI: '_intelmpi',
TC_CONSTANT_MVAPICH2: '_intelmpi',
# use intelmpi MKL blacs library for both MPICH v2 and v3
# cfr. https://software.intel.com/en-us/articles/intel-mkl-link-line-advisor
# note: MKL link advisor uses 'MPICH' for MPICH v1
TC_CONSTANT_MPICH2: '_intelmpi',
TC_CONSTANT_MPICH: '_intelmpi',
}
try:
self.BLACS_LIB_MAP.update({'mpi': mpimap[self.MPI_FAMILY]})
        except KeyError:
raise EasyBuildError("_set_blacs_variables: mpi unsupported combination with MPI family %s",
self.MPI_FAMILY)
self.BLACS_LIB_DIR = self.BLAS_LIB_DIR
self.BLACS_INCLUDE_DIR = self.BLAS_INCLUDE_DIR
super(IntelMKL, self)._set_blacs_variables()
def _set_scalapack_variables(self):
imkl_version = self.get_software_version(self.BLAS_MODULE_NAME)[0]
if LooseVersion(imkl_version) < LooseVersion('10.3'):
self.SCALAPACK_LIB.append("mkl_solver%(lp64)s_sequential")
self.SCALAPACK_LIB_MT.append("mkl_solver%(lp64)s")
if self.options.get('32bit', None):
# 32 bit
self.SCALAPACK_LIB_MAP.update({"lp64_sc":'_core'})
elif self.options.get('i8', None):
# ilp64/i8
self.SCALAPACK_LIB_MAP.update({"lp64_sc":'_ilp64'})
self.SCALAPACK_LIB_DIR = self.BLAS_LIB_DIR
self.SCALAPACK_INCLUDE_DIR = self.BLAS_INCLUDE_DIR
super(IntelMKL, self)._set_scalapack_variables()
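# Hedged illustration (plain Python, not part of the EasyBuild API): the
# %(interface)s / %(lp64)s placeholders in the library templates above are
# resolved through the *_LIB_MAP dictionaries, e.g.
#
#     "mkl_%(interface)s%(lp64)s" % {'interface': 'gf', 'lp64': '_lp64'}     # -> 'mkl_gf_lp64' (GCC, 64-bit)
#     "mkl_%(interface)s%(lp64)s" % {'interface': 'intel', 'lp64': '_ilp64'} # -> 'mkl_intel_ilp64' (i8 builds)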
| wpoely86/easybuild-framework | easybuild/toolchains/linalg/intelmkl.py | Python | gpl-2.0 | 8,106 |
import wave, struct
from array import *
import matplotlib.pyplot as plt
from pylab import *
import sys, getopt, os.path #time
#print "Program Start"
file_name = ""
graph = 0
doAvg = 0
filterOrder = 5 #0
baudrate = 4800.0 #9600.0 #9143.0 # Increase Slightly Some more 9183.0 #
offset = 22 #11 # (-1)
bytes = 80 #12 for 7Ch
preambleBits = 10
def printHelp():
print "KatanaLRS Decoder by Stephen Carlson Jan 2014\n \
-h Help \n \
-g Graph \n \
-a Work on Average of all valid frames (must have multiple packets)\n \
-i <inputfile> (Default: Newest \"_AF.wav\") \n \
-f <Filter Order> (Default: 5) \n \
-b <Baudrate> (Default: 4800)\n \
-B <Bytes in Payload> (Default: 80) \n \
-o <Preamble Offset> (Default: 22) \n \
-p <Preamble Bits> (Default: 10)"
try:
opts, args = getopt.getopt(sys.argv[1:],"higaf:b:o:B:p:",["ifile=","help"])
except getopt.GetoptError:
printHelp()
print "\nError with command line arguments\n"
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
printHelp()
sys.exit()
if opt in ("-g"):
graph = 1
if opt in ("-a"):
doAvg = 1
if opt in ("-i", "--ifile"):
file_name = arg
if opt in ("-f"):
filterOrder = int(arg)
if opt in ("-b"):
baudrate = int(arg) * 1.0
if opt in ("-B"):
bytes = int(arg)
if opt in ("-o"):
offset = int(arg)
if opt in ("-p"):
preambleBits = int(arg)
if file_name != "":
file_name = file_name + ".wav"
else:
recent = 0.0
for file in os.listdir(os.getcwd()):
if file.endswith("_AF.wav") and (os.path.getsize(file) < 900000) :
if(os.path.getmtime(file) > recent):
file_name = file
recent = os.path.getmtime(file)
#print file, os.path.getmtime(file),os.path.getsize(file)
print '\nUsing',file_name
#print file_name + ".wav"
w = wave.open(file_name,'r')
nchannels = w.getnchannels()
samplewid = w.getsampwidth()
framerate = w.getframerate()
nframes = w.getnframes()
# print "Channels: " + repr(w.getnchannels())
# print "Width: " + repr(w.getsampwidth())
# print "Rate: " + repr(w.getframerate())
# print ""
#for i in w.readframes(20) #range(0,20)
#a = array('i') #[]
#a = w.readframes(1)
#print len(a)
length = w.getnframes()
#print a
a = zeros(length)
for i in range(0,length): #len(a)):
frame = w.readframes(1)
data = struct.unpack('hh',frame)[0]
#print data
a[i]=data
if(int(filterOrder) > 0): # Moving into the Realm of Windowing Functions and Signal Processing
for i in range(len(a)-int(filterOrder)): # Moving Average, no Weights
for j in range(int(filterOrder)):
a[i] += a[i+j+1]
		a[i] /= (1.0+int(filterOrder)) # normalize the moving-average sum
period = 1.0/baudrate
sampleDelta = 1.0/framerate
sampPerPeriod = framerate/baudrate #3.4 #
print "Sa/Mark: " + repr(sampPerPeriod)
# Discover Zero Crossings
diff = zeros(length) #(1,length))
last = 0;
t = arange(0,length,1)
#print len(diff)
dcBias = 0 #0 #8500
max = 0
for i in range(0,len(diff)):
#print t[i] # repr(a[i]) + " + " + repr(last)
if( (a[i] >= dcBias and last < dcBias) ): # or (a[i] < 0 and last >= 0) ):
diff[i] = 1000;
elif (a[i] < dcBias and last >= dcBias):
diff[i] = -1000;
#print i
if(a[i] > max):
max = a[i]
last = a[i]
#print "Max: " + repr(max)
annoyingOffset = -4000
# Find Preamble Sequence
last = 0
delta = 0
mark = zeros(length)+annoyingOffset
valid = zeros(length)+annoyingOffset
preambleCounter = 0
deltaSum = 0
state = "PREAMBLE"
for i in range(0,len(diff)):
delta = i - last
if(diff[i] != 0):
last = i
if(state == "PREAMBLE"):
if( (sampPerPeriod*.5 < delta) and (delta < sampPerPeriod*1.5) ):
mark[i] = 2000
preambleCounter += 1
deltaSum += delta
#print i
elif( (sampPerPeriod*3.6 < delta) and preambleCounter >= preambleBits and diff[i] < 0):
line = "Valid Porch @ " + repr(i)
#if(i<100000): line += "\t"
line += "\tAvg Preamble Sa/Period: " + repr((deltaSum*1.0)/preambleCounter)
print line
state = "UART_START"
else:
preambleCounter = 0;
deltaSum = 0;
#state = "PREAMBLE"
if(state == "UART_START"):
if(diff[i] > 0):
valid[i] = 3000
state = "PREAMBLE"
#elif(sampPerPeriod*3.6 < delta)
# if(state == "UART_DECODE"):
# if(sampPerPeriod*3.6 < delta
# for i in range(100):
# print repr(int(round(sampPerPeriod*(i+0.5)))) + " " + repr(int(sampPerPeriod*(i+0.5)))
# range(i+sampPerPeriod*0.5,i+10*sampPerPeriod,sampPerPeriod):
# Decode Serial Frames on Valid Syncs
timing = zeros(length)+annoyingOffset
#timing = timing*dcBias*.1
# offset = 22 #11 # (-1)
# bytes = 80 #12 for 7Ch
byteArray = [] #zeros(bytes,dtype=numpy.int) #zeros(bytes,Int) #array('B') #[] #zeros(bytes) #[bytes+1]
for i in range(bytes):
byteArray.append(0)
#print "Avg Buffer:",int(bytes*sampPerPeriod*8)
avgArray = zeros(int(bytes*sampPerPeriod*8))
validCount = 0;
for i in range(0,len(valid)):
if(valid[i] != annoyingOffset): # Yes, yes, I know this is like O(n^3) or something like that, need to optimize
for l in range(bytes):
byteArray[l] = 0;
# line = "" #[]
if bytes*sampPerPeriod*10+i >len(valid):
break
validCount += 1
for s in range(len(avgArray)):
avgArray[s] += a[i+s]
for j in range(bytes):
# octet = ""
for k in range(8):
l = int(sampPerPeriod*(j*8+k))+i+offset #int(round(sampPerPeriod*(j+0.5)))+i
timing[l] = dcBias*.1 #-2000
if(a[l] >=dcBias):
# octet += '0' #.append(0) #<<= 1 #print '0'
byteArray[j] = (byteArray[j])<<1 | 0x00
else:
# octet += '1' #.append(1) #print '1'
byteArray[j] = (byteArray[j])<<1 | 0x01
# line += octet + '\n' #(octet[::-1] + ' ')
#print line
#print '---'
str = ">\t" #"\n>\t"
for i in range(bytes):
if(byteArray[i] in range(0x20, 0x7E)): #== '*'):
if(chr(byteArray[i]) == '*'):
str += "*" #\n"
break
else: str += chr(byteArray[i]) #repr(bin(byteArray))
print str
#print "\nValid Frames:",validCount
if (validCount > 0):
if(doAvg): timing = zeros(len(avgArray))+annoyingOffset
for s in range(len(avgArray)):
avgArray[s] = avgArray[s] / validCount
# Ok, with the code about to be pasted and modified below, I have officially crossed the line, I need to do lots of def modules
for i in range(len(avgArray)-int(filterOrder)): # Moving Average, no Weights
for j in range(int(filterOrder)):
avgArray[i] += avgArray[i+j+1]
		avgArray[i] /= (1.0+int(filterOrder)) # normalize the moving-average sum
for l in range(bytes):
byteArray[l] = 0;
for j in range(bytes):
for k in range(8):
l = int(sampPerPeriod*(j*8+k))+offset
if(l<len(avgArray)):
if(doAvg): timing[l] = dcBias
if(avgArray[l] >=dcBias):
byteArray[j] = (byteArray[j])<<1 | 0x00
else:
byteArray[j] = (byteArray[j])<<1 | 0x01
str = "\n>+++\t"
for i in range(bytes):
if(byteArray[i] in range(0x20, 0x7E)):
if(chr(byteArray[i]) == '*'):
str += "*"
break
else: str += chr(byteArray[i]) #repr(bin(byteArray))
print str
lower = 0 #12100
upper = length-1 #12700
if graph == 1:
#plt.plot(t[lower:upper],a[lower:upper]*.5,'b',t[lower:upper],diff[lower:upper],'r',t[lower:upper],mark[lower:upper],'y',t[lower:upper],valid[lower:upper],'g')
plt.plot(a[lower:upper]*.1,'b')
#plt.plot(diff[lower:upper]*.2,'r')
#plt.plot(mark[lower:upper],'yo')
plt.plot(valid[lower:upper],'go')
plt.plot(timing[lower:upper],'co')
plt.show()
elif doAvg == 1:
plt.plot(avgArray[0:int(len(avgArray))]*.01,'b')
plt.plot(timing[0:int(len(avgArray))],'co')
plt.show()
#print len(frame)
# data = struct.unpack('hh',frame)[0]
# dataa = struct.unpack('hh',frame)[1]
# print repr(data) + ' and ' + repr(dataa)#int(data[0])
#print i
#print a[i]
#r = a[i]+1
#print r
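# Hedged example invocation (the capture name is hypothetical); note that -i
# takes the file name without the ".wav" extension, which the script appends:
#
#     python katanaDecoder.py -g -b 4800 -B 80 -i capture_AF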
| StephenCarlson/KatanaLRS-code | Decoder/katanaDecoder.py | Python | gpl-2.0 | 7,912 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
__all__ = [
'register_api_signal_handlers'
]
def register_api_signal_handlers(handler_func):
signal.signal(signal.SIGINT, handler_func)
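# Hedged usage sketch (the handler below is hypothetical):
#
#     def _graceful_shutdown(signum, frame):
#         sys.exit(0)
#
#     register_api_signal_handlers(handler_func=_graceful_shutdown)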
| dennybaa/st2 | st2api/st2api/signal_handlers.py | Python | apache-2.0 | 941 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python.platform import logging
from tensorflow.python.training import summary_io
def assert_summary(expected_tags, expected_simple_values, summary_proto):
"""Asserts summary contains the specified tags and values.
Args:
expected_tags: All tags in summary.
    expected_simple_values: Simple values for some tags.
summary_proto: Summary to validate.
Raises:
ValueError: if expectations are not met.
"""
actual_tags = set()
for value in summary_proto.value:
actual_tags.add(value.tag)
if value.tag in expected_simple_values:
expected = expected_simple_values[value.tag]
actual = value.simple_value
np.testing.assert_almost_equal(
actual, expected, decimal=2, err_msg=value.tag)
expected_tags = set(expected_tags)
if expected_tags != actual_tags:
raise ValueError('Expected tags %s, got %s.' % (expected_tags, actual_tags))
def to_summary_proto(summary_str):
"""Create summary based on latest stats.
Args:
summary_str: Serialized summary.
Returns:
summary_pb2.Summary.
Raises:
ValueError: if tensor is not a valid summary tensor.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
return summary
# TODO(ptucker): Move to a non-test package?
def latest_event_file(base_dir):
"""Find latest event file in `base_dir`.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
File path, or `None` if none exists.
"""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
return sorted(file_paths)[-1] if file_paths else None
def latest_events(base_dir):
"""Parse events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
Iterable of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
file_path = latest_event_file(base_dir)
return summary_io.summary_iterator(file_path) if file_path else []
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
List of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
return [e for e in latest_events(base_dir) if e.HasField('summary')]
def simple_values_from_events(events, tags):
"""Parse summaries from events with simple_value.
Args:
events: List of tensorflow.Event protos.
tags: List of string event tags corresponding to simple_value summaries.
Returns:
dict of tag:value.
Raises:
ValueError: if a summary with a specified tag does not contain simple_value.
"""
step_by_tag = {}
value_by_tag = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
tag = v.tag
if tag in tags:
if not v.HasField('simple_value'):
raise ValueError('Summary for %s is not a simple_value.' % tag)
# The events are mostly sorted in step order, but we explicitly check
# just in case.
if tag not in step_by_tag or e.step > step_by_tag[tag]:
step_by_tag[tag] = e.step
value_by_tag[tag] = v.simple_value
return value_by_tag
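# Hedged usage sketch (the log directory and tag names are hypothetical):
#
#     events = latest_summaries('/tmp/train_logs')
#     values = simple_values_from_events(events, ['loss', 'accuracy'])
#     # -> e.g. {'loss': 0.42, 'accuracy': 0.91}, using the latest step per tag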
| awni/tensorflow | tensorflow/contrib/testing/python/framework/test_util.py | Python | apache-2.0 | 4,163 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Policy model (v015_film_transformer_gpt)
- Contains the policy model (v015_film_transformer_gpt), to evaluate the best actions given a state
"""
import logging
from diplomacy_research.models.policy.order_based import OrderBasedPolicyModel, load_args as load_parent_args
from diplomacy_research.models.state_space import get_adjacency_matrix, NB_SUPPLY_CENTERS, NB_POWERS, NB_NODES, \
ORDER_VOCABULARY_SIZE, MAX_CANDIDATES, PAD_ID, NB_SEASONS, NB_PREV_ORDERS, NB_ORDERS_FEATURES
from diplomacy_research.settings import NB_PARTITIONS
# Constants
LOGGER = logging.getLogger(__name__)
def load_args():
""" Load possible arguments
:return: A list of tuple (arg_type, arg_name, arg_value, arg_desc)
"""
return load_parent_args() + [
# Hyperparameters
('int', 'nb_graph_conv', 12, 'Number of Graph Conv Layer'),
('int', 'power_emb_size', 60, 'Power embedding size.'),
('int', 'season_emb_size', 20, 'Season embedding size'),
('int', 'gcn_size', 120, 'Size of graph convolution outputs.'),
('int', 'attn_size', 120, 'LSTM decoder attention size.'),
('int', 'trsf_ctxt_size', 1000 + NB_SUPPLY_CENTERS, 'The size of the context window.'),
('int', 'trsf_emb_size', 80, 'The size of the embedding for the vocabulary and the context'),
('int', 'trsf_nb_heads', 2, 'The number of attention heads to use for transformer'),
('int', 'trsf_nb_layers', 4, 'The number of layers to use for transformer')
]
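# Hedged illustration (plain Python): each tuple above is
# (arg_type, arg_name, arg_value, arg_desc), so the defaults can be collapsed
# into a mapping for quick inspection, e.g.
#
#     defaults = {name: value for _, name, value, _ in load_args()}
#     # defaults['trsf_nb_layers'] == 4, defaults['gcn_size'] == 120, ...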
class PolicyModel(OrderBasedPolicyModel):
""" Policy Model """
def _encode_board(self, board_state, name, reuse=None):
""" Encodes a board state or prev orders state
:param board_state: The board state / prev orders state to encode - (batch, NB_NODES, initial_features)
:param name: The name to use for the encoding
:param reuse: Whether to reuse or not the weights from another encoding operation
:return: The encoded board state / prev_orders state
"""
from diplomacy_research.utils.tensorflow import tf
from diplomacy_research.models.layers.graph_convolution import film_gcn_res_block, preprocess_adjacency
# Quick function to retrieve hparams and placeholders and function shorthands
hps = lambda hparam_name: self.hparams[hparam_name]
pholder = lambda placeholder_name: self.placeholders[placeholder_name]
relu = tf.nn.relu
# Getting film gammas and betas
film_gammas = self.outputs['_%s_film_gammas' % name]
film_betas = self.outputs['_%s_film_betas' % name]
# Computing norm adjacency
norm_adjacency = preprocess_adjacency(get_adjacency_matrix())
norm_adjacency = tf.tile(tf.expand_dims(norm_adjacency, axis=0), [tf.shape(board_state)[0], 1, 1])
# Building scope
scope = tf.VariableScope(name='policy/%s' % name, reuse=reuse)
with tf.variable_scope(scope):
# Adding noise to break symmetry
board_state = board_state + tf.random_normal(tf.shape(board_state), stddev=0.01)
graph_conv = tf.layers.Dense(units=hps('gcn_size'), activation=relu)(board_state)
# First and intermediate layers
for layer_idx in range(hps('nb_graph_conv') - 1):
graph_conv = film_gcn_res_block(inputs=graph_conv, # (b, NB_NODES, gcn_size)
gamma=film_gammas[layer_idx],
beta=film_betas[layer_idx],
gcn_out_dim=hps('gcn_size'),
norm_adjacency=norm_adjacency,
is_training=pholder('is_training'),
residual=True)
# Last layer
graph_conv = film_gcn_res_block(inputs=graph_conv, # (b, NB_NODES, final_size)
gamma=film_gammas[-1],
beta=film_betas[-1],
gcn_out_dim=hps('attn_size') // 2,
norm_adjacency=norm_adjacency,
is_training=pholder('is_training'),
residual=False)
# Returning
return graph_conv
def _get_board_state_conv(self, board_0yr_conv, is_training, prev_ord_conv=None):
""" Computes the board state conv to use as the attention target (memory)
:param board_0yr_conv: The board state encoding of the current (present) board state)
:param is_training: Indicate whether we are doing training or inference
:param prev_ord_conv: Optional. The encoding of the previous orders state.
:return: The board state conv to use as the attention target (memory)
"""
from diplomacy_research.utils.tensorflow import tf
assert prev_ord_conv is not None, 'This model requires a prev_ord_conv argument'
return tf.concat([board_0yr_conv, prev_ord_conv], axis=-1)
def _build_policy_initial(self):
""" Builds the policy model (initial step) """
from diplomacy_research.utils.tensorflow import tf
from diplomacy_research.models.layers.initializers import uniform
from diplomacy_research.utils.tensorflow import pad_axis, to_int32, to_float, to_bool
if not self.placeholders:
self.placeholders = self.get_placeholders()
# Quick function to retrieve hparams and placeholders and function shorthands
hps = lambda hparam_name: self.hparams[hparam_name]
pholder = lambda placeholder_name: self.placeholders[placeholder_name]
# Training loop
with tf.variable_scope('policy', reuse=tf.AUTO_REUSE):
with tf.device(self.cluster_config.worker_device if self.cluster_config else None):
# Features
board_state = to_float(self.features['board_state']) # tf.flt32 - (b, NB_NODES, NB_FEATURES)
board_alignments = to_float(self.features['board_alignments']) # (b, NB_NODES * len)
prev_orders_state = to_float(self.features['prev_orders_state']) # (b, NB_PRV_OD, NB_ND, NB_OD_FT)
decoder_inputs = self.features['decoder_inputs'] # tf.int32 - (b, <= 1 + NB_SCS)
decoder_lengths = self.features['decoder_lengths'] # tf.int32 - (b,)
candidates = self.features['candidates'] # tf.int32 - (b, nb_locs * MAX_CANDIDATES)
current_power = self.features['current_power'] # tf.int32 - (b,)
current_season = self.features['current_season'] # tf.int32 - (b,)
dropout_rates = self.features['dropout_rate'] # tf.flt32 - (b,)
# Batch size
batch_size = tf.shape(board_state)[0]
# Reshaping board alignments
board_alignments = tf.reshape(board_alignments, [batch_size, -1, NB_NODES])
board_alignments /= tf.math.maximum(1., tf.reduce_sum(board_alignments, axis=-1, keepdims=True))
# Overriding dropout_rates if pholder('dropout_rate') > 0
dropout_rates = tf.cond(tf.greater(pholder('dropout_rate'), 0.),
true_fn=lambda: tf.zeros_like(dropout_rates) + pholder('dropout_rate'),
false_fn=lambda: dropout_rates)
# Padding decoder_inputs and candidates
board_alignments = pad_axis(board_alignments, axis=1, min_size=tf.reduce_max(decoder_lengths))
decoder_inputs = pad_axis(decoder_inputs, axis=-1, min_size=2)
candidates = pad_axis(candidates, axis=-1, min_size=MAX_CANDIDATES)
# Making sure all RNN lengths are at least 1
# No need to trim, because the fields are variable length
raw_decoder_lengths = decoder_lengths
decoder_lengths = tf.math.maximum(1, decoder_lengths)
# Placeholders
decoder_type = tf.reduce_max(pholder('decoder_type'))
is_training = pholder('is_training')
# Reshaping candidates
candidates = tf.reshape(candidates, [batch_size, -1, MAX_CANDIDATES])
candidates = candidates[:, :tf.reduce_max(decoder_lengths), :] # tf.int32 - (b, nb_locs, MAX_CAN)
# Computing FiLM Gammas and Betas
with tf.variable_scope('film_scope'):
power_embedding = uniform(name='power_embedding',
shape=[NB_POWERS, hps('power_emb_size')],
scale=1.)
current_power_mask = tf.one_hot(current_power, NB_POWERS, dtype=tf.float32)
current_power_embedding = tf.reduce_sum(power_embedding[None]
* current_power_mask[:, :, None], axis=1) # (b, power_emb)
film_embedding_input = current_power_embedding
# Also conditioning on current_season
season_embedding = uniform(name='season_embedding',
shape=[NB_SEASONS, hps('season_emb_size')],
scale=1.)
current_season_mask = tf.one_hot(current_season, NB_SEASONS, dtype=tf.float32)
current_season_embedding = tf.reduce_sum(season_embedding[None] # (b,season_emb)
* current_season_mask[:, :, None], axis=1)
film_embedding_input = tf.concat([film_embedding_input, current_season_embedding], axis=1)
film_output_dims = [hps('gcn_size')] * (hps('nb_graph_conv') - 1) + [hps('attn_size') // 2]
# For board_state
board_film_weights = tf.layers.Dense(units=2 * sum(film_output_dims), # (b, 1, 750)
use_bias=True,
activation=None)(film_embedding_input)[:, None, :]
board_film_gammas, board_film_betas = tf.split(board_film_weights, 2, axis=2) # (b, 1, 750)
board_film_gammas = tf.split(board_film_gammas, film_output_dims, axis=2)
board_film_betas = tf.split(board_film_betas, film_output_dims, axis=2)
# For prev_orders
prev_ord_film_weights = tf.layers.Dense(units=2 * sum(film_output_dims), # (b, 1, 750)
use_bias=True,
activation=None)(film_embedding_input)[:, None, :]
prev_ord_film_weights = tf.tile(prev_ord_film_weights, [NB_PREV_ORDERS, 1, 1]) # (n_pr, 1, 750)
prev_ord_film_gammas, prev_ord_film_betas = tf.split(prev_ord_film_weights, 2, axis=2)
prev_ord_film_gammas = tf.split(prev_ord_film_gammas, film_output_dims, axis=2)
prev_ord_film_betas = tf.split(prev_ord_film_betas, film_output_dims, axis=2)
# Storing as temporary output
self.add_output('_board_state_conv_film_gammas', board_film_gammas)
self.add_output('_board_state_conv_film_betas', board_film_betas)
self.add_output('_prev_orders_conv_film_gammas', prev_ord_film_gammas)
self.add_output('_prev_orders_conv_film_betas', prev_ord_film_betas)
# Creating graph convolution
with tf.variable_scope('graph_conv_scope'):
assert hps('nb_graph_conv') >= 2
assert hps('attn_size') % 2 == 0
# Encoding board state
board_state_0yr_conv = self.encode_board(board_state, name='board_state_conv')
# Encoding prev_orders
prev_orders_state = tf.reshape(prev_orders_state, [batch_size * NB_PREV_ORDERS,
NB_NODES,
NB_ORDERS_FEATURES])
prev_ord_conv = self.encode_board(prev_orders_state, name='prev_orders_conv')
# Splitting back into (b, nb_prev, NB_NODES, attn_size / 2)
# Reducing the prev ord conv using avg
prev_ord_conv = tf.reshape(prev_ord_conv, [batch_size,
NB_PREV_ORDERS,
NB_NODES,
hps('attn_size') // 2])
prev_ord_conv = tf.reduce_mean(prev_ord_conv, axis=1)
# Concatenating the current board conv with the prev ord conv
# The final board_state_conv should be of dimension (b, NB_NODE, attn_size)
board_state_conv = self.get_board_state_conv(board_state_0yr_conv, is_training, prev_ord_conv)
# Creating policy order embedding vector (to embed order ix)
# Embeddings needs to be cached locally on the worker, otherwise TF can't compute their gradients
with tf.variable_scope('order_embedding_scope'):
caching_device = self.cluster_config.caching_device if self.cluster_config else None
partitioner = tf.fixed_size_partitioner(NB_PARTITIONS) if hps('use_partitioner') else None
order_embedding = uniform(name='order_embedding',
shape=[ORDER_VOCABULARY_SIZE, hps('trsf_emb_size')],
scale=1.,
partitioner=partitioner,
caching_device=caching_device)
# Creating candidate embedding
with tf.variable_scope('candidate_embedding_scope'):
# embedding: (order_vocab_size, 64)
caching_device = self.cluster_config.caching_device if self.cluster_config else None
partitioner = tf.fixed_size_partitioner(NB_PARTITIONS) if hps('use_partitioner') else None
candidate_embedding = uniform(name='candidate_embedding',
shape=[ORDER_VOCABULARY_SIZE, hps('trsf_emb_size') + 1],
scale=1.,
partitioner=partitioner,
caching_device=caching_device)
# Trimming to the maximum number of candidates
candidate_lengths = tf.reduce_sum(to_int32(tf.math.greater(candidates, PAD_ID)), -1) # int32 - (b,)
max_candidate_length = tf.math.maximum(1, tf.reduce_max(candidate_lengths))
candidates = candidates[:, :, :max_candidate_length]
# Building output tags
outputs = {'batch_size': batch_size,
'board_alignments': board_alignments,
'decoder_inputs': decoder_inputs,
'decoder_type': decoder_type,
'raw_decoder_lengths': raw_decoder_lengths,
'decoder_lengths': decoder_lengths,
'board_state_conv': board_state_conv,
'board_state_0yr_conv': board_state_0yr_conv,
'prev_ord_conv': prev_ord_conv,
'order_embedding': order_embedding,
'candidate_embedding': candidate_embedding,
'candidates': candidates,
'max_candidate_length': max_candidate_length,
'in_retreat_phase': tf.math.logical_and( # 1) board not empty, 2) disl. units present
tf.reduce_sum(board_state[:], axis=[1, 2]) > 0,
tf.math.logical_not(to_bool(tf.reduce_min(board_state[:, :, 23], -1))))}
# Adding to graph
self.add_meta_information(outputs)
def _build_policy_final(self):
""" Builds the policy model (final step) """
from diplomacy_research.utils.tensorflow import tf
from diplomacy_research.models.layers.attention import StaticAttentionWrapper
from diplomacy_research.models.layers.beam_decoder import DiverseBeamSearchDecoder
from diplomacy_research.models.layers.decoder import CandidateBasicDecoder
from diplomacy_research.models.layers.dropout import SeededDropoutWrapper
from diplomacy_research.models.layers.dynamic_decode import dynamic_decode
from diplomacy_research.models.layers.initializers import uniform
from diplomacy_research.models.layers.transformer import TransformerCell
from diplomacy_research.models.layers.wrappers import IdentityCell
from diplomacy_research.models.policy.order_based.helper import CustomHelper, CustomBeamHelper
from diplomacy_research.utils.tensorflow import cross_entropy, sequence_loss, to_int32, to_float, get_tile_beam
# Quick function to retrieve hparams and placeholders and function shorthands
hps = lambda hparam_name: self.hparams[hparam_name]
pholder = lambda placeholder_name: self.placeholders[placeholder_name]
# Training loop
with tf.variable_scope('policy', reuse=tf.AUTO_REUSE):
with tf.device(self.cluster_config.worker_device if self.cluster_config else None):
# Features
player_seeds = self.features['player_seed'] # tf.int32 - (b,)
temperature = self.features['temperature'] # tf,flt32 - (b,)
dropout_rates = self.features['dropout_rate'] # tf.flt32 - (b,)
# Placeholders
stop_gradient_all = pholder('stop_gradient_all')
# Outputs (from initial steps)
batch_size = self.outputs['batch_size']
board_alignments = self.outputs['board_alignments']
decoder_inputs = self.outputs['decoder_inputs']
decoder_type = self.outputs['decoder_type']
raw_decoder_lengths = self.outputs['raw_decoder_lengths']
decoder_lengths = self.outputs['decoder_lengths']
board_state_conv = self.outputs['board_state_conv']
order_embedding = self.outputs['order_embedding']
candidate_embedding = self.outputs['candidate_embedding']
candidates = self.outputs['candidates']
max_candidate_length = self.outputs['max_candidate_length']
# Creating a smaller position embedding if it's not present in the outputs
# Embeddings needs to be cached locally on the worker, otherwise TF can't compute their gradients
with tf.variable_scope('position_embedding_scope'):
caching_device = self.cluster_config.caching_device if self.cluster_config else None
position_embedding = uniform(name='position_embedding',
shape=[NB_SUPPLY_CENTERS, hps('trsf_emb_size')],
scale=1.,
caching_device=caching_device)
# Past Attentions
past_attentions, message_lengths = None, None
# --- Decoding ---
with tf.variable_scope('decoder_scope', reuse=tf.AUTO_REUSE):
feeder_cell = IdentityCell(output_size=hps('trsf_emb_size') + hps('attn_size'))
# ======== Regular Decoding ========
# Applying Dropout to input, attention and output
feeder_cell = SeededDropoutWrapper(cell=feeder_cell,
seeds=player_seeds,
input_keep_probs=1. - dropout_rates,
variational_recurrent=hps('use_v_dropout'),
input_size=hps('trsf_emb_size') + hps('attn_size'),
dtype=tf.float32)
# Apply attention over orderable location at each position
feeder_cell = StaticAttentionWrapper(cell=feeder_cell,
memory=board_state_conv,
alignments=board_alignments,
sequence_length=raw_decoder_lengths,
output_attention=False)
# Setting initial state
feeder_cell_init_state = feeder_cell.zero_state(batch_size, tf.float32)
# ---- Helper ----
helper = CustomHelper(decoder_type=decoder_type,
inputs=decoder_inputs[:, :-1],
order_embedding=order_embedding,
candidate_embedding=candidate_embedding,
sequence_length=decoder_lengths,
candidates=candidates,
time_major=False,
softmax_temperature=temperature)
# ---- Transformer Cell ----
trsf_scope = tf.VariableScope(name='policy/training_scope/transformer', reuse=False)
transformer_cell = TransformerCell(nb_layers=hps('trsf_nb_layers'),
nb_heads=hps('trsf_nb_heads'),
word_embedding=order_embedding,
position_embedding=position_embedding,
batch_size=batch_size,
feeder_cell=feeder_cell,
feeder_init_state=feeder_cell_init_state,
past_attentions=past_attentions,
past_seq_lengths=message_lengths,
scope=trsf_scope,
name='transformer')
transformer_cell_init_state = transformer_cell.zero_state(batch_size, tf.float32)
# ---- Invariants ----
invariants_map = {
'past_attentions': tf.TensorShape([None, # batch size
hps('trsf_nb_layers'), # nb_layers
2, # key, value
hps('trsf_nb_heads'), # nb heads
None, # Seq len
hps('trsf_emb_size') // hps('trsf_nb_heads')])} # Head size
# ---- Decoder ----
sequence_mask = tf.sequence_mask(raw_decoder_lengths,
maxlen=tf.reduce_max(decoder_lengths),
dtype=tf.float32)
maximum_iterations = NB_SUPPLY_CENTERS
model_decoder = CandidateBasicDecoder(cell=transformer_cell,
helper=helper,
initial_state=transformer_cell_init_state,
max_candidate_length=max_candidate_length,
extract_state=True)
training_results, _, _ = dynamic_decode(decoder=model_decoder,
output_time_major=False,
maximum_iterations=maximum_iterations,
invariants_map=invariants_map,
swap_memory=hps('swap_memory'))
global_vars_after_decoder = set(tf.global_variables())
# ======== Beam Search Decoding ========
tile_beam = get_tile_beam(hps('beam_width'))
beam_feeder_cell = IdentityCell(output_size=hps('trsf_emb_size') + hps('attn_size'))
# Applying Dropout to input, attention and output
beam_feeder_cell = SeededDropoutWrapper(cell=beam_feeder_cell,
seeds=tile_beam(player_seeds),
input_keep_probs=tile_beam(1. - dropout_rates),
variational_recurrent=hps('use_v_dropout'),
input_size=hps('trsf_emb_size') + hps('attn_size'),
dtype=tf.float32)
# Apply attention over orderable location at each position
beam_feeder_cell = StaticAttentionWrapper(cell=beam_feeder_cell,
memory=tile_beam(board_state_conv),
alignments=tile_beam(board_alignments),
sequence_length=tile_beam(raw_decoder_lengths),
output_attention=False)
# Setting initial state
beam_feeder_init_state = beam_feeder_cell.zero_state(batch_size * hps('beam_width'), tf.float32)
# ---- Transformer Cell ----
trsf_scope = tf.VariableScope(name='policy/training_scope/transformer', reuse=True)
beam_trsf_cell = TransformerCell(nb_layers=hps('trsf_nb_layers'),
nb_heads=hps('trsf_nb_heads'),
word_embedding=order_embedding,
position_embedding=position_embedding,
batch_size=batch_size * hps('beam_width'),
feeder_cell=beam_feeder_cell,
feeder_init_state=beam_feeder_init_state,
past_attentions=tile_beam(past_attentions),
past_seq_lengths=tile_beam(message_lengths),
scope=trsf_scope,
name='transformer')
beam_trsf_cell_init_state = beam_trsf_cell.zero_state(batch_size * hps('beam_width'), tf.float32)
# ---- Beam Helper and Decoder ----
beam_helper = CustomBeamHelper(cell=beam_trsf_cell,
order_embedding=order_embedding,
candidate_embedding=candidate_embedding,
candidates=candidates,
sequence_length=decoder_lengths,
initial_state=beam_trsf_cell_init_state,
beam_width=hps('beam_width'))
beam_decoder = DiverseBeamSearchDecoder(beam_helper=beam_helper,
sequence_length=decoder_lengths,
nb_groups=hps('beam_groups'))
beam_results, beam_state, _ = dynamic_decode(decoder=beam_decoder,
output_time_major=False,
maximum_iterations=maximum_iterations,
invariants_map=invariants_map,
swap_memory=hps('swap_memory'))
# Making sure we haven't created new global variables
assert not set(tf.global_variables()) - global_vars_after_decoder, 'New global vars were created'
# Processing results
candidate_logits = training_results.rnn_output # (b, dec_len, max_cand_len)
logits_length = tf.shape(candidate_logits)[1] # dec_len
decoder_target = decoder_inputs[:, 1:1 + logits_length]
# Selected tokens are the token that was actually fed at the next position
sample_mask = to_float(tf.math.equal(training_results.sample_id, -1))
selected_tokens = to_int32(
sequence_mask * (sample_mask * to_float(decoder_target)
+ (1. - sample_mask) * to_float(training_results.sample_id)))
# Computing ArgMax tokens
argmax_id = to_int32(tf.argmax(candidate_logits, axis=-1))
max_nb_candidate = tf.shape(candidate_logits)[2]
candidate_ids = \
tf.reduce_sum(tf.one_hot(argmax_id, max_nb_candidate, dtype=tf.int32) * candidates, axis=-1)
argmax_tokens = to_int32(to_float(candidate_ids) * sequence_mask)
# Extracting the position of the target candidate
tokens_labels = tf.argmax(to_int32(tf.math.equal(selected_tokens[:, :, None], candidates)), -1)
target_labels = tf.argmax(to_int32(tf.math.equal(decoder_target[:, :, None], candidates)), -1)
# Log Probs
log_probs = -1. * cross_entropy(logits=candidate_logits, labels=tokens_labels) * sequence_mask
# Computing policy loss
with tf.variable_scope('policy_loss'):
policy_loss = sequence_loss(logits=candidate_logits,
targets=target_labels,
weights=sequence_mask,
average_across_batch=True,
average_across_timesteps=True)
policy_loss = tf.cond(stop_gradient_all,
lambda: tf.stop_gradient(policy_loss), # pylint: disable=cell-var-from-loop
lambda: policy_loss) # pylint: disable=cell-var-from-loop
# Building output tags
outputs = {'tag/policy/order_based/v015_film_transformer_gpt': True,
'targets': decoder_inputs[:, 1:],
'selected_tokens': selected_tokens,
'argmax_tokens': argmax_tokens,
'logits': candidate_logits,
'log_probs': log_probs,
'beam_tokens': tf.transpose(beam_results.predicted_ids, perm=[0, 2, 1]), # [batch, beam, steps]
'beam_log_probs': beam_state.log_probs,
'rnn_states': training_results.rnn_state,
'policy_loss': policy_loss,
'draw_prob': self.outputs.get('draw_prob', tf.zeros_like(self.features['draw_target'])),
'learning_rate': self.learning_rate}
# Adding features, placeholders and outputs to graph
self.add_meta_information(outputs)
| diplomacy/research | diplomacy_research/models/policy/order_based/v015_film_transformer_gpt/model.py | Python | mit | 34,103 |
# META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
def is_element_selected(session, element_id):
return session.transport.send(
"GET", "session/{session_id}/element/{element_id}/selected".format(
session_id=session.session_id,
element_id=element_id))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input id=foo type=checkbox checked>")
element = session.find.css("#foo", all=False)
element.send_keys("foo")
create_dialog(dialog_type, text=dialog_type)
response = is_element_selected(session, element.id)
assert_success(response, True)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input id=foo type=checkbox checked>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = is_element_selected(session, element.id)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input id=foo type=checkbox checked>")
element = session.find.css("#foo", all=False)
create_dialog(dialog_type, text=dialog_type)
response = is_element_selected(session, element.id)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| chromium/chromium | third_party/blink/web_tests/external/wpt/webdriver/tests/is_element_selected/user_prompts.py | Python | bsd-3-clause | 4,102 |
# -*- coding: utf-8 -*-
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
from . import wizard
| linkitspa/l10n-italy | l10n_it_fatturapa_out_triple_discount/__init__.py | Python | agpl-3.0 | 111 |
from paver.easy import * # for sh()
import os
@task
def test():
"""Run unit tests."""
import unittest
import tests
suite = unittest.defaultTestLoader.loadTestsFromModule(tests)
unittest.TextTestRunner().run(suite)
@task
def revbuild():
"""Increment the build number."""
import procgame
version_info = procgame.__version_info__
version_info = version_info[:-1] + (int(version_info[-1]) + 1,)
vfile = open('./procgame/_version.py', 'w')
vfile.write('# Generated by: paver revbuild\n')
vfile.write('__version_info__ = %s\n' % (repr(version_info)))
vfile.close()
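# Hedged usage sketch: with paver installed, the tasks above are run from the
# project root as shell commands, e.g.
#
#     paver test # run the unit tests loaded from the tests module
#     paver revbuild # bump the last component of procgame.__version_info__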
| mjocean/PyProcGameHD-SkeletonGame | pavement.py | Python | mit | 576 |
'''
Created on Feb 26, 2013
@package: superdesk security
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides the ally core setup patch.
'''
from .service import assemblyGateways, updateAssemblyGateways, \
registerMethodOverride, updateAssemblyActiveRights, assemblyActiveRights, \
registerDefaultRights, userValueForFilter
from ally.container import support, ioc
import logging
# --------------------------------------------------------------------
log = logging.getLogger(__name__)
# --------------------------------------------------------------------
try: from __setup__ import ally_core
except ImportError: log.info('No ally core component available, thus cannot populate processors')
else:
ally_core = ally_core # Just to avoid the import warning
# ----------------------------------------------------------------
from acl.core.impl.processor import resource_node_associate, resource_model_filter, resource_alternate, resource_gateway
iterateResourcePermissions = checkResourceAvailableRights = modelFiltersForPermissions = \
authenticatedForPermissions = alternateNavigationPermissions = gatewaysFromPermissions = support.notCreated
support.createEntitySetup(resource_node_associate, resource_model_filter, resource_alternate, resource_gateway)
# --------------------------------------------------------------------
@ioc.after(updateAssemblyGateways)
def updateAssemblyGatewaysForResources():
assemblyGateways().add(iterateResourcePermissions(), authenticatedForPermissions(), userValueForFilter(),
alternateNavigationPermissions(), gatewaysFromPermissions(), before=registerMethodOverride())
@ioc.after(updateAssemblyActiveRights)
def updateAssemblyActiveRightsForResources():
assemblyActiveRights().add(checkResourceAvailableRights(), after=registerDefaultRights())
| superdesk/Live-Blog | plugins/superdesk-security/__plugin__/superdesk_security/patch_ally_core.py | Python | agpl-3.0 | 1,986 |
import json
import os
import sys
import getopt
import subprocess
import re
import copy
import hashlib
def parseCmakeBoolean(var):
rejected_strings = ['false','off','no']
if var.lower() in rejected_strings:
return False;
else:
return True;
def getBranchName(directory):
"""Returns the name of the current git branch"""
return subprocess.check_output(["git","rev-parse","--abbrev-ref","HEAD"],cwd=directory).strip()
def getRemotes(directory):
"""Returns list of remote git repositories"""
gitRemoteOutput = subprocess.check_output(['git','remote','-v'],cwd=directory)
remotes = []
for line in gitRemoteOutput.splitlines():
if '(fetch)' in line:
splitLine = line.split();
remotes.append({'name': splitLine[0].strip(), 'url': splitLine[1].strip()})
return remotes
def gitLogValue(format,directory):
"""Returns git log value specified by format"""
return subprocess.check_output(["git","log","-1","--pretty=format:%"+format],cwd=directory).strip()
def getAllFilesWithExtension(directory,extension):
"""Recursively return a list of all files in directory with specified extension"""
filesWithExtension = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(extension):
filesWithExtension.append(os.path.realpath(os.path.join(root, file)))
return filesWithExtension
def getSourcePathFromGcovFile(gcovFilename):
"""Return the source path corresponding to a .gcov file"""
gcovPath,gcovFilenameWithExtension = os.path.split(gcovFilename)
srcFilename = re.sub(".gcov$","",gcovFilenameWithExtension)
return re.sub("#","/",srcFilename)
def main(argv):
arguments = ['COVERAGE_SRCS_FILE=','COVERALLS_OUTPUT_FILE=','COV_PATH=','PROJECT_ROOT=','TRAVISCI=']
COVERAGE_SRCS_FILE=None
COVERALLS_OUTPUT_FILE=None
COV_PATH=None
PROJECT_ROOT=None
TRAVISCI=None
optlist, args = getopt.getopt(argv,'',arguments)
for o, a in optlist:
if o == "--COVERAGE_SRCS_FILE":
COVERAGE_SRCS_FILE=a
elif o == "--COVERALLS_OUTPUT_FILE":
COVERALLS_OUTPUT_FILE=a
elif o == "--COV_PATH":
COV_PATH=a
elif o == "--PROJECT_ROOT":
PROJECT_ROOT=a
elif o == "--TRAVISCI":
TRAVISCI=a
else:
assert False, "unhandled option"
if COVERAGE_SRCS_FILE == None:
assert False, "COVERAGE_SRCS_FILE is not defined"
if COVERALLS_OUTPUT_FILE==None:
assert False, "COVERALLS_OUTPUT_FILE is not defined"
if COV_PATH==None:
assert False, "COV_PATH is not defined"
if PROJECT_ROOT==None:
assert False, "PROJECT_ROOT is not defined"
gcdaAllFiles = getAllFilesWithExtension(COV_PATH,".gcda")
for gcdaFile in gcdaAllFiles:
gcdaDirectory = os.path.dirname(gcdaFile)
subprocess.check_call(["gcov","-p","-o",gcdaDirectory,gcdaFile],cwd=COV_PATH)
gcovAllFiles = getAllFilesWithExtension(COV_PATH,".gcov")
sourcesToCheck = [line.strip() for line in open(COVERAGE_SRCS_FILE, 'r')]
gcovCheckedFiles = []
uncheckedSources = sourcesToCheck
for gcovFile in gcovAllFiles:
sourceWithPath = getSourcePathFromGcovFile(gcovFile)
if sourceWithPath in sourcesToCheck:
print "YES: ",sourceWithPath.strip()," WAS FOUND"
gcovCheckedFiles.append(gcovFile)
uncheckedSources.remove(sourceWithPath)
else:
print "NO: ",sourceWithPath.strip()," WAS NOT FOUND"
coverageList = []
for gcovFilename in gcovCheckedFiles:
fileCoverage = {}
#get name for json file
sourceWithPath = getSourcePathFromGcovFile(gcovFilename)
fileCoverage['name'] = os.path.relpath(sourceWithPath,PROJECT_ROOT)
print "Generating JSON file for "+fileCoverage['name']
fileCoverage['source_digest'] = hashlib.md5(open(sourceWithPath, 'rb').read()).hexdigest()
lineCoverage = []
gcovFile = open(gcovFilename,'r')
for line in gcovFile:
line = [i.strip() for i in line.split(':')]
lineNumber = int(line[1])
if lineNumber != 0:
if line[0] == '#####':
lineCoverage.append(0)
elif line[0] == '=====':
lineCoverage.append(0)
elif line[0] == '-':
lineCoverage.append(None)
else:
lineCoverage.append(int(line[0]))
if lineNumber != len(lineCoverage):
                    raise RuntimeError('line_number does not match len(array)')
gcovFile.close()
fileCoverage['coverage'] = lineCoverage
coverageList.append(copy.deepcopy(fileCoverage))
for uncheckedFilename in uncheckedSources:
fileCoverage = {}
fileCoverage['name'] = os.path.relpath(uncheckedFilename,PROJECT_ROOT)
fileCoverage['source_digest'] = hashlib.md5(open(uncheckedFilename, 'rb').read()).hexdigest()
lineCoverage = []
uncheckedFile = open(uncheckedFilename,'r')
for line in uncheckedFile:
lineCoverage.append(0)
uncheckedFile.close()
fileCoverage['coverage'] = lineCoverage
coverageList.append(copy.deepcopy(fileCoverage))
coverallsOutput = {}
coverallsOutput['source_files'] = coverageList
if parseCmakeBoolean(TRAVISCI):
print "Generating for travis-ci"
coverallsOutput['service_name'] = 'travis-ci'
coverallsOutput['service_job_id'] = os.environ.get('TRAVIS_JOB_ID')
else:
print "Generating for other"
coverallsOutput['repo_token'] = os.environ.get('COVERALLS_REPO_TOKEN')
head = {'id':gitLogValue('H',PROJECT_ROOT),'author_name':gitLogValue('an',PROJECT_ROOT), \
'author_email':gitLogValue('ae',PROJECT_ROOT),'committer_name':gitLogValue('cn',PROJECT_ROOT), \
'committer_email':gitLogValue('ce',PROJECT_ROOT), 'message':gitLogValue('B',PROJECT_ROOT)}
gitDict = {'head':head,'branch':getBranchName(PROJECT_ROOT),'remotes':getRemotes(COV_PATH)}
coverallsOutput['git'] = gitDict
with open(COVERALLS_OUTPUT_FILE, 'w') as outfile:
json.dump(coverallsOutput,outfile,indent=4)
if __name__ == "__main__":
main(sys.argv[1:])
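# Hedged example invocation (all paths are hypothetical):
#
#     python CoverallsGenerateGcov.py --COVERAGE_SRCS_FILE=build/srcs.txt \
#         --COVERALLS_OUTPUT_FILE=build/coveralls.json --COV_PATH=build \
#         --PROJECT_ROOT=. --TRAVISCI=true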
| quantumsteve/morebin | cmake/CoverallsGenerateGcov.py | Python | mit | 6,396 |
# coding=utf8
'''
    User models backed by MongoDB.
'''
__AUTHOR__ = 'seraphln'
from mongoengine.django.auth import User
from mongoengine import Document, IntField, StringField, ReferenceField
class UserProfile(Document):
    ''' User profile model '''
user = ReferenceField(User)
    last_ip = StringField(max_length=40) # sized generously in case IPv6 addresses appear
    contact_num = IntField() # can hold a QQ number or similar contact id for now
    tel_num = StringField() # telephone number
    note = StringField(max_length=300) # remarks / notes
    oem_source = StringField(max_length=40) # OEM / customization source
    level = StringField() # user level
    invitor = ReferenceField(User, dbref=True) # the inviting user
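# Hedged usage sketch (the username and field values are made up):
#
#     user = User.objects.get(username='alice')
#     UserProfile(user=user, last_ip='127.0.0.1', level='1', note='test account').save()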
| seraphlnWu/snaker | snaker/account/models.py | Python | gpl-2.0 | 690 |
#!/usr/bin/env python2.7
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver secure platforms ci_mac tracing')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, False, ['windows', 'linux', 'mac', 'posix'], True, False)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'])
# maps fixture name to whether it requires the security library
END2END_FIXTURES = {
'h2_compress': default_unsecure_fixture_options,
'h2_census': default_unsecure_fixture_options,
'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
'h2_full': default_unsecure_fixture_options,
'h2_full+poll': default_unsecure_fixture_options._replace(
platforms=['linux']),
'h2_full+pipe': default_unsecure_fixture_options._replace(
platforms=['linux']),
'h2_full+poll+pipe': default_unsecure_fixture_options._replace(
platforms=['linux']),
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_oauth2': default_secure_fixture_options._replace(ci_mac=False),
'h2_proxy': default_unsecure_fixture_options._replace(includes_proxy=True,
ci_mac=False),
'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
ci_mac=False),
'h2_sockpair': socketpair_unsecure_fixture_options._replace(ci_mac=False),
'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
ci_mac=False, tracing=True),
'h2_ssl': default_secure_fixture_options,
'h2_ssl+poll': default_secure_fixture_options._replace(platforms=['linux']),
'h2_ssl_proxy': default_secure_fixture_options._replace(includes_proxy=True,
ci_mac=False),
'h2_uds+poll': uds_fixture_options._replace(platforms=['linux']),
'h2_uds': uds_fixture_options,
}
TestOptions = collections.namedtuple(
'TestOptions', 'needs_fullstack needs_dns proxyable secure traceable cpu_cost')
default_test_options = TestOptions(False, False, True, False, True, 1.0)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
# maps test names to options
END2END_TESTS = {
'bad_hostname': default_test_options,
'binary_metadata': default_test_options,
'call_creds': default_test_options._replace(secure=True),
'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
'compressed_payload': default_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'connectivity': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'default_host': default_test_options._replace(needs_fullstack=True,
needs_dns=True),
'disappearing_server': connectivity_test_options,
'empty_batch': default_test_options,
'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU),
'hpack_size': default_test_options._replace(proxyable=False,
traceable=False),
'high_initial_seqno': default_test_options,
'invoke_large_request': default_test_options,
'large_metadata': default_test_options,
'max_concurrent_streams': default_test_options._replace(proxyable=False),
'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
'negative_deadline': default_test_options,
'no_op': default_test_options,
'payload': default_test_options._replace(cpu_cost=LOWCPU),
'ping_pong_streaming': default_test_options,
'ping': connectivity_test_options._replace(proxyable=False),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(proxyable=False),
'request_with_payload': default_test_options,
'server_finishes_request': default_test_options,
'shutdown_finishes_calls': default_test_options,
'shutdown_finishes_tags': default_test_options,
'simple_delayed_request': connectivity_test_options._replace(cpu_cost=LOWCPU),
'simple_metadata': default_test_options,
'simple_request': default_test_options,
'trailing_metadata': default_test_options,
}
def compatible(f, t):
if END2END_TESTS[t].needs_fullstack:
if not END2END_FIXTURES[f].fullstack:
return False
if END2END_TESTS[t].needs_dns:
if not END2END_FIXTURES[f].dns_resolver:
return False
if not END2END_TESTS[t].proxyable:
if END2END_FIXTURES[f].includes_proxy:
return False
if not END2END_TESTS[t].traceable:
if END2END_FIXTURES[f].tracing:
return False
return True
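# For example (illustrative): compatible('h2_proxy', 'compressed_payload') is
# False because the test is marked proxyable=False while the h2_proxy fixture
# includes a proxy, whereas compatible('h2_full', 'default_host') is True since
# h2_full provides both the full stack and the DNS resolver that test needs.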
def without(l, e):
l = l[:]
l.remove(e)
return l
def main():
sec_deps = [
'grpc_test_util',
'grpc',
'gpr_test_util',
'gpr'
]
unsec_deps = [
'grpc_test_util_unsecure',
'grpc_unsecure',
'gpr_test_util',
'gpr'
]
json = {
      '#': 'generated with test/core/end2end/gen_build_yaml.py',
'libs': [
{
'name': 'end2end_tests',
'build': 'private',
'language': 'c',
'secure': True,
'src': ['test/core/end2end/end2end_tests.c'] + [
'test/core/end2end/tests/%s.c' % t
for t in sorted(END2END_TESTS.keys())],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': sec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
] + [
{
'name': 'end2end_nosec_tests',
'build': 'private',
'language': 'c',
'secure': False,
'src': ['test/core/end2end/end2end_nosec_tests.c'] + [
'test/core/end2end/tests/%s.c' % t
for t in sorted(END2END_TESTS.keys())
if not END2END_TESTS[t].secure],
'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
'test/core/end2end/end2end_tests.h'],
'deps': unsec_deps,
'vs_proj_dir': 'test/end2end/tests',
}
],
'targets': [
{
'name': '%s_test' % f,
'build': 'test',
'language': 'c',
'run': False,
'src': ['test/core/end2end/fixtures/%s.c' % f],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_tests'
] + sec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
] + [
{
'name': '%s_nosec_test' % f,
'build': 'test',
'language': 'c',
'secure': 'no',
'src': ['test/core/end2end/fixtures/%s.c' % f],
'run': False,
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'deps': [
'end2end_nosec_tests'
] + unsec_deps,
'vs_proj_dir': 'test/end2end/fixtures',
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
],
'tests': [
{
'name': '%s_test' % f,
'args': [t],
'exclude_configs': [],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': False,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
] + [
{
'name': '%s_nosec_test' % f,
'args': [t],
'exclude_configs': [],
'platforms': END2END_FIXTURES[f].platforms,
'ci_platforms': (END2END_FIXTURES[f].platforms
if END2END_FIXTURES[f].ci_mac else without(
END2END_FIXTURES[f].platforms, 'mac')),
'flaky': False,
'language': 'c',
'cpu_cost': END2END_TESTS[t].cpu_cost,
}
for f in sorted(END2END_FIXTURES.keys())
if not END2END_FIXTURES[f].secure
for t in sorted(END2END_TESTS.keys())
if compatible(f, t) and not END2END_TESTS[t].secure
],
'core_end2end_tests': dict(
(t, END2END_TESTS[t].secure)
for t in END2END_TESTS.keys()
)
}
print yaml.dump(json)
if __name__ == '__main__':
main()
| VcamX/grpc | test/core/end2end/gen_build_yaml.py | Python | bsd-3-clause | 11,531 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-09 17:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0038_offercredit'),
]
operations = [
migrations.AlterField(
model_name='offercredit',
name='offer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='offer_credit', to='auctions.Offer'),
),
]
| codesy/codesy | auctions/migrations/0039_auto_20160909_1728.py | Python | agpl-3.0 | 573 |
"""
Typical complex rows (extension .txtx as extended txt) are:
1@3 1 2 &if n==1:v=100#else:v=10& 3
or (with the same effect)
1@3 1 2 &if n==1:v=100#if n>1:v=10& 3
or
1@3 &v=100*n& 3
or
1@3 1 2 3
';' is awkward to use inside an if, so we use '#' as the statement separator
n and v are mandatory names:
n is the value in the first position of the record
v is the result of the calculation in a formula
& starts and ends a formula
we can have more than one formula in a row
(and more than one such row)
"""
import os
from txtxFunctions import *
fIn = open(project + "/" + fileName + ".txtx", "r")
fOu = open(project + "/" + fileName + ".txt", "w")
nrow = 0
for line in fIn:
nrow += 1
line = fill(line) # to have formulas as blocks without spaces
sline = splitUnfill(line) # split and restores spaces
# print sline
if len(sline) != 0: # skipping empty lines
pos = sline[0].find("@")
# found an @ sign
if pos >= 0:
# the range of the first value
n1 = n2 = 0 # init. required
try:
n1 = int(sline[0][0:pos])
except BaseException:
print(
"no digits or wrong characters on the left of @ in row",
nrow,
"\nexecution stopped in error.")
fIn.close()
fOu.close()
os.sys.exit(1)
# print "*", n1
try:
n2 = int(sline[0][pos + 1:])
except BaseException:
print(
"no digits or wrong characters on the right of @ in row",
nrow,
"\nexecution stopped in error.")
fIn.close()
fOu.close()
os.sys.exit(1)
# print "*", n2
# not found an @ sign
else:
n1 = n2 = int(sline[0])
# output, applying formulas
for n in range(n1, n2 + 1):
# print "%d " % n,
print("%d " % n, end=' ', file=fOu)
for i in range(1, len(sline)):
# check for the presence of a formula
if sline[i].find("=") != -1:
# print "%s " % executeFormula(fIn,fOu,nrow,n,sline[i]),
print(
"%s " %
executeFormula(
fIn,
fOu,
nrow,
n,
sline[i]),
end=' ',
file=fOu)
else:
# print "%s " % sline[i],
print("%s " % sline[i], end=' ', file=fOu)
# print
print(file=fOu)
fIn.close()
fOu.close()
print("File", fileName, "converted: .txtx => .txt\n")
| terna/SLAPP3 | 6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/$$slapp$$/convert_txtx_txt.py | Python | cc0-1.0 | 2,869 |
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Testing the C Extension cursors
"""
import logging
import unittest
from mysql.connector import errors, errorcode
import tests
try:
from _mysql_connector import (
MySQL, MySQLError, MySQLInterfaceError,
)
except ImportError:
HAVE_CMYSQL = False
else:
HAVE_CMYSQL = True
from mysql.connector.connection_cext import CMySQLConnection
from mysql.connector.cursor_cext import (
CMySQLCursor, CMySQLCursorBuffered, CMySQLCursorRaw
)
LOGGER = logging.getLogger(tests.LOGGER_NAME)
@unittest.skipIf(not HAVE_CMYSQL, "C Extension not available")
class CExtMySQLCursorTests(tests.CMySQLCursorTests):
def _get_cursor(self, cnx=None):
if not cnx:
cnx = CMySQLConnection(**self.config)
return CMySQLCursor(connection=cnx)
def test___init__(self):
self.assertRaises(errors.InterfaceError, CMySQLCursor, connection='ham')
cur = self._get_cursor(self.cnx)
self.assertTrue(hex(id(self.cnx)).upper()[2:]
in repr(cur._cnx).upper())
def test_lastrowid(self):
cur = self._get_cursor(self.cnx)
tbl = 'test_lastrowid'
self.setup_table(self.cnx, tbl)
cur.execute("INSERT INTO {0} (col1) VALUES (1)".format(tbl))
self.assertEqual(1, cur.lastrowid)
cur.execute("INSERT INTO {0} () VALUES ()".format(tbl))
self.assertEqual(2, cur.lastrowid)
cur.execute("INSERT INTO {0} () VALUES (),()".format(tbl))
self.assertEqual(3, cur.lastrowid)
cur.execute("INSERT INTO {0} () VALUES ()".format(tbl))
self.assertEqual(5, cur.lastrowid)
def test__fetch_warnings(self):
self.cnx.get_warnings = True
cur = self._get_cursor(self.cnx)
cur._cnx = None
self.assertRaises(errors.InterfaceError, cur._fetch_warnings)
cur = self._get_cursor(self.cnx)
cur.execute("SELECT 'a' + 'b'")
cur.fetchall()
exp = [
('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'")
]
res = cur._fetch_warnings()
self.assertTrue(tests.cmp_result(exp, res))
self.assertEqual(len(exp), cur._warning_count)
def test_execute(self):
self.cnx.get_warnings = True
cur = self._get_cursor(self.cnx)
self.assertEqual(None, cur.execute(None))
self.assertRaises(errors.ProgrammingError, cur.execute,
'SELECT %s,%s,%s', ('foo', 'bar',))
cur.execute("SELECT 'a' + 'b'")
cur.fetchall()
exp = [
('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'")
]
self.assertTrue(tests.cmp_result(exp, cur._warnings))
self.cnx.get_warnings = False
cur.execute("SELECT BINARY 'ham'")
exp = [(b'ham',)]
self.assertEqual(exp, cur.fetchall())
cur.close()
tbl = 'myconnpy_cursor'
self.setup_table(self.cnx, tbl)
cur = self._get_cursor(self.cnx)
stmt_insert = "INSERT INTO {0} (col1,col2) VALUES (%s,%s)".format(tbl)
res = cur.execute(stmt_insert, (1, 100))
self.assertEqual(None, res, "Return value of execute() is wrong.")
stmt_select = "SELECT col1,col2 FROM {0} ORDER BY col1".format(tbl)
cur.execute(stmt_select)
self.assertEqual([(1, '100')],
cur.fetchall(), "Insert test failed")
data = {'id': 2}
stmt = "SELECT col1,col2 FROM {0} WHERE col1 <= %(id)s".format(tbl)
cur.execute(stmt, data)
self.assertEqual([(1, '100')], cur.fetchall())
cur.close()
def test_executemany__errors(self):
self.cnx.get_warnings = True
cur = self._get_cursor(self.cnx)
self.assertEqual(None, cur.executemany(None, []))
cur = self._get_cursor(self.cnx)
self.assertRaises(errors.ProgrammingError, cur.executemany,
'programming error with string', 'foo')
self.assertRaises(errors.ProgrammingError, cur.executemany,
'programming error with 1 element list', ['foo'])
self.assertEqual(None, cur.executemany('empty params', []))
self.assertEqual(None, cur.executemany('params is None', None))
self.assertRaises(errors.ProgrammingError, cur.executemany,
'foo', ['foo'])
self.assertRaises(errors.ProgrammingError, cur.executemany,
'SELECT %s', [('foo',), 'foo'])
self.assertRaises(errors.ProgrammingError,
cur.executemany,
"INSERT INTO t1 1 %s", [(1,), (2,)])
cur.executemany("SELECT SHA1(%s)", [('foo',), ('bar',)])
self.assertEqual(None, cur.fetchone())
def test_executemany(self):
tbl = 'myconnpy_cursor'
self.setup_table(self.cnx, tbl)
stmt_insert = "INSERT INTO {0} (col1,col2) VALUES (%s,%s)".format(tbl)
stmt_select = "SELECT col1,col2 FROM {0} ORDER BY col1".format(tbl)
cur = self._get_cursor(self.cnx)
res = cur.executemany(stmt_insert, [(1, 100), (2, 200), (3, 300)])
self.assertEqual(3, cur.rowcount)
res = cur.executemany("SELECT %s", [('f',), ('o',), ('o',)])
self.assertEqual(3, cur.rowcount)
data = [{'id': 2}, {'id': 3}]
stmt = "SELECT * FROM {0} WHERE col1 <= %(id)s".format(tbl)
cur.executemany(stmt, data)
self.assertEqual(5, cur.rowcount)
cur.execute(stmt_select)
self.assertEqual([(1, '100'), (2, '200'), (3, '300')],
cur.fetchall(), "Multi insert test failed")
data = [{'id': 2}, {'id': 3}]
stmt = "DELETE FROM {0} WHERE col1 = %(id)s".format(tbl)
cur.executemany(stmt, data)
self.assertEqual(2, cur.rowcount)
stmt = "TRUNCATE TABLE {0}".format(tbl)
cur.execute(stmt)
stmt = (
"/*comment*/INSERT/*comment*/INTO/*comment*/{0}(col1,col2)VALUES"
"/*comment*/(%s,%s/*comment*/)/*comment()*/ON DUPLICATE KEY UPDATE"
" col1 = VALUES(col1)"
).format(tbl)
cur.executemany(stmt, [(4, 100), (5, 200), (6, 300)])
self.assertEqual(3, cur.rowcount)
cur.execute(stmt_select)
self.assertEqual([(4, '100'), (5, '200'), (6, '300')],
cur.fetchall(), "Multi insert test failed")
stmt = "TRUNCATE TABLE {0}".format(tbl)
cur.execute(stmt)
stmt = (
"INSERT INTO/*comment*/{0}(col1,col2)VALUES"
"/*comment*/(%s,'/*100*/')/*comment()*/ON DUPLICATE KEY UPDATE "
"col1 = VALUES(col1)"
).format(tbl)
cur.executemany(stmt, [(4,), (5,)])
self.assertEqual(2, cur.rowcount)
cur.execute(stmt_select)
self.assertEqual([(4, '/*100*/'), (5, '/*100*/')],
cur.fetchall(), "Multi insert test failed")
cur.close()
def _test_callproc_setup(self, cnx):
self._test_callproc_cleanup(cnx)
stmt_create1 = (
"CREATE PROCEDURE myconnpy_sp_1 "
"(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
"BEGIN SET pProd := pFac1 * pFac2; END;")
stmt_create2 = (
"CREATE PROCEDURE myconnpy_sp_2 "
"(IN pFac1 INT, IN pFac2 INT, OUT pProd INT) "
"BEGIN SELECT 'abc'; SELECT 'def'; SET pProd := pFac1 * pFac2; "
"END;")
stmt_create3 = (
"CREATE PROCEDURE myconnpy_sp_3"
"(IN pStr1 VARCHAR(20), IN pStr2 VARCHAR(20), "
"OUT pConCat VARCHAR(100)) "
"BEGIN SET pConCat := CONCAT(pStr1, pStr2); END;")
stmt_create4 = (
"CREATE PROCEDURE myconnpy_sp_4"
"(IN pStr1 VARCHAR(20), INOUT pStr2 VARCHAR(20), "
"OUT pConCat VARCHAR(100)) "
"BEGIN SET pConCat := CONCAT(pStr1, pStr2); END;")
try:
cur = cnx.cursor()
cur.execute(stmt_create1)
cur.execute(stmt_create2)
cur.execute(stmt_create3)
cur.execute(stmt_create4)
except errors.Error as err:
self.fail("Failed setting up test stored routine; {0}".format(err))
cur.close()
def _test_callproc_cleanup(self, cnx):
sp_names = ('myconnpy_sp_1', 'myconnpy_sp_2', 'myconnpy_sp_3',
'myconnpy_sp_4')
stmt_drop = "DROP PROCEDURE IF EXISTS {procname}"
try:
cur = cnx.cursor()
for sp_name in sp_names:
cur.execute(stmt_drop.format(procname=sp_name))
except errors.Error as err:
self.fail(
"Failed cleaning up test stored routine; {0}".format(err))
cur.close()
def test_callproc(self):
cur = self._get_cursor(self.cnx)
self.check_method(cur, 'callproc')
self.assertRaises(ValueError, cur.callproc, None)
self.assertRaises(ValueError, cur.callproc, 'sp1', None)
config = tests.get_mysql_config()
self.cnx.get_warnings = True
self._test_callproc_setup(self.cnx)
cur = self.cnx.cursor()
if tests.MYSQL_VERSION < (5, 1):
exp = ('5', '4', b'20')
else:
exp = (5, 4, 20)
result = cur.callproc('myconnpy_sp_1', (exp[0], exp[1], 0))
self.assertEqual(exp, result)
if tests.MYSQL_VERSION < (5, 1):
exp = ('6', '5', b'30')
else:
exp = (6, 5, 30)
result = cur.callproc('myconnpy_sp_2', (exp[0], exp[1], 0))
self.assertTrue(isinstance(cur._stored_results, list))
self.assertEqual(exp, result)
exp_results = [
('abc',),
('def',)
]
for i, result in enumerate(cur.stored_results()):
self.assertEqual(exp_results[i], result.fetchone())
exp = ('ham', 'spam', 'hamspam')
result = cur.callproc('myconnpy_sp_3', (exp[0], exp[1], 0))
self.assertTrue(isinstance(cur._stored_results, list))
self.assertEqual(exp, result)
exp = ('ham', 'spam', 'hamspam')
result = cur.callproc('myconnpy_sp_4',
(exp[0], (exp[1], 'CHAR'), (0, 'CHAR')))
self.assertTrue(isinstance(cur._stored_results, list))
self.assertEqual(exp, result)
cur.close()
self._test_callproc_cleanup(self.cnx)
def test_fetchone(self):
cur = self._get_cursor(self.cnx)
self.assertEqual(None, cur.fetchone())
cur = self.cnx.cursor()
cur.execute("SELECT BINARY 'ham'")
exp = (b'ham',)
self.assertEqual(exp, cur.fetchone())
self.assertEqual(None, cur.fetchone())
cur.close()
def test_fetchmany(self):
"""MySQLCursor object fetchmany()-method"""
cur = self._get_cursor(self.cnx)
self.assertEqual([], cur.fetchmany())
tbl = 'myconnpy_fetch'
self.setup_table(self.cnx, tbl)
stmt_insert = (
"INSERT INTO {table} (col1,col2) "
"VALUES (%s,%s)".format(table=tbl))
stmt_select = (
"SELECT col1,col2 FROM {table} "
"ORDER BY col1 DESC".format(table=tbl))
cur = self.cnx.cursor()
nrrows = 10
data = [(i, str(i * 100)) for i in range(1, nrrows+1)]
cur.executemany(stmt_insert, data)
cur.execute(stmt_select)
exp = [(10, '1000'), (9, '900'), (8, '800'), (7, '700')]
rows = cur.fetchmany(4)
self.assertTrue(tests.cmp_result(exp, rows),
"Fetching first 4 rows test failed.")
exp = [(6, '600'), (5, '500'), (4, '400')]
rows = cur.fetchmany(3)
self.assertTrue(tests.cmp_result(exp, rows),
"Fetching next 3 rows test failed.")
exp = [(3, '300'), (2, '200'), (1, '100')]
rows = cur.fetchmany(3)
self.assertTrue(tests.cmp_result(exp, rows),
"Fetching next 3 rows test failed.")
self.assertEqual([], cur.fetchmany())
cur.close()
def test_fetchall(self):
cur = self._get_cursor(self.cnx)
self.assertRaises(errors.InterfaceError, cur.fetchall)
tbl = 'myconnpy_fetch'
self.setup_table(self.cnx, tbl)
stmt_insert = (
"INSERT INTO {table} (col1,col2) "
"VALUES (%s,%s)".format(table=tbl))
stmt_select = (
"SELECT col1,col2 FROM {table} "
"ORDER BY col1 ASC".format(table=tbl))
cur = self.cnx.cursor()
cur.execute("SELECT * FROM {table}".format(table=tbl))
self.assertEqual([], cur.fetchall(),
"fetchall() with empty result should return []")
nrrows = 10
data = [(i, str(i * 100)) for i in range(1, nrrows+1)]
cur.executemany(stmt_insert, data)
cur.execute(stmt_select)
self.assertTrue(tests.cmp_result(data, cur.fetchall()),
"Fetching all rows failed.")
self.assertEqual(None, cur.fetchone())
cur.close()
def test_raise_on_warning(self):
self.cnx.raise_on_warnings = True
cur = self._get_cursor(self.cnx)
cur.execute("SELECT 'a' + 'b'")
try:
cur.execute("SELECT 'a' + 'b'")
cur.fetchall()
except errors.DatabaseError:
pass
else:
self.fail("Did not get exception while raising warnings.")
def test__str__(self):
cur = self._get_cursor(self.cnx)
self.assertEqual("CMySQLCursor: (Nothing executed yet)",
cur.__str__())
cur.execute("SELECT VERSION()")
cur.fetchone()
self.assertEqual("CMySQLCursor: SELECT VERSION()",
cur.__str__())
stmt = "SELECT VERSION(),USER(),CURRENT_TIME(),NOW(),SHA1('myconnpy')"
cur.execute(stmt)
cur.fetchone()
self.assertEqual("CMySQLCursor: {0}..".format(stmt[:40]),
cur.__str__())
cur.close()
def test_column_names(self):
cur = self._get_cursor(self.cnx)
stmt = "SELECT NOW() as now, 'The time' as label, 123 FROM dual"
exp = (b'now', 'label', b'123')
cur.execute(stmt)
cur.fetchone()
self.assertEqual(exp, cur.column_names)
cur.close()
def test_statement(self):
cur = CMySQLCursor(self.cnx)
exp = 'SELECT * FROM ham'
cur._executed = exp
self.assertEqual(exp, cur.statement)
cur._executed = ' ' + exp + ' '
self.assertEqual(exp, cur.statement)
cur._executed = b'SELECT * FROM ham'
self.assertEqual(exp, cur.statement)
def test_with_rows(self):
cur = CMySQLCursor(self.cnx)
self.assertFalse(cur.with_rows)
cur._description = ('ham', 'spam')
self.assertTrue(cur.with_rows)
def tests_nextset(self):
cur = CMySQLCursor(self.cnx)
stmt = "SELECT 'result', 1; SELECT 'result', 2; SELECT 'result', 3"
cur.execute(stmt)
self.assertEqual([('result', 1)], cur.fetchall())
self.assertTrue(cur.nextset())
self.assertEqual([('result', 2)], cur.fetchall())
self.assertTrue(cur.nextset())
self.assertEqual([('result', 3)], cur.fetchall())
self.assertEqual(None, cur.nextset())
tbl = 'myconnpy_nextset'
stmt = "SELECT 'result', 1; INSERT INTO {0} () VALUES (); " \
"SELECT * FROM {0}".format(tbl)
self.setup_table(self.cnx, tbl)
cur.execute(stmt)
self.assertEqual([('result', 1)], cur.fetchall())
try:
cur.nextset()
except errors.Error as exc:
self.assertEqual(errorcode.CR_NO_RESULT_SET, exc.errno)
self.assertEqual(1, cur._affected_rows)
self.assertTrue(cur.nextset())
self.assertEqual([(1, None, 0)], cur.fetchall())
self.assertEqual(None, cur.nextset())
cur.close()
self.cnx.rollback()
def tests_execute_multi(self):
tbl = 'myconnpy_execute_multi'
stmt = "SELECT 'result', 1; INSERT INTO {0} () VALUES (); " \
"SELECT * FROM {0}".format(tbl)
self.setup_table(self.cnx, tbl)
multi_cur = CMySQLCursor(self.cnx)
results = []
exp = [
(u"SELECT 'result', 1", [(u'result', 1)]),
(u"INSERT INTO {0} () VALUES ()".format(tbl), 1, 1),
(u"SELECT * FROM {0}".format(tbl), [(1, None, 0)]),
]
for cur in multi_cur.execute(stmt, multi=True):
if cur.with_rows:
results.append((cur.statement, cur.fetchall()))
else:
results.append(
(cur.statement, cur._affected_rows, cur.lastrowid)
)
self.assertEqual(exp, results)
cur.close()
self.cnx.rollback()
class CExtMySQLCursorBufferedTests(tests.CMySQLCursorTests):
def _get_cursor(self, cnx=None):
if not cnx:
cnx = CMySQLConnection(**self.config)
self.cnx.buffered = True
return CMySQLCursorBuffered(connection=cnx)
def test___init__(self):
self.assertRaises(errors.InterfaceError, CMySQLCursorBuffered,
connection='ham')
cur = self._get_cursor(self.cnx)
self.assertTrue(hex(id(self.cnx)).upper()[2:]
in repr(cur._cnx).upper())
def test_execute(self):
self.cnx.get_warnings = True
cur = self._get_cursor(self.cnx)
self.assertEqual(None, cur.execute(None, None))
self.assertEqual(True,
isinstance(cur, CMySQLCursorBuffered))
cur.execute("SELECT 1")
self.assertEqual((1,), cur.fetchone())
def test_raise_on_warning(self):
self.cnx.raise_on_warnings = True
cur = self._get_cursor(self.cnx)
self.assertRaises(errors.DatabaseError,
cur.execute, "SELECT 'a' + 'b'")
def test_with_rows(self):
cur = self._get_cursor(self.cnx)
cur.execute("SELECT 1")
self.assertTrue(cur.with_rows)
class CMySQLCursorRawTests(tests.CMySQLCursorTests):
def _get_cursor(self, cnx=None):
if not cnx:
cnx = CMySQLConnection(**self.config)
return CMySQLCursorRaw(connection=cnx)
def test_fetchone(self):
cur = self._get_cursor(self.cnx)
self.assertEqual(None, cur.fetchone())
cur.execute("SELECT 1, 'string', MAKEDATE(2010,365), 2.5")
exp = (b'1', b'string', b'2010-12-31', b'2.5')
self.assertEqual(exp, cur.fetchone())
| ChrisPappalardo/mysql-connector-python | tests/cext/test_cext_cursor.py | Python | gpl-2.0 | 20,005 |
# Copyright (c) 2007 MIPS Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
from m5.params import *
from m5.proxy import *
from BaseCPU import BaseCPU
class ThreadModel(Enum):
vals = ['Single', 'SMT', 'SwitchOnCacheMiss']
class InOrderCPU(BaseCPU):
type = 'InOrderCPU'
activity = Param.Unsigned(0, "Initial count")
threadModel = Param.ThreadModel('SMT', "Multithreading model (SE-MODE only)")
cachePorts = Param.Unsigned(2, "Cache Ports")
stageWidth = Param.Unsigned(4, "Stage width")
fetchMemPort = Param.String("icache_port" , "Name of Memory Port to get instructions from")
dataMemPort = Param.String("dcache_port" , "Name of Memory Port to get data from")
icache_port = Port("Instruction Port")
dcache_port = Port("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
fetchBuffSize = Param.Unsigned(4, "Fetch Buffer Size (Number of Cache Blocks Stored)")
memBlockSize = Param.Unsigned(64, "Memory Block Size")
predType = Param.String("tournament", "Branch predictor type ('local', 'tournament')")
localPredictorSize = Param.Unsigned(2048, "Size of local predictor")
localCtrBits = Param.Unsigned(2, "Bits per counter")
localHistoryTableSize = Param.Unsigned(2048, "Size of local history table")
localHistoryBits = Param.Unsigned(11, "Bits for the local history")
globalPredictorSize = Param.Unsigned(8192, "Size of global predictor")
globalCtrBits = Param.Unsigned(2, "Bits per counter")
globalHistoryBits = Param.Unsigned(13, "Bits of history")
choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor")
choiceCtrBits = Param.Unsigned(2, "Bits of choice counters")
BTBEntries = Param.Unsigned(4096, "Number of BTB entries")
BTBTagSize = Param.Unsigned(16, "Size of the BTB tags, in bits")
RASSize = Param.Unsigned(16, "RAS size")
instShiftAmt = Param.Unsigned(2, "Number of bits to shift instructions by")
functionTrace = Param.Bool(False, "Enable function trace")
functionTraceStart = Param.Tick(0, "Cycle to start function trace")
stageTracing = Param.Bool(False, "Enable tracing of each stage in CPU")
multLatency = Param.Unsigned(1, "Latency for Multiply Operations")
multRepeatRate = Param.Unsigned(1, "Repeat Rate for Multiply Operations")
div8Latency = Param.Unsigned(1, "Latency for 8-bit Divide Operations")
div8RepeatRate = Param.Unsigned(1, "Repeat Rate for 8-bit Divide Operations")
div16Latency = Param.Unsigned(1, "Latency for 16-bit Divide Operations")
div16RepeatRate = Param.Unsigned(1, "Repeat Rate for 16-bit Divide Operations")
div24Latency = Param.Unsigned(1, "Latency for 24-bit Divide Operations")
div24RepeatRate = Param.Unsigned(1, "Repeat Rate for 24-bit Divide Operations")
div32Latency = Param.Unsigned(1, "Latency for 32-bit Divide Operations")
div32RepeatRate = Param.Unsigned(1, "Repeat Rate for 32-bit Divide Operations")
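# Example (sketch; the surrounding gem5 configuration script is assumed to set
# up the system, caches and workload -- only parameters declared above are used):
#
#     cpu = InOrderCPU(threadModel='Single', stageWidth=2)
#     cpu.predType = 'local'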
| koparasy/faultinjection-gem5 | src/cpu/inorder/InOrderCPU.py | Python | bsd-3-clause | 4,432 |
import time
from subproc import Popen, PIPE
class ApertiumProcess(Popen):
def __init__(self, arguments, stdin=PIPE, stdout=PIPE, stderr=PIPE):
super(ApertiumProcess, self).__init__(arguments, stdin=stdin, stdout=stdout, stderr=stderr)
    def readChar(self, reader):
        last_read = reader(1)
        sleeps = 0
        while last_read in (None, '') and sleeps < 3:
            time.sleep(0.1)
            last_read = reader(1)
            sleeps += 1
        if last_read is not None:
            return last_read
        else:
            return ''
def readUntilNull(self, reader):
ret = []
last_read = '?'
while last_read not in ('\x00', ''):
last_read = self.readChar(reader)
ret.append(last_read)
return ''.join(ret)
    def kill(self):
        import os
        try:
            os.kill(self.pid, 9)
        except OSError:
            pass  # if we get here, the process either never started, or has already died
def __del__(self):
self.kill()
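# Example usage (sketch; assumes an Apertium 'lt-proc' binary and a compiled
# .bin file are available, and that the inherited subproc.Popen provides the
# non-blocking recv() used above):
#
#     proc = ApertiumProcess(['lt-proc', '-z', 'analyser.bin'])
#     proc.stdin.write('un ejemplo\x00')
#     proc.stdin.flush()
#     analysis = proc.readUntilNull(proc.recv)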
| unhammer/lttoolbox-oldsvn | tests/apertium_process.py | Python | gpl-2.0 | 1,117 |
from django.apps import AppConfig
class ShareConfig(AppConfig):
name = 'share'
| Oinweb/py-fly | share/apps.py | Python | bsd-2-clause | 85 |
# FIXME: Remove module
from fscore.deprecated import deprecated
__all__ = ["deprecated"]
| FrodeSolheim/fs-uae-launcher | fsui/decorators.py | Python | gpl-2.0 | 91 |
"""
Models for the custom course feature
"""
from datetime import datetime
import logging
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import UTC
from lazy import lazy
from student.models import CourseEnrollment, AlreadyEnrolledError # pylint: disable=import-error
from xmodule_django.models import CourseKeyField, LocationKeyField # pylint: disable=import-error
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.ccx")
class CustomCourseForEdX(models.Model):
"""
A Custom Course.
"""
course_id = CourseKeyField(max_length=255, db_index=True)
display_name = models.CharField(max_length=255)
coach = models.ForeignKey(User, db_index=True)
@lazy
def course(self):
"""Return the CourseDescriptor of the course related to this CCX"""
store = modulestore()
with store.bulk_operations(self.course_id):
course = store.get_course(self.course_id)
if not course or isinstance(course, ErrorDescriptor):
log.error("CCX {0} from {2} course {1}".format( # pylint: disable=logging-format-interpolation
self.display_name, self.course_id, "broken" if course else "non-existent"
))
return course
@lazy
def start(self):
"""Get the value of the override of the 'start' datetime for this CCX
"""
# avoid circular import problems
from .overrides import get_override_for_ccx
return get_override_for_ccx(self, self.course, 'start')
@lazy
def due(self):
"""Get the value of the override of the 'due' datetime for this CCX
"""
# avoid circular import problems
from .overrides import get_override_for_ccx
return get_override_for_ccx(self, self.course, 'due')
def has_started(self):
"""Return True if the CCX start date is in the past"""
return datetime.now(UTC()) > self.start
def has_ended(self):
"""Return True if the CCX due date is set and is in the past"""
if self.due is None:
return False
return datetime.now(UTC()) > self.due
def start_datetime_text(self, format_string="SHORT_DATE"):
"""Returns the desired text representation of the CCX start datetime
The returned value is always expressed in UTC
"""
i18n = self.course.runtime.service(self.course, "i18n")
strftime = i18n.strftime
value = strftime(self.start, format_string)
if format_string == 'DATE_TIME':
value += u' UTC'
return value
def end_datetime_text(self, format_string="SHORT_DATE"):
"""Returns the desired text representation of the CCX due datetime
If the due date for the CCX is not set, the value returned is the empty
string.
The returned value is always expressed in UTC
"""
if self.due is None:
return ''
i18n = self.course.runtime.service(self.course, "i18n")
strftime = i18n.strftime
value = strftime(self.due, format_string)
if format_string == 'DATE_TIME':
value += u' UTC'
return value
class CcxMembership(models.Model):
"""
Which students are in a CCX?
"""
ccx = models.ForeignKey(CustomCourseForEdX, db_index=True)
student = models.ForeignKey(User, db_index=True)
active = models.BooleanField(default=False)
@classmethod
def auto_enroll(cls, student, future_membership):
"""convert future_membership to an active membership
"""
if not future_membership.auto_enroll:
msg = "auto enrollment not allowed for {}"
raise ValueError(msg.format(future_membership))
membership = cls(
ccx=future_membership.ccx, student=student, active=True
)
try:
CourseEnrollment.enroll(
student, future_membership.ccx.course_id, check_access=True
)
except AlreadyEnrolledError:
# if the user is already enrolled in the course, great!
pass
membership.save()
future_membership.delete()
@classmethod
def memberships_for_user(cls, user, active=True):
"""
active memberships for a user
"""
return cls.objects.filter(student=user, active__exact=active)
class CcxFutureMembership(models.Model):
"""
Which emails for non-users are waiting to be added to CCX on registration
"""
ccx = models.ForeignKey(CustomCourseForEdX, db_index=True)
email = models.CharField(max_length=255)
    auto_enroll = models.BooleanField(default=False)
class CcxFieldOverride(models.Model):
"""
Field overrides for custom courses.
"""
ccx = models.ForeignKey(CustomCourseForEdX, db_index=True)
location = LocationKeyField(max_length=255, db_index=True)
field = models.CharField(max_length=255)
class Meta: # pylint: disable=missing-docstring,old-style-class
unique_together = (('ccx', 'location', 'field'),)
value = models.TextField(default='null')
| dkarakats/edx-platform | lms/djangoapps/ccx/models.py | Python | agpl-3.0 | 5,217 |
from __future__ import (division, unicode_literals)
import warnings
import numpy as np
from numpy.testing import assert_almost_equal
from scipy.ndimage.interpolation import zoom, shift
from scipy.ndimage.measurements import center_of_mass
from scipy.spatial import cKDTree
def crop_pad(image, corner, shape):
ndim = len(corner)
corner = [int(round(c)) for c in corner]
shape = [int(round(s)) for s in shape]
original = image.shape[-ndim:]
    zipped = list(zip(corner, shape, original))
    if np.any([c < 0 or c + s > o for (c, s, o) in zipped]):
no_padding = [(0, 0)] * (image.ndim - ndim)
padding = [(max(-c, 0), max(c + s - o, 0)) for (c, s, o) in zipped]
corner = [c + max(-c, 0) for c in corner]
image_temp = np.pad(image, no_padding + padding, mode=str('constant'))
else:
image_temp = image
no_crop = [slice(o+1) for o in image.shape[:-ndim]]
crop = [slice(c, c+s) for (c, s) in zip(corner, shape)]
    return image_temp[tuple(no_crop + crop)]
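# Example (illustrative): a 3x3 crop whose corner falls outside the image is
# zero-padded before cropping:
#
#     >>> crop_pad(np.arange(25).reshape(5, 5), corner=(-1, -1), shape=(3, 3))
#     array([[0, 0, 0],
#            [0, 0, 1],
#            [0, 5, 6]])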
def draw_ellipse(shape, radius, center, FWHM, noise=0):
sigma = FWHM / 2.35482
cutoff = 2 * FWHM
# draw a circle
R = max(radius)
zoom_factor = np.array(radius) / R
size = int((R + cutoff)*2)
c = size // 2
y, x = np.meshgrid(*([np.arange(size)] * 2), indexing='ij')
h = np.sqrt((y - c)**2+(x - c)**2) - R
mask = np.abs(h) < cutoff
    im = np.zeros((size,)*2, dtype=float)
im[mask] += np.exp((h[mask] / sigma)**2/-2)/(sigma*np.sqrt(2*np.pi))
# zoom so that radii are ok
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = zoom(im, zoom_factor)
# shift and make correct shape
center_diff = center - np.array(center_of_mass(im))
    left_padding = np.round(center_diff).astype(int)
subpx_shift = center_diff - left_padding
im = shift(im, subpx_shift)
im = crop_pad(im, -left_padding, shape)
im[im < 0] = 0
assert_almost_equal(center_of_mass(im), center, decimal=2)
if noise > 0:
im += np.random.random(shape) * noise * im.max()
return (im / im.max() * 255).astype(np.uint8)
def draw_ellipsoid(shape, radius, center, FWHM, noise=0):
sigma = FWHM / 2.35482
cutoff = 2 * FWHM
# draw a sphere
R = max(radius)
zoom_factor = np.array(radius) / R
size = int((R + cutoff)*2)
c = size // 2
z, y, x = np.meshgrid(*([np.arange(size)] * 3), indexing='ij')
h = np.sqrt((z - c)**2+(y - c)**2+(x - c)**2) - R
mask = np.abs(h) < cutoff
    im = np.zeros((size,)*3, dtype=float)
im[mask] += np.exp((h[mask] / sigma)**2/-2)/(sigma*np.sqrt(2*np.pi))
# zoom so that radii are ok
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = zoom(im, zoom_factor)
# shift and make correct shape
center_diff = center - np.array(center_of_mass(im))
    left_padding = np.round(center_diff).astype(int)
subpx_shift = center_diff - left_padding
im = shift(im, subpx_shift)
im = crop_pad(im, -left_padding, shape)
im[im < 0] = 0
assert_almost_equal(center_of_mass(im), center, decimal=2)
if noise > 0:
im += np.random.random(shape) * noise * im.max()
return (im / im.max() * 255).astype(np.uint8)
def feat_step(r):
""" Solid disc. """
return r <= 1
class SimulatedImage(object):
""" This class makes it easy to generate artificial pictures.
Parameters
----------
shape : tuple of int
dtype : numpy.dtype, default np.float64
saturation : maximum value in image
radius : default radius of particles, used for determining the
distance between particles in clusters
feat_dict : dictionary of arguments passed to tp.artificial.draw_feature
Attributes
----------
image : ndarray containing pixel values
center : the center [y, x] to use for radial coordinates
Examples
--------
image = SimulatedImage(shape=(50, 50), dtype=np.float64, radius=7,
feat_dict={'diameter': 20, 'max_value': 100,
'feat_func': SimulatedImage.feat_hat,
'disc_size': 0.2})
image.draw_feature((10, 10))
image.draw_dimer((32, 35), angle=75)
image.add_noise(5)
image()
"""
def __init__(self, shape,
radius=None, noise=0.0,
feat_func=feat_step, **feat_kwargs):
self.ndim = len(shape)
self.shape = shape
self.dtype = np.float64
self.image = np.zeros(shape, dtype=self.dtype)
self.feat_func = feat_func
self.feat_kwargs = feat_kwargs
self.noise = float(noise)
self.center = tuple([float(s) / 2.0 for s in shape])
self.radius = float(radius)
self._coords = []
self.pos_columns = ['z', 'y', 'x'][-self.ndim:]
self.size_columns = ['size']
def __call__(self):
# so that you can checkout the image with image() instead of image.image
return self.noisy_image(self.noise)
def clear(self):
"""Clears the current image"""
self._coords = []
self.image = np.zeros_like(self.image)
def normalize_image(self, image):
""" Normalize image """
image = image.astype(self.dtype)
abs_max = np.max(np.abs(image))
return image / abs_max
def noisy_image(self, noise_level):
"""Adds noise to the current image, uniformly distributed
between 0 and `noise_level`, not including noise_level."""
if noise_level <= 0:
return self.image
noise = np.random.random(self.shape) * noise_level
noisy_image = self.normalize_image(self.image + noise)
return np.array(noisy_image, dtype=self.dtype)
@property
def coords(self):
if len(self._coords) == 0:
return np.zeros((0, self.ndim), dtype=self.dtype)
return np.array(self._coords)
def draw_feature(self, pos):
"""Draws a feature at `pos`."""
pos = [float(p) for p in pos]
self._coords.append(pos)
draw_feature(image=self.image, position=pos, diameter=2.0 * self.radius,
max_value=1.0, feat_func=self.feat_func,
**self.feat_kwargs)
def draw_features(self, count, separation=0, margin=None):
"""Draws N features at random locations, using minimum separation
and a margin. If separation > 0, less than N features may be drawn."""
if margin is None:
margin = float(self.radius)
pos = self.gen_nonoverlapping_locations(self.shape, count, separation,
margin)
for p in pos:
self.draw_feature(p)
return pos
@staticmethod
def gen_random_locations(shape, count, margin=0.0):
""" Generates `count` number of positions within `shape`. If a `margin` is
given, positions will be inside this margin. Margin may be tuple-valued.
"""
margin = validate_tuple(margin, len(shape))
pos = [np.random.uniform(m, s - m, count)
for (s, m) in zip(shape, margin)]
return np.array(pos).T
def gen_nonoverlapping_locations(self, shape, count, separation,
margin=0.0):
""" Generates `count` number of positions within `shape`, that have minimum
distance `separation` from each other. The number of positions returned may
be lower than `count`, because positions too close to each other will be
deleted. If a `margin` is given, positions will be inside this margin.
Margin may be tuple-valued.
"""
positions = self.gen_random_locations(shape, count, margin)
if len(positions) > 1:
return eliminate_overlapping_locations(positions, separation)
else:
return positions
def validate_tuple(value, ndim):
if not hasattr(value, '__iter__'):
return (value,) * ndim
if len(value) == ndim:
return tuple(value)
raise ValueError("List length should have same length as image dimensions.")
def draw_feature(image, position, diameter, max_value=None,
feat_func=feat_step, ecc=None, **kwargs):
""" Draws a radial symmetric feature and adds it to the image at given
position. The given function will be evaluated at each pixel coordinate,
no averaging or convolution is done.
Parameters
----------
image : ndarray
image to draw features on
position : iterable
coordinates of feature position
diameter : number
defines the box that will be drawn on
max_value : number
maximum feature value. should be much less than the max value of the
image dtype, to avoid pixel wrapping at overlapping features
feat_func : function. Default: feat_gauss
function f(r) that takes an ndarray of radius values
and returns intensity values <= 1
ecc : positive number, optional
eccentricity of feature, defined only in 2D. Identical to setting
diameter to (diameter / (1 - ecc), diameter * (1 - ecc))
kwargs : keyword arguments are passed to feat_func
"""
if len(position) != image.ndim:
raise ValueError("Number of position coordinates should match image"
" dimensionality.")
diameter = validate_tuple(diameter, image.ndim)
if ecc is not None:
if len(diameter) != 2:
raise ValueError("Eccentricity is only defined in 2 dimensions")
if diameter[0] != diameter[1]:
raise ValueError("Diameter is already anisotropic; eccentricity is"
" not defined.")
diameter = (diameter[0] / (1 - ecc), diameter[1] * (1 - ecc))
radius = tuple([d / 2 for d in diameter])
if max_value is None:
max_value = np.iinfo(image.dtype).max - 3
rect = []
vectors = []
for (c, r, lim) in zip(position, radius, image.shape):
if (c >= lim) or (c < 0):
raise ValueError("Position outside of image.")
lower_bound = max(int(np.floor(c - r)), 0)
upper_bound = min(int(np.ceil(c + r + 1)), lim)
rect.append(slice(lower_bound, upper_bound))
vectors.append(np.arange(lower_bound - c, upper_bound - c) / r)
coords = np.meshgrid(*vectors, indexing='ij', sparse=True)
r = np.sqrt(np.sum(np.array(coords)**2, axis=0))
spot = max_value * feat_func(r, **kwargs)
    image[tuple(rect)] += spot.astype(image.dtype)
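# Example (sketch): draw a hard-edged disc (the default feat_step profile) of
# diameter 9 px centred at (20, 30); max_value is passed explicitly because
# np.iinfo() is undefined for float images:
#
#     canvas = np.zeros((64, 64))
#     draw_feature(canvas, position=(20, 30), diameter=9, max_value=1.0)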
def gen_random_locations(shape, count, margin=0):
""" Generates `count` number of positions within `shape`. If a `margin` is
given, positions will be inside this margin. Margin may be tuple-valued.
"""
margin = validate_tuple(margin, len(shape))
np.random.seed(0)
pos = [np.random.randint(round(m), round(s - m), count)
for (s, m) in zip(shape, margin)]
return np.array(pos).T
def eliminate_overlapping_locations(f, separation):
""" Makes sure that no position is within `separation` from each other, by
deleting one of the that are to close to each other.
"""
separation = validate_tuple(separation, f.shape[1])
assert np.greater(separation, 0).all()
# Rescale positions, so that pairs are identified below a distance of 1.
f = f / separation
while True:
duplicates = cKDTree(f, 30).query_pairs(1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
to_drop.append(pair[1])
f = np.delete(f, to_drop, 0)
return f * separation
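# Example (illustrative): with separation=4 the second member of the close pair
# is dropped and only two positions survive:
#
#     >>> pos = np.array([[10., 10.], [12., 11.], [30., 40.]])
#     >>> eliminate_overlapping_locations(pos, separation=4)
#     array([[10., 10.],
#            [30., 40.]])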
| rbnvrw/circletracking | circletracking/artificial.py | Python | bsd-3-clause | 11,656 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from .._haseventbase import HasEventBase as HasEvent
from ._hastype import HasType
from ._allevents import AllEvents
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._regexpidof import RegExpIdOf
from ._hascitation import HasCitation
from ._hasnote import HasNote
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasreferencecountof import HasReferenceCountOf
from ._hassourcecount import HasSourceCount
from ._eventprivate import EventPrivate
from ._matchesfilter import MatchesFilter
from ._matchespersonfilter import MatchesPersonFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._matchessourcefilter import MatchesSourceFilter
from ._hasattribute import HasAttribute
from ._hasdata import HasData
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._matchesplacefilter import MatchesPlaceFilter
from ._hasdayofweek import HasDayOfWeek
editor_rule_list = [
AllEvents,
HasType,
HasIdOf,
HasGallery,
RegExpIdOf,
HasCitation,
HasNote,
HasNoteRegexp,
HasReferenceCountOf,
HasSourceCount,
EventPrivate,
MatchesFilter,
MatchesPersonFilter,
MatchesSourceConfidence,
MatchesSourceFilter,
HasAttribute,
HasData,
ChangedSince,
HasTag,
MatchesPlaceFilter,
HasDayOfWeek
]
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/event/__init__.py | Python | gpl-2.0 | 2,334 |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Element wise ops acting on segments of arrays."""
import tensorflow.compat.v2 as tf
from tf_quant_finance.math import diff_ops
def segment_diff(x,
segment_ids,
order=1,
exclusive=False,
dtype=None,
name=None):
"""Computes difference of successive elements in a segment.
For a complete description of segment_* ops see documentation of
`tf.segment_max`. This op extends the `diff` functionality to segmented
inputs.
The behaviour of this op is the same as that of the op `diff` within each
segment. The result is effectively a concatenation of the results of `diff`
applied to each segment.
#### Example
```python
x = tf.constant([2, 5, 1, 7, 9] + [32, 10, 12, 3] + [4, 8, 5])
segments = tf.constant([0, 0, 0, 0, 0] + [1, 1, 1, 1] + [2, 2, 2])
# First order diff. Expected result: [3, -4, 6, 2, -22, 2, -9, 4, -3]
dx1 = segment_diff(
x, segment_ids=segments, order=1, exclusive=True)
# Non-exclusive, second order diff.
# Expected result: [2, 5, -1, 2, 8, 32, 10, -20, -7, 4, 8, 1]
dx2 = segment_diff(
x, segment_ids=segments, order=2, exclusive=False)
```
Args:
x: A rank 1 `Tensor` of any dtype for which arithmetic operations are
permitted.
segment_ids: A `Tensor`. Must be one of the following types: int32, int64. A
1-D tensor whose size is equal to the size of `x`. Values should be sorted
and can be repeated.
order: Positive Python int. The order of the difference to compute. `order =
1` corresponds to the difference between successive elements.
Default value: 1
exclusive: Python bool. See description above.
Default value: False
dtype: Optional `tf.Dtype`. If supplied, the dtype for `x` to use when
converting to `Tensor`.
Default value: None which maps to the default dtype inferred by TF.
name: Python `str` name prefixed to Ops created by this class.
Default value: None which is mapped to the default name 'segment_diff'.
Returns:
diffs: A `Tensor` of the same dtype as `x`. Assuming that each segment is
of length greater than or equal to order, if `exclusive` is True,
then the size is `n-order*k` where `n` is the size of x,
`k` is the number of different segment ids supplied if `segment_ids` is
not None or 1 if `segment_ids` is None. If any of the segments is of
length less than the order, then the size is:
`n-sum(min(order, length(segment_j)), j)` where the sum is over segments.
If `exclusive` is False, then the size is `n`.
"""
with tf.compat.v1.name_scope(name, default_name='segment_diff', values=[x]):
x = tf.convert_to_tensor(x, dtype=dtype)
raw_diffs = diff_ops.diff(x, order=order, exclusive=exclusive)
if segment_ids is None:
return raw_diffs
# If segment ids are supplied, raw_diffs are incorrect at locations:
# p, p+1, ... min(p+order-1, m_p-1) where p is the index of the first
# element of a segment other than the very first segment (which is
# already correct). m_p is the segment length.
# Find positions where the segments begin.
has_segment_changed = tf.concat(
[[False], tf.not_equal(segment_ids[1:] - segment_ids[:-1], 0)], axis=0)
# Shape [k, 1]
segment_start_index = tf.cast(tf.where(has_segment_changed), dtype=tf.int32)
segment_end_index = tf.concat(
[tf.reshape(segment_start_index, [-1])[1:], [tf.size(segment_ids)]],
axis=0)
segment_end_index = tf.reshape(segment_end_index, [-1, 1])
# The indices of locations that need to be adjusted. This needs to be
# constructed in steps. First we generate p, p+1, ... p+order-1.
# Shape [num_segments-1, order]
fix_indices = (
segment_start_index + tf.range(order, dtype=segment_start_index.dtype))
in_bounds = tf.where(fix_indices < segment_end_index)
# Keep only the ones in bounds.
fix_indices = tf.reshape(tf.gather_nd(fix_indices, in_bounds), [-1, 1])
needs_fix = tf.scatter_nd(
fix_indices,
# Unfortunately, scatter_nd doesn't support bool on GPUs so we need to
# do ints here and then convert to bool.
tf.reshape(tf.ones_like(fix_indices, dtype=tf.int32), [-1]),
shape=tf.shape(x))
# If exclusive is False, then needs_fix means we need to replace the values
# in raw_diffs at those locations with the values in x.
needs_fix = tf.cast(needs_fix, dtype=tf.bool)
if not exclusive:
return tf.where(needs_fix, x, raw_diffs)
# If exclusive is True, we have to be more careful. The raw_diffs
# computation has removed the first 'order' elements. After removing the
# corresponding elements from needs_fix, we use it to remove the elements
# from raw_diffs.
return tf.boolean_mask(raw_diffs, tf.logical_not(needs_fix[order:]))
def segment_cumsum(x, segment_ids, exclusive=False, dtype=None, name=None):
"""Computes cumulative sum of elements in a segment.
For a complete description of segment_* ops see documentation of
`tf.segment_sum`. This op extends the `tf.math.cumsum` functionality to
segmented inputs.
The behaviour of this op is the same as that of the op `tf.math.cumsum` within
each segment. The result is effectively a concatenation of the results of
`tf.math.cumsum` applied to each segment with the same interpretation for the
argument `exclusive`.
#### Example
```python
x = tf.constant([2, 5, 1, 7, 9] + [32, 10, 12, 3] + [4, 8, 5])
segments = tf.constant([0, 0, 0, 0, 0] + [1, 1, 1, 1] + [2, 2, 2])
# Inclusive cumulative sum.
# Expected result: [2, 7, 8, 15, 24, 32, 42, 54, 57, 4, 12, 17]
cumsum1 = segment_cumsum(
x, segment_ids=segments, exclusive=False)
# Exclusive cumsum.
# Expected result: [0, 2, 7, 8, 15, 0, 32, 42, 54, 0, 4, 12]
cumsum2 = segment_cumsum(
x, segment_ids=segments, exclusive=True)
```
Args:
x: A rank 1 `Tensor` of any dtype for which arithmetic operations are
permitted.
segment_ids: A `Tensor`. Must be one of the following types: int32, int64. A
1-D tensor whose size is equal to the size of `x`. Values should be sorted
and can be repeated. Values must range from `0` to `num segments - 1`.
exclusive: Python bool. See description above.
Default value: False
dtype: Optional `tf.Dtype`. If supplied, the dtype for `x` to use when
converting to `Tensor`.
Default value: None which maps to the default dtype inferred by TF.
name: Python `str` name prefixed to Ops created by this class.
Default value: None which is mapped to the default name 'segment_cumsum'.
  Returns:
    cumsums: A `Tensor` of the same dtype and size as `x`, containing the
      per-segment cumulative sums (exclusive or inclusive of the current
      element, according to `exclusive`).
"""
with tf.compat.v1.name_scope(name, default_name='segment_cumsum', values=[x]):
x = tf.convert_to_tensor(x, dtype=dtype)
raw_cumsum = tf.math.cumsum(x, exclusive=exclusive)
if segment_ids is None:
return raw_cumsum
# It is quite tedious to do a vectorized version without a while loop so
# we skip that for now.
# TODO(b/137940928): Replace these ops with more efficient C++ kernels.
def scanner(accumulators, args):
cumsum, prev_segment, prev_value = accumulators
value, segment = args
if exclusive:
initial_value, inc_value = tf.zeros_like(value), cumsum + prev_value
else:
initial_value, inc_value = value, cumsum + value
next_cumsum = tf.where(
tf.equal(prev_segment, segment), inc_value, initial_value)
return next_cumsum, segment, value
return tf.scan(
scanner, (x, segment_ids),
initializer=(tf.zeros_like(x[0]), tf.zeros_like(segment_ids[0]) - 1,
tf.zeros_like(x[0])))[0]
__all__ = ['segment_cumsum', 'segment_diff']
| google/tf-quant-finance | tf_quant_finance/math/segment_ops.py | Python | apache-2.0 | 8,973 |
from textwrap import dedent
from pprint import pformat
from collections import OrderedDict
import attr
from . import sentinel
from .ordering import Ordering
# adapted from https://stackoverflow.com/a/47663099/1615465
def no_default_vals_in_repr(cls):
"""Class decorator on top of attr.s that omits attributes from repr that
have their default value"""
defaults = OrderedDict()
for attribute in cls.__attrs_attrs__:
if isinstance(attribute.default, attr.Factory):
assert attribute.default.takes_self == False, 'not implemented'
defaults[attribute.name] = attribute.default.factory()
else:
defaults[attribute.name] = attribute.default
def repr_(self):
real_cls = self.__class__
qualname = getattr(real_cls, "__qualname__", None)
if qualname is not None:
class_name = qualname.rsplit(">.", 1)[-1]
else:
class_name = real_cls.__name__
attributes = defaults.keys()
return "{0}({1})".format(
class_name,
", ".join(
name + "=" + repr(getattr(self, name))
for name in attributes
if getattr(self, name) != defaults[name]))
cls.__repr__ = repr_
return cls
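# For instance (illustrative), a ProcessGroup left entirely at its defaults
# reprs as "ProcessGroup()", while "ProcessGroup(selection=['farm coal'])"
# shows only the attribute that was set -- which keeps the output of
# SankeyDefinition.to_code() compact.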
# SankeyDefinition
def _convert_bundles_to_dict(bundles):
if not isinstance(bundles, dict):
bundles = {k: v for k, v in enumerate(bundles)}
return bundles
def _convert_ordering(ordering):
if isinstance(ordering, Ordering):
return ordering
else:
return Ordering(ordering)
def _validate_bundles(instance, attribute, bundles):
# Check bundles
for k, b in bundles.items():
if not b.from_elsewhere:
if b.source not in instance.nodes:
raise ValueError('Unknown source "{}" in bundle {}'.format(
b.source, k))
if not isinstance(instance.nodes[b.source], ProcessGroup):
raise ValueError(
'Source of bundle {} is not a process group'.format(k))
if not b.to_elsewhere:
if b.target not in instance.nodes:
raise ValueError('Unknown target "{}" in bundle {}'.format(
b.target, k))
if not isinstance(instance.nodes[b.target], ProcessGroup):
raise ValueError(
'Target of bundle {} is not a process group'.format(k))
for u in b.waypoints:
if u not in instance.nodes:
raise ValueError('Unknown waypoint "{}" in bundle {}'.format(
u, k))
if not isinstance(instance.nodes[u], Waypoint):
raise ValueError(
'Waypoint "{}" of bundle {} is not a waypoint'.format(u,
k))
def _validate_ordering(instance, attribute, ordering):
for layer_bands in ordering.layers:
for band_nodes in layer_bands:
for u in band_nodes:
if u not in instance.nodes:
raise ValueError('Unknown node "{}" in ordering'.format(u))
@attr.s(slots=True, frozen=True)
class SankeyDefinition(object):
nodes = attr.ib()
bundles = attr.ib(converter=_convert_bundles_to_dict,
validator=_validate_bundles)
ordering = attr.ib(converter=_convert_ordering, validator=_validate_ordering)
flow_selection = attr.ib(default=None)
flow_partition = attr.ib(default=None)
time_partition = attr.ib(default=None)
def copy(self):
        return self.__class__(self.nodes.copy(), self.bundles.copy(),
                              self.ordering, self.flow_selection,
                              self.flow_partition, self.time_partition)
def to_code(self):
nodes = "\n".join(
" %s: %s," % (repr(k), pformat(v)) for k, v in self.nodes.items()
)
ordering = "\n".join(
" %s," % repr([list(x) for x in layer]) for layer in self.ordering.layers
# convert to list just because it looks neater
)
bundles = "\n".join(
" %s," % pformat(bundle) for bundle in self.bundles.values()
)
if self.flow_selection is not None:
flow_selection = "flow_selection = %s\n\n" % pformat(self.flow_selection)
else:
flow_selection = ""
if self.flow_partition is not None:
flow_partition = "flow_partition = %s\n\n" % pformat(self.flow_partition)
else:
flow_partition = ""
if self.time_partition is not None:
time_partition = "time_partition = %s\n\n" % pformat(self.time_partition)
else:
time_partition = ""
code = dedent("""
from floweaver import (
ProcessGroup,
Waypoint,
Partition,
Group,
Elsewhere,
Bundle,
SankeyDefinition,
)
nodes = {
%s
}
ordering = [
%s
]
bundles = [
%s
]
%s%s%ssdd = SankeyDefinition(nodes, bundles, ordering%s%s%s)
""") % (
nodes,
ordering,
bundles,
flow_selection,
flow_partition,
time_partition,
(", flow_selection=flow_selection" if flow_selection else ""),
(", flow_partition=flow_partition" if flow_partition else ""),
(", time_partition=time_parititon" if time_partition else "")
)
return code
# ProcessGroup
def _validate_direction(instance, attribute, value):
if value not in 'LR':
raise ValueError('direction must be L or R')
@no_default_vals_in_repr
@attr.s(slots=True)
class ProcessGroup(object):
"""A ProcessGroup represents a group of processes from the underlying dataset.
The processes to include are defined by the `selection`. By default they
are all lumped into one node in the diagram, but by defining a `partition`
this can be controlled.
Attributes
----------
selection : list or string
If a list of strings, they are taken as process ids.
If a single string, it is taken as a Pandas query string run against the
process table.
partition : Partition, optional
Defines how to split the ProcessGroup into subgroups.
direction : 'R' or 'L'
Direction of flow, default 'R' (left-to-right).
title : string, optional
Label for the ProcessGroup. If not set, the ProcessGroup id will be used.
"""
selection = attr.ib(default=None)
partition = attr.ib(default=None)
direction = attr.ib(validator=_validate_direction, default='R')
title = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)))
# Waypoint
@no_default_vals_in_repr
@attr.s(slots=True)
class Waypoint(object):
"""A Waypoint represents a control point along a :class:`Bundle` of flows.
There are two reasons to define Waypoints: to control the routing of
:class:`Bundle` s of flows through the diagram, and to split flows according
to some attributes by setting a `partition`.
Attributes
----------
partition : Partition, optional
Defines how to split the Waypoint into subgroups.
direction : 'R' or 'L'
Direction of flow, default 'R' (left-to-right).
title : string, optional
Label for the Waypoint. If not set, the Waypoint id will be used.
"""
partition = attr.ib(default=None)
direction = attr.ib(validator=_validate_direction, default='R')
title = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(str)))
# Bundle
Elsewhere = sentinel.create('Elsewhere')
def _validate_flow_selection(instance, attribute, value):
if instance.source == instance.target and not value:
raise ValueError('flow_selection is required for bundle with same '
'source and target')
@no_default_vals_in_repr
@attr.s(frozen=True, slots=True)
class Bundle(object):
"""A Bundle represents a set of flows between two :class:`ProcessGroup`s.
Attributes
----------
source : string
The id of the :class:`ProcessGroup` at the start of the Bundle.
target : string
The id of the :class:`ProcessGroup` at the end of the Bundle.
waypoints : list of strings
Optional list of ids of :class:`Waypoint`s the Bundle should pass through.
flow_selection : string, optional
Query string to filter the flows included in this Bundle.
flow_partition : Partition, optional
Defines how to split the flows in the Bundle into sub-flows. Often you want
the same Partition for all the Bundles in the diagram, see
:attr:`SankeyDefinition.flow_partition`.
default_partition : Partition, optional
Defines the Partition applied to any Waypoints automatically added to route
the Bundle across layers of the diagram.
"""
source = attr.ib()
target = attr.ib()
waypoints = attr.ib(default=attr.Factory(tuple), converter=tuple)
flow_selection = attr.ib(default=None, validator=_validate_flow_selection)
flow_partition = attr.ib(default=None)
default_partition = attr.ib(default=None)
@property
def to_elsewhere(self):
"""True if the target of the Bundle is Elsewhere (outside the system
boundary)."""
return self.target is Elsewhere
@property
def from_elsewhere(self):
"""True if the source of the Bundle is Elsewhere (outside the system
boundary)."""
return self.source is Elsewhere
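# Illustrative sketch of how these classes fit together (not executed here;
# the node ids, selections and ordering below are invented for the example,
# and the exact layer nesting is normalised by Ordering):
#
#   nodes = {
#       'farms': ProcessGroup(['farm1', 'farm2']),
#       'eat': ProcessGroup('function == "eat"'),
#       'via': Waypoint(),
#   }
#   bundles = [Bundle('farms', 'eat', waypoints=['via'])]
#   ordering = [['farms'], ['via'], ['eat']]
#   sdd = SankeyDefinition(nodes, bundles, ordering)
#   print(sdd.to_code())  # round-trips the definition back to source code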
| ricklupton/sankeyview | floweaver/sankey_definition.py | Python | mit | 9,779 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
from google.cloud import firestore
from oca.environment import FIRESTORE_PROJECT
db = firestore.Client(project=FIRESTORE_PROJECT)
| our-city-app/oca-backend | services/oca3/oca/db/__init__.py | Python | apache-2.0 | 763 |
#! /usr/bin/env python
## @package pygraylog.streams
# This package is used to manage Graylog streams using its remote API thanks to requests.
#
import sys, json, requests
from pygraylog.api import MetaObjectAPI
## This class is used to manage the streams.
class Stream(MetaObjectAPI):
## Creates a stream using the given dict.
# @param stream_details a dict with four required keys (description, rules, title).
# @throw TypeError the given variable is not a dict
# @throw ValueError some required keys are missing in the given stream_details dict
# @throw IOError HTTP code >= 500
# @return True if succeded
def create(self, stream_details):
if type(stream_details) is not dict:
self.error_msg = "given stream_details must be a dict."
raise TypeError
if 'description' not in stream_details or 'title' not in stream_details:
self.error_msg = "Some parameters are missing, required: description, rules, title."
raise ValueError
self._validation_schema = super(Stream, self)._get_validation_schema("streams")['models']['CreateStreamRequest']
if super(Stream, self)._create("streams", stream_details) == True:
self._data['stream_id'] = self._response['stream_id']
self._data['id'] = self._response['stream_id']
return True
return False
## Removes a previously loaded stream from the server.
# The key 'id' from self._data is used.
# @throw TypeError the given variable is not a dict
# @throw ValueError the key named 'id' is missing in the loaded data
# @throw IOError HTTP code >= 500
# @return True if succeded
def delete(self):
if self._data == None or 'id' not in self._data:
self.error_msg = "The object is empty: no id available."
raise ValueError
return super(Stream, self)._delete("streams", self._data['id'])
## Updates a stream using the given dict.
# @param stream_details a dict with the keys to update.
# @throw TypeError the given variable is not a dict
# @throw ValueError some required keys are missing in the given stream_details dict
# @throw IOError HTTP code >= 500
# @return True if succeded
def update(self, stream_details):
if type(stream_details) is not dict:
print stream_details
self.error_msg = "given stream_details must be a dict."
raise TypeError
if 'id' in stream_details.keys():
del stream_details['id']
return super(Stream, self)._update("streams", self._data['id'], stream_details)
## Tells if a stream exists in the server's database.
# @param id the stream to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code >= 500
# @return True if found
def find_by_id(self, id):
return super(Stream, self).find_by_id("streams", id)
## Returns the stream's id if it exists in the server's database.
# @param title the stream to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code >= 500
# @return the id is found or None
def find_by_title(self, title):
if len(title) == 0:
self.error_msg = "given title is too short."
raise ValueError
_url = "%s/%s" % (self._server.url, "streams")
r = self._server.session.get(_url)
if r.status_code != 200:
self.error_msg = r.text
raise IOError
for (i, stream) in enumerate(r.json()['streams']):
if stream['title'] == title:
return stream['id']
return None
## Loads a stream from the server's database.
# @param stream the stream to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code != 200
# @return True if found and loaded
def load_from_server(self, id):
return super(Stream, self)._load_from_server("streams", id)
## Gets the rules attached to the stream.
# @throw ValueError the given stream is empty
# @throw IOError HTTP code != 200
# @return a list a rules
def get_rules(self):
if self._data == None or 'id' not in self._data:
self.error_msg = "The object is empty: no id available."
raise ValueError
_url = "%s/%s/%s/%s" % (self._server.url, "streams", self._data['id'], "rules")
r = self._server.session.get(_url)
self._handle_request_status_code(r)
return r.json()['stream_rules']
## Gets the current thoughput of the stream on this node in messages per second
# @param stream the stream to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code != 200
# @return the current value
def get_throughput(self):
if self._data == None or 'id' not in self._data:
self.error_msg = "The object is empty: no id available."
raise ValueError
_url = "%s/%s/%s/%s" % (self._server.url, "streams", id, "throughput")
r = self._server.session.get(_url)
self._handle_request_status_code(r)
if r.status_code != 200:
self.error_msg = r.json()
raise IOError
return r.json()['throughput']
## Pauses the current stream/
# @throw ValueError the given stream is not found
# @throw IOError HTTP code != 200
# @return true on success
def pause(self):
if self._data['disabled'] == True:
self.error_msg = "The Steam is already stopped."
return False
_url = "%s/%s/%s/%s" % (self._server.url, "streams", self._data['id'], "pause")
r = self._server.session.post(_url)
self._handle_request_status_code(r)
return True
## Resumes the current stream/
# @throw ValueError the given stream is not found
# @throw IOError HTTP code != 200
# @return true on success
def resume(self):
if self._data['disabled'] == False:
self.error_msg = "The Steam is already started."
return False
_url = "%s/%s/%s/%s" % (self._server.url, "streams", self._data['id'], "resume")
r = self._server.session.post(_url)
self._handle_request_status_code(r)
return True
class Rule(MetaObjectAPI):
## Attaches the rule to the given steam.
# @params the Stream object
def attach(self, stream):
if stream == None:
self.error_msg = "The given stream object is null."
raise ValueError
self._stream = stream
## Adds a rule to an existing stream on the server..
# @param rule_details the rule to add
# @throw ValueError the given parameters are not valid
# @throw IOError HTTP code >= 500
# @return True if succeded
def create(self, rule_details):
if type(rule_details) is not dict:
self.error_msg = "given rule_details must be a dict."
raise TypeError
_url = "%s/%s/%s/%s" % ( self._server.url, 'streams', self._stream._data['id'], 'rules')
r = self._server.session.post(_url, json=rule_details)
# if super(Rule, self)._create("streams", stream_details) == True:
# self._data['stream_id'] = self._response['stream_id']
# self._data['id'] = self._response['stream_id']
# return True
self._handle_request_status_code(r)
return True
## Removes a previously loaded rule from the server.
# self._data is cleared on success.
# @param object_name the type of resource to find (user, streams...)
# @throw ValueError the given parameters are not valid
# @throw IOError HTTP code >= 500
# @return True if succeded
def delete(self):
if self._data == None or 'id' not in self._data:
self.error_msg = "The object is empty: no id available."
raise ValueError
_url = "%s/%s/%s/%s" % (self._server.url, 'streams', self._stream._data['id'], "rules", self._data['id'])
r = self._server.session.delete(_url)
self._handle_request_status_code(r)
if r.status_code == 204:
self._data.clear()
return True
self._response = r.json()
return False
	def update(self, id):
		_url = "%s/streams/%s/rules/%s" % ( self._server.url, self._stream._data['id'], id)
		return super(Rule, self)._update(_url, id)
## Tells if a rule exists.
# @param id to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code >= 500
# @return True if found
def find_by_id(self, id):
_url = "%s/streams/%s/rules/%s" % ( self._server.url, self._stream._data['id'], id)
return super(Rule, self).find_by_id("streams", id)
## Loads a stream from the server's database.
# @param stream the stream to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code != 200
# @return True if found and loaded
def load_from_server(self, id):
return super(Rule, self)._load_from_server("rules", id)
class Alert_Receiver(MetaObjectAPI):
## Attaches a group of alert receivers to a stream using the given dict.
# @param ar_details a list of dicts with three required keys (streamId, entity, type).
# @throw TypeError the given variable is not a dict
# @throw ValueError some required keys are missing in the given stream_details dict
# @throw IOError HTTP code >= 500
# @return True if succeded
def create(self, ar_details):
if type(ar_details) is not list:
self.error_msg = "given ar_details must be a list of dicts."
raise TypeError
		for ar in ar_details:
if self.add(ar) == False:
return False
return True
## Attaches an alert receiver to a stream using the given dict.
# @param ar_details a dict with three required keys (streamId, entityn type).
# @throw TypeError the given variable is not a dict
# @throw ValueError some required keys are missing in the given stream_details dict
# @throw IOError HTTP code >= 500
# @return True if succeded
def add(self, ar_details):
		if type(ar_details) is not dict:
			self.error_msg = "given ar_details must be a dict."
raise TypeError
if 'streamId' not in ar_details or 'entity' not in ar_details or 'type' not in ar_details:
self.error_msg = "Some parameters are missing, required: streamId, entity, type."
raise ValueError
		if Stream(self._hostname, self._login, self._password).find_by_id(ar_details['streamId']) == False:
self.error_msg = "Bad given streamId."
raise ValueError
# TODO: find the right definition in the API's doc
#self._validation_schema = super(Stream, self)._get_validation_schema("streams")['models']['StreamListResponse']['streams']
_url = "/streams/%s/%alerts/receivers" % ( ar_details['streamId'] )
return super(Alert_Receiver, self).create(_url, ar_details)
## Removes a previously loaded alert receiver from all the streams.
# self._data is used and cleared on success.
# @throw TypeError the given variable is not a dict
# @throw ValueError the key named 'id' is missing in the loaded data
# @throw IOError HTTP code >= 500
# @return True if succeded
def erase(self):
if self._data == None or 'type' not in self._data or 'entity' not in self._data:
self.error_msg = "The object is empty: no type or entity available."
raise ValueError
r = self._server.session.get("/streams")
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
if r.status_code == 404:
self._response = r.json()
return False
for (i, stream) in enumerate(r.json()['streams']):
if 'alert_receivers' in stream:
_url = "/streams/%s/%alerts/receivers" % ( stream['id'] )
_payload = {}
if self._data['entity'] in stream['alert_receivers']['emails']:
_payload = { 'streamId': stream['id'], 'entity': self._data['entity'], 'type': 'emails' }
else:
_payload = { 'streamId': stream['id'], 'entity': self._data['entity'], 'type': 'streams' }
r = self._server.session.delete(_url, params=_payload)
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
self._data.clear()
return True
## Removes a previously loaded alert receiver from a stream.
# self._data is used.
# @throw TypeError the given variable is not a dict
# @throw ValueError the key named 'id' is missing in the loaded data
# @throw IOError HTTP code >= 500
# @return True if succeded
def delete(self):
if self._data == None or 'type' not in self._data or 'entity' not in self._data:
self.error_msg = "The object is empty: no type or entity available."
raise ValueError
_url = "/streams/%s/%alerts/receivers" % ( ar_details['streamId'] )
r = self._server.session.delete(_url, params=self._data)
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
if r.status_code == 204:
self._data.clear()
return True
self._response = r.json()
return False
## Updates a stream using the given dict.
# @param stream_details a dict with the keys to update.
# @throw TypeError the given variable is not a dict
# @throw ValueError some required keys are missing in the given stream_details dict
# @throw IOError HTTP code >= 500
# @return True if succeded
def update(self, ar_details):
raise ValueError
# if type(ar_details) is not dict:
# print ar_details
# self.error_msg = "given ar_details must be a dict."
# raise TypeError
#
# if 'id' in stream_details.keys():
# del stream_details['id']
#
# return super(Stream, self)._update("streams", self._data['id'], stream_details)
## Tells if an alert receiver exists somewhere in one of the stored streams.
# @param id email or streamname to find
# @throw ValueError the given stream is empty
# @throw IOError HTTP code >= 500
# @return True if found
def find_by_id(self, id):
if len(id) == 0:
self.error_msg = "given id is too short."
raise ValueError
_url = "%s/%s" % (self._url, 'streams')
r = self._server.session.get(_url)
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
if r.status_code == 404:
self._response = r.json()
return False
for (i, stream) in enumerate(r.json()['streams']):
if 'alert_receivers' in stream:
if id in stream['alert_receivers']['emails'] or id in stream['alert_receivers']['streams']:
return True
return False
## Loads an object from the server's database.
# @param id email or streamname to find
# @throw ValueError the given parameters are not valid
# @throw IOError HTTP code >= 500
# @return True if found and loaded
def load_from_server(self, id):
		object_name = 'streams'
if len(id) == 0:
self.error_msg = "given id is too short."
raise ValueError
_url = "%s/%s/%s" % (self._url, object_name, id)
r = self._server.session.get(_url)
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
if r.status_code == 404:
self._response = r.json()
return False
for (i, stream) in enumerate(r.json()['streams']):
if 'alert_receivers' in stream:
if id in stream['alert_receivers']['emails']:
self._data[stream['id']] = { 'emails': [ id ] }
elif id in stream['alert_receivers']['streams']:
self._data[stream['id']] = { 'streams': [ id ] }
return True
## Exports the specified objects from the server's database.
# It overrides the parent method.
# @param object_name the type of resource to find (stream, streams...)
# @throw ValueError the given parameters are not valid
# @throw IOError HTTP code >= 500
# @return the JSON object or None
def backup(self, object_name):
if len(object_name) == 0:
self.error_msg = "given object_name is too short."
raise ValueError
_result = {}
		_buf = super(Alert_Receiver, self).backup('streams')
		for stream in _buf:
			if 'alert_receivers' in stream:
				_result[stream['id']] = stream['alert_receivers']
return _result
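# Illustrative usage sketch (not executed; the constructor arguments and the
# rule fields are assumptions -- a server object of the kind expected by
# MetaObjectAPI, and rule attributes as defined by the Graylog REST API):
#
#	s = Stream(server)
#	if s.create({'title': 'errors', 'description': 'error logs', 'rules': []}):
#		r = Rule(server)
#		r.attach(s)
#		r.create({'field': 'level', 'type': 1, 'value': '3', 'inverted': False})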
| MisterG/pygraylog | pygraylog/streams.py | Python | gpl-3.0 | 15,040 |
from abc import ABC, abstractmethod
from demo_python_at.commons.message import Message
class Printer(ABC):
"""Base class for all printers."""
@abstractmethod
def print(self, message: Message):
"""Abstract method for printing."""
pass
class StdoutPrinter(Printer):
"""Class that prints a message to console."""
def print(self, message: Message):
"""
Print given message in stdout.
:param message: Message class object
"""
print(message.data())
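# Minimal usage sketch (assumes a concrete Message implementation whose data()
# method returns the text to display):
#
#   printer = StdoutPrinter()
#   printer.print(message)  # writes message.data() to stdout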
| tatools/demo-python | demo_python_at/commons/printer.py | Python | apache-2.0 | 527 |
"""SCons.Tool.Packaging.ipk
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/ipk.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import SCons.Builder
import SCons.Node.FS
import os
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, NAME, VERSION, DESCRIPTION,
SUMMARY, X_IPK_PRIORITY, X_IPK_SECTION, SOURCE_URL,
X_IPK_MAINTAINER, X_IPK_DEPENDS, **kw):
""" this function prepares the packageroot directory for packaging with the
ipkg builder.
"""
SCons.Tool.Tool('ipkg').generate(env)
# setup the Ipkg builder
bld = env['BUILDERS']['Ipkg']
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
# This should be overridable from the construction environment,
# which it is by using ARCHITECTURE=.
# Guessing based on what os.uname() returns at least allows it
# to work for both i386 and x86_64 Linux systems.
archmap = {
'i686' : 'i386',
'i586' : 'i386',
'i486' : 'i386',
}
buildarchitecture = os.uname()[4]
buildarchitecture = archmap.get(buildarchitecture, buildarchitecture)
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
    # setup the kw to contain the mandatory arguments to this function.
# do this before calling any builder or setup function
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# generate the specfile
specfile = gen_ipk_dir(PACKAGEROOT, source, env, kw)
# override the default target.
if str(target[0])=="%s-%s"%(NAME, VERSION):
target=[ "%s_%s_%s.ipk"%(NAME, VERSION, buildarchitecture) ]
# now apply the Ipkg builder
return bld(env, target, specfile, **kw)
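# Rough sketch of how this packager is typically driven from an SConstruct,
# assuming the standard SCons 'packaging' tool (project name, files and all
# metadata values below are placeholders):
#
#   env = Environment(tools=['default', 'packaging'])
#   files = env.Install('/usr/bin', 'myprog')
#   env.Package(NAME='myprog', VERSION='1.0', PACKAGETYPE='ipk',
#               SUMMARY='example tool', DESCRIPTION='longer example text',
#               SOURCE_URL='http://example.com/myprog-1.0.tar.gz',
#               X_IPK_PRIORITY='optional', X_IPK_SECTION='misc',
#               X_IPK_MAINTAINER='Jane Doe <jane@example.com>',
#               X_IPK_DEPENDS='', source=files)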
def gen_ipk_dir(proot, source, env, kw):
# make sure the packageroot is a Dir object.
if SCons.Util.is_String(proot): proot=env.Dir(proot)
# create the specfile builder
s_bld=SCons.Builder.Builder(
action = build_specfiles,
)
# create the specfile targets
spec_target=[]
control=proot.Dir('CONTROL')
spec_target.append(control.File('control'))
spec_target.append(control.File('conffiles'))
spec_target.append(control.File('postrm'))
spec_target.append(control.File('prerm'))
spec_target.append(control.File('postinst'))
spec_target.append(control.File('preinst'))
# apply the builder to the specfile targets
s_bld(env, spec_target, source, **kw)
# the packageroot directory does now contain the specfiles.
return proot
def build_specfiles(source, target, env):
""" filter the targets for the needed files and use the variables in env
to create the specfile.
"""
#
# At first we care for the CONTROL/control file, which is the main file for ipk.
#
# For this we need to open multiple files in random order, so we store into
# a dict so they can be easily accessed.
#
#
opened_files={}
def open_file(needle, haystack):
try:
return opened_files[needle]
except KeyError:
file=filter(lambda x: x.get_path().rfind(needle)!=-1, haystack)[0]
opened_files[needle]=open(file.get_abspath(), 'w')
return opened_files[needle]
control_file=open_file('control', target)
if 'X_IPK_DESCRIPTION' not in env:
env['X_IPK_DESCRIPTION']="%s\n %s"%(env['SUMMARY'],
env['DESCRIPTION'].replace('\n', '\n '))
content = """
Package: $NAME
Version: $VERSION
Priority: $X_IPK_PRIORITY
Section: $X_IPK_SECTION
Source: $SOURCE_URL
Architecture: $ARCHITECTURE
Maintainer: $X_IPK_MAINTAINER
Depends: $X_IPK_DEPENDS
Description: $X_IPK_DESCRIPTION
"""
control_file.write(env.subst(content))
#
# now handle the various other files, which purpose it is to set post-,
# pre-scripts and mark files as config files.
#
# We do so by filtering the source files for files which are marked with
# the "config" tag and afterwards we do the same for x_ipk_postrm,
# x_ipk_prerm, x_ipk_postinst and x_ipk_preinst tags.
#
# The first one will write the name of the file into the file
# CONTROL/configfiles, the latter add the content of the x_ipk_* variable
# into the same named file.
#
    for f in [x for x in source if 'PACKAGING_CONFIG' in dir(x)]:
        config=open_file('conffiles', target)
        config.write(f.PACKAGING_INSTALL_LOCATION)
        config.write('\n')
    for str in 'POSTRM PRERM POSTINST PREINST'.split():
        name="PACKAGING_X_IPK_%s"%str
        for f in [x for x in source if name in dir(x)]:
            # the CONTROL/<script> target files are named postrm, prerm, etc.
            file=open_file(str.lower(), target)
            file.write(env[str])
#
# close all opened files
for f in opened_files.values():
f.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
content += env['CHANGE_SPECFILE'](target)
return 0
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Uli1/mapnik | scons/scons-local-2.4.0/SCons/Tool/packaging/ipk.py | Python | lgpl-2.1 | 6,298 |
import os
import datetime
from collections import defaultdict
from django.db import models
from django.db.models import F, Q
from core.models import PlCoreBase,User,Controller
from core.models.plcorebase import StrippedCharField
from core.models import Controller,ControllerLinkManager,ControllerLinkDeletionManager
class ControllerUser(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
user = models.ForeignKey(User,related_name='controllerusers')
controller = models.ForeignKey(Controller,related_name='controllersusers')
kuser_id = StrippedCharField(null=True, blank=True, max_length=200, help_text="Keystone user id")
class Meta:
unique_together = ('user', 'controller')
def __unicode__(self): return u'%s %s' % (self.controller, self.user)
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = ControllerUser.objects.all()
else:
users = User.select_by_user(user)
qs = ControllerUser.objects.filter(user__in=users)
return qs
def can_update(self, user):
return user.can_update_root()
class ControllerSitePrivilege(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
controller = models.ForeignKey('Controller', related_name='controllersiteprivileges')
site_privilege = models.ForeignKey('SitePrivilege', related_name='controllersiteprivileges')
role_id = StrippedCharField(null=True, blank=True, max_length=200, db_index=True, help_text="Keystone id")
class Meta:
unique_together = ('controller', 'site_privilege', 'role_id')
def __unicode__(self): return u'%s %s' % (self.controller, self.site_privilege)
def can_update(self, user):
if user.is_readonly:
return False
if user.is_admin:
return True
        cprivs = ControllerSitePrivilege.objects.filter(site_privilege__user=user)
        for cpriv in cprivs:
            if cpriv.site_privilege.role.role in ['admin', 'Admin']:
                return True
        return False
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = ControllerSitePrivilege.objects.all()
else:
cpriv_ids = [cp.id for cp in ControllerSitePrivilege.objects.filter(site_privilege__user=user)]
qs = ControllerSitePrivilege.objects.filter(id__in=cpriv_ids)
return qs
class ControllerSlicePrivilege(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
controller = models.ForeignKey('Controller', related_name='controllersliceprivileges')
slice_privilege = models.ForeignKey('SlicePrivilege', related_name='controllersliceprivileges')
role_id = StrippedCharField(null=True, blank=True, max_length=200, db_index=True, help_text="Keystone id")
class Meta:
unique_together = ('controller', 'slice_privilege')
def __unicode__(self): return u'%s %s' % (self.controller, self.slice_privilege)
def can_update(self, user):
if user.is_readonly:
return False
if user.is_admin:
return True
        cprivs = ControllerSlicePrivilege.objects.filter(slice_privilege__user=user)
        for cpriv in cprivs:
            if cpriv.slice_privilege.role.role in ['admin', 'Admin']:
                return True
        return False
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = ControllerSlicePrivilege.objects.all()
else:
cpriv_ids = [cp.id for cp in ControllerSlicePrivilege.objects.filter(slice_privilege__user=user)]
qs = ControllerSlicePrivilege.objects.filter(id__in=cpriv_ids)
return qs
| xmaruto/mcord | xos/core/models/controlleruser.py | Python | apache-2.0 | 3,800 |
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from google.cloud import datacatalog
datacatalog_client = datacatalog.DataCatalogClient()
class CleanupResultsTest(unittest.TestCase):
def test_entries_should_not_exist_after_cleanup(self):
query = 'system=oracle'
scope = datacatalog.SearchCatalogRequest.Scope()
scope.include_project_ids.append(
os.environ['ORACLE2DC_DATACATALOG_PROJECT_ID'])
request = datacatalog.SearchCatalogRequest()
request.scope = scope
request.query = query
request.page_size = 1000
search_results = [
result for result in datacatalog_client.search_catalog(request)
]
self.assertEqual(len(search_results), 0)
| GoogleCloudPlatform/datacatalog-connectors-rdbms | google-datacatalog-oracle-connector/system_tests/cleanup_results_test.py | Python | apache-2.0 | 1,324 |
#!/usr/bin/env python2
"""
search for bytes in a WMI repository.
author: Willi Ballenthin
email: william.ballenthin@fireeye.com
"""
import sys
import logging
import binascii
import argparse
import cim
logger = logging.getLogger(__name__)
def find_bytes(repo, needle):
index = cim.Index(repo.cim_type, repo.logical_index_store)
for i in range(repo.logical_data_store.page_count):
buf = repo.logical_data_store.get_physical_page_buffer(i)
if needle not in buf:
continue
offset = buf.index(needle)
print('found hit on physical page %s at offset %s' % (hex(i), hex(offset)))
try:
lnum = repo.data_mapping.get_logical_page_number(i)
except cim.UnmappedPage:
print(' this page not mapped to a logical page (unallocated page)')
continue
print(' mapped to logical page %s' % (hex(lnum)))
try:
page = repo.logical_data_store.get_page(lnum)
except IndexError:
print(' failed to fetch logical page')
continue
if len(page.toc) == 0:
print(' page does not contain TOC, unknown contents')
continue
offset_found = False
if 0 <= offset < len(page.toc):
print(' hit in page TOC')
offset_found = True
for j in range(page.toc.count):
entry = page.toc[j]
if entry.offset <= offset < entry.offset + entry.size:
print(' hit on object contents at entry index %s id %s' % (hex(j), hex(entry.record_id)))
offset_found = True
key_hits = set([])
for key in index.lookup_keys(cim.Key('NS_')):
if not key.is_data_reference:
continue
if key.data_page != lnum:
continue
if key.data_id != entry.record_id:
continue
key = str(key)
if key not in key_hits:
print(' referred to by key %s' % (key))
key_hits.add(key)
if not offset_found:
print(' hit in page slack space')
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Search for a byte pattern in WMI repository structures.")
parser.add_argument("input", type=str,
help="Path to input file")
parser.add_argument("needle", type=str,
help="String or bytes for which to search")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable debug logging")
parser.add_argument("-q", "--quiet", action="store_true",
help="Disable all output but errors")
args = parser.parse_args(args=argv)
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
repo = cim.CIM.from_path(args.input)
try:
needle = binascii.unhexlify(args.needle)
find_bytes(repo, needle)
except ValueError:
find_bytes(repo, args.needle.encode('ascii'))
find_bytes(repo, args.needle.encode('utf-16le'))
return 0
if __name__ == "__main__":
sys.exit(main())
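# Example invocation (repository path and byte pattern are hypothetical):
#   python find_bytes.py /evidence/wmi-repository 4d5a9000
# The needle is parsed as hex first; if that fails, it is searched as the
# ASCII and UTF-16LE encodings of the given string.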
| fireeye/flare-wmi | python-cim/samples/find_bytes.py | Python | apache-2.0 | 3,590 |
from pygame import Surface, Rect, transform, font, image
class PassengerTab(Surface):
def __init__(self, position, size, passenger, resourcePath):
Surface.__init__(self, size)
self.position = position
self.size = size
self.passenger = passenger
self.textFont = font.Font(None, 15)
self.passengerSurface = Surface(self.size).convert()
self.rect = Rect(self.position, self.size)
self.optionImage = transform.scale(image.load(resourcePath + "img/optionIcon.png"), (20, 20))
self.optionRect = self.optionImage.get_rect()
| ZakDoesGaming/OregonTrail | lib/passengerTab.py | Python | mit | 543 |
import datetime
import os.path
import time
from typing import Type
from paramiko.ecdsakey import ECDSAKey
from paramiko.pkey import PKey
from paramiko.rsakey import RSAKey
from pytest import mark, raises
from geofront.keystore import parse_openssh_pubkey
from geofront.masterkey import (EmptyStoreError, FileSystemMasterKeyStore,
KeyGenerationError, PeriodicalRenewal,
TwoPhaseRenewal,
generate_key, read_private_key_file,
renew_master_key)
from geofront.remote import Remote
def test_fs_master_key_store_load():
path = os.path.join(os.path.dirname(__file__), 'test_id_rsa')
s = FileSystemMasterKeyStore(path)
key = s.load()
assert isinstance(key, RSAKey)
assert key.get_base64() == (
'AAAAB3NzaC1yc2EAAAADAQABAAABAQC7+fDpQ9sQKIdzXvqT3TzrPp2OpUCOJtUW3k0oi'
'trqqHe1XiCke++DSpAv56poCppTj9qo3N1HyhZhSv/jH7/ejZ8NZdtvLIZGOCQZVdKNy0'
'cg7jlimrWA2s8X201Yn3hYpUrYJYbhAAuQM5flvbyBtn5/miONQ8NVimgjG6UVANVqX4W'
'H9kqdr4SBf45/+BAdenf2j5DC3xceOOW8wZfe2rOJpQ0msVxMeXExGqF9DS2E3bqOwE1C'
'MPEGYr5KZCx7IeJ/4udBuKc/gOXb8tPiTTNxtYXEBcqhBdCa/M6pEdW5LiHxxoF5b6xY9'
'q0nmi7Rn0weXK0SufhGgKrpSH+B'
)
def test_fs_master_key_store_save(tmpdir):
path = tmpdir.join('id_rsa')
s = FileSystemMasterKeyStore(str(path))
with raises(EmptyStoreError):
s.load()
key = RSAKey.generate(1024)
s.save(key)
stored_key = s.load()
assert isinstance(stored_key, RSAKey)
    assert stored_key.get_base64() == key.get_base64()
def test_read_private_key_file():
path = os.path.join(os.path.dirname(__file__), 'test_id_rsa')
with open(path) as f:
key = read_private_key_file(f)
assert isinstance(key, RSAKey)
assert key.get_base64() == (
'AAAAB3NzaC1yc2EAAAADAQABAAABAQC7+fDpQ9sQKIdzXvqT3TzrPp2OpUCOJtUW3k0oi'
'trqqHe1XiCke++DSpAv56poCppTj9qo3N1HyhZhSv/jH7/ejZ8NZdtvLIZGOCQZVdKNy0'
'cg7jlimrWA2s8X201Yn3hYpUrYJYbhAAuQM5flvbyBtn5/miONQ8NVimgjG6UVANVqX4W'
'H9kqdr4SBf45/+BAdenf2j5DC3xceOOW8wZfe2rOJpQ0msVxMeXExGqF9DS2E3bqOwE1C'
'MPEGYr5KZCx7IeJ/4udBuKc/gOXb8tPiTTNxtYXEBcqhBdCa/M6pEdW5LiHxxoF5b6xY9'
'q0nmi7Rn0weXK0SufhGgKrpSH+B'
)
def authorized_key_set(path):
dotssh = path.join('.ssh')
if not dotssh.isdir():
dotssh = path.mkdir('.ssh')
with dotssh.join('authorized_keys').open() as f:
return {parse_openssh_pubkey(line.strip()) for line in f}
def test_two_phase_renewal(fx_authorized_servers, fx_master_key):
remote_set = {
Remote('user', '127.0.0.1', port)
for port in fx_authorized_servers
}
old_key = fx_master_key
new_key = RSAKey.generate(1024)
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {old_key}
with TwoPhaseRenewal(remote_set, old_key, new_key):
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {old_key, new_key}
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {new_key}
def test_two_phase_renewal_stop(fx_authorized_servers, fx_master_key):
remote_set = {
Remote('user', '127.0.0.1', port)
for port in fx_authorized_servers
}
old_key = fx_master_key
new_key = RSAKey.generate(1024)
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {old_key}
SomeException = type('SomeException', (Exception,), {})
with raises(SomeException):
with TwoPhaseRenewal(remote_set, old_key, new_key):
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {old_key, new_key}
raise SomeException('something went wrong')
for t, path, ev in fx_authorized_servers.values():
assert old_key in authorized_key_set(path)
@mark.parametrize('key_type, bits', [
(RSAKey, None),
(RSAKey, 1024),
(RSAKey, 2048),
(ECDSAKey, None),
(ECDSAKey, 256),
(ECDSAKey, 384),
])
def test_renew_master_key(fx_authorized_servers, fx_master_key, tmpdir,
key_type: Type[PKey], bits: int):
remote_set = {
Remote('user', '127.0.0.1', port)
for port in fx_authorized_servers
}
store = FileSystemMasterKeyStore(str(tmpdir.join('id_rsa')))
store.save(fx_master_key)
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {fx_master_key}
new_key = renew_master_key(remote_set, store, key_type, bits)
assert new_key.get_bits() == bits or bits is None
assert isinstance(new_key, key_type)
assert new_key != fx_master_key
assert store.load() == new_key
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {new_key}
class FailureTestMasterKeyStore(FileSystemMasterKeyStore):
def save(self, master_key: PKey):
try:
self.load()
except EmptyStoreError:
super().save(master_key)
else:
raise RenewalFailure()
class RenewalFailure(Exception):
pass
def test_renew_master_key_fail(fx_authorized_servers, fx_master_key, tmpdir):
remote_set = {
Remote('user', '127.0.0.1', port)
for port in fx_authorized_servers
}
store = FailureTestMasterKeyStore(str(tmpdir.join('id_rsa')))
store.save(fx_master_key)
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {fx_master_key}
with raises(RenewalFailure):
renew_master_key(remote_set, store)
assert store.load() == fx_master_key
for t, path, ev in fx_authorized_servers.values():
assert fx_master_key in authorized_key_set(path)
def wait_for(seconds: int, condition):
for _ in range(seconds * 2):
if condition():
break
time.sleep(0.5)
else:
raise TimeoutError(
'failed to satisfy condition during {0} seconds'.format(seconds)
)
def test_periodical_renewal(request, fx_authorized_servers, fx_master_key,
tmpdir):
timeout = request.config.getoption('--sshd-state-timeout')
remote_set = {
Remote('user', '127.0.0.1', port)
for port in fx_authorized_servers
}
store = FileSystemMasterKeyStore(str(tmpdir.join('id_rsa')))
store.save(fx_master_key)
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {fx_master_key}
p = PeriodicalRenewal(remote_set, store, datetime.timedelta(seconds=3))
assert store.load() == fx_master_key
for t, path, ev in fx_authorized_servers.values():
assert fx_master_key in authorized_key_set(path)
wait_for(timeout, lambda: store.load() != fx_master_key)
second_key = store.load()
assert second_key != fx_master_key
for t, path, ev in fx_authorized_servers.values():
key_set = authorized_key_set(path)
assert second_key in key_set
wait_for(timeout, lambda: store.load() != second_key)
third_key = store.load()
assert third_key != fx_master_key
assert third_key != second_key
for t, path, ev in fx_authorized_servers.values():
key_set = authorized_key_set(path)
assert third_key in key_set
p.terminate()
last_key = store.load()
time.sleep(10)
assert store.load() == last_key
for t, path, ev in fx_authorized_servers.values():
assert authorized_key_set(path) == {last_key}
def test_generate_key():
default_default = generate_key()
assert isinstance(default_default, RSAKey)
assert default_default.get_bits() == 1024
rsa_default = generate_key(RSAKey)
assert rsa_default.get_bits() == 1024
assert isinstance(rsa_default, RSAKey)
rsa_2048 = generate_key(RSAKey, 2048)
assert isinstance(rsa_2048, RSAKey)
assert rsa_2048.get_bits() == 2048
ecdsa_default = generate_key(ECDSAKey)
assert isinstance(ecdsa_default, ECDSAKey)
assert ecdsa_default.get_bits() == 256
ecdsa_256 = generate_key(ECDSAKey, 256)
assert isinstance(ecdsa_256, ECDSAKey)
assert ecdsa_256.get_bits() == 256
ecdsa_384 = generate_key(ECDSAKey, 384)
assert isinstance(ecdsa_384, ECDSAKey)
assert ecdsa_384.get_bits() == 384
ecdsa_521 = generate_key(ECDSAKey, 521)
assert isinstance(ecdsa_521, ECDSAKey)
assert ecdsa_521.get_bits() == 521
with raises(KeyGenerationError):
generate_key(RSAKey, 256)
with raises(KeyGenerationError):
generate_key(ECDSAKey, 1024)
| spoqa/geofront | tests/masterkey_test.py | Python | agpl-3.0 | 8,697 |
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from web import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
| b4ldr/atlas-traceroute-to-bgp | db_create.py | Python | artistic-2.0 | 474 |
####################################################################################################
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0.
#
# Author(s):
# Cedric Nugteren <www.cedricnugteren.nl>
#
# This file test PyCLBlast: the Python interface to CLBlast. It is not exhaustive. For full testing
# it is recommended to run the regular CLBlast tests, this is just a small smoke test.
#
####################################################################################################
import unittest
import numpy as np
import pyopencl as cl
from pyopencl.array import Array
import pyclblast
class TestPyCLBlast(unittest.TestCase):
@staticmethod
def setup(sizes, dtype):
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
host_arrays, device_arrays = [], []
for size in sizes:
numpy_array = np.random.rand(*size).astype(dtype=dtype)
opencl_array = Array(queue, numpy_array.shape, numpy_array.dtype)
opencl_array.set(numpy_array)
host_arrays.append(numpy_array)
device_arrays.append(opencl_array)
queue.finish()
return queue, host_arrays, device_arrays
def test_axpy(self):
for dtype in ["float32", "complex64"]:
for alpha in [1.0, 3.1]:
for n in [1, 7, 32]:
queue, h, d = self.setup([(n,), (n,)], dtype=dtype)
pyclblast.axpy(queue, n, d[0], d[1], alpha=alpha)
queue.finish()
result = d[1].get()
reference = alpha * h[0] + h[1]
for i in range(n):
self.assertAlmostEqual(reference[i], result[i], places=3)
def test_gemv(self):
for dtype in ["float32", "complex64"]:
for beta in [1.0]:
for alpha in [1.0, 3.1]:
for m in [1, 7, 32]:
for n in [1, 7, 32]:
queue, h, d = self.setup([(m, n), (n,), (m,)], dtype=dtype)
pyclblast.gemv(queue, m, n, d[0], d[1], d[2],
a_ld=n, alpha=alpha, beta=beta)
queue.finish()
result = d[2].get()
reference = alpha * np.dot(h[0], h[1]) + beta * h[2]
for i in range(m):
self.assertAlmostEqual(reference[i], result[i], places=3)
def test_gemm(self):
for dtype in ["float32", "complex64"]:
for beta in [1.0]:
for alpha in [1.0, 3.1]:
for m in [1, 7, 32]:
for n in [1, 7, 32]:
for k in [1, 7, 32]:
queue, h, d = self.setup([(m, k), (k, n), (m, n)], dtype=dtype)
pyclblast.gemm(queue, m, n, k, d[0], d[1], d[2],
a_ld=k, b_ld=n, c_ld=n, alpha=alpha, beta=beta)
queue.finish()
result = d[2].get()
reference = alpha * np.dot(h[0], h[1]) + beta * h[2]
for i in range(m):
for j in range(n):
self.assertAlmostEqual(reference[i, j], result[i, j],
places=3)
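# Allow running this smoke test directly (e.g. `python test_pyclblast.py`)
# in addition to test-runner discovery.
if __name__ == "__main__":
    unittest.main()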
| gpu/CLBlast | src/pyclblast/test/test_pyclblast.py | Python | apache-2.0 | 3,568 |
"""
WSGI config for aniauth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aniauth.settings")
application = get_wsgi_application()
| randomic/aniauth-tdd | aniauth/wsgi.py | Python | mit | 392 |
from django.conf.urls import url
from cats.views.cat import (
CatList,
CatDetail
)
from cats.views.breed import (
BreedList,
BreedDetail
)
urlpatterns = [
# Cats URL's
url(r'^cats/$', CatList.as_view(), name='list'),
url(r'^cats/(?P<pk>\d+)/$', CatDetail.as_view(), name='detail'),
# Breeds URL's
url(r'^breeds/$', BreedList.as_view(), name='list_breeds'),
url(r'^breeds/(?P<pk>\d+)/$', BreedDetail.as_view(), name='detail_breed'),
]
| OscaRoa/api-cats | cats/urls.py | Python | mit | 475 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from django.core.management import call_command
__author__ = 'calthorpe_analytics'
import glob
import os
def import_feeds_in_directory(path):
"""
runs the manage command to import all GTFS zipfiles in a given directory
"""
os.chdir(path)
files_to_import = glob.glob("*.zip")
for filename in files_to_import:
filepath = os.path.join(path, filename)
call_command('importgtfs', filepath)
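# Example (hypothetical path): import every GTFS zipfile found in one directory
#   import_feeds_in_directory('/srv/calthorpe/gtfs_feeds')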
| CalthorpeAnalytics/urbanfootprint | footprint/main/utils/import_GTFS_feeds.py | Python | gpl-3.0 | 918 |
### Elliptic curve math - pybitcointools
#P = 2 ** 256 - 2 ** 32 - 2 ** 9 - 2 ** 8 - 2 ** 7 - 2 ** 6 - 2 ** 4 - 1
P = 115792089237316195423570985008687907853269984665640564039457584007908834671663
N = 115792089237316195423570985008687907852837564279074904382605163141518161494337
A = 0
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
G = (Gx, Gy)
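# The constants above are the standard secp256k1 domain parameters (field prime
# P, group order N, curve coefficient A and generator point G) used by Bitcoin.
# Illustrative use (sketch): derive the public-key point for a private key k
#   k = 112233445566778899
#   pub = base10_multiply(G, k)  # (x, y) point on the curve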
def inv(a, n):
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high / low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n
def isinf(p):
if type(p) is long:
p = str(p)
return p[0] == 0 and p[1] == 0
def base10_add(a, b):
if isinf(a):
return b[0], b[1]
if isinf(b):
return a[0], a[1]
if type(a) is long:
a = str(a)
if type(b) is long:
b = str(b)
if a[0] == b[0]:
if a[1] == b[1]:
return base10_double((int(a[0]), int(a[1])))
else:
return 0, 0
m = ((int(b[1]) - int(a[1])) * inv(int(b[0]) - int(a[0]), P)) % P
x = (m * m - int(a[0]) - int(b[0])) % P
y = (m * (int(a[0]) - x) - int(a[1])) % P
return x, y
def base10_double(a):
if isinf(a):
return 0, 0
if type(a) is long:
a = str(a)
m = ((3 * int(a[0]) * int(a[0]) + A) * inv(2 * int(a[1]), P)) % P
x = (m * m - 2 * int(a[0])) % P
y = (m * (int(a[0]) - x) - int(a[1])) % P
return int(x), int(y)
def base10_multiply(a, n):
if isinf(a) or n == 0:
return 0, 0
if n == 1:
return a
if n < 0 or n >= N:
return base10_multiply(a, n % N)
if (n % 2) == 0:
return base10_double(base10_multiply(a, n / 2))
if (n % 2) == 1:
        return base10_add(base10_double(base10_multiply(a, n / 2)), a)
 | inuitwallet/bippy_old | num/elip.py | Python | mit | 1,709 |
import time
import requests
import pytest
import yaml
from suite.ap_resources_utils import (
create_ap_usersig_from_yaml,
delete_ap_usersig,
create_ap_logconf_from_yaml,
create_ap_policy_from_yaml,
delete_ap_policy,
delete_ap_logconf,
create_ap_waf_policy_from_yaml,
)
from suite.resources_utils import (
ensure_connection_to_public_endpoint,
create_items_from_yaml,
create_example_app,
delete_common_app,
delete_items_from_yaml,
wait_until_all_pods_are_ready,
create_secret_from_yaml,
delete_secret,
ensure_response_from_backend,
create_ingress,
create_ingress_with_ap_annotations,
delete_ingress,
wait_before_test,
scale_deployment,
get_total_ingresses,
get_total_vs,
get_last_reload_status,
get_pods_amount, get_total_vsr,
)
from suite.vs_vsr_resources_utils import (
create_virtual_server_from_yaml,
delete_virtual_server,
patch_virtual_server_from_yaml,
create_virtual_server,
create_v_s_route,
)
from suite.policy_resources_utils import (
create_policy_from_yaml,
delete_policy,
)
from suite.yaml_utils import get_first_ingress_host_from_yaml
from settings import TEST_DATA
class IngressSetup:
"""
Encapsulate the Smoke Example details.
Attributes:
public_endpoint (PublicEndpoint):
ingress_host (str):
"""
def __init__(self, req_url, metrics_url, ingress_host):
self.req_url = req_url
self.metrics_url = metrics_url
self.ingress_host = ingress_host
@pytest.fixture(scope="class")
def simple_ingress_setup(
request,
kube_apis,
ingress_controller_endpoint,
test_namespace,
ingress_controller,
) -> IngressSetup:
"""
Deploy simple application and all the Ingress resources under test in one namespace.
:param request: pytest fixture
:param kube_apis: client apis
:param ingress_controller_endpoint: public endpoint
:param test_namespace:
    :return: IngressSetup
"""
req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1"
metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics"
secret_name = create_secret_from_yaml(
kube_apis.v1, test_namespace, f"{TEST_DATA}/smoke/smoke-secret.yaml"
)
create_example_app(kube_apis, "simple", test_namespace)
create_items_from_yaml(
kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace
)
ingress_host = get_first_ingress_host_from_yaml(
f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml"
)
wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
ensure_connection_to_public_endpoint(
ingress_controller_endpoint.public_ip,
ingress_controller_endpoint.port,
ingress_controller_endpoint.port_ssl,
)
def fin():
print("Clean up the Application:")
delete_common_app(kube_apis, "simple", test_namespace)
delete_secret(kube_apis.v1, secret_name, test_namespace)
delete_items_from_yaml(
kube_apis, f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml", test_namespace
)
request.addfinalizer(fin)
return IngressSetup(req_url, metrics_url, ingress_host)
@pytest.mark.batch_start
class TestMultipleSimpleIngress:
@pytest.mark.parametrize(
"ingress_controller",
[
pytest.param(
{"extra_args": ["-enable-prometheus-metrics"]},
)
],
indirect=["ingress_controller"],
)
def test_simple_ingress_batch_start(
self,
request,
kube_apis,
ingress_controller_prerequisites,
ingress_controller,
test_namespace,
simple_ingress_setup,
):
"""
Pod startup time with simple Ingress
"""
ensure_response_from_backend(
simple_ingress_setup.req_url, simple_ingress_setup.ingress_host, check404=True
)
total_ing = int(request.config.getoption("--batch-resources"))
manifest = f"{TEST_DATA}/smoke/standard/smoke-ingress.yaml"
for i in range(1, total_ing + 1):
with open(manifest) as f:
doc = yaml.safe_load(f)
doc["metadata"]["name"] = f"smoke-ingress-{i}"
doc["spec"]["rules"][0]["host"] = f"smoke-{i}.example.com"
create_ingress(kube_apis.networking_v1, test_namespace, doc)
print(f"Total resources deployed is {total_ing}")
wait_before_test()
ic_ns = ingress_controller_prerequisites.namespace
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
        while get_pods_amount(kube_apis.v1, ic_ns) != 0:
print(f"Number of replicas not 0, retrying...")
wait_before_test()
num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)
assert (
get_total_ingresses(simple_ingress_setup.metrics_url, "nginx") == str(total_ing + 1)
and get_last_reload_status(simple_ingress_setup.metrics_url, "nginx") == "1"
)
for i in range(1, total_ing + 1):
delete_ingress(kube_apis.networking_v1, f"smoke-ingress-{i}", test_namespace)
assert num is None
##############################################################################################################
@pytest.fixture(scope="class")
def ap_ingress_setup(
request, kube_apis, ingress_controller_endpoint, test_namespace
) -> IngressSetup:
"""
Deploy a simple application and AppProtect manifests.
:param request: pytest fixture
:param kube_apis: client apis
:param ingress_controller_endpoint: public endpoint
:param test_namespace:
    :return: IngressSetup
"""
print("------------------------- Deploy backend application -------------------------")
create_example_app(kube_apis, "simple", test_namespace)
req_url = f"https://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.port_ssl}/backend1"
metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics"
wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
ensure_connection_to_public_endpoint(
ingress_controller_endpoint.public_ip,
ingress_controller_endpoint.port,
ingress_controller_endpoint.port_ssl,
)
print("------------------------- Deploy Secret -----------------------------")
src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml"
create_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)
print("------------------------- Deploy logconf -----------------------------")
src_log_yaml = f"{TEST_DATA}/appprotect/logconf.yaml"
log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)
print(f"------------------------- Deploy appolicy: ---------------------------")
src_pol_yaml = f"{TEST_DATA}/appprotect/dataguard-alarm.yaml"
pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)
print("------------------------- Deploy ingress -----------------------------")
ingress_host = {}
src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
create_ingress_with_ap_annotations(
kube_apis, src_ing_yaml, test_namespace, "dataguard-alarm", "True", "True", "127.0.0.1:514"
)
ingress_host = get_first_ingress_host_from_yaml(src_ing_yaml)
wait_before_test()
def fin():
print("Clean up:")
src_ing_yaml = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
delete_items_from_yaml(kube_apis, src_ing_yaml, test_namespace)
delete_ap_policy(kube_apis.custom_objects, pol_name, test_namespace)
delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)
delete_common_app(kube_apis, "simple", test_namespace)
src_sec_yaml = f"{TEST_DATA}/appprotect/appprotect-secret.yaml"
delete_items_from_yaml(kube_apis, src_sec_yaml, test_namespace)
request.addfinalizer(fin)
return IngressSetup(req_url, metrics_url, ingress_host)
@pytest.mark.skip_for_nginx_oss
@pytest.mark.batch_start
@pytest.mark.appprotect
@pytest.mark.parametrize(
"crd_ingress_controller_with_ap",
[
{
"extra_args": [
f"-enable-custom-resources",
f"-enable-app-protect",
f"-enable-prometheus-metrics",
]
}
],
indirect=True,
)
class TestAppProtect:
def test_ap_ingress_batch_start(
self,
request,
kube_apis,
crd_ingress_controller_with_ap,
ap_ingress_setup,
ingress_controller_prerequisites,
test_namespace,
):
"""
Pod startup time with AP Ingress
"""
print("------------- Run test for AP policy: dataguard-alarm --------------")
print(f"Request URL: {ap_ingress_setup.req_url} and Host: {ap_ingress_setup.ingress_host}")
ensure_response_from_backend(
ap_ingress_setup.req_url, ap_ingress_setup.ingress_host, check404=True
)
total_ing = int(request.config.getoption("--batch-resources"))
manifest = f"{TEST_DATA}/appprotect/appprotect-ingress.yaml"
for i in range(1, total_ing + 1):
with open(manifest) as f:
doc = yaml.safe_load(f)
doc["metadata"]["name"] = f"appprotect-ingress-{i}"
doc["spec"]["rules"][0]["host"] = f"appprotect-{i}.example.com"
create_ingress(kube_apis.networking_v1, test_namespace, doc)
print(f"Total resources deployed is {total_ing}")
wait_before_test()
ic_ns = ingress_controller_prerequisites.namespace
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
        while get_pods_amount(kube_apis.v1, ic_ns) != 0:
            print("Number of replicas not 0, retrying...")
            wait_before_test()
num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)
assert (
get_total_ingresses(ap_ingress_setup.metrics_url, "nginx") == str(total_ing + 1)
and get_last_reload_status(ap_ingress_setup.metrics_url, "nginx") == "1"
)
for i in range(1, total_ing + 1):
delete_ingress(kube_apis.networking_v1, f"appprotect-ingress-{i}", test_namespace)
assert num is None
##############################################################################################################
@pytest.mark.batch_start
@pytest.mark.parametrize(
"crd_ingress_controller, virtual_server_setup",
[
(
{
"type": "complete",
"extra_args": [f"-enable-custom-resources", f"-enable-prometheus-metrics"],
},
{"example": "virtual-server", "app_type": "simple"},
)
],
indirect=True,
)
class TestVirtualServer:
def test_vs_batch_start(
self,
request,
kube_apis,
ingress_controller_prerequisites,
crd_ingress_controller,
virtual_server_setup,
test_namespace,
):
"""
Pod startup time with simple VS
"""
resp = requests.get(
virtual_server_setup.backend_1_url, headers={"host": virtual_server_setup.vs_host}
)
        assert resp.status_code == 200
total_vs = int(request.config.getoption("--batch-resources"))
manifest = f"{TEST_DATA}/virtual-server/standard/virtual-server.yaml"
for i in range(1, total_vs + 1):
with open(manifest) as f:
doc = yaml.safe_load(f)
doc["metadata"]["name"] = f"virtual-server-{i}"
doc["spec"]["host"] = f"virtual-server-{i}.example.com"
kube_apis.custom_objects.create_namespaced_custom_object(
"k8s.nginx.org", "v1", test_namespace, "virtualservers", doc
)
print(f"VirtualServer created with name '{doc['metadata']['name']}'")
print(f"Total resources deployed is {total_vs}")
wait_before_test()
ic_ns = ingress_controller_prerequisites.namespace
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
        while get_pods_amount(kube_apis.v1, ic_ns) != 0:
            print("Number of replicas not 0, retrying...")
            wait_before_test()
num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)
assert (
get_total_vs(virtual_server_setup.metrics_url, "nginx") == str(total_vs + 1)
and get_last_reload_status(virtual_server_setup.metrics_url, "nginx") == "1"
)
for i in range(1, total_vs + 1):
delete_virtual_server(kube_apis.custom_objects, f"virtual-server-{i}", test_namespace)
assert num is None
##############################################################################################################
@pytest.fixture(scope="class")
def appprotect_waf_setup(request, kube_apis, test_namespace) -> None:
"""
    Deploy a simple application and all the AppProtect (dataguard-alarm) resources under test in one namespace.
    :param request: pytest fixture
    :param kube_apis: client apis
    :param test_namespace:
"""
uds_crd_resource = f"{TEST_DATA}/ap-waf/ap-ic-uds.yaml"
ap_policy_uds = "dataguard-alarm-uds"
print("------------------------- Deploy logconf -----------------------------")
src_log_yaml = f"{TEST_DATA}/ap-waf/logconf.yaml"
global log_name
log_name = create_ap_logconf_from_yaml(kube_apis.custom_objects, src_log_yaml, test_namespace)
print("------------------------- Create UserSig CRD resource-----------------------------")
usersig_name = create_ap_usersig_from_yaml(
kube_apis.custom_objects, uds_crd_resource, test_namespace
)
print(f"------------------------- Deploy dataguard-alarm appolicy ---------------------------")
src_pol_yaml = f"{TEST_DATA}/ap-waf/{ap_policy_uds}.yaml"
global ap_pol_name
ap_pol_name = create_ap_policy_from_yaml(kube_apis.custom_objects, src_pol_yaml, test_namespace)
def fin():
print("Clean up:")
delete_ap_policy(kube_apis.custom_objects, ap_pol_name, test_namespace)
delete_ap_usersig(kube_apis.custom_objects, usersig_name, test_namespace)
delete_ap_logconf(kube_apis.custom_objects, log_name, test_namespace)
request.addfinalizer(fin)
@pytest.mark.skip_for_nginx_oss
@pytest.mark.batch_start
@pytest.mark.appprotect
@pytest.mark.parametrize(
"crd_ingress_controller_with_ap, virtual_server_setup",
[
(
{
"type": "complete",
"extra_args": [
f"-enable-custom-resources",
f"-enable-leader-election=false",
f"-enable-app-protect",
f"-enable-prometheus-metrics",
],
},
{
"example": "ap-waf",
"app_type": "simple",
},
)
],
indirect=True,
)
class TestAppProtectWAFPolicyVS:
def test_ap_waf_policy_vs_batch_start(
self,
request,
kube_apis,
ingress_controller_prerequisites,
crd_ingress_controller_with_ap,
virtual_server_setup,
appprotect_waf_setup,
test_namespace,
):
"""
Pod startup time with AP WAF Policy
"""
waf_spec_vs_src = f"{TEST_DATA}/ap-waf/virtual-server-waf-spec.yaml"
waf_pol_dataguard_src = f"{TEST_DATA}/ap-waf/policies/waf-dataguard.yaml"
print(f"Create waf policy")
create_ap_waf_policy_from_yaml(
kube_apis.custom_objects,
waf_pol_dataguard_src,
test_namespace,
test_namespace,
True,
False,
ap_pol_name,
log_name,
"syslog:server=127.0.0.1:514",
)
wait_before_test()
print(f"Patch vs with policy: {waf_spec_vs_src}")
patch_virtual_server_from_yaml(
kube_apis.custom_objects,
virtual_server_setup.vs_name,
waf_spec_vs_src,
virtual_server_setup.namespace,
)
wait_before_test(120)
print(
"----------------------- Send request with embedded malicious script----------------------"
)
response1 = requests.get(
virtual_server_setup.backend_1_url + "</script>",
headers={"host": virtual_server_setup.vs_host},
)
print(response1.status_code)
print(
"----------------------- Send request with blocked keyword in UDS----------------------"
)
response2 = requests.get(
virtual_server_setup.backend_1_url,
headers={"host": virtual_server_setup.vs_host},
data="kic",
)
total_vs = int(request.config.getoption("--batch-resources"))
print(response2.status_code)
for i in range(1, total_vs + 1):
with open(waf_spec_vs_src) as f:
doc = yaml.safe_load(f)
doc["metadata"]["name"] = f"virtual-server-{i}"
doc["spec"]["host"] = f"virtual-server-{i}.example.com"
kube_apis.custom_objects.create_namespaced_custom_object(
"k8s.nginx.org", "v1", test_namespace, "virtualservers", doc
)
print(f"VirtualServer created with name '{doc['metadata']['name']}'")
print(f"Total resources deployed is {total_vs}")
wait_before_test()
ic_ns = ingress_controller_prerequisites.namespace
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
        while get_pods_amount(kube_apis.v1, ic_ns) != 0:
            print("Number of replicas not 0, retrying...")
            wait_before_test()
num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)
assert (
get_total_vs(virtual_server_setup.metrics_url, "nginx") == str(total_vs + 1)
and get_last_reload_status(virtual_server_setup.metrics_url, "nginx") == "1"
)
for i in range(1, total_vs + 1):
delete_virtual_server(kube_apis.custom_objects, f"virtual-server-{i}", test_namespace)
delete_policy(kube_apis.custom_objects, "waf-policy", test_namespace)
assert num is None
##############################################################################################################
@pytest.fixture(scope="class")
def vs_vsr_setup(
request,
kube_apis,
test_namespace,
ingress_controller_endpoint,
):
"""
    Deploy one VS with multiple VSRs.
    :param request: pytest fixture
    :param kube_apis: client apis
    :param test_namespace:
    :param ingress_controller_endpoint: public endpoint
:return:
"""
total_vsr = int(request.config.getoption("--batch-resources"))
vsr_source = f"{TEST_DATA}/startup/virtual-server-routes/route.yaml"
with open(vsr_source) as f:
vsr = yaml.safe_load(f)
for i in range(1, total_vsr + 1):
vsr["metadata"]["name"] = f"route-{i}"
vsr["spec"]["subroutes"][0]["path"] = f"/route-{i}"
create_v_s_route(kube_apis.custom_objects, vsr, test_namespace)
vs_source = f"{TEST_DATA}/startup/virtual-server-routes/virtual-server.yaml"
with open(vs_source) as f:
vs = yaml.safe_load(f)
routes = []
for i in range(1, total_vsr + 1):
route = {"path": f"/route-{i}", "route": f"route-{i}"}
routes.append(route)
vs["spec"]["routes"] = routes
create_virtual_server(kube_apis.custom_objects, vs, test_namespace)
@pytest.mark.batch_start
@pytest.mark.parametrize(
"crd_ingress_controller",
[
pytest.param(
{
"type": "complete",
"extra_args": ["-enable-custom-resources","-enable-prometheus-metrics", "-enable-leader-election=false"]
},
)
],
indirect=True,
)
class TestSingleVSMultipleVSRs:
def test_startup_time(
self,
request,
kube_apis,
ingress_controller_prerequisites,
crd_ingress_controller,
ingress_controller_endpoint,
vs_vsr_setup):
"""
Pod startup time with 1 VS and multiple VSRs.
"""
total_vsr = int(request.config.getoption("--batch-resources"))
ic_ns = ingress_controller_prerequisites.namespace
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 0)
        while get_pods_amount(kube_apis.v1, ic_ns) != 0:
            print("Number of replicas not 0, retrying...")
            wait_before_test()
num = scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ic_ns, 1)
metrics_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.metrics_port}/metrics"
assert (
get_total_vs(metrics_url, "nginx") == "1"
and get_total_vsr(metrics_url, "nginx") == str(total_vsr)
and get_last_reload_status(metrics_url, "nginx") == "1"
)
assert num is None | nginxinc/kubernetes-ingress | tests/suite/test_batch_startup_times.py | Python | apache-2.0 | 21,598 |
#!.venv/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(v))
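# The inverse operation (moving back up to the latest revision) would be the
# analogous sqlalchemy-migrate call, e.g.:
#   api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)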
| benjaminbrinkman/open-ad-platform | db_downgrade.py | Python | mit | 394 |
import hashlib
from wtforms import TextField
from core.manager import ExecutionContext
from core.plugins.lib.fields import IntegerField, Field
from core.plugins.lib.views.forms import SettingsFormView
from core.tests.base import RealizeTest
from core.tests.factories import UserFactory
from core.plugins.lib.models import PluginDataModel
from core.plugins.lib.proxies import MetricProxy, SourceProxy
from core.plugins.lib.base import BasePlugin
from manager import PluginManager
from lib.views.base import View
from lib.views.charts import ModelChartView
from realize.log import logging
from flask import current_app
from flask.ext.login import login_user, logout_user
log = logging.getLogger(__name__)
class TestModel(PluginDataModel):
metric_proxy = MetricProxy(name="test1")
source_proxy = SourceProxy(name='test1')
number = IntegerField()
class SettingsModel(PluginDataModel):
metric_proxy = MetricProxy(name="settings")
source_proxy = SourceProxy(name="self")
name = Field()
class TestSettingsForm(SettingsFormView):
model = SettingsModel
setting_name = TextField(description="Your name or something like it.")
class TestModelView(ModelChartView):
name = 'test_chart'
description = 'A super awesome test chart.'
model = TestModel
y_data_field = 'number'
x_data_field = 'date'
class TestView(View):
name = "test"
children = [TestModelView, TestSettingsForm]
class TestPlugin(BasePlugin):
name = "test"
hashkey = "1"
models = [TestModel, SettingsModel]
views = [TestView]
settings_form = TestSettingsForm
class PluginManagerTest(RealizeTest):
plugin_classes = [TestPlugin]
def test_add_remove(self):
user = UserFactory()
context = ExecutionContext(user=user, plugin=self.plugin_info['1']['plugin'])
manager = PluginManager(context)
# This should be 1 because we have just added a plugin for the user.
plugin_key = self.plugin_info['1']['plugin'].hashkey
manager.add(plugin_key)
self.assertEqual(len(user.plugins), 1)
manager.remove(plugin_key)
self.assertEqual(len(user.plugins), 0)
def test_get_route(self):
context = ExecutionContext(user=self.plugin_info['1']['user'], plugin=self.plugin_info['1']['plugin'])
login_user(self.plugin_info['1']['user'])
manager = PluginManager(context)
response = manager.call_route_handler(self.plugin_info['1']['views']['test'].hashkey, "get", {}, None)
logout_user()
self.assertEqual(response.status_code, 200)
def test_get_settings(self):
context = ExecutionContext(user=self.plugin_info['1']['user'], plugin=self.plugin_info['1']['plugin'])
manager = PluginManager(context)
# Should return the response from the view.
response = manager.get_settings(self.plugin_info['1']['plugin'].hashkey)
self.assertTrue(isinstance(response, dict))
| realizeapp/realize-core | core/plugins/tests.py | Python | agpl-3.0 | 2,951 |
symbols = {}
def intern(name):
try:
return symbols[name]
except KeyError:
symbol = Symbol(name)
symbols[name] = symbol
return symbol
class Symbol(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
| bendiken/sxp-python | lib/sxp/symbol.py | Python | mit | 269 |
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import numpy as np
def predictClothesGeneral(temp):
dataFile = open("data.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
Y2 = []
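    # Each line of data.txt is assumed to look like
    # "<label>:<temperature>:<label>:<pants class>:<tops class>";
    # the index-based parsing below relies on that layout.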
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(int(data[i].split(":")[3]))
Y2.append(int(data[i].split(":")[4]))
clf = RandomForestClassifier(n_estimators=25)
clf2 = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
clf2.fit(X,Y2)
pants = clf.predict([[temp]])
tops = clf2.predict([[temp]])
s = "I recommend you wear a pair of "
if pants == 1:
s = s + "jeans"
else:
s = s + "khaki shorts"
s = s + " and a "
if tops == 1:
s = s + "shirt, its a nice day out!"
elif tops == 2:
s = s + "sweat shirt."
else:
s = s + "jacket, it will be chilly today."
return s
def predictFromFileGeneral(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... " + predictClothesGeneral(float(data2[1].split(",")[0]))
def addToKnownList(shirt, temp):
dataFile = open("userAdded.txt", 'a')
dataFile.write(str(shirt + ":" + str(temp)) + '\n')
def predictClothesData(temp):
dataFile = open("userAdded.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(data[i].split(":")[0])
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
predict = clf.predict([[temp]])
return predict
def predictFromFileData(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... I would recommend a " + predictClothesData(float(data2[1].split(",")[0]))[0]
| epaglier/Project-JARVIS | jarvis-features/Weather AI/weatherai.py | Python | gpl-3.0 | 2,382 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pluggable Back-ends for Container Server
"""
import errno
import os
from uuid import uuid4
import six
from six.moves import range
from six.moves.urllib.parse import unquote
import sqlite3
from eventlet import tpool
from swift.common.constraints import CONTAINER_LISTING_LIMIT
from swift.common.exceptions import LockTimeout
from swift.common.utils import Timestamp, encode_timestamps, \
decode_timestamps, extract_swift_bytes, storage_directory, hash_path, \
ShardRange, renamer, MD5_OF_EMPTY_STRING, mkdirs, get_db_files, \
parse_db_filename, make_db_file_path, split_path, RESERVED_BYTE, \
filter_shard_ranges, ShardRangeList
from swift.common.db import DatabaseBroker, utf8encode, BROKER_TIMEOUT, \
zero_like, DatabaseAlreadyExists, SQLITE_ARG_LIMIT
DATADIR = 'containers'
RECORD_TYPE_OBJECT = 'object'
RECORD_TYPE_SHARD = 'shard'
SHARD_RANGE_TABLE = 'shard_range'
NOTFOUND = 'not_found'
UNSHARDED = 'unsharded'
SHARDING = 'sharding'
SHARDED = 'sharded'
COLLAPSED = 'collapsed'
SHARD_STATS_STATES = [ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHRINKING]
SHARD_LISTING_STATES = SHARD_STATS_STATES + [ShardRange.CLEAVED]
SHARD_UPDATE_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING]
# when auditing a shard gets its own shard range, which could be in any state
# except FOUND, and any potential acceptors excluding FOUND ranges that may be
# unwanted overlaps
SHARD_AUDITING_STATES = [ShardRange.CREATED, ShardRange.CLEAVED,
ShardRange.ACTIVE, ShardRange.SHARDING,
ShardRange.SHARDED, ShardRange.SHRINKING,
ShardRange.SHRUNK]
# attribute names in order used when transforming shard ranges from dicts to
# tuples and vice-versa
SHARD_RANGE_KEYS = ('name', 'timestamp', 'lower', 'upper', 'object_count',
'bytes_used', 'meta_timestamp', 'deleted', 'state',
'state_timestamp', 'epoch', 'reported', 'tombstones')
POLICY_STAT_TABLE_CREATE = '''
CREATE TABLE policy_stat (
storage_policy_index INTEGER PRIMARY KEY,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0
);
'''
POLICY_STAT_TRIGGER_SCRIPT = '''
CREATE TRIGGER object_insert_policy_stat AFTER INSERT ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count + (1 - new.deleted),
bytes_used = bytes_used + new.size
WHERE storage_policy_index = new.storage_policy_index;
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT new.storage_policy_index,
(1 - new.deleted),
new.size
WHERE NOT EXISTS(
SELECT changes() as change
FROM policy_stat
WHERE change <> 0
);
UPDATE container_info
SET hash = chexor(hash, new.name, new.created_at);
END;
CREATE TRIGGER object_delete_policy_stat AFTER DELETE ON object
BEGIN
UPDATE policy_stat
SET object_count = object_count - (1 - old.deleted),
bytes_used = bytes_used - old.size
WHERE storage_policy_index = old.storage_policy_index;
UPDATE container_info
SET hash = chexor(hash, old.name, old.created_at);
END;
'''
CONTAINER_INFO_TABLE_SCRIPT = '''
CREATE TABLE container_info (
account TEXT,
container TEXT,
created_at TEXT,
put_timestamp TEXT DEFAULT '0',
delete_timestamp TEXT DEFAULT '0',
reported_put_timestamp TEXT DEFAULT '0',
reported_delete_timestamp TEXT DEFAULT '0',
reported_object_count INTEGER DEFAULT 0,
reported_bytes_used INTEGER DEFAULT 0,
hash TEXT default '00000000000000000000000000000000',
id TEXT,
status TEXT DEFAULT '',
status_changed_at TEXT DEFAULT '0',
metadata TEXT DEFAULT '',
x_container_sync_point1 INTEGER DEFAULT -1,
x_container_sync_point2 INTEGER DEFAULT -1,
storage_policy_index INTEGER DEFAULT 0,
reconciler_sync_point INTEGER DEFAULT -1
);
'''
CONTAINER_STAT_VIEW_SCRIPT = '''
CREATE VIEW container_stat
AS SELECT ci.account, ci.container, ci.created_at,
ci.put_timestamp, ci.delete_timestamp,
ci.reported_put_timestamp, ci.reported_delete_timestamp,
ci.reported_object_count, ci.reported_bytes_used, ci.hash,
ci.id, ci.status, ci.status_changed_at, ci.metadata,
ci.x_container_sync_point1, ci.x_container_sync_point2,
ci.reconciler_sync_point,
ci.storage_policy_index,
coalesce(ps.object_count, 0) AS object_count,
coalesce(ps.bytes_used, 0) AS bytes_used
FROM container_info ci LEFT JOIN policy_stat ps
ON ci.storage_policy_index = ps.storage_policy_index;
CREATE TRIGGER container_stat_update
INSTEAD OF UPDATE ON container_stat
BEGIN
UPDATE container_info
SET account = NEW.account,
container = NEW.container,
created_at = NEW.created_at,
put_timestamp = NEW.put_timestamp,
delete_timestamp = NEW.delete_timestamp,
reported_put_timestamp = NEW.reported_put_timestamp,
reported_delete_timestamp = NEW.reported_delete_timestamp,
reported_object_count = NEW.reported_object_count,
reported_bytes_used = NEW.reported_bytes_used,
hash = NEW.hash,
id = NEW.id,
status = NEW.status,
status_changed_at = NEW.status_changed_at,
metadata = NEW.metadata,
x_container_sync_point1 = NEW.x_container_sync_point1,
x_container_sync_point2 = NEW.x_container_sync_point2,
storage_policy_index = NEW.storage_policy_index,
reconciler_sync_point = NEW.reconciler_sync_point;
END;
'''
def update_new_item_from_existing(new_item, existing):
"""
Compare the data and meta related timestamps of a new object item with
the timestamps of an existing object record, and update the new item
with data and/or meta related attributes from the existing record if
their timestamps are newer.
The multiple timestamps are encoded into a single string for storing
in the 'created_at' column of the objects db table.
:param new_item: A dict of object update attributes
:param existing: A dict of existing object attributes
:return: True if any attributes of the new item dict were found to be
newer than the existing and therefore not updated, otherwise
False implying that the updated item is equal to the existing.
"""
# item[created_at] may be updated so keep a copy of the original
# value in case we process this item again
new_item.setdefault('data_timestamp', new_item['created_at'])
# content-type and metadata timestamps may be encoded in
# item[created_at], or may be set explicitly.
item_ts_data, item_ts_ctype, item_ts_meta = decode_timestamps(
new_item['data_timestamp'])
if new_item.get('ctype_timestamp'):
item_ts_ctype = Timestamp(new_item.get('ctype_timestamp'))
item_ts_meta = item_ts_ctype
if new_item.get('meta_timestamp'):
item_ts_meta = Timestamp(new_item.get('meta_timestamp'))
if not existing:
# encode new_item timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
return True
# decode existing timestamp into separate data, content-type and
# metadata timestamps
rec_ts_data, rec_ts_ctype, rec_ts_meta = decode_timestamps(
existing['created_at'])
# Extract any swift_bytes values from the content_type values. This is
# necessary because the swift_bytes value to persist should be that at the
# most recent data timestamp whereas the content-type value to persist is
# that at the most recent content-type timestamp. The two values happen to
# be stored in the same database column for historical reasons.
for item in (new_item, existing):
content_type, swift_bytes = extract_swift_bytes(item['content_type'])
item['content_type'] = content_type
item['swift_bytes'] = swift_bytes
newer_than_existing = [True, True, True]
if rec_ts_data >= item_ts_data:
# apply data attributes from existing record
new_item.update([(k, existing[k])
for k in ('size', 'etag', 'deleted', 'swift_bytes')])
item_ts_data = rec_ts_data
newer_than_existing[0] = False
if rec_ts_ctype >= item_ts_ctype:
# apply content-type attribute from existing record
new_item['content_type'] = existing['content_type']
item_ts_ctype = rec_ts_ctype
newer_than_existing[1] = False
if rec_ts_meta >= item_ts_meta:
# apply metadata timestamp from existing record
item_ts_meta = rec_ts_meta
newer_than_existing[2] = False
# encode updated timestamps into one string for db record
new_item['created_at'] = encode_timestamps(
item_ts_data, item_ts_ctype, item_ts_meta)
# append the most recent swift_bytes onto the most recent content_type in
# new_item and restore existing to its original state
for item in (new_item, existing):
if item['swift_bytes']:
item['content_type'] += ';swift_bytes=%s' % item['swift_bytes']
del item['swift_bytes']
return any(newer_than_existing)
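# Illustrative sketch (hypothetical values): with Timestamp objects
# t0 < t1 < t2,
#
#   new_item = {'name': 'o', 'created_at': t1.internal, 'size': 1,
#               'etag': 'new', 'deleted': 0, 'content_type': 'text/plain'}
#   existing = {'created_at': encode_timestamps(t0, t0, t2), 'size': 0,
#               'etag': 'old', 'deleted': 0, 'content_type': 'text/plain'}
#   update_new_item_from_existing(new_item, existing)  # -> True
#
# the newer data and content-type attributes of new_item are kept, but the
# newer metadata timestamp t2 from the existing record is retained, so
# new_item['created_at'] ends up encoding (t1, t1, t2).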
def merge_shards(shard_data, existing):
"""
Compares ``shard_data`` with ``existing`` and updates ``shard_data`` with
any items of ``existing`` that take precedence over the corresponding item
in ``shard_data``.
:param shard_data: a dict representation of shard range that may be
modified by this method.
:param existing: a dict representation of shard range.
    :returns: True if ``shard_data`` has any item(s) that are considered to
        take precedence over the corresponding item in ``existing``
"""
if not existing:
return True
if existing['timestamp'] < shard_data['timestamp']:
# note that currently we do not roll forward any meta or state from
# an item that was created at older time, newer created time trumps
shard_data['reported'] = 0 # reset the latch
return True
elif existing['timestamp'] > shard_data['timestamp']:
return False
new_content = False
# timestamp must be the same, so preserve existing range bounds and deleted
for k in ('lower', 'upper', 'deleted'):
shard_data[k] = existing[k]
# now we need to look for meta data updates
if existing['meta_timestamp'] >= shard_data['meta_timestamp']:
for k in ('object_count', 'bytes_used', 'meta_timestamp'):
shard_data[k] = existing[k]
shard_data['tombstones'] = existing.get('tombstones', -1)
else:
new_content = True
# We can latch the reported flag
if existing['reported'] and \
existing['object_count'] == shard_data['object_count'] and \
existing['bytes_used'] == shard_data['bytes_used'] and \
existing.get('tombstones', -1) == shard_data['tombstones'] and \
existing['state'] == shard_data['state'] and \
existing['epoch'] == shard_data['epoch']:
shard_data['reported'] = 1
else:
shard_data.setdefault('reported', 0)
if shard_data['reported'] and not existing['reported']:
new_content = True
if (existing['state_timestamp'] == shard_data['state_timestamp']
and shard_data['state'] > existing['state']):
new_content = True
elif existing['state_timestamp'] >= shard_data['state_timestamp']:
for k in ('state', 'state_timestamp', 'epoch'):
shard_data[k] = existing[k]
else:
new_content = True
return new_content
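# Illustrative summary of the precedence rules above: a strictly newer
# 'timestamp' on shard_data wins outright (and resets the 'reported' latch);
# when timestamps are equal, the existing bounds and 'deleted' flag are kept,
# the object_count/bytes_used/tombstones stats follow the newer
# 'meta_timestamp', and state/state_timestamp/epoch follow the newer
# 'state_timestamp' (with a tie broken in favour of the more advanced state).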
class ContainerBroker(DatabaseBroker):
"""
Encapsulates working with a container database.
Note that this may involve multiple on-disk DB files if the container
becomes sharded:
* :attr:`_db_file` is the path to the legacy container DB name, i.e.
``<hash>.db``. This file should exist for an initialised broker that
has never been sharded, but will not exist once a container has been
sharded.
* :attr:`db_files` is a list of existing db files for the broker. This
list should have at least one entry for an initialised broker, and
should have two entries while a broker is in SHARDING state.
* :attr:`db_file` is the path to whichever db is currently authoritative
for the container. Depending on the container's state, this may not be
the same as the ``db_file`` argument given to :meth:`~__init__`, unless
``force_db_file`` is True in which case :attr:`db_file` is always equal
to the ``db_file`` argument given to :meth:`~__init__`.
* :attr:`pending_file` is always equal to :attr:`_db_file` extended with
``.pending``, i.e. ``<hash>.db.pending``.
"""
db_type = 'container'
db_contains_type = 'object'
db_reclaim_timestamp = 'created_at'
delete_meta_whitelist = ['x-container-sysmeta-shard-quoted-root',
'x-container-sysmeta-shard-root']
def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
account=None, container=None, pending_timeout=None,
stale_reads_ok=False, skip_commits=False,
force_db_file=False):
self._init_db_file = db_file
if db_file == ':memory:':
base_db_file = db_file
else:
base_db_file = make_db_file_path(db_file, None)
super(ContainerBroker, self).__init__(
base_db_file, timeout, logger, account, container, pending_timeout,
stale_reads_ok, skip_commits=skip_commits)
# the root account and container are populated on demand
self._root_account = self._root_container = None
self._force_db_file = force_db_file
self._db_files = None
@classmethod
def create_broker(cls, device_path, part, account, container, logger=None,
epoch=None, put_timestamp=None,
storage_policy_index=None):
"""
Create a ContainerBroker instance. If the db doesn't exist, initialize
the db file.
:param device_path: device path
:param part: partition number
:param account: account name string
:param container: container name string
:param logger: a logger instance
:param epoch: a timestamp to include in the db filename
:param put_timestamp: initial timestamp if broker needs to be
initialized
:param storage_policy_index: the storage policy index
:return: a :class:`swift.container.backend.ContainerBroker` instance
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = make_db_file_path(
os.path.join(device_path, db_dir, hsh + '.db'), epoch)
broker = ContainerBroker(db_path, account=account, container=container,
logger=logger)
if not os.path.exists(broker.db_file):
try:
broker.initialize(put_timestamp, storage_policy_index)
except DatabaseAlreadyExists:
pass
return broker
def get_db_state(self):
"""
Returns the current state of on disk db files.
"""
if self._db_file == ':memory:':
return UNSHARDED
if not self.db_files:
return NOTFOUND
if len(self.db_files) > 1:
return SHARDING
if self.db_epoch is None:
# never been sharded
return UNSHARDED
if self.db_epoch != self._own_shard_range().epoch:
return UNSHARDED
if not self.get_shard_ranges():
return COLLAPSED
return SHARDED
def sharding_initiated(self):
"""
Returns True if a broker has shard range state that would be necessary
for sharding to have been initiated, False otherwise.
"""
own_shard_range = self.get_own_shard_range()
if own_shard_range.state in (ShardRange.SHARDING,
ShardRange.SHRINKING,
ShardRange.SHARDED,
ShardRange.SHRUNK):
return bool(self.get_shard_ranges())
return False
def sharding_required(self):
"""
Returns True if a broker has shard range state that would be necessary
for sharding to have been initiated but has not yet completed sharding,
False otherwise.
"""
db_state = self.get_db_state()
return (db_state == SHARDING or
(db_state == UNSHARDED and self.sharding_initiated()))
def is_sharded(self):
return self.get_db_state() == SHARDED
def reload_db_files(self):
"""
Reloads the cached list of valid on disk db files for this broker.
"""
if self._db_file == ':memory:':
return
# reset connection so the next access will use the correct DB file
self.conn = None
self._db_files = get_db_files(self._init_db_file)
@property
def db_files(self):
"""
Gets the cached list of valid db files that exist on disk for this
broker.
The cached list may be refreshed by calling
:meth:`~swift.container.backend.ContainerBroker.reload_db_files`.
:return: A list of paths to db files ordered by ascending epoch;
the list may be empty.
"""
if not self._db_files:
self.reload_db_files()
return self._db_files
@property
def db_file(self):
"""
Get the path to the primary db file for this broker. This is typically
the db file for the most recent sharding epoch. However, if no db files
exist on disk, or if ``force_db_file`` was True when the broker was
constructed, then the primary db file is the file passed to the broker
constructor.
:return: A path to a db file; the file does not necessarily exist.
"""
if self._force_db_file:
return self._init_db_file
if self.db_files:
return self.db_files[-1]
return self._init_db_file
@property
def db_epoch(self):
hash_, epoch, ext = parse_db_filename(self.db_file)
return epoch
@property
def storage_policy_index(self):
if not hasattr(self, '_storage_policy_index'):
self._storage_policy_index = \
self.get_info()['storage_policy_index']
return self._storage_policy_index
@property
def path(self):
self._populate_instance_cache()
return '%s/%s' % (self.account, self.container)
def _initialize(self, conn, put_timestamp, storage_policy_index):
"""
Create a brand new container database (tables, indices, triggers, etc.)
"""
if not self.account:
raise ValueError(
'Attempting to create a new database with no account set')
if not self.container:
raise ValueError(
'Attempting to create a new database with no container set')
if storage_policy_index is None:
storage_policy_index = 0
self.create_object_table(conn)
self.create_policy_stat_table(conn, storage_policy_index)
self.create_container_info_table(conn, put_timestamp,
storage_policy_index)
self.create_shard_range_table(conn)
self._db_files = None
def create_object_table(self, conn):
"""
Create the object table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
:param conn: DB connection object
"""
conn.executescript("""
CREATE TABLE object (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
created_at TEXT,
size INTEGER,
content_type TEXT,
etag TEXT,
deleted INTEGER DEFAULT 0,
storage_policy_index INTEGER DEFAULT 0
);
CREATE INDEX ix_object_deleted_name ON object (deleted, name);
CREATE TRIGGER object_update BEFORE UPDATE ON object
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""" + POLICY_STAT_TRIGGER_SCRIPT)
def create_container_info_table(self, conn, put_timestamp,
storage_policy_index):
"""
Create the container_info table which is specific to the container DB.
Not a part of Pluggable Back-ends, internal to the baseline code.
Also creates the container_stat view.
:param conn: DB connection object
:param put_timestamp: put timestamp
:param storage_policy_index: storage policy index
"""
if put_timestamp is None:
put_timestamp = Timestamp(0).internal
# The container_stat view is for compatibility; old versions of Swift
# expected a container_stat table with columns "object_count" and
# "bytes_used", but when that stuff became per-storage-policy and
# moved to the policy_stat table, we stopped creating those columns in
# container_stat.
#
# To retain compatibility, we create the container_stat view with some
# triggers to make it behave like the old container_stat table. This
# way, if an old version of Swift encounters a database with the new
# schema, it can still work.
#
# Note that this can occur during a rolling Swift upgrade if a DB gets
# rsynced from an old node to a new, so it's necessary for
# availability during upgrades. The fact that it enables downgrades is
# a nice bonus.
conn.executescript(CONTAINER_INFO_TABLE_SCRIPT +
CONTAINER_STAT_VIEW_SCRIPT)
conn.execute("""
INSERT INTO container_info (account, container, created_at, id,
put_timestamp, status_changed_at, storage_policy_index)
VALUES (?, ?, ?, ?, ?, ?, ?);
""", (self.account, self.container, Timestamp.now().internal,
str(uuid4()), put_timestamp, put_timestamp,
storage_policy_index))
def create_policy_stat_table(self, conn, storage_policy_index=0):
"""
Create policy_stat table.
:param conn: DB connection object
:param storage_policy_index: the policy_index the container is
being created with
"""
conn.executescript(POLICY_STAT_TABLE_CREATE)
conn.execute("""
INSERT INTO policy_stat (storage_policy_index)
VALUES (?)
""", (storage_policy_index,))
def create_shard_range_table(self, conn):
"""
Create the shard_range table which is specific to the container DB.
:param conn: DB connection object
"""
# Use execute (not executescript) so we get the benefits of our
# GreenDBConnection. Creating a table requires a whole-DB lock;
# *any* in-progress cursor will otherwise trip a "database is locked"
# error.
conn.execute("""
CREATE TABLE %s (
ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT,
timestamp TEXT,
lower TEXT,
upper TEXT,
object_count INTEGER DEFAULT 0,
bytes_used INTEGER DEFAULT 0,
meta_timestamp TEXT,
deleted INTEGER DEFAULT 0,
state INTEGER,
state_timestamp TEXT,
epoch TEXT,
reported INTEGER DEFAULT 0,
tombstones INTEGER DEFAULT -1
);
""" % SHARD_RANGE_TABLE)
conn.execute("""
CREATE TRIGGER shard_range_update BEFORE UPDATE ON %s
BEGIN
SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
END;
""" % SHARD_RANGE_TABLE)
def get_db_version(self, conn):
if self._db_version == -1:
self._db_version = 0
for row in conn.execute('''
SELECT name FROM sqlite_master
WHERE name = 'ix_object_deleted_name' '''):
self._db_version = 1
return self._db_version
def _get_deleted_key(self, connection):
if self.get_db_version(connection) < 1:
return '+deleted'
return 'deleted'
def _newid(self, conn):
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = 0, reported_delete_timestamp = 0,
reported_object_count = 0, reported_bytes_used = 0''')
def _commit_puts_load(self, item_list, entry):
"""See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
(name, timestamp, size, content_type, etag, deleted) = entry[:6]
if len(entry) > 6:
storage_policy_index = entry[6]
else:
storage_policy_index = 0
content_type_timestamp = meta_timestamp = None
if len(entry) > 7:
content_type_timestamp = entry[7]
if len(entry) > 8:
meta_timestamp = entry[8]
item_list.append({'name': name,
'created_at': timestamp,
'size': size,
'content_type': content_type,
'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index,
'ctype_timestamp': content_type_timestamp,
'meta_timestamp': meta_timestamp})
def _empty(self):
self._commit_puts_stale_ok()
with self.get() as conn:
try:
row = conn.execute(
'SELECT max(object_count) from policy_stat').fetchone()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
row = conn.execute(
'SELECT object_count from container_stat').fetchone()
return zero_like(row[0])
def empty(self):
"""
Check if container DB is empty.
This method uses more stringent checks on object count than
:meth:`is_deleted`: this method checks that there are no objects in any
policy; if the container is in the process of sharding then both fresh
and retiring databases are checked to be empty; if a root container has
shard ranges then they are checked to be empty.
:returns: True if the database has no active objects, False otherwise
"""
if not all(broker._empty() for broker in self.get_brokers()):
return False
if self.is_root_container() and self.sharding_initiated():
# sharded shards don't get updates from their shards so their shard
# usage should not be relied upon
return self.get_shard_usage()['object_count'] <= 0
return True
def delete_object(self, name, timestamp, storage_policy_index=0):
"""
Mark an object deleted.
:param name: object name to be deleted
:param timestamp: timestamp when the object was marked as deleted
:param storage_policy_index: the storage policy index for the object
"""
self.put_object(name, timestamp, 0, 'application/deleted', 'noetag',
deleted=1, storage_policy_index=storage_policy_index)
def make_tuple_for_pickle(self, record):
return (record['name'], record['created_at'], record['size'],
record['content_type'], record['etag'], record['deleted'],
record['storage_policy_index'],
record['ctype_timestamp'],
record['meta_timestamp'])
def put_object(self, name, timestamp, size, content_type, etag, deleted=0,
storage_policy_index=0, ctype_timestamp=None,
meta_timestamp=None):
"""
Creates an object in the DB with its metadata.
:param name: object name to be created
:param timestamp: timestamp of when the object was created
:param size: object size
:param content_type: object content-type
:param etag: object etag
:param deleted: if True, marks the object as deleted and sets the
deleted_at timestamp to timestamp
:param storage_policy_index: the storage policy index for the object
:param ctype_timestamp: timestamp of when content_type was last
updated
:param meta_timestamp: timestamp of when metadata was last updated
"""
record = {'name': name, 'created_at': timestamp, 'size': size,
'content_type': content_type, 'etag': etag,
'deleted': deleted,
'storage_policy_index': storage_policy_index,
'ctype_timestamp': ctype_timestamp,
'meta_timestamp': meta_timestamp}
self.put_record(record)
def remove_objects(self, lower, upper, max_row=None):
"""
Removes object records in the given namespace range from the object
table.
Note that objects are removed regardless of their storage_policy_index.
:param lower: defines the lower bound of object names that will be
removed; names greater than this value will be removed; names less
than or equal to this value will not be removed.
:param upper: defines the upper bound of object names that will be
removed; names less than or equal to this value will be removed;
names greater than this value will not be removed. The empty string
is interpreted as there being no upper bound.
:param max_row: if specified only rows less than or equal to max_row
will be removed
"""
query_conditions = []
query_args = []
if max_row is not None:
query_conditions.append('ROWID <= ?')
query_args.append(str(max_row))
if lower:
query_conditions.append('name > ?')
query_args.append(lower)
if upper:
query_conditions.append('name <= ?')
query_args.append(upper)
query = 'DELETE FROM object WHERE deleted in (0, 1)'
if query_conditions:
query += ' AND ' + ' AND '.join(query_conditions)
with self.get() as conn:
conn.execute(query, query_args)
conn.commit()
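    # Illustrative sketch (hypothetical bounds): a call such as
    #   broker.remove_objects('d', 'f', max_row=1000)
    # deletes object rows with 'd' < name <= 'f' and ROWID <= 1000, regardless
    # of storage policy or deleted status.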
def _is_deleted_info(self, object_count, put_timestamp, delete_timestamp,
**kwargs):
"""
Apply delete logic to database info.
:returns: True if the DB is considered to be deleted, False otherwise
"""
# The container is considered deleted if the delete_timestamp
# value is greater than the put_timestamp, and there are no
# objects in the container.
return zero_like(object_count) and (
Timestamp(delete_timestamp) > Timestamp(put_timestamp))
def _is_deleted(self, conn):
"""
Check if the DB is considered to be deleted.
        The object count used in this check is the same as the container
        object count that would be returned in the result of :meth:`get_info`
        and exposed to a client, i.e. it is based on the container_stat view
        for the current storage policy index or relevant shard range usage.
:param conn: database conn
:returns: True if the DB is considered to be deleted, False otherwise
"""
info = conn.execute('''
SELECT put_timestamp, delete_timestamp, object_count
FROM container_stat''').fetchone()
info = dict(info)
info.update(self._get_alternate_object_stats()[1])
return self._is_deleted_info(**info)
def is_old_enough_to_reclaim(self, now, reclaim_age):
with self.get() as conn:
info = conn.execute('''
SELECT put_timestamp, delete_timestamp
FROM container_stat''').fetchone()
return (Timestamp(now - reclaim_age) >
Timestamp(info['delete_timestamp']) >
Timestamp(info['put_timestamp']))
def is_empty_enough_to_reclaim(self):
if self.is_root_container() and (self.get_shard_ranges() or
self.get_db_state() == SHARDING):
return False
return self.empty()
def is_reclaimable(self, now, reclaim_age):
return self.is_old_enough_to_reclaim(now, reclaim_age) and \
self.is_empty_enough_to_reclaim()
def get_info_is_deleted(self):
"""
Get the is_deleted status and info for the container.
:returns: a tuple, in the form (info, is_deleted) info is a dict as
returned by get_info and is_deleted is a boolean.
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return {}, True
info = self.get_info()
return info, self._is_deleted_info(**info)
def get_replication_info(self):
info = super(ContainerBroker, self).get_replication_info()
info['shard_max_row'] = self.get_max_row(SHARD_RANGE_TABLE)
return info
def _do_get_info_query(self, conn):
data = None
trailing_sync = 'x_container_sync_point1, x_container_sync_point2'
trailing_pol = 'storage_policy_index'
errors = set()
while not data:
try:
data = conn.execute(('''
SELECT account, container, created_at, put_timestamp,
delete_timestamp, status_changed_at,
object_count, bytes_used,
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, reported_bytes_used, hash,
id, %s, %s
FROM container_stat
''') % (trailing_sync, trailing_pol)).fetchone()
except sqlite3.OperationalError as err:
err_msg = str(err)
if err_msg in errors:
# only attempt migration once
raise
errors.add(err_msg)
if 'no such column: storage_policy_index' in err_msg:
trailing_pol = '0 AS storage_policy_index'
elif 'no such column: x_container_sync_point' in err_msg:
trailing_sync = '-1 AS x_container_sync_point1, ' \
'-1 AS x_container_sync_point2'
else:
raise
data = dict(data)
# populate instance cache
self._storage_policy_index = data['storage_policy_index']
self.account = data['account']
self.container = data['container']
return data
def _get_info(self):
self._commit_puts_stale_ok()
with self.get() as conn:
return self._do_get_info_query(conn)
def _populate_instance_cache(self, conn=None):
# load cached instance attributes from the database if necessary
if self.container is None:
with self.maybe_get(conn) as conn:
self._do_get_info_query(conn)
def _get_alternate_object_stats(self):
state = self.get_db_state()
if state == SHARDING:
other_info = self.get_brokers()[0]._get_info()
stats = {'object_count': other_info['object_count'],
'bytes_used': other_info['bytes_used']}
elif state == SHARDED and self.is_root_container():
stats = self.get_shard_usage()
else:
stats = {}
return state, stats
def get_info(self):
"""
Get global data for the container.
:returns: dict with keys: account, container, created_at,
put_timestamp, delete_timestamp, status_changed_at,
object_count, bytes_used, reported_put_timestamp,
reported_delete_timestamp, reported_object_count,
reported_bytes_used, hash, id, x_container_sync_point1,
x_container_sync_point2, and storage_policy_index,
db_state.
"""
data = self._get_info()
state, stats = self._get_alternate_object_stats()
data.update(stats)
data['db_state'] = state
return data
def set_x_container_sync_points(self, sync_point1, sync_point2):
with self.get() as conn:
try:
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
except sqlite3.OperationalError as err:
if 'no such column: x_container_sync_point' not in \
str(err):
raise
self._migrate_add_container_sync_points(conn)
self._set_x_container_sync_points(conn, sync_point1,
sync_point2)
conn.commit()
def _set_x_container_sync_points(self, conn, sync_point1, sync_point2):
if sync_point1 is not None and sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?,
x_container_sync_point2 = ?
''', (sync_point1, sync_point2))
elif sync_point1 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?
''', (sync_point1,))
elif sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point2 = ?
''', (sync_point2,))
def get_policy_stats(self):
with self.get() as conn:
try:
info = conn.execute('''
SELECT storage_policy_index, object_count, bytes_used
FROM policy_stat
''').fetchall()
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
info = conn.execute('''
SELECT 0 as storage_policy_index, object_count, bytes_used
FROM container_stat
''').fetchall()
policy_stats = {}
for row in info:
stats = dict(row)
key = stats.pop('storage_policy_index')
policy_stats[key] = stats
return policy_stats
def has_multiple_policies(self):
with self.get() as conn:
try:
curs = conn.execute('''
SELECT count(storage_policy_index)
FROM policy_stat
''').fetchone()
except sqlite3.OperationalError as err:
if 'no such table: policy_stat' not in str(err):
raise
# no policy_stat row
return False
if curs and curs[0] > 1:
return True
# only one policy_stat row
return False
def set_storage_policy_index(self, policy_index, timestamp=None):
"""
Update the container_stat policy_index and status_changed_at.
"""
if timestamp is None:
timestamp = Timestamp.now().internal
def _setit(conn):
conn.execute('''
INSERT OR IGNORE INTO policy_stat (storage_policy_index)
VALUES (?)
''', (policy_index,))
conn.execute('''
UPDATE container_stat
SET storage_policy_index = ?,
status_changed_at = MAX(?, status_changed_at)
WHERE storage_policy_index <> ?
''', (policy_index, timestamp, policy_index))
conn.commit()
with self.get() as conn:
try:
_setit(conn)
except sqlite3.OperationalError as err:
if not any(msg in str(err) for msg in (
"no such column: storage_policy_index",
"no such table: policy_stat")):
raise
self._migrate_add_storage_policy(conn)
_setit(conn)
self._storage_policy_index = policy_index
def reported(self, put_timestamp, delete_timestamp, object_count,
bytes_used):
"""
Update reported stats, available with container's `get_info`.
:param put_timestamp: put_timestamp to update
:param delete_timestamp: delete_timestamp to update
:param object_count: object_count to update
:param bytes_used: bytes_used to update
"""
with self.get() as conn:
conn.execute('''
UPDATE container_stat
SET reported_put_timestamp = ?, reported_delete_timestamp = ?,
reported_object_count = ?, reported_bytes_used = ?
''', (put_timestamp, delete_timestamp, object_count, bytes_used))
conn.commit()
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
path=None, storage_policy_index=0, reverse=False,
include_deleted=False, since_row=None,
transform_func=None, all_policies=False,
allow_reserved=False):
"""
Get a list of objects sorted by name starting at marker onward, up
to limit entries. Entries will begin with the prefix and will not
have the delimiter after the prefix.
:param limit: maximum number of entries to get
:param marker: marker query
:param end_marker: end marker query
:param prefix: prefix query
:param delimiter: delimiter for query
:param path: if defined, will set the prefix and delimiter based on
the path
:param storage_policy_index: storage policy index for query
:param reverse: reverse the result order.
:param include_deleted: if True, include only deleted objects; if
False (default), include only undeleted objects; otherwise, include
both deleted and undeleted objects.
:param since_row: include only items whose ROWID is greater than
the given row id; by default all rows are included.
:param transform_func: an optional function that if given will be
called for each object to get a transformed version of the object
to include in the listing; should have same signature as
:meth:`~_transform_record`; defaults to :meth:`~_transform_record`.
:param all_policies: if True, include objects for all storage policies
ignoring any value given for ``storage_policy_index``
:param allow_reserved: exclude names with reserved-byte by default
:returns: list of tuples of (name, created_at, size, content_type,
etag, deleted)
"""
if include_deleted is True:
deleted_arg = ' = 1'
elif include_deleted is False:
deleted_arg = ' = 0'
else:
deleted_arg = ' in (0, 1)'
if transform_func is None:
transform_func = self._transform_record
delim_force_gte = False
if six.PY2:
(marker, end_marker, prefix, delimiter, path) = utf8encode(
marker, end_marker, prefix, delimiter, path)
self._commit_puts_stale_ok()
if reverse:
# Reverse the markers if we are reversing the listing.
marker, end_marker = end_marker, marker
if path is not None:
prefix = path
if path:
prefix = path = path.rstrip('/') + '/'
delimiter = '/'
elif delimiter and not prefix:
prefix = ''
if prefix:
end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
orig_marker = marker
with self.get() as conn:
results = []
deleted_key = self._get_deleted_key(conn)
query_keys = ['name', 'created_at', 'size', 'content_type',
'etag', deleted_key]
while len(results) < limit:
query_args = []
query_conditions = []
if end_marker and (not prefix or end_marker < end_prefix):
query_conditions.append('name < ?')
query_args.append(end_marker)
elif prefix:
query_conditions.append('name < ?')
query_args.append(end_prefix)
if delim_force_gte:
query_conditions.append('name >= ?')
query_args.append(marker)
# Always set back to False
delim_force_gte = False
elif marker and (not prefix or marker >= prefix):
query_conditions.append('name > ?')
query_args.append(marker)
elif prefix:
query_conditions.append('name >= ?')
query_args.append(prefix)
if not allow_reserved:
query_conditions.append('name >= ?')
query_args.append(chr(ord(RESERVED_BYTE) + 1))
query_conditions.append(deleted_key + deleted_arg)
if since_row:
query_conditions.append('ROWID > ?')
query_args.append(since_row)
def build_query(keys, conditions, args):
query = 'SELECT ' + ', '.join(keys) + ' FROM object '
if conditions:
query += 'WHERE ' + ' AND '.join(conditions)
tail_query = '''
ORDER BY name %s LIMIT ?
''' % ('DESC' if reverse else '')
return query + tail_query, args + [limit - len(results)]
# storage policy filter
if all_policies:
query, args = build_query(
query_keys + ['storage_policy_index'],
query_conditions,
query_args)
else:
query, args = build_query(
query_keys + ['storage_policy_index'],
query_conditions + ['storage_policy_index = ?'],
query_args + [storage_policy_index])
try:
curs = conn.execute(query, tuple(args))
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
query, args = build_query(
query_keys + ['0 as storage_policy_index'],
query_conditions, query_args)
curs = conn.execute(query, tuple(args))
curs.row_factory = None
# Delimiters without a prefix is ignored, further if there
# is no delimiter then we can simply return the result as
# prefixes are now handled in the SQL statement.
if prefix is None or not delimiter:
return [transform_func(r) for r in curs]
# We have a delimiter and a prefix (possibly empty string) to
# handle
rowcount = 0
for row in curs:
rowcount += 1
name = row[0]
if reverse:
end_marker = name
else:
marker = name
if len(results) >= limit:
curs.close()
return results
end = name.find(delimiter, len(prefix))
if path is not None:
if name == path:
continue
if end >= 0 and len(name) > end + len(delimiter):
if reverse:
end_marker = name[:end + len(delimiter)]
else:
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
curs.close()
break
elif end >= 0:
if reverse:
end_marker = name[:end + len(delimiter)]
else:
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + len(delimiter)]
if dir_name != orig_marker:
results.append([dir_name, '0', 0, None, ''])
curs.close()
break
results.append(transform_func(row))
if not rowcount:
break
return results
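    # Illustrative sketch (hypothetical arguments): a delimiter listing such as
    #   broker.list_objects_iter(100, '', '', 'photos/', '/')
    # returns up to 100 entries under the 'photos/' prefix, rolling names with
    # a further '/' up into subdir placeholder rows, while plain object rows
    # are passed through _transform_record as
    # (name, metadata timestamp, size, content_type, etag) tuples.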
def get_objects(self, limit=None, marker='', end_marker='',
include_deleted=None, since_row=None):
"""
Returns a list of objects, including deleted objects, in all policies.
Each object in the list is described by a dict with keys {'name',
'created_at', 'size', 'content_type', 'etag', 'deleted',
'storage_policy_index'}.
:param limit: maximum number of entries to get
:param marker: if set, objects with names less than or equal to this
value will not be included in the list.
:param end_marker: if set, objects with names greater than or equal to
this value will not be included in the list.
:param include_deleted: if True, include only deleted objects; if
False, include only undeleted objects; otherwise (default), include
both deleted and undeleted objects.
:param since_row: include only items whose ROWID is greater than
the given row id; by default all rows are included.
:return: a list of dicts, each describing an object.
"""
limit = CONTAINER_LISTING_LIMIT if limit is None else limit
return self.list_objects_iter(
limit, marker, end_marker, prefix=None, delimiter=None, path=None,
reverse=False, include_deleted=include_deleted,
transform_func=self._record_to_dict, since_row=since_row,
all_policies=True, allow_reserved=True
)
def _transform_record(self, record):
"""
Returns a tuple of (name, last-modified time, size, content_type and
etag) for the given record.
The given record's created_at timestamp is decoded into separate data,
content-type and meta timestamps and the metadata timestamp is used as
the last-modified time value.
"""
t_data, t_ctype, t_meta = decode_timestamps(record[1])
return (record[0], t_meta.internal) + record[2:5]
def _record_to_dict(self, rec):
if rec:
keys = ('name', 'created_at', 'size', 'content_type', 'etag',
'deleted', 'storage_policy_index')
return dict(zip(keys, rec))
return None
def merge_items(self, item_list, source=None):
"""
Merge items into the object table.
:param item_list: list of dictionaries of {'name', 'created_at',
'size', 'content_type', 'etag', 'deleted',
'storage_policy_index', 'ctype_timestamp',
'meta_timestamp'}
:param source: if defined, update incoming_sync with the source
"""
for item in item_list:
if six.PY2 and isinstance(item['name'], six.text_type):
item['name'] = item['name'].encode('utf-8')
elif not six.PY2 and isinstance(item['name'], six.binary_type):
item['name'] = item['name'].decode('utf-8')
def _really_really_merge_items(conn):
curs = conn.cursor()
if self.get_db_version(conn) >= 1:
query_mod = ' deleted IN (0, 1) AND '
else:
query_mod = ''
curs.execute('BEGIN IMMEDIATE')
# Get sqlite records for objects in item_list that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
records = {}
for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [rec['name'] for rec in
item_list[offset:offset + SQLITE_ARG_LIMIT]]
records.update(
((rec[0], rec[6]), rec) for rec in curs.execute(
'SELECT name, created_at, size, content_type,'
'etag, deleted, storage_policy_index '
'FROM object WHERE ' + query_mod + ' name IN (%s)' %
','.join('?' * len(chunk)), chunk))
# Sort item_list into things that need adding and deleting, based
# on results of created_at query.
to_delete = set()
to_add = {}
for item in item_list:
item.setdefault('storage_policy_index', 0) # legacy
item_ident = (item['name'], item['storage_policy_index'])
existing = self._record_to_dict(records.get(item_ident))
if update_new_item_from_existing(item, existing):
if item_ident in records: # exists with older timestamp
to_delete.add(item_ident)
if item_ident in to_add: # duplicate entries in item_list
update_new_item_from_existing(item, to_add[item_ident])
to_add[item_ident] = item
if to_delete:
curs.executemany(
'DELETE FROM object WHERE ' + query_mod +
'name=? AND storage_policy_index=?',
(item_ident for item_ident in to_delete))
if to_add:
curs.executemany(
'INSERT INTO object (name, created_at, size, content_type,'
'etag, deleted, storage_policy_index) '
'VALUES (?, ?, ?, ?, ?, ?, ?)',
((rec['name'], rec['created_at'], rec['size'],
rec['content_type'], rec['etag'], rec['deleted'],
rec['storage_policy_index'])
for rec in to_add.values()))
if source:
# for replication we rely on the remote end sending merges in
# order with no gaps to increment sync_points
sync_point = item_list[-1]['ROWID']
curs.execute('''
UPDATE incoming_sync SET
sync_point=max(?, sync_point) WHERE remote_id=?
''', (sync_point, source))
if curs.rowcount < 1:
curs.execute('''
INSERT INTO incoming_sync (sync_point, remote_id)
VALUES (?, ?)
''', (sync_point, source))
conn.commit()
def _really_merge_items(conn):
return tpool.execute(_really_really_merge_items, conn)
with self.get() as conn:
try:
return _really_merge_items(conn)
except sqlite3.OperationalError as err:
if 'no such column: storage_policy_index' not in str(err):
raise
self._migrate_add_storage_policy(conn)
return _really_merge_items(conn)
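    # Illustrative sketch of the dict shape merge_items() expects; the
    # timestamp value shown is a made-up example:
    #
    #   broker.merge_items([{
    #       'name': 'o1', 'created_at': '1525354556.65758',
    #       'size': 0, 'content_type': 'text/plain',
    #       'etag': MD5_OF_EMPTY_STRING, 'deleted': 0,
    #       'storage_policy_index': 0,
    #   }])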
def merge_shard_ranges(self, shard_ranges):
"""
Merge shard ranges into the shard range table.
:param shard_ranges: a shard range or a list of shard ranges; each
shard range should be an instance of
:class:`~swift.common.utils.ShardRange` or a dict representation of
a shard range having ``SHARD_RANGE_KEYS``.
"""
if not shard_ranges:
return
if not isinstance(shard_ranges, (list, ShardRangeList)):
shard_ranges = [shard_ranges]
item_list = []
for item in shard_ranges:
if isinstance(item, ShardRange):
item = dict(item)
for col in ('name', 'lower', 'upper'):
if six.PY2 and isinstance(item[col], six.text_type):
item[col] = item[col].encode('utf-8')
elif not six.PY2 and isinstance(item[col], six.binary_type):
item[col] = item[col].decode('utf-8')
item_list.append(item)
def _really_merge_items(conn):
curs = conn.cursor()
curs.execute('BEGIN IMMEDIATE')
# Get rows for items that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
records = {}
for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [record['name'] for record
in item_list[offset:offset + SQLITE_ARG_LIMIT]]
records.update(
(rec[0], rec) for rec in curs.execute(
'SELECT %s FROM %s '
'WHERE deleted IN (0, 1) AND name IN (%s)' %
(', '.join(SHARD_RANGE_KEYS), SHARD_RANGE_TABLE,
','.join('?' * len(chunk))), chunk))
# Sort item_list into things that need adding and deleting
to_delete = set()
to_add = {}
for item in item_list:
item_ident = item['name']
existing = records.get(item_ident)
if existing:
existing = dict(zip(SHARD_RANGE_KEYS, existing))
if merge_shards(item, existing):
# exists with older timestamp
if item_ident in records:
to_delete.add(item_ident)
# duplicate entries in item_list
if (item_ident not in to_add or
merge_shards(item, to_add[item_ident])):
to_add[item_ident] = item
if to_delete:
curs.executemany(
'DELETE FROM %s WHERE deleted in (0, 1) '
'AND name = ?' % SHARD_RANGE_TABLE,
((item_ident,) for item_ident in to_delete))
if to_add:
vals = ','.join('?' * len(SHARD_RANGE_KEYS))
curs.executemany(
'INSERT INTO %s (%s) VALUES (%s)' %
(SHARD_RANGE_TABLE, ','.join(SHARD_RANGE_KEYS), vals),
tuple([item[k] for k in SHARD_RANGE_KEYS]
for item in to_add.values()))
conn.commit()
migrations = {
'no such column: reported':
self._migrate_add_shard_range_reported,
'no such column: tombstones':
self._migrate_add_shard_range_tombstones,
('no such table: %s' % SHARD_RANGE_TABLE):
self.create_shard_range_table,
}
migrations_done = set()
with self.get() as conn:
while True:
try:
return _really_merge_items(conn)
except sqlite3.OperationalError as err:
# Without the rollback, new enough (>= py37) python/sqlite3
# will panic:
# sqlite3.OperationalError: cannot start a transaction
# within a transaction
conn.rollback()
for err_str, migration in migrations.items():
if err_str in migrations_done:
continue
if err_str in str(err):
migration(conn)
migrations_done.add(err_str)
break
else:
raise
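    # Minimal usage sketch; the ShardRange arguments here are illustrative
    # only (name, timestamp, lower, upper):
    #
    #   sr = ShardRange('.shards_a/c-0', Timestamp.now(), 'a', 'm')
    #   broker.merge_shard_ranges(sr)       # a single shard range...
    #   broker.merge_shard_ranges([sr])     # ...or a list of shard ranges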
def get_reconciler_sync(self):
with self.get() as conn:
try:
return conn.execute('''
SELECT reconciler_sync_point FROM container_stat
''').fetchone()[0]
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
return -1
def update_reconciler_sync(self, point):
query = '''
UPDATE container_stat
SET reconciler_sync_point = ?
'''
with self.get() as conn:
try:
conn.execute(query, (point,))
except sqlite3.OperationalError as err:
if "no such column: reconciler_sync_point" not in str(err):
raise
self._migrate_add_storage_policy(conn)
conn.execute(query, (point,))
conn.commit()
def get_misplaced_since(self, start, count):
"""
Get a list of objects which are in a storage policy different
from the container's storage policy.
:param start: last reconciler sync point
:param count: maximum number of entries to get
:returns: list of dicts with keys: name, created_at, size,
content_type, etag, storage_policy_index
"""
qry = '''
SELECT ROWID, name, created_at, size, content_type, etag,
deleted, storage_policy_index
FROM object
WHERE ROWID > ?
AND storage_policy_index != (
SELECT storage_policy_index FROM container_stat LIMIT 1)
ORDER BY ROWID ASC LIMIT ?
'''
self._commit_puts_stale_ok()
with self.get() as conn:
try:
cur = conn.execute(qry, (start, count))
except sqlite3.OperationalError as err:
if "no such column: storage_policy_index" not in str(err):
raise
return []
return list(dict(row) for row in cur.fetchall())
def _migrate_add_container_sync_points(self, conn):
"""
Add the x_container_sync_point columns to the 'container_stat' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point1 INTEGER DEFAULT -1;
ALTER TABLE container_stat
ADD COLUMN x_container_sync_point2 INTEGER DEFAULT -1;
COMMIT;
''')
def _migrate_add_storage_policy(self, conn):
"""
Migrate the container schema to support tracking objects from
multiple storage policies. If the container_stat table has any
pending migrations, they are applied now before copying into
container_info.
* create the 'policy_stat' table.
* copy the current 'object_count' and 'bytes_used' columns to a
row in the 'policy_stat' table.
* add the storage_policy_index column to the 'object' table.
* drop the 'object_insert' and 'object_delete' triggers.
* add the 'object_insert_policy_stat' and
'object_delete_policy_stat' triggers.
* create container_info table for non-policy container info
* insert values from container_stat into container_info
* drop container_stat table
* create container_stat view
"""
# I tried just getting the list of column names in the current
# container_stat table with a pragma table_info, but could never get
# it inside the same transaction as the DDL (non-DML) statements:
# https://docs.python.org/2/library/sqlite3.html
# #controlling-transactions
# So we just apply all pending migrations to container_stat and copy a
# static known list of column names into container_info.
try:
self._migrate_add_container_sync_points(conn)
except sqlite3.OperationalError as e:
if 'duplicate column' in str(e):
conn.execute('ROLLBACK;')
else:
raise
try:
conn.executescript("""
ALTER TABLE container_stat
ADD COLUMN metadata TEXT DEFAULT '';
""")
except sqlite3.OperationalError as e:
if 'duplicate column' not in str(e):
raise
column_names = ', '.join((
'account', 'container', 'created_at', 'put_timestamp',
'delete_timestamp', 'reported_put_timestamp',
'reported_object_count', 'reported_bytes_used', 'hash', 'id',
'status', 'status_changed_at', 'metadata',
'x_container_sync_point1', 'x_container_sync_point2'))
conn.executescript(
'BEGIN;' +
POLICY_STAT_TABLE_CREATE +
'''
INSERT INTO policy_stat (
storage_policy_index, object_count, bytes_used)
SELECT 0, object_count, bytes_used
FROM container_stat;
ALTER TABLE object
ADD COLUMN storage_policy_index INTEGER DEFAULT 0;
DROP TRIGGER object_insert;
DROP TRIGGER object_delete;
''' +
POLICY_STAT_TRIGGER_SCRIPT +
CONTAINER_INFO_TABLE_SCRIPT +
'''
INSERT INTO container_info (%s)
SELECT %s FROM container_stat;
DROP TABLE IF EXISTS container_stat;
''' % (column_names, column_names) +
CONTAINER_STAT_VIEW_SCRIPT +
'COMMIT;')
def _migrate_add_shard_range_reported(self, conn):
"""
Add the reported column to the 'shard_range' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE %s
ADD COLUMN reported INTEGER DEFAULT 0;
COMMIT;
''' % SHARD_RANGE_TABLE)
def _migrate_add_shard_range_tombstones(self, conn):
"""
Add the tombstones column to the 'shard_range' table.
"""
conn.executescript('''
BEGIN;
ALTER TABLE %s
ADD COLUMN tombstones INTEGER DEFAULT -1;
COMMIT;
''' % SHARD_RANGE_TABLE)
def _reclaim_other_stuff(self, conn, age_timestamp, sync_timestamp):
super(ContainerBroker, self)._reclaim_other_stuff(
conn, age_timestamp, sync_timestamp)
# populate instance cache, but use existing conn to avoid deadlock
# when it has a pending update
self._populate_instance_cache(conn=conn)
try:
conn.execute('''
DELETE FROM %s WHERE deleted = 1 AND timestamp < ?
AND name != ?
''' % SHARD_RANGE_TABLE, (sync_timestamp, self.path))
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
raise
def _get_shard_range_rows(self, connection=None, include_deleted=False,
states=None, include_own=False,
exclude_others=False):
"""
Returns a list of shard range rows.
To get all shard ranges use ``include_own=True``. To get only the
broker's own shard range use ``include_own=True`` and
``exclude_others=True``.
:param connection: db connection
:param include_deleted: include rows marked as deleted
:param states: include only rows matching the given state(s); can be an
int or a list of ints.
:param include_own: boolean that governs whether the row whose name
matches the broker's path is included in the returned list. If
True, that row is included, otherwise it is not included. Default
is False.
:param exclude_others: boolean that governs whether the rows whose
names do not match the broker's path are included in the returned
list. If True, those rows are not included, otherwise they are
included. Default is False.
:return: a list of tuples.
"""
if exclude_others and not include_own:
return []
included_states = set()
if isinstance(states, (list, tuple, set)):
included_states.update(states)
elif states is not None:
included_states.add(states)
        # defaults to be used when legacy dbs are missing columns
default_values = {'reported': 0,
'tombstones': -1}
def do_query(conn, defaults=None):
condition = ''
conditions = []
params = []
if not include_deleted:
conditions.append('deleted=0')
if included_states:
conditions.append('state in (%s)' % ','.join(
'?' * len(included_states)))
params.extend(included_states)
if not include_own:
conditions.append('name != ?')
params.append(self.path)
if exclude_others:
conditions.append('name = ?')
params.append(self.path)
if conditions:
condition = ' WHERE ' + ' AND '.join(conditions)
columns = SHARD_RANGE_KEYS[:-2]
for column in SHARD_RANGE_KEYS[-2:]:
if column in defaults:
columns += (('%s as %s' %
(default_values[column], column)),)
else:
columns += (column,)
sql = '''
SELECT %s
FROM %s%s;
''' % (', '.join(columns), SHARD_RANGE_TABLE, condition)
data = conn.execute(sql, params)
data.row_factory = None
return [row for row in data]
with self.maybe_get(connection) as conn:
defaults = set()
attempts = len(default_values) + 1
while attempts:
attempts -= 1
try:
return do_query(conn, defaults)
except sqlite3.OperationalError as err:
if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
return []
if not attempts:
raise
new_defaults = set()
for column in default_values.keys():
if 'no such column: %s' % column in str(err):
new_defaults.add(column)
if not new_defaults:
raise
if new_defaults.intersection(defaults):
raise
defaults.update(new_defaults)
@classmethod
def resolve_shard_range_states(cls, states):
"""
Given a list of values each of which may be the name of a state, the
number of a state, or an alias, return the set of state numbers
described by the list.
The following alias values are supported: 'listing' maps to all states
that are considered valid when listing objects; 'updating' maps to all
states that are considered valid for redirecting an object update;
'auditing' maps to all states that are considered valid for a shard
container that is updating its own shard range table from a root (this
currently maps to all states except FOUND).
:param states: a list of values each of which may be the name of a
state, the number of a state, or an alias
:return: a set of integer state numbers, or None if no states are given
:raises ValueError: if any value in the given list is neither a valid
state nor a valid alias
"""
if states:
resolved_states = set()
for state in states:
if state == 'listing':
resolved_states.update(SHARD_LISTING_STATES)
elif state == 'updating':
resolved_states.update(SHARD_UPDATE_STATES)
elif state == 'auditing':
resolved_states.update(SHARD_AUDITING_STATES)
else:
resolved_states.add(ShardRange.resolve_state(state)[0])
return resolved_states
return None
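    # Example (the values shown are for illustration only): resolve the
    # 'listing' alias plus an explicit state name into a set of state
    # numbers, then use it to select shard ranges.
    #
    #   states = ContainerBroker.resolve_shard_range_states(
    #       ['listing', 'sharding'])
    #   shard_ranges = broker.get_shard_ranges(states=states)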
def get_shard_ranges(self, marker=None, end_marker=None, includes=None,
reverse=False, include_deleted=False, states=None,
include_own=False,
exclude_others=False, fill_gaps=False):
"""
Returns a list of persisted shard ranges.
:param marker: restricts the returned list to shard ranges whose
namespace includes or is greater than the marker value.
:param end_marker: restricts the returned list to shard ranges whose
namespace includes or is less than the end_marker value.
:param includes: restricts the returned list to the shard range that
includes the given value; if ``includes`` is specified then
``marker`` and ``end_marker`` are ignored.
:param reverse: reverse the result order.
:param include_deleted: include items that have the delete marker set
:param states: if specified, restricts the returned list to shard
ranges that have the given state(s); can be a list of ints or a
single int.
:param include_own: boolean that governs whether the row whose name
matches the broker's path is included in the returned list. If
True, that row is included, otherwise it is not included. Default
is False.
:param exclude_others: boolean that governs whether the rows whose
names do not match the broker's path are included in the returned
list. If True, those rows are not included, otherwise they are
included. Default is False.
:param fill_gaps: if True, insert a modified copy of own shard range to
fill any gap between the end of any found shard ranges and the
upper bound of own shard range. Gaps enclosed within the found
shard ranges are not filled.
:return: a list of instances of :class:`swift.common.utils.ShardRange`
"""
if reverse:
marker, end_marker = end_marker, marker
if marker and end_marker and marker >= end_marker:
return []
shard_ranges = [
ShardRange(*row)
for row in self._get_shard_range_rows(
include_deleted=include_deleted, states=states,
include_own=include_own,
exclude_others=exclude_others)]
shard_ranges.sort(key=ShardRange.sort_key)
shard_ranges = filter_shard_ranges(shard_ranges, includes,
marker, end_marker)
if not includes and fill_gaps:
own_shard_range = self._own_shard_range()
if shard_ranges:
last_upper = shard_ranges[-1].upper
else:
last_upper = max(marker or own_shard_range.lower,
own_shard_range.lower)
required_upper = min(end_marker or own_shard_range.upper,
own_shard_range.upper)
if required_upper > last_upper:
filler_sr = self.get_own_shard_range()
filler_sr.lower = last_upper
filler_sr.upper = required_upper
shard_ranges.append(filler_sr)
if reverse:
shard_ranges.reverse()
return shard_ranges
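    # Illustrative call (marker/end_marker values are arbitrary examples):
    # fetch undeleted shard ranges covering the namespace from 'a' up to 'm',
    # filling any trailing gap up to the requested end_marker.
    #
    #   ranges = broker.get_shard_ranges(marker='a', end_marker='m',
    #                                    states=SHARD_LISTING_STATES,
    #                                    fill_gaps=True)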
def _own_shard_range(self, no_default=False):
shard_ranges = self.get_shard_ranges(include_own=True,
include_deleted=True,
exclude_others=True)
if shard_ranges:
own_shard_range = shard_ranges[0]
elif no_default:
return None
else:
own_shard_range = ShardRange(
self.path, Timestamp.now(), ShardRange.MIN, ShardRange.MAX,
state=ShardRange.ACTIVE)
return own_shard_range
def get_own_shard_range(self, no_default=False):
"""
Returns a shard range representing this broker's own shard range. If no
such range has been persisted in the broker's shard ranges table then a
default shard range representing the entire namespace will be returned.
The returned shard range will be updated with the current object stats
for this broker and a meta timestamp set to the current time. For these
values to be persisted the caller must merge the shard range.
:param no_default: if True and the broker's own shard range is not
found in the shard ranges table then None is returned, otherwise a
default shard range is returned.
:return: an instance of :class:`~swift.common.utils.ShardRange`
"""
own_shard_range = self._own_shard_range(no_default=no_default)
if own_shard_range:
info = self.get_info()
own_shard_range.update_meta(
info['object_count'], info['bytes_used'])
return own_shard_range
def is_own_shard_range(self, shard_range):
return shard_range.name == self.path
def enable_sharding(self, epoch):
"""
Updates this broker's own shard range with the given epoch, sets its
state to SHARDING and persists it in the DB.
        :param epoch: a :class:`~swift.common.utils.Timestamp`
:return: the broker's updated own shard range.
"""
own_shard_range = self._own_shard_range()
own_shard_range.update_state(ShardRange.SHARDING, epoch)
own_shard_range.epoch = epoch
self.merge_shard_ranges(own_shard_range)
return own_shard_range
def get_shard_usage(self):
"""
Get the aggregate object stats for all shard ranges in states ACTIVE,
SHARDING or SHRINKING.
:return: a dict with keys {bytes_used, object_count}
"""
shard_ranges = self.get_shard_ranges(states=SHARD_STATS_STATES)
return {'bytes_used': sum(sr.bytes_used for sr in shard_ranges),
'object_count': sum(sr.object_count for sr in shard_ranges)}
def get_all_shard_range_data(self):
"""
Returns a list of all shard range data, including own shard range and
deleted shard ranges.
:return: A list of dict representations of a ShardRange.
"""
shard_ranges = self.get_shard_ranges(include_deleted=True,
include_own=True)
return [dict(sr) for sr in shard_ranges]
def set_sharding_state(self):
"""
Creates and initializes a fresh DB file in preparation for sharding a
retiring DB. The broker's own shard range must have an epoch timestamp
for this method to succeed.
:return: True if the fresh DB was successfully created, False
otherwise.
"""
epoch = self.get_own_shard_range().epoch
if not epoch:
self.logger.warning("Container '%s' cannot be set to sharding "
"state: missing epoch", self.path)
return False
state = self.get_db_state()
if not state == UNSHARDED:
self.logger.warning("Container '%s' cannot be set to sharding "
"state while in %s state", self.path, state)
return False
info = self.get_info()
# The tmp_dir is cleaned up by the replicators after reclaim_age, so if
# we initially create the fresh DB there, we will already have cleanup
# covered if there is an error.
tmp_dir = os.path.join(self.get_device_path(), 'tmp')
if not os.path.exists(tmp_dir):
mkdirs(tmp_dir)
tmp_db_file = os.path.join(tmp_dir, "fresh%s.db" % str(uuid4()))
fresh_broker = ContainerBroker(tmp_db_file, self.timeout, self.logger,
self.account, self.container)
fresh_broker.initialize(info['put_timestamp'],
info['storage_policy_index'])
# copy relevant data from the retiring db to the fresh db
fresh_broker.update_metadata(self.metadata)
fresh_broker.merge_shard_ranges(self.get_all_shard_range_data())
# copy sync points so that any peer in sync with retiring db will
# appear to be in sync with the fresh db, although the peer shouldn't
# attempt to replicate objects to a db with shard ranges.
for incoming in (True, False):
syncs = self.get_syncs(incoming)
fresh_broker.merge_syncs(syncs, incoming)
max_row = self.get_max_row()
with fresh_broker.get() as fresh_broker_conn:
# Initialise the rowid to continue from where the retiring db ended
try:
sql = "INSERT into object " \
"(ROWID, name, created_at, size, content_type, etag) " \
"values (?, 'tmp_sharding', ?, 0, '', ?)"
fresh_broker_conn.execute(
sql, (max_row, Timestamp.now().internal,
MD5_OF_EMPTY_STRING))
fresh_broker_conn.execute(
'DELETE FROM object WHERE ROWID = ?', (max_row,))
fresh_broker_conn.commit()
except sqlite3.OperationalError as err:
self.logger.error(
'Failed to set the ROWID of the fresh database for %s: %s',
self.path, err)
return False
# Set the created_at and hash in the container_info table the same
# in both brokers
try:
fresh_broker_conn.execute(
'UPDATE container_stat SET created_at=?',
(info['created_at'],))
fresh_broker_conn.commit()
except sqlite3.OperationalError as err:
self.logger.error('Failed to set matching created_at time in '
'the fresh database for %s: %s',
self.path, err)
return False
# Rename to the new database
fresh_db_filename = make_db_file_path(self._db_file, epoch)
renamer(tmp_db_file, fresh_db_filename)
self.reload_db_files()
return True
def set_sharded_state(self):
"""
        Unlinks the broker's retiring DB file.
:return: True if the retiring DB was successfully unlinked, False
otherwise.
"""
state = self.get_db_state()
if not state == SHARDING:
self.logger.warning("Container %r cannot be set to sharded "
"state while in %s state",
self.path, state)
return False
self.reload_db_files()
if len(self.db_files) < 2:
self.logger.warning(
'Refusing to delete db file for %r: no fresher db file found '
'in %r.', self.path, self.db_files)
return False
retiring_file = self.db_files[-2]
try:
os.unlink(retiring_file)
self.logger.debug('Unlinked retiring db %r', retiring_file)
except OSError as err:
if err.errno != errno.ENOENT:
                self.logger.exception('Failed to unlink %r' % retiring_file)
return False
self.reload_db_files()
if len(self.db_files) >= 2:
self.logger.warning(
'Still have multiple db files after unlinking %r: %r',
retiring_file, self.db_files)
return False
return True
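    # Rough lifecycle sketch (the epoch value is illustrative): a broker is
    # moved through the sharding states roughly as follows;
    # set_sharding_state() and set_sharded_state() return False if their
    # preconditions are not met.
    #
    #   epoch = Timestamp.now()
    #   broker.enable_sharding(epoch)    # own shard range -> SHARDING
    #   broker.set_sharding_state()      # create fresh DB beside retiring DB
    #   # ...cleaving happens elsewhere (e.g. the sharder daemon)...
    #   broker.set_sharded_state()       # unlink the retiring DB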
def get_brokers(self):
"""
Return a list of brokers for component dbs. The list has two entries
while the db state is sharding: the first entry is a broker for the
retiring db with ``skip_commits`` set to ``True``; the second entry is
a broker for the fresh db with ``skip_commits`` set to ``False``. For
any other db state the list has one entry.
:return: a list of :class:`~swift.container.backend.ContainerBroker`
"""
if len(self.db_files) > 2:
self.logger.warning('Unexpected db files will be ignored: %s' %
self.db_files[:-2])
brokers = []
db_files = self.db_files[-2:]
while db_files:
db_file = db_files.pop(0)
sub_broker = ContainerBroker(
db_file, self.timeout, self.logger, self.account,
self.container, self.pending_timeout, self.stale_reads_ok,
force_db_file=True, skip_commits=bool(db_files))
brokers.append(sub_broker)
return brokers
def set_sharding_sysmeta(self, key, value):
"""
Updates the broker's metadata stored under the given key
prefixed with a sharding specific namespace.
:param key: metadata key in the sharding metadata namespace.
:param value: metadata value
"""
self.update_metadata({'X-Container-Sysmeta-Shard-' + key:
(value, Timestamp.now().internal)})
def get_sharding_sysmeta_with_timestamps(self):
"""
Returns sharding specific info from the broker's metadata with
timestamps.
:return: a dict of sharding info with their timestamps.
"""
prefix = 'X-Container-Sysmeta-Shard-'
return {
k[len(prefix):]: v
for k, v in self.metadata.items()
if k.startswith(prefix)
}
def get_sharding_sysmeta(self, key=None):
"""
Returns sharding specific info from the broker's metadata.
:param key: if given the value stored under ``key`` in the sharding
info will be returned.
:return: either a dict of sharding info or the value stored under
``key`` in that dict.
"""
info = self.get_sharding_sysmeta_with_timestamps()
if key:
return info.get(key, (None, None))[0]
else:
return {k: v[0] for k, v in info.items()}
def _get_root_meta(self):
"""
Get the (unquoted) root path, plus the header the info came from.
        If no info is available, returns ``(None, None)``.
"""
path = self.get_sharding_sysmeta('Quoted-Root')
if path:
return 'X-Container-Sysmeta-Shard-Quoted-Root', unquote(path)
path = self.get_sharding_sysmeta('Root')
if path:
return 'X-Container-Sysmeta-Shard-Root', path
return None, None
def _load_root_info(self):
"""
Load the root container name and account for the container represented
by this broker.
The root container path, if set, is stored in sysmeta under the key
``X-Container-Sysmeta-Shard-Root``. If this sysmeta is not set then the
container is considered to be a root container and ``_root_account``
and ``_root_container`` are set equal to the broker ``account`` and
``container`` attributes respectively.
"""
hdr, path = self._get_root_meta()
if not path:
# Ensure account/container get populated
self._populate_instance_cache()
self._root_account = self.account
self._root_container = self.container
return
try:
self._root_account, self._root_container = split_path(
'/' + path, 2, 2)
except ValueError:
raise ValueError("Expected %s to be of the form "
"'account/container', got %r" % (hdr, path))
@property
def root_account(self):
if not self._root_account:
self._load_root_info()
return self._root_account
@property
def root_container(self):
if not self._root_container:
self._load_root_info()
return self._root_container
@property
def root_path(self):
return '%s/%s' % (self.root_account, self.root_container)
def is_root_container(self):
"""
Returns True if this container is a root container, False otherwise.
A root container is a container that is not a shard of another
container.
"""
_, path = self._get_root_meta()
if path is not None:
# We have metadata telling us where the root is; it's
# authoritative; shards should always have this metadata even when
# deleted
return self.path == path
# Else, we're either a root or a legacy deleted shard whose sharding
# sysmeta was deleted
# Use internal method so we don't try to update stats.
own_shard_range = self._own_shard_range(no_default=True)
if not own_shard_range:
return True # Never been sharded
if own_shard_range.deleted:
# When shard ranges shrink, they get marked deleted
return False
else:
# But even when a root collapses, empties, and gets deleted, its
# own_shard_range is left alive
return True
def _get_next_shard_range_upper(self, shard_size, last_upper=None):
"""
Returns the name of the object that is ``shard_size`` rows beyond
        ``last_upper`` in the object table ordered by name. If ``last_upper``
        is not given then scanning starts at the beginning of the object
        table, ordered by name.
        :param shard_size: the number of rows to advance beyond ``last_upper``.
:param last_upper: the upper bound of the last found shard range.
:return: an object name, or None if the number of rows beyond
``last_upper`` is less than ``shard_size``.
"""
self._commit_puts_stale_ok()
with self.get() as connection:
sql = ('SELECT name FROM object WHERE %s=0 ' %
self._get_deleted_key(connection))
args = []
if last_upper:
sql += "AND name > ? "
args.append(str(last_upper))
sql += "ORDER BY name LIMIT 1 OFFSET %d" % (shard_size - 1)
row = connection.execute(sql, args).fetchone()
return row['name'] if row else None
def find_shard_ranges(self, shard_size, limit=-1, existing_ranges=None,
minimum_shard_size=1):
"""
Scans the container db for shard ranges. Scanning will start at the
        upper bound of any ``existing_ranges`` that are given, otherwise
at ``ShardRange.MIN``. Scanning will stop when ``limit`` shard ranges
have been found or when no more shard ranges can be found. In the
latter case, the upper bound of the final shard range will be equal to
the upper bound of the container namespace.
This method does not modify the state of the db; callers are
responsible for persisting any shard range data in the db.
:param shard_size: the size of each shard range
:param limit: the maximum number of shard points to be found; a
negative value (default) implies no limit.
:param existing_ranges: an optional list of existing ShardRanges; if
given, this list should be sorted in order of upper bounds; the
scan for new shard ranges will start at the upper bound of the last
existing ShardRange.
:param minimum_shard_size: Minimum size of the final shard range. If
this is greater than one then the final shard range may be extended
            to more than ``shard_size`` rows in order to avoid a further shard
            range with fewer than ``minimum_shard_size`` rows.
:return: a tuple; the first value in the tuple is a list of
dicts each having keys {'index', 'lower', 'upper', 'object_count'}
in order of ascending 'upper'; the second value in the tuple is a
boolean which is True if the last shard range has been found, False
otherwise.
"""
existing_ranges = existing_ranges or []
minimum_shard_size = max(minimum_shard_size, 1)
object_count = self.get_info().get('object_count', 0)
if shard_size + minimum_shard_size > object_count:
# container not big enough to shard
return [], False
own_shard_range = self.get_own_shard_range()
progress = 0
progress_reliable = True
# update initial state to account for any existing shard ranges
if existing_ranges:
if all([sr.state == ShardRange.FOUND
for sr in existing_ranges]):
progress = sum([sr.object_count for sr in existing_ranges])
else:
# else: object count in existing shard ranges may have changed
# since they were found so progress cannot be reliably
# calculated; use default progress of zero - that's ok,
# progress is used for optimisation not correctness
progress_reliable = False
last_shard_upper = existing_ranges[-1].upper
if last_shard_upper >= own_shard_range.upper:
# == implies all ranges were previously found
# > implies an acceptor range has been set into which this
# shard should cleave itself
return [], True
else:
last_shard_upper = own_shard_range.lower
found_ranges = []
sub_broker = self.get_brokers()[0]
index = len(existing_ranges)
while limit is None or limit < 0 or len(found_ranges) < limit:
if progress + shard_size + minimum_shard_size > object_count:
# next shard point is within minimum_size rows of the final
# object name, or beyond it, so don't bother with db query.
# This shard will have <= shard_size + (minimum_size - 1) rows.
next_shard_upper = None
else:
try:
next_shard_upper = sub_broker._get_next_shard_range_upper(
shard_size, last_shard_upper)
except (sqlite3.OperationalError, LockTimeout):
self.logger.exception(
"Problem finding shard upper in %r: " % self.db_file)
break
if (next_shard_upper is None or
next_shard_upper > own_shard_range.upper):
# We reached the end of the container namespace, or possibly
# beyond if the container has misplaced objects. In either case
# limit the final shard range to own_shard_range.upper.
next_shard_upper = own_shard_range.upper
if progress_reliable:
# object count may include misplaced objects so the final
# shard size may not be accurate until cleaved, but at
# least the sum of shard sizes will equal the unsharded
# object_count
shard_size = object_count - progress
# NB shard ranges are created with a non-zero object count so that
# the apparent container object count remains constant, and the
# container is non-deletable while shards have been found but not
# yet cleaved
found_ranges.append(
{'index': index,
'lower': str(last_shard_upper),
'upper': str(next_shard_upper),
'object_count': shard_size})
if next_shard_upper == own_shard_range.upper:
return found_ranges, True
progress += shard_size
last_shard_upper = next_shard_upper
index += 1
return found_ranges, False
| openstack/swift | swift/container/backend.py | Python | apache-2.0 | 98,964 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json, traceback
from PyQt4.Qt import QDialogButtonBox
from calibre.gui2 import error_dialog, warning_dialog
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.template_functions_ui import Ui_Form
from calibre.gui2.widgets import PythonHighlighter
from calibre.utils.formatter_functions import (formatter_functions,
compile_user_function, load_user_template_functions)
class ConfigWidget(ConfigWidgetBase, Ui_Form):
def genesis(self, gui):
self.gui = gui
self.db = gui.library_view.model().db
help_text = _('''
<p>Here you can add and remove functions used in template processing. A
template function is written in python. It takes information from the
book, processes it in some way, then returns a string result. Functions
defined here are usable in templates in the same way that builtin
functions are usable. The function must be named <b>evaluate</b>, and
must have the signature shown below.</p>
<p><code>evaluate(self, formatter, kwargs, mi, locals, your parameters)
→ returning a unicode string</code></p>
<p>The parameters of the evaluate function are:
<ul>
<li><b>formatter</b>: the instance of the formatter being used to
evaluate the current template. You can use this to do recursive
template evaluation.</li>
<li><b>kwargs</b>: a dictionary of metadata. Field values are in this
dictionary.
<li><b>mi</b>: a Metadata instance. Used to get field information.
This parameter can be None in some cases, such as when evaluating
non-book templates.</li>
<li><b>locals</b>: the local variables assigned to by the current
template program.</li>
<li><b>your parameters</b>: You must supply one or more formal
parameters. The number must match the arg count box, unless arg count is
    -1 (variable number of arguments), in which case the last argument must
be *args. At least one argument is required, and is usually the value of
the field being operated upon. Note that when writing in basic template
mode, the user does not provide this first argument. Instead it is
supplied by the formatter.</li>
</ul></p>
<p>
The following example function checks the value of the field. If the
field is not empty, the field's value is returned, otherwise the value
EMPTY is returned.
<pre>
name: my_ifempty
arg count: 1
doc: my_ifempty(val) -- return val if it is not empty, otherwise the string 'EMPTY'
program code:
def evaluate(self, formatter, kwargs, mi, locals, val):
if val:
return val
else:
return 'EMPTY'</pre>
This function can be called in any of the three template program modes:
<ul>
<li>single-function mode: {tags:my_ifempty()}</li>
<li>template program mode: {tags:'my_ifempty($)'}</li>
<li>general program mode: program: my_ifempty(field('tags'))</li>
    </ul></p>
''')
self.textBrowser.setHtml(help_text)
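    # A hedged sketch (not shipped code) of the variable-argument case
    # described in the help text above, i.e. "arg count" set to -1 with a
    # trailing *args parameter:
    #
    #   def evaluate(self, formatter, kwargs, mi, locals, *args):
    #       # return the first non-empty value among the arguments
    #       for val in args:
    #           if val:
    #               return val
    #       return ''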
def initialize(self):
try:
self.builtin_source_dict = json.loads(P('template-functions.json', data=True,
allow_user_override=False).decode('utf-8'))
except:
traceback.print_exc()
self.builtin_source_dict = {}
self.funcs = formatter_functions().get_functions()
self.builtins = formatter_functions().get_builtins_and_aliases()
self.build_function_names_box()
self.function_name.currentIndexChanged[str].connect(self.function_index_changed)
self.function_name.editTextChanged.connect(self.function_name_edited)
self.argument_count.valueChanged.connect(self.enable_replace_button)
self.documentation.textChanged.connect(self.enable_replace_button)
self.program.textChanged.connect(self.enable_replace_button)
self.create_button.clicked.connect(self.create_button_clicked)
self.delete_button.clicked.connect(self.delete_button_clicked)
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
self.replace_button.setEnabled(False)
self.clear_button.clicked.connect(self.clear_button_clicked)
self.replace_button.clicked.connect(self.replace_button_clicked)
self.program.setTabStopWidth(20)
self.highlighter = PythonHighlighter(self.program.document())
def enable_replace_button(self):
self.replace_button.setEnabled(self.delete_button.isEnabled())
def clear_button_clicked(self):
self.build_function_names_box()
self.program.clear()
self.documentation.clear()
self.argument_count.clear()
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
def build_function_names_box(self, scroll_to='', set_to=''):
self.function_name.blockSignals(True)
func_names = sorted(self.funcs)
self.function_name.clear()
self.function_name.addItem('')
self.function_name.addItems(func_names)
self.function_name.setCurrentIndex(0)
if set_to:
self.function_name.setEditText(set_to)
self.create_button.setEnabled(True)
self.function_name.blockSignals(False)
if scroll_to:
idx = self.function_name.findText(scroll_to)
if idx >= 0:
self.function_name.setCurrentIndex(idx)
if scroll_to not in self.builtins:
self.delete_button.setEnabled(True)
def delete_button_clicked(self):
name = unicode(self.function_name.currentText())
if name in self.builtins:
            error_dialog(self.gui, _('Template functions'),
                    _('You cannot delete a built-in function'), show=True)
            return
if name in self.funcs:
del self.funcs[name]
self.changed_signal.emit()
self.create_button.setEnabled(True)
self.delete_button.setEnabled(False)
self.build_function_names_box(set_to=name)
self.program.setReadOnly(False)
else:
error_dialog(self.gui, _('Template functions'),
_('Function not defined'), show=True)
def create_button_clicked(self):
self.changed_signal.emit()
name = unicode(self.function_name.currentText())
if name in self.funcs:
error_dialog(self.gui, _('Template functions'),
_('Name %s already used')%(name,), show=True)
return
if self.argument_count.value() == 0:
box = warning_dialog(self.gui, _('Template functions'),
_('Argument count should be -1 or greater than zero. '
'Setting it to zero means that this function cannot '
'be used in single function mode.'), det_msg = '',
show=False)
box.bb.setStandardButtons(box.bb.standardButtons() | QDialogButtonBox.Cancel)
box.det_msg_toggle.setVisible(False)
if not box.exec_():
return
try:
prog = unicode(self.program.toPlainText())
cls = compile_user_function(name, unicode(self.documentation.toPlainText()),
self.argument_count.value(), prog)
self.funcs[name] = cls
self.build_function_names_box(scroll_to=name)
except:
error_dialog(self.gui, _('Template functions'),
_('Exception while compiling function'), show=True,
det_msg=traceback.format_exc())
def function_name_edited(self, txt):
self.documentation.setReadOnly(False)
self.argument_count.setReadOnly(False)
self.create_button.setEnabled(True)
self.replace_button.setEnabled(False)
self.program.setReadOnly(False)
def function_index_changed(self, txt):
txt = unicode(txt)
self.create_button.setEnabled(False)
if not txt:
self.argument_count.clear()
self.documentation.clear()
self.documentation.setReadOnly(False)
self.argument_count.setReadOnly(False)
return
func = self.funcs[txt]
self.argument_count.setValue(func.arg_count)
self.documentation.setText(func.doc)
if txt in self.builtins:
if hasattr(func, 'program_text') and func.program_text:
self.program.setPlainText(func.program_text)
elif txt in self.builtin_source_dict:
self.program.setPlainText(self.builtin_source_dict[txt])
else:
self.program.setPlainText(_('function source code not available'))
self.documentation.setReadOnly(True)
self.argument_count.setReadOnly(True)
self.program.setReadOnly(True)
self.delete_button.setEnabled(False)
else:
self.program.setPlainText(func.program_text)
self.delete_button.setEnabled(True)
self.program.setReadOnly(False)
self.replace_button.setEnabled(False)
def replace_button_clicked(self):
self.delete_button_clicked()
self.create_button_clicked()
def refresh_gui(self, gui):
pass
def commit(self):
# formatter_functions().reset_to_builtins()
pref_value = []
for name, cls in self.funcs.iteritems():
if name not in self.builtins:
pref_value.append((cls.name, cls.doc, cls.arg_count, cls.program_text))
self.db.prefs.set('user_template_functions', pref_value)
load_user_template_functions(self.db.library_id, pref_value)
return False
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Advanced', 'TemplateFunctions')
| insomnia-lab/calibre | src/calibre/gui2/preferences/template_functions.py | Python | gpl-3.0 | 10,331 |
# remote.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
# Module implementing a remote object allowing easy access to git remotes
from exc import GitCommandError
from ConfigParser import NoOptionError
from config import SectionConstraint
from git.util import (
LazyMixin,
Iterable,
IterableList,
RemoteProgress
)
from refs import (
Reference,
RemoteReference,
SymbolicReference,
TagReference
)
from git.util import join_path
from gitdb.util import join
import re
import os
import sys
__all__ = ('RemoteProgress', 'PushInfo', 'FetchInfo', 'Remote')
#{ Utilities
def digest_process_messages(fh, progress):
"""Read progress messages from file-like object fh, supplying the respective
progress messages to the progress instance.
    :param fh: File handle to read from
    :param progress: RemoteProgress instance that receives the parsed
        progress lines
:return: list(line, ...) list of lines without linebreaks that did
not contain progress information"""
line_so_far = ''
dropped_lines = list()
while True:
char = fh.read(1)
if not char:
break
if char in ('\r', '\n') and line_so_far:
dropped_lines.extend(progress._parse_progress_line(line_so_far))
line_so_far = ''
else:
line_so_far += char
# END process parsed line
# END while file is not done reading
return dropped_lines
def finalize_process(proc):
"""Wait for the process (clone, fetch, pull or push) and handle its errors accordingly"""
try:
proc.wait()
except GitCommandError,e:
# if a push has rejected items, the command has non-zero return status
# a return status of 128 indicates a connection error - reraise the previous one
if proc.poll() == 128:
raise
pass
# END exception handling
def add_progress(kwargs, git, progress):
"""Add the --progress flag to the given kwargs dict if supported by the
git command. If the actual progress in the given progress instance is not
given, we do not request any progress
:return: possibly altered kwargs"""
if progress is not None:
v = git.version_info
        # compare the whole version tuple - checking each component with 'or'
        # would wrongly enable --progress for e.g. 1.6.x releases
        if v > (1, 7, 0, 3):
kwargs['progress'] = True
#END handle --progress
#END handle progress
return kwargs
#} END utilities
class PushInfo(object):
"""
Carries information about the result of a push operation of a single head::
info = remote.push()[0]
info.flags # bitflags providing more information about the result
info.local_ref # Reference pointing to the local reference that was pushed
# It is None if the ref was deleted.
info.remote_ref_string # path to the remote reference located on the remote side
info.remote_ref # Remote Reference on the local side corresponding to
# the remote_ref_string. It can be a TagReference as well.
info.old_commit # commit at which the remote_ref was standing before we pushed
# it to local_ref.commit. Will be None if an error was indicated
info.summary # summary line providing human readable english text about the push
"""
__slots__ = ('local_ref', 'remote_ref_string', 'flags', 'old_commit', '_remote', 'summary')
NEW_TAG, NEW_HEAD, NO_MATCH, REJECTED, REMOTE_REJECTED, REMOTE_FAILURE, DELETED, \
FORCED_UPDATE, FAST_FORWARD, UP_TO_DATE, ERROR = [ 1 << x for x in range(11) ]
_flag_map = { 'X' : NO_MATCH, '-' : DELETED, '*' : 0,
'+' : FORCED_UPDATE, ' ' : FAST_FORWARD,
'=' : UP_TO_DATE, '!' : ERROR }
def __init__(self, flags, local_ref, remote_ref_string, remote, old_commit=None,
summary=''):
""" Initialize a new instance """
self.flags = flags
self.local_ref = local_ref
self.remote_ref_string = remote_ref_string
self._remote = remote
self.old_commit = old_commit
self.summary = summary
@property
def remote_ref(self):
"""
:return:
Remote Reference or TagReference in the local repository corresponding
to the remote_ref_string kept in this instance."""
# translate heads to a local remote, tags stay as they are
if self.remote_ref_string.startswith("refs/tags"):
return TagReference(self._remote.repo, self.remote_ref_string)
elif self.remote_ref_string.startswith("refs/heads"):
remote_ref = Reference(self._remote.repo, self.remote_ref_string)
return RemoteReference(self._remote.repo, "refs/remotes/%s/%s" % (str(self._remote), remote_ref.name))
else:
raise ValueError("Could not handle remote ref: %r" % self.remote_ref_string)
# END
@classmethod
def _from_line(cls, remote, line):
"""Create a new PushInfo instance as parsed from line which is expected to be like
refs/heads/master:refs/heads/master 05d2687..1d0568e"""
control_character, from_to, summary = line.split('\t', 3)
flags = 0
# control character handling
try:
flags |= cls._flag_map[ control_character ]
except KeyError:
raise ValueError("Control Character %r unknown as parsed from line %r" % (control_character, line))
# END handle control character
# from_to handling
from_ref_string, to_ref_string = from_to.split(':')
if flags & cls.DELETED:
from_ref = None
else:
from_ref = Reference.from_path(remote.repo, from_ref_string)
# commit handling, could be message or commit info
old_commit = None
if summary.startswith('['):
if "[rejected]" in summary:
flags |= cls.REJECTED
elif "[remote rejected]" in summary:
flags |= cls.REMOTE_REJECTED
elif "[remote failure]" in summary:
flags |= cls.REMOTE_FAILURE
elif "[no match]" in summary:
flags |= cls.ERROR
elif "[new tag]" in summary:
flags |= cls.NEW_TAG
elif "[new branch]" in summary:
flags |= cls.NEW_HEAD
# uptodate encoded in control character
else:
# fast-forward or forced update - was encoded in control character,
# but we parse the old and new commit
split_token = "..."
if control_character == " ":
split_token = ".."
old_sha, new_sha = summary.split(' ')[0].split(split_token)
# have to use constructor here as the sha usually is abbreviated
old_commit = remote.repo.commit(old_sha)
# END message handling
return PushInfo(flags, from_ref, to_ref_string, remote, old_commit, summary)
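# Illustrative example of a porcelain push line that _from_line() consumes
# (the sha values are made up); with a ' ' control character this parses to a
# FAST_FORWARD flag and old_commit taken from the '05d2687..1d0568e' range:
#
#    " \trefs/heads/master:refs/heads/master\t05d2687..1d0568e"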
class FetchInfo(object):
"""
Carries information about the results of a fetch operation of a single head::
info = remote.fetch()[0]
info.ref # Symbolic Reference or RemoteReference to the changed
# remote head or FETCH_HEAD
info.flags # additional flags to be & with enumeration members,
# i.e. info.flags & info.REJECTED
# is 0 if ref is SymbolicReference
info.note # additional notes given by git-fetch intended for the user
info.old_commit # if info.flags & info.FORCED_UPDATE|info.FAST_FORWARD,
# field is set to the previous location of ref, otherwise None
"""
__slots__ = ('ref','old_commit', 'flags', 'note')
NEW_TAG, NEW_HEAD, HEAD_UPTODATE, TAG_UPDATE, REJECTED, FORCED_UPDATE, \
FAST_FORWARD, ERROR = [ 1 << x for x in range(8) ]
# %c %-*s %-*s -> %s (%s)
    re_fetch_result = re.compile(r"^\s*(.) (\[?[\w\s\.]+\]?)\s+(.+) -> ([/\w_\+\.-]+)( \(.*\)?$)?")
_flag_map = { '!' : ERROR, '+' : FORCED_UPDATE, '-' : TAG_UPDATE, '*' : 0,
'=' : HEAD_UPTODATE, ' ' : FAST_FORWARD }
def __init__(self, ref, flags, note = '', old_commit = None):
"""
Initialize a new instance
"""
self.ref = ref
self.flags = flags
self.note = note
self.old_commit = old_commit
def __str__(self):
return self.name
@property
def name(self):
""":return: Name of our remote ref"""
return self.ref.name
@property
def commit(self):
""":return: Commit of our remote ref"""
return self.ref.commit
@classmethod
def _from_line(cls, repo, line, fetch_line):
"""Parse information from the given line as returned by git-fetch -v
and return a new FetchInfo object representing this information.
We can handle a line as follows
"%c %-*s %-*s -> %s%s"
Where c is either ' ', !, +, -, *, or =
! means error
+ means success forcing update
- means a tag was updated
* means birth of new branch or tag
= means the head was up to date ( and not moved )
' ' means a fast-forward
fetch line is the corresponding line from FETCH_HEAD, like
acb0fa8b94ef421ad60c8507b634759a472cd56c not-for-merge branch '0.1.7RC' of /tmp/tmpya0vairemote_repo"""
match = cls.re_fetch_result.match(line)
if match is None:
raise ValueError("Failed to parse line: %r" % line)
# parse lines
control_character, operation, local_remote_ref, remote_local_ref, note = match.groups()
try:
new_hex_sha, fetch_operation, fetch_note = fetch_line.split("\t")
ref_type_name, fetch_note = fetch_note.split(' ', 1)
except ValueError: # unpack error
raise ValueError("Failed to parse FETCH__HEAD line: %r" % fetch_line)
# handle FETCH_HEAD and figure out ref type
# If we do not specify a target branch like master:refs/remotes/origin/master,
# the fetch result is stored in FETCH_HEAD which destroys the rule we usually
# have. In that case we use a symbolic reference which is detached
ref_type = None
if remote_local_ref == "FETCH_HEAD":
ref_type = SymbolicReference
elif ref_type_name in ("remote-tracking", "branch"):
# note: remote-tracking is just the first part of the 'remote-tracking branch' token.
            # We don't parse it correctly, but it's enough to know what to do, and it's new in git 1.7.x
ref_type = RemoteReference
elif ref_type_name == "tag":
ref_type = TagReference
else:
raise TypeError("Cannot handle reference type: %r" % ref_type_name)
#END handle ref type
# create ref instance
if ref_type is SymbolicReference:
remote_local_ref = ref_type(repo, "FETCH_HEAD")
else:
# determine prefix. Tags are usually pulled into refs/tags, they may have subdirectories.
# It is not clear sometimes where exactly the item is, unless we have an absolute path as indicated
            # by the 'refs/' prefix. Otherwise even a tag could be in refs/remotes, which is when it will have the
# 'tags/' subdirectory in its path.
# We don't want to test for actual existence, but try to figure everything out analytically.
ref_path = None
remote_local_ref = remote_local_ref.strip()
if remote_local_ref.startswith(Reference._common_path_default + "/"):
# always use actual type if we get absolute paths
# Will always be the case if something is fetched outside of refs/remotes (if its not a tag)
ref_path = remote_local_ref
if ref_type is not TagReference and not remote_local_ref.startswith(RemoteReference._common_path_default + "/"):
ref_type = Reference
#END downgrade remote reference
elif ref_type is TagReference and 'tags/' in remote_local_ref:
# even though its a tag, it is located in refs/remotes
ref_path = join_path(RemoteReference._common_path_default, remote_local_ref)
else:
ref_path = join_path(ref_type._common_path_default, remote_local_ref)
#END obtain refpath
# even though the path could be within the git conventions, we make
# sure we respect whatever the user wanted, and disabled path checking
remote_local_ref = ref_type(repo, ref_path, check_path=False)
# END create ref instance
note = ( note and note.strip() ) or ''
# parse flags from control_character
flags = 0
try:
flags |= cls._flag_map[control_character]
except KeyError:
raise ValueError("Control character %r unknown as parsed from line %r" % (control_character, line))
        # END control char exception handling
# parse operation string for more info - makes no sense for symbolic refs
old_commit = None
if isinstance(remote_local_ref, Reference):
if 'rejected' in operation:
flags |= cls.REJECTED
if 'new tag' in operation:
flags |= cls.NEW_TAG
if 'new branch' in operation:
flags |= cls.NEW_HEAD
if '...' in operation or '..' in operation:
split_token = '...'
if control_character == ' ':
split_token = split_token[:-1]
old_commit = repo.rev_parse(operation.split(split_token)[0])
# END handle refspec
# END reference flag handling
return cls(remote_local_ref, flags, note, old_commit)
class Remote(LazyMixin, Iterable):
"""Provides easy read and write access to a git remote.
Everything not part of this interface is considered an option for the current
remote, allowing constructs like remote.pushurl to query the pushurl.
NOTE: When querying configuration, the configuration accessor will be cached
to speed up subsequent accesses."""
__slots__ = ( "repo", "name", "_config_reader" )
_id_attribute_ = "name"
def __init__(self, repo, name):
"""Initialize a remote instance
:param repo: The repository we are a remote of
:param name: the name of the remote, i.e. 'origin'"""
self.repo = repo
self.name = name
if os.name == 'nt':
# some oddity: on windows, python 2.5, it for some reason does not realize
# that it has the config_writer property, but instead calls __getattr__
# which will not yield the expected results. 'pinging' the members
# with a dir call creates the config_writer property that we require
            # ... bugs like these make me wonder whether python really wants to be used
# for production. It doesn't happen on linux though.
dir(self)
# END windows special handling
def __getattr__(self, attr):
"""Allows to call this instance like
remote.special( *args, **kwargs) to call git-remote special self.name"""
if attr == "_config_reader":
return super(Remote, self).__getattr__(attr)
# sometimes, probably due to a bug in python itself, we are being called
# even though a slot of the same name exists
try:
return self._config_reader.get(attr)
except NoOptionError:
return super(Remote, self).__getattr__(attr)
# END handle exception
def _config_section_name(self):
return 'remote "%s"' % self.name
def _set_cache_(self, attr):
if attr == "_config_reader":
self._config_reader = SectionConstraint(self.repo.config_reader(), self._config_section_name())
else:
super(Remote, self)._set_cache_(attr)
def __str__(self):
return self.name
def __repr__(self):
return '<git.%s "%s">' % (self.__class__.__name__, self.name)
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not ( self == other )
def __hash__(self):
return hash(self.name)
@classmethod
def iter_items(cls, repo):
""":return: Iterator yielding Remote objects of the given repository"""
for section in repo.config_reader("repository").sections():
if not section.startswith('remote'):
continue
lbound = section.find('"')
rbound = section.rfind('"')
if lbound == -1 or rbound == -1:
raise ValueError("Remote-Section has invalid format: %r" % section)
yield Remote(repo, section[lbound+1:rbound])
# END for each configuration section
@property
def refs(self):
"""
:return:
IterableList of RemoteReference objects. It is prefixed, allowing
you to omit the remote path portion, i.e.::
            remote.refs.master # yields RemoteReference('refs/remotes/origin/master')"""
out_refs = IterableList(RemoteReference._id_attribute_, "%s/" % self.name)
out_refs.extend(RemoteReference.list_items(self.repo, remote=self.name))
assert out_refs, "Remote %s did not have any references" % self.name
return out_refs
@property
def stale_refs(self):
"""
:return:
IterableList RemoteReference objects that do not have a corresponding
head in the remote reference anymore as they have been deleted on the
remote side, but are still available locally.
The IterableList is prefixed, hence the 'origin' must be omitted. See
'refs' property for an example."""
out_refs = IterableList(RemoteReference._id_attribute_, "%s/" % self.name)
for line in self.repo.git.remote("prune", "--dry-run", self).splitlines()[2:]:
# expecting
# * [would prune] origin/new_branch
token = " * [would prune] "
if not line.startswith(token):
raise ValueError("Could not parse git-remote prune result: %r" % line)
fqhn = "%s/%s" % (RemoteReference._common_path_default,line.replace(token, ""))
out_refs.append(RemoteReference(self.repo, fqhn))
# END for each line
return out_refs
@classmethod
def create(cls, repo, name, url, **kwargs):
"""Create a new remote to the given repository
:param repo: Repository instance that is to receive the new remote
:param name: Desired name of the remote
:param url: URL which corresponds to the remote's name
:param kwargs:
Additional arguments to be passed to the git-remote add command
:return: New Remote instance
:raise GitCommandError: in case an origin with that name already exists"""
repo.git.remote( "add", name, url, **kwargs )
return cls(repo, name)
# add is an alias
add = create
@classmethod
def remove(cls, repo, name ):
"""Remove the remote with the given name"""
repo.git.remote("rm", name)
# alias
rm = remove
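    # Hypothetical usage sketch (the repo object and URL are placeholders):
    #
    #   backup = Remote.create(repo, 'backup', 'git://example.com/repo.git')
    #   backup.fetch()
    #   Remote.remove(repo, 'backup')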
def rename(self, new_name):
"""Rename self to the given new_name
:return: self """
if self.name == new_name:
return self
self.repo.git.remote("rename", self.name, new_name)
self.name = new_name
try:
del(self._config_reader) # it contains cached values, section names are different now
except AttributeError:
pass
#END handle exception
return self
def update(self, **kwargs):
"""Fetch all changes for this remote, including new branches which will
be forced in ( in case your local remote branch is not part the new remote branches
ancestry anymore ).
:param kwargs:
Additional arguments passed to git-remote update
:return: self """
self.repo.git.remote("update", self.name)
return self
def _get_fetch_info_from_stderr(self, proc, progress):
# skip first line as it is some remote info we are not interested in
output = IterableList('name')
# lines which are no progress are fetch info lines
# this also waits for the command to finish
# Skip some progress lines that don't provide relevant information
fetch_info_lines = list()
for line in digest_process_messages(proc.stderr, progress):
if line.startswith('From') or line.startswith('remote: Total') or line.startswith('POST'):
continue
elif line.startswith('warning:'):
print >> sys.stderr, line
continue
elif line.startswith('fatal:'):
raise GitCommandError(("Error when fetching: %s" % line,), 2)
# END handle special messages
fetch_info_lines.append(line)
# END for each line
# read head information
fp = open(join(self.repo.git_dir, 'FETCH_HEAD'),'r')
fetch_head_info = fp.readlines()
fp.close()
assert len(fetch_info_lines) == len(fetch_head_info), "len(%s) != len(%s)" % (fetch_head_info, fetch_info_lines)
output.extend(FetchInfo._from_line(self.repo, err_line, fetch_line)
for err_line,fetch_line in zip(fetch_info_lines, fetch_head_info))
finalize_process(proc)
return output
def _get_push_info(self, proc, progress):
# read progress information from stderr
# we hope stdout can hold all the data, it should ...
# read the lines manually as it will use carriage returns between the messages
# to override the previous one. This is why we read the bytes manually
digest_process_messages(proc.stderr, progress)
output = IterableList('name')
for line in proc.stdout.readlines():
try:
output.append(PushInfo._from_line(self, line))
except ValueError:
# if an error happens, additional info is given which we cannot parse
pass
# END exception handling
# END for each line
finalize_process(proc)
return output
def fetch(self, refspec=None, progress=None, **kwargs):
"""Fetch the latest changes for this remote
:param refspec:
A "refspec" is used by fetch and push to describe the mapping
between remote ref and local ref. They are combined with a colon in
the format <src>:<dst>, preceded by an optional plus sign, +.
For example: git fetch $URL refs/heads/master:refs/heads/origin means
"grab the master branch head from the $URL and store it as my origin
branch head". And git push $URL refs/heads/master:refs/heads/to-upstream
means "publish my master branch head as to-upstream branch at $URL".
See also git-push(1).
Taken from the git manual
:param progress: See 'push' method
:param kwargs: Additional arguments to be passed to git-fetch
:return:
IterableList(FetchInfo, ...) list of FetchInfo instances providing detailed
information about the fetch results
:note:
As fetch does not provide progress information to non-ttys, we cannot make
it available here unfortunately as in the 'push' method."""
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.fetch(self, refspec, with_extended_output=True, as_process=True, v=True, **kwargs)
return self._get_fetch_info_from_stderr(proc, progress or RemoteProgress())
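    # Usage sketch (illustrative only; assumes an existing `repo` Repo instance
    # with an 'origin' remote, which is not part of the code above)::
    #
    #   origin = repo.remotes.origin
    #   for info in origin.fetch():
    #       print info.ref, info.flags, info.note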
def pull(self, refspec=None, progress=None, **kwargs):
"""Pull changes from the given branch, being the same as a fetch followed
by a merge of branch with your local branch.
:param refspec: see 'fetch' method
:param progress: see 'push' method
:param kwargs: Additional arguments to be passed to git-pull
:return: Please see 'fetch' method """
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.pull(self, refspec, with_extended_output=True, as_process=True, v=True, **kwargs)
return self._get_fetch_info_from_stderr(proc, progress or RemoteProgress())
def push(self, refspec=None, progress=None, **kwargs):
"""Push changes from source branch in refspec to target branch in refspec.
:param refspec: see 'fetch' method
:param progress:
Instance of type RemoteProgress allowing the caller to receive
progress information until the method returns.
If None, progress information will be discarded
:param kwargs: Additional arguments to be passed to git-push
:return:
IterableList(PushInfo, ...) iterable list of PushInfo instances, each
one informing about an individual head which had been updated on the remote
side.
If the push contains rejected heads, these will have the PushInfo.ERROR bit set
in their flags.
If the operation fails completely, the length of the returned IterableList will
be null."""
kwargs = add_progress(kwargs, self.repo.git, progress)
proc = self.repo.git.push(self, refspec, porcelain=True, as_process=True, **kwargs)
return self._get_push_info(proc, progress or RemoteProgress())
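    # Usage sketch (illustrative only): each returned PushInfo exposes bit flags
    # that can be tested for rejected heads::
    #
    #   for info in repo.remotes.origin.push("master"):
    #       if info.flags & PushInfo.ERROR:
    #           print "pushing %s failed" % info.local_ref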
@property
def config_reader(self):
"""
:return:
GitConfigParser compatible object able to read options for only our remote.
            Hence you may simply type config.get("pushurl") to obtain the information"""
return self._config_reader
@property
def config_writer(self):
"""
:return: GitConfigParser compatible object able to write options for this remote.
:note:
You can only own one writer at a time - delete it to release the
            configuration file and make it usable by others.
To assure consistent results, you should only query options through the
writer. Once you are done writing, you are free to use the config reader
once again."""
writer = self.repo.config_writer()
# clear our cache to assure we re-read the possibly changed configuration
try:
del(self._config_reader)
except AttributeError:
pass
#END handle exception
return SectionConstraint(writer, self._config_section_name())
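    # Usage sketch (illustrative only) for the two properties above::
    #
    #   origin = repo.remotes.origin
    #   url = origin.config_reader.get("url")
    #   writer = origin.config_writer
    #   writer.set("pushurl", "git@example.com:project.git")
    #   del writer  # release the configuration file again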
| cool-RR/GitPython | git/remote.py | Python | bsd-3-clause | 27,932 |
#!/usr/bin/python
# Copyright (C) 2014 Belledonne Communications SARL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import argparse
import os
import pystache
import sys
import xml.etree.ElementTree as ET
sys.path.append(os.path.dirname(os.path.realpath(__file__)))  # make the apixml2python package next to this script importable
from apixml2python.linphone import LinphoneModule
blacklisted_classes = [
'LinphoneTunnel',
'LinphoneTunnelConfig'
]
blacklisted_events = [
'LinphoneCoreInfoReceivedCb', # missing LinphoneInfoMessage
'LinphoneCoreNotifyReceivedCb', # missing LinphoneContent
'LinphoneCoreFileTransferProgressIndicationCb', # missing LinphoneContent
'LinphoneCoreFileTransferRecvCb', # missing LinphoneContent
'LinphoneCoreFileTransferSendCb' # missing LinphoneContent
]
blacklisted_functions = [
'linphone_call_log_get_local_stats', # missing rtp_stats_t
'linphone_call_log_get_remote_stats', # missing rtp_stats_t
'linphone_call_params_get_privacy', # missing LinphonePrivacyMask
'linphone_call_params_set_privacy', # missing LinphonePrivacyMask
'linphone_chat_message_get_file_transfer_information', # missing LinphoneContent
'linphone_chat_message_start_file_download', # to be handwritten because of callback
'linphone_chat_message_state_to_string', # There is no use to wrap this function
'linphone_chat_room_create_file_transfer_message', # missing LinphoneContent
'linphone_core_add_listener',
'linphone_core_can_we_add_call', # private function
'linphone_core_enable_log_collection', # need to handle class properties
'linphone_core_get_audio_port_range', # to be handwritten because of result via arguments
'linphone_core_get_sip_transports', # missing LCSipTransports
'linphone_core_get_sip_transports_used', # missing LCSipTransports
'linphone_core_get_supported_video_sizes', # missing MSVideoSizeDef
'linphone_core_get_video_policy', # missing LinphoneVideoPolicy
'linphone_core_get_video_port_range', # to be handwritten because of result via arguments
'linphone_core_publish', # missing LinphoneContent
'linphone_core_remove_listener',
'linphone_core_serialize_logs', # There is no use to wrap this function
'linphone_core_set_log_collection_path', # need to handle class properties
'linphone_core_set_log_file', # There is no use to wrap this function
'linphone_core_set_log_handler', # Hand-written but put directly in the linphone module
'linphone_core_set_log_level', # There is no use to wrap this function
'linphone_core_set_video_policy', # missing LinphoneVideoPolicy
'linphone_core_set_sip_transports', # missing LCSipTransports
'linphone_core_subscribe', # missing LinphoneContent
'linphone_event_notify', # missing LinphoneContent
'linphone_event_send_publish', # missing LinphoneContent
'linphone_event_send_subscribe', # missing LinphoneContent
'linphone_event_update_publish', # missing LinphoneContent
'linphone_event_update_subscribe', # missing LinphoneContent
'linphone_proxy_config_get_privacy', # missing LinphonePrivacyMask
'linphone_proxy_config_normalize_number', # to be handwritten because of result via arguments
'linphone_proxy_config_set_file_transfer_server', # defined but not implemented in linphone core
'linphone_proxy_config_set_privacy', # missing LinphonePrivacyMask
'linphone_tunnel_get_http_proxy', # to be handwritten because of double pointer indirection
'lp_config_for_each_entry', # to be handwritten because of callback
'lp_config_for_each_section', # to be handwritten because of callback
'lp_config_get_range', # to be handwritten because of result via arguments
'lp_config_load_dict_to_section', # missing LinphoneDictionary
'lp_config_section_to_dict' # missing LinphoneDictionary
]
hand_written_functions = [
'linphone_chat_room_send_message2',
'linphone_core_get_sound_devices',
'linphone_core_get_video_devices',
'linphone_core_new',
'linphone_core_new_with_config'
]
def generate(apixmlfile, outputfile):
tree = ET.parse(apixmlfile)
renderer = pystache.Renderer()
m = LinphoneModule(tree, blacklisted_classes, blacklisted_events, blacklisted_functions, hand_written_functions)
os.chdir('apixml2python')
tmpfilename = outputfile.name + '.tmp'
f = open(tmpfilename, 'w')
f.write(renderer.render(m))
f.close()
f = open(tmpfilename, 'rU')
for line in f:
outputfile.write(line)
f.close()
os.unlink(tmpfilename)
def main(argv = None):
if argv is None:
argv = sys.argv
argparser = argparse.ArgumentParser(description="Generate a Python wrapper of the Linphone API.")
argparser.add_argument('-o', '--outputfile', metavar='outputfile', type=argparse.FileType('w'), help="Output C file containing the code of the Python wrapper.")
argparser.add_argument('apixmlfile', help="XML file of the Linphone API generated by genapixml.py.")
args = argparser.parse_args()
if args.outputfile == None:
args.outputfile = open('linphone.c', 'w')
generate(args.apixmlfile, args.outputfile)
if __name__ == "__main__":
sys.exit(main())
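# Example invocation (file names are illustrative, not mandated by the script):
#   python apixml2python.py -o linphone.c api.xml
# where api.xml is the Linphone API description produced by genapixml.py.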
| Gui13/linphone | tools/python/apixml2python.py | Python | gpl-2.0 | 5,553 |
#!/usr/bin/python
#
# Simple script which converts the FITS headers associated with Brian McLean DSS images into simplified JSON files
# Fabien Chereau fchereau@eso.org
#
import math
import os
import sys
import Image
from astLib import astWCS
import skyTile
levels = ["x64", "x32", "x16", "x8", "x4", "x2", "x1"]
# Define the invalid zones in the plates corners for N and S plates
removeBoxN = [64 * 300 - 2000, 1199]
removeBoxS = [480, 624]
def getIntersectPoly(baseFileName, curLevel, i, j):
"""Return the convex polygons in pixel space defining the valid area of the tile or None if the poly is fully in the invalid area"""
scale = 2 ** (6 - curLevel) * 300
if baseFileName[0] == 'N':
box = removeBoxN
x = float(box[0] - i * scale) / scale * 300.
y = float(box[1] - j * scale) / scale * 300.
# x,y is the position of the box top left corner in pixel wrt lower left corner of current tile
if x > 300. or y <= 0.:
# Tile fully valid
return [[[0, 0], [300, 0], [300, 300], [0, 300]]]
if x <= 0. and y >= 300.:
# Tile fully invalid
return None
if x <= 0.:
# assert y > 0 # (always true, tested above)
assert y <= 300.
return [[[0, y], [300, y], [300, 300], [0, 300]]]
if y >= 300.:
assert x > 0
# assert x <= 300. # (always true, tested above)
return [[[0, 0], [x, 0], [x, 300], [0, 300]]]
return [[[0, 0], [x, 0], [x, 300], [0, 300]], [[x, y], [300, y], [300, 300], [x, 300]]]
else:
box = removeBoxS
x = float(i * scale - box[0]) / scale * 300.
y = float(box[1] - j * scale) / scale * 300.
# x,y is the position of the box top right corner in pixel wrt lower left corner of current tile
if x > 0. or y <= 0.:
# Tile fully valid
return [[[0, 0], [300, 0], [300, 300], [0, 300]]]
if x <= -300. and y >= 300.:
# Tile fully invalid
return None
if x <= -300.:
# assert y > 0 # (always true, tested above)
assert y <= 300.
return [[[0, y], [300, y], [300, 300], [0, 300]]]
if y >= 300.:
# assert x <= 0 # (always true, tested above)
assert x > -300.
return [[[-x, 0], [300, 0], [300, 300], [-x, 300]]]
return [[[-x, 0], [300, 0], [300, 300], [-x, 300]], [[0, y], [-x, y], [-x, 300], [0, 300]]]
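# Summary of the return convention: a list of convex quads in tile pixel space
# (vertices in [0, 300]), e.g. [[[0, 0], [300, 0], [300, 300], [0, 300]]] for a
# fully valid tile, or None when the tile lies entirely in the invalid corner
# region of the plate.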
def createTile(currentLevel, maxLevel, i, j, outDirectory, plateName, special=False):
# Create the associated tile description
t = skyTile.SkyImageTile()
t.level = currentLevel
t.i = i
t.j = j
t.imageUrl = "x%.2d/" % (2 ** currentLevel) + "x%.2d_%.2d_%.2d.jpg" % (2 ** currentLevel, i, j)
if currentLevel == 0:
t.credits = "Copyright (C) 2008, STScI Digitized Sky Survey"
t.infoUrl = "http://stdatu.stsci.edu/cgi-bin/dss_form"
# t.maxBrightness = 10
# Create the matching sky polygons, return if there is no relevant polygons
if special is True:
pl = [[[0, 0], [300, 0], [300, 300], [0, 300]]]
else:
pl = getIntersectPoly(plateName, currentLevel, i, j)
if pl is None or not pl:
return None
# Get the WCS from the input FITS header file for the tile
wcs = astWCS.WCS(plateName + "/" + levels[currentLevel] + "/" + plateName + "_%.2d_%.2d_" % (i, j) + levels[
currentLevel] + ".hhh")
naxis1 = wcs.header.get('NAXIS1')
naxis2 = wcs.header.get('NAXIS2')
t.skyConvexPolygons = []
for idx, poly in enumerate(pl):
p = [wcs.pix2wcs(v[0] + 0.5, v[1] + 0.5) for iv, v in enumerate(poly)]
t.skyConvexPolygons.append(p)
t.textureCoords = []
for idx, poly in enumerate(pl):
p = [(float(v[0]) / naxis1, float(v[1]) / naxis2) for iv, v in enumerate(poly)]
t.textureCoords.append(p)
v10 = wcs.pix2wcs(1, 0)
v01 = wcs.pix2wcs(0, 1)
v00 = wcs.pix2wcs(0, 0)
t.minResolution = max(abs(v10[0] - v00[0]) * math.cos(v00[1] * math.pi / 180.), abs(v01[1] - v00[1]))
if (currentLevel >= maxLevel):
return t
# Recursively creates the 4 sub-tiles
    sub = createTile(currentLevel + 1, maxLevel, i * 2, j * 2, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2 + 1, j * 2, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2 + 1, j * 2 + 1, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2, j * 2 + 1, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
return t
def generateJpgTiles(inDirectory, outDirectory):
# Create a reduced 256x256 version of all the jpeg
for curLevel in range(0, len(levels)):
fullOutDir = outDirectory + "/x%.2d" % (2 ** curLevel)
if not os.path.exists(fullOutDir):
os.makedirs(fullOutDir)
print "Create directory " + fullOutDir
for i in range(0, 2 ** curLevel):
for j in range(0, 2 ** curLevel):
baseFileName = "x%.2d_%.2d_%.2d" % (2 ** curLevel, i, j)
im = Image.open(
inDirectory + "/" + levels[curLevel] + "/" + inDirectory + '_' + "%.2d_%.2d_" % (i, j) + levels[
curLevel] + ".jpg")
# Enhance darker part of the image
im3 = im.point(lambda t: 2. * t - 256. * (t / 256.) ** 1.6)
im2 = im3.transform((256, 256), Image.EXTENT, (0, 0, 300, 300), Image.BILINEAR)
im2.save(fullOutDir + '/' + baseFileName + ".jpg")
def plateRange():
if len(sys.argv) != 4:
print "Usage: " + sys.argv[0] + " prefix startPlate stopPlate "
exit(-1)
prefix = sys.argv[1]
outDir = "/tmp/tmpPlate"
nRange = range(int(sys.argv[2]), int(sys.argv[3]))
for i in nRange:
if os.path.exists(outDir):
os.system("rm -r " + outDir)
os.makedirs(outDir)
plateName = prefix + "%.3i" % i
generateJpgTiles(plateName, outDir)
# Create all the JSON files
masterTile = createTile(0, 6, 0, 0, outDir, plateName)
masterTile.outputJSON(qCompress=True, maxLevelPerFile=2, outDir=outDir + '/')
command = "cd /tmp && mv tmpPlate " + plateName + " && tar -cf " + plateName + ".tar " + plateName + " && rm -rf " + plateName
print command
os.system(command)
command = "cd /tmp && scp " + plateName + ".tar vosw@voint1.hq.eso.org:/work/fabienDSS2/" + plateName + ".tar"
print command
os.system(command)
command = "rm /tmp/" + plateName + ".tar"
print command
os.system(command)
def mainHeader():
    # Generate the top level file containing pointers to all the plates
outDir = "/tmp/tmpPlate"
with open('/tmp/allDSS.json', 'w') as f:
f.write('{\n')
f.write('"minResolution" : 0.1,\n')
f.write('"maxBrightness" : 13,\n')
f.write('"subTiles" : \n[\n')
for prefix in ['N', 'S']:
if prefix == 'N':
nRange = range(2, 898)
if prefix == 'S':
nRange = range(1, 894)
for i in nRange:
plateName = prefix + "%.3i" % i
ti = createTile(0, 0, 0, 0, outDir, plateName, True)
assert ti != None
f.write('\t{\n')
f.write('\t\t"minResolution" : %.8f,\n' % ti.minResolution)
f.write('\t\t"worldCoords" : ')
skyTile.writePolys(ti.skyConvexPolygons, f)
f.write(',\n')
f.write('\t\t"subTiles" : ["' + plateName + "/x01_00_00.json.qZ" + '"]\n')
f.write('\t},\n')
f.seek(-2, os.SEEK_CUR)
f.write('\n]}\n')
if __name__ == "__main__":
import psyco
psyco.full()
plateRange()
| Stellarium/stellarium | util/dssheaderToJSON.py | Python | gpl-2.0 | 8,050 |
from pysqlite2 import dbapi2 as sqlite
con = sqlite.connect("mydb")
cur = con.cursor()
newPeople = (
('Lebed' , 53),
('Zhirinovsky' , 57),
)
for person in newPeople:
cur.execute("insert into people (name_last, age) values (?, ?)", person)
# The changes will not be saved unless the transaction is committed explicitly:
con.commit()
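# Equivalent bulk form (illustrative): the same rows can be inserted in one call
#   cur.executemany("insert into people (name_last, age) values (?, ?)", newPeople)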
| gburd/dbsql | src/py/doc/code/insert_more_people.py | Python | gpl-3.0 | 357 |
# stdlib
from collections import namedtuple
import socket
import subprocess
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers, Platform
class NodeNotFound(Exception): pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'is_external',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
}
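    # Each entry above maps a Datadog metric name to a tuple of
    # (submission type, dotted path into the node stats JSON, optional transform
    # applied to the raw value before submission).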
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red":0, "yellow":1, "green":2}.get(v, -1)),
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
def get_instance_config(self, instance):
url = instance.get('url')
if url is None:
raise Exception("An url must be specified in the instance")
is_external = _is_affirmative(instance.get('is_external', False))
# Support URLs that have a path in them from the config, for
# backwards-compatibility.
parsed = urlparse.urlparse(url)
if parsed[2] != "":
url = "%s://%s" % (parsed[0], parsed[1])
port = parsed.port
host = parsed.hostname
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
# Tag by URL so we can differentiate the metrics
# from multiple instances
tags = ['url:%s' % url]
tags.extend(instance.get('tags', []))
timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT
config = ESInstanceConfig(
is_external=is_external,
password=instance.get('password'),
service_check_tags=service_check_tags,
tags=tags,
timeout=timeout,
url=url,
username=instance.get('username')
)
return config
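    # Example instance configuration (illustrative values) as it would appear in
    # conf.d/elastic.yaml; the keys match what get_instance_config() reads:
    #
    #   instances:
    #     - url: http://localhost:9200
    #       username: es_admin
    #       password: ********
    #       is_external: false
    #       timeout: 5
    #       tags:
    #         - cluster:dev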
def check(self, instance):
self.curr_config = self.get_instance_config(instance)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
version = self._get_es_version()
self._define_params(version, self.curr_config.is_external)
# Load stats data.
stats_url = urlparse.urljoin(self.curr_config.url, self.STATS_URL)
stats_data = self._get_data(stats_url)
self._process_stats_data(stats_data)
# Load the health data.
health_url = urlparse.urljoin(self.curr_config.url, self.HEALTH_URL)
health_data = self._get_data(health_url)
self._process_health_data(health_data)
# If we're here we did not have any ES conn issues
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.OK,
tags=self.curr_config.service_check_tags
)
def _get_es_version(self):
""" Get the running version of elasticsearch.
"""
try:
data = self._get_data(self.curr_config.url)
version = map(int, data['version']['number'].split('.')[0:3])
except Exception, e:
self.warning(
"Error while trying to get Elasticsearch version "
"from %s %s"
% (self.curr_config.url, str(e))
)
version = [0, 0, 0]
self.log.debug("Elasticsearch version is %s" % version)
return version
def _define_params(self, version, is_external):
""" Define the set of URLs and METRICS to use depending on the
running ES version.
"""
if version >= [0,90,10]:
# ES versions 0.90.10 and above
self.HEALTH_URL = "/_cluster/health?pretty=true"
self.NODES_URL = "/_nodes?network=true"
# For "external" clusters, we want to collect from all nodes.
if is_external:
self.STATS_URL = "/_nodes/stats?all=true"
else:
self.STATS_URL = "/_nodes/_local/stats?all=true"
additional_metrics = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
else:
self.HEALTH_URL = "/_cluster/health?pretty=true"
self.STATS_URL = "/_cluster/nodes/stats?all=true"
self.NODES_URL = "/_cluster/nodes?network=true"
additional_metrics = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
self.STATS_METRICS.update(additional_metrics)
if version >= [0,90,5]:
# ES versions 0.90.5 and above
additional_metrics = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge","indices.id_cache.memory_size_in_bytes"),
"elasticsearch.fielddata.size": ("gauge","indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge","indices.fielddata.evictions"),
}
else:
# ES version 0.90.4 and below
additional_metrics = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
self.STATS_METRICS.update(additional_metrics)
def _get_data(self, url):
""" Hit a given URL and return the parsed json
"""
# Load basic authentication configuration, if available.
if self.curr_config.username and self.curr_config.password:
auth = (self.curr_config.username, self.curr_config.password)
else:
auth = None
try:
resp = requests.get(
url,
timeout=self.curr_config.timeout,
headers=headers(self.agentConfig),
auth=auth
)
resp.raise_for_status()
except Exception as e:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {0} when hitting {1}".format(e, url),
tags=self.curr_config.service_check_tags
)
raise
return resp.json()
def _process_stats_data(self, data):
is_external = self.curr_config.is_external
for node_name in data['nodes']:
node_data = data['nodes'][node_name]
# On newer version of ES it's "host" not "hostname"
node_hostname = node_data.get('hostname', node_data.get('host', None))
should_process = is_external \
or self.should_process_node(node_name, node_hostname)
# Override the metric hostname if we're hitting an external cluster.
metric_hostname = node_hostname if is_external else None
if should_process:
for metric in self.STATS_METRICS:
desc = self.STATS_METRICS[metric]
self._process_metric(node_data, metric, *desc,
tags=self.curr_config.tags, hostname=metric_hostname)
def should_process_node(self, node_name, node_hostname):
""" The node stats API will return stats for every node so we
want to filter out nodes that we don't care about.
"""
if node_hostname is not None:
# For ES >= 0.19
hostnames = (
self.hostname.decode('utf-8'),
socket.gethostname().decode('utf-8'),
socket.getfqdn().decode('utf-8')
)
if node_hostname.decode('utf-8') in hostnames:
return True
else:
# ES < 0.19
# Fetch interface address from ifconfig or ip addr and check
# against the primary IP from ES
try:
nodes_url = urlparse.urljoin(self.curr_config.url, self.NODES_URL)
primary_addr = self._get_primary_addr(nodes_url, node_name)
except NodeNotFound:
# Skip any nodes that aren't found
return False
if self._host_matches_node(primary_addr):
return True
def _get_primary_addr(self, url, node_name):
""" Returns a list of primary interface addresses as seen by ES.
Used in ES < 0.19
"""
data = self._get_data(url)
if node_name in data['nodes']:
node = data['nodes'][node_name]
if 'network' in node\
and 'primary_interface' in node['network']\
and 'address' in node['network']['primary_interface']:
return node['network']['primary_interface']['address']
raise NodeNotFound()
def _host_matches_node(self, primary_addrs):
""" For < 0.19, check if the current host matches the IP given in the
cluster nodes check `/_cluster/nodes`. Uses `ip addr` on Linux and
`ifconfig` on Mac
"""
if Platform.is_darwin():
ifaces = subprocess.Popen(['ifconfig'], stdout=subprocess.PIPE)
else:
ifaces = subprocess.Popen(['ip', 'addr'], stdout=subprocess.PIPE)
grepper = subprocess.Popen(['grep', 'inet'], stdin=ifaces.stdout,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ifaces.stdout.close()
out, err = grepper.communicate()
# Capture the list of interface IPs
ips = []
for iface in out.split("\n"):
iface = iface.strip()
if iface:
ips.append(iface.split(' ')[1].split('/')[0])
# Check the interface addresses against the primary address
return primary_addrs in ips
def _process_metric(self, data, metric, xtype, path, xform=None,
tags=None, hostname=None):
"""data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xfom: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xform: value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
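    # Worked example (illustrative): with path "indices.docs.count" the loop above
    # walks data["indices"]["docs"]["count"]; if any key is missing, value becomes
    # None and the metric is reported via _metric_not_found() instead of submitted.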
def _process_health_data(self, data):
if self.cluster_status.get(self.curr_config.url) is None:
self.cluster_status[self.curr_config.url] = data['status']
if data['status'] in ["yellow", "red"]:
event = self._create_event(data['status'])
self.event(event)
if data['status'] != self.cluster_status.get(self.curr_config.url):
self.cluster_status[self.curr_config.url] = data['status']
event = self._create_event(data['status'])
self.event(event)
for metric in self.CLUSTER_HEALTH_METRICS:
# metric description
desc = self.CLUSTER_HEALTH_METRICS[metric]
self._process_metric(data, metric, *desc, tags=self.curr_config.tags)
# Process the service check
cluster_status = data['status']
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = "{tag} on cluster \"{cluster_name}\" "\
"| active_shards={active_shards} "\
"| initializing_shards={initializing_shards} "\
"| relocating_shards={relocating_shards} "\
"| unassigned_shards={unassigned_shards} "\
"| timed_out={timed_out}" \
.format(**data)
self.service_check(
self.SERVICE_CHECK_CLUSTER_STATUS,
status,
message=msg,
tags=self.curr_config.service_check_tags
)
def _metric_not_found(self, metric, path):
self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status):
hostname = self.hostname.decode('utf-8')
if status == "red":
alert_type = "error"
msg_title = "%s is %s" % (hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "%s is %s" % (hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "%s recovered as %s" % (hostname, status)
msg = "ElasticSearch: %s just reported as %s" % (hostname, status)
return { 'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text':msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname
}
| JohnLZeller/dd-agent | checks.d/elastic.py | Python | bsd-3-clause | 23,162 |
# All personal data in one dictionary (named `person` to avoid shadowing the
# built-in `dict`).
person = {
    'naam': "Anne Goossens",
    'age': 22,
    'GB-Datum': "07-05-1993",
    'GB-Plaats': "'s-Hertogenbosch",
    'adres': "Groenstraat 51",
    'broer/zussen': "1 zus",
    'neven/nichten': 28,
    'ooms/tantes': 16,
}
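# Example lookup: person['GB-Plaats'] -> "'s-Hertogenbosch"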
| ArtezGDA/text-IO | Anne/dict.py | Python | mit | 229 |
"""
Test suite for the config.views module
"""
import pytest
from html import escape
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http import Http404
from django.test import RequestFactory
from django_project import celery
from app.config import views
from app.config import models
from app.config import utils
from app.config.settings import AppSettings
pytestmark = pytest.mark.django_db
def patch_contrib_messages(request):
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
return messages
@pytest.fixture
def mock_cisco_eox_api_access_available(monkeypatch):
app = AppSettings()
app.set_cisco_api_enabled(True)
app.set_cisco_api_client_id("client_id")
app.set_cisco_api_client_id("client_secret")
app.set_periodic_sync_enabled(True)
app.set_cisco_eox_api_queries("")
app.set_product_blacklist_regex("")
app.set_auto_create_new_products(True)
monkeypatch.setattr(utils, "check_cisco_eox_api_access",
lambda client_id, client_secret, drop_credentials=False: True)
@pytest.fixture
def mock_cisco_eox_api_access_broken(monkeypatch):
app = AppSettings()
app.set_cisco_api_enabled(True)
app.set_cisco_api_client_id("client_id")
app.set_cisco_api_client_id("client_secret")
app.set_periodic_sync_enabled(True)
app.set_cisco_eox_api_queries("")
app.set_product_blacklist_regex("")
app.set_auto_create_new_products(True)
monkeypatch.setattr(utils, "check_cisco_eox_api_access",
lambda client_id, client_secret, drop_credentials=False: False)
@pytest.fixture
def mock_cisco_eox_api_access_exception(monkeypatch):
def raise_exception():
raise Exception("totally broken")
app = AppSettings()
app.set_cisco_api_enabled(True)
app.set_cisco_api_client_id("client_id")
app.set_cisco_api_client_id("client_secret")
app.set_periodic_sync_enabled(True)
app.set_cisco_eox_api_queries("")
app.set_product_blacklist_regex("")
app.set_auto_create_new_products(True)
monkeypatch.setattr(utils, "check_cisco_eox_api_access",
lambda client_id, client_secret, drop_credentials: raise_exception())
@pytest.fixture
def mock_cisco_eox_api_access_disabled():
app = AppSettings()
app.set_cisco_api_enabled(False)
@pytest.mark.usefixtures("import_default_vendors")
class TestAddNotificationView:
URL_NAME = "productdb_config:notification-add"
def test_anonymous_default(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.add_notification(request)
assert response.status_code == 302
assert response.url.startswith("/productdb/login")
def test_authenticated_user(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=False)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
with pytest.raises(PermissionDenied):
views.add_notification(request)
def test_superuser_access(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.add_notification(request)
assert response.status_code == 200
def test_post(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
data = {
"title": "MyTitle",
"type": "ERR",
"summary_message": "This is a summary",
"detailed_message": "This is the detail message"
}
request = RequestFactory().post(url, data=data)
request.user = user
response = views.add_notification(request)
assert response.status_code == 302
assert models.NotificationMessage.objects.count() == 1
n = models.NotificationMessage.objects.filter(title="MyTitle").first()
assert n.type == models.NotificationMessage.MESSAGE_ERROR
# test with missing input
data = {
"title": "MyTitle",
"type": "ERR",
"detailed_message": "This is the detail message"
}
request = RequestFactory().post(url, data=data)
request.user = user
response = views.add_notification(request)
assert response.status_code == 200
@pytest.mark.usefixtures("import_default_vendors")
class TestStatusView:
URL_NAME = "productdb_config:status"
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_anonymous_default(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.status(request)
assert response.status_code == 302
assert response.url.startswith("/productdb/login")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_authenticated_user(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=False)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
with pytest.raises(PermissionDenied):
views.status(request)
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
@pytest.mark.usefixtures("mock_worker_not_available_state")
def test_superuser_access(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.status(request)
assert response.status_code == 200
expected_content = [
"No backend worker found, asynchronous and scheduled tasks are not executed.",
"successful connected to the Cisco EoX API"
]
page_content = response.content.decode()
for line in expected_content:
assert line in page_content, page_content
assert cache.get("CISCO_EOX_API_TEST", None) is True
# cleanup
cache.delete("CISCO_EOX_API_TEST")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_with_active_workers(self, monkeypatch):
monkeypatch.setattr(celery, "is_worker_active", lambda: True)
cache.delete("CISCO_EOX_API_TEST") # ensure that cache is not set
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.status(request)
assert response.status_code == 200
assert cache.get("CISCO_EOX_API_TEST", None) is True
expected_content = [
"Backend worker found.",
"successful connected to the Cisco EoX API"
]
for line in expected_content:
assert line in response.content.decode()
# cleanup
cache.delete("CISCO_EOX_API_TEST")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_with_inactive_workers(self, monkeypatch):
monkeypatch.setattr(celery, "is_worker_active", lambda: False)
cache.delete("CISCO_EOX_API_TEST") # ensure that cache is not set
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.status(request)
assert response.status_code == 200
assert cache.get("CISCO_EOX_API_TEST", None) is True
expected_content = [
"No backend worker found, asynchronous and scheduled tasks are not executed.",
"successful connected to the Cisco EoX API"
]
for line in expected_content:
assert line in response.content.decode()
# cleanup
cache.delete("CISCO_EOX_API_TEST")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_broken")
def test_access_with_broken_api(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.status(request)
assert response.status_code == 200
assert cache.get("CISCO_EOX_API_TEST", None) is False
# cleanup
cache.delete("CISCO_EOX_API_TEST")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_exception")
def test_access_with_broken_api_by_exception(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
response = views.status(request)
assert response.status_code == 200
assert cache.get("CISCO_EOX_API_TEST", None) is None
# cleanup
cache.delete("CISCO_EOX_API_TEST")
@pytest.mark.usefixtures("import_default_vendors")
class TestChangeConfiguration:
URL_NAME = "productdb_config:change_settings"
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_anonymous_default(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url.startswith("/productdb/login")
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
def test_authenticated_user(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=False)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
with pytest.raises(PermissionDenied):
views.change_configuration(request)
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
@pytest.mark.usefixtures("import_default_text_blocks")
def test_superuser_access(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 200
for content in models.TextBlock.objects.all().values_list("html_content", flat=True):
assert escape(content) in response.content.decode()
def test_global_options_are_visible(self):
app_config = AppSettings()
test_internal_id = "My custom Internal ID"
app_config.set_internal_product_id_label(test_internal_id)
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = user
patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 200
assert test_internal_id in response.content.decode()
@pytest.mark.usefixtures("mock_cisco_eox_api_access_available")
@pytest.mark.usefixtures("import_default_text_blocks")
def test_post_with_active_api(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
data = {}
request = RequestFactory().post(url, data=data)
request.user = user
patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url == "/productdb/config/change/"
# test with invalid post value
data = {
"cisco_api_enabled": "on",
"cisco_api_client_id": "client_id",
"eox_api_blacklist": "("
}
request = RequestFactory().post(url, data=data)
request.user = user
msgs = patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 200
assert msgs.added_new
data = {
"cisco_api_client_id": "my changed client ID",
"cisco_api_client_secret": "my changed client secret",
}
request = RequestFactory().post(url, data=data)
request.user = user
patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url == "/productdb/config/change/"
@pytest.mark.usefixtures("mock_cisco_eox_api_access_disabled")
@pytest.mark.usefixtures("import_default_text_blocks")
def test_post_with_inactive_api(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
data = {
"cisco_api_enabled": "on",
}
request = RequestFactory().post(url, data=data)
request.user = user
msgs = patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url == "/productdb/config/change/"
assert msgs.added_new
data = {
"cisco_api_enabled": "on",
"cisco_api_client_id": "client_id"
}
request = RequestFactory().post(url, data=data)
request.user = user
msgs = patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url == "/productdb/config/change/"
assert msgs.added_new
@pytest.mark.usefixtures("mock_cisco_eox_api_access_disabled")
@pytest.mark.usefixtures("import_default_text_blocks")
def test_post_with_broken_api(self):
# require super user permissions
user = User.objects.create(username="username", is_superuser=True)
url = reverse(self.URL_NAME)
data = {
"cisco_api_enabled": "on",
"cisco_api_client_id": "client_id"
}
request = RequestFactory().post(url, data=data)
request.user = user
msgs = patch_contrib_messages(request)
response = views.change_configuration(request)
assert response.status_code == 302
assert response.url == "/productdb/config/change/"
assert msgs.added_new
@pytest.mark.usefixtures("import_default_vendors")
class TestServerMessagesList:
URL_NAME = "productdb_config:notification-list"
def test_anonymous_default(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.server_messages_list(request)
assert response.status_code == 200, "Should be callable"
@pytest.mark.usefixtures("enable_login_only_mode")
def test_anonymous_login_only_mode(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.server_messages_list(request)
assert response.status_code == 302, "Should redirect to login page"
assert response.url == reverse("login") + "?next=" + url, \
"Should contain a next parameter for redirect"
def test_authenticated_user(self):
models.NotificationMessage.objects.create(title="A1", summary_message="B", detailed_message="C")
models.NotificationMessage.objects.create(title="A2", summary_message="B", detailed_message="C")
models.NotificationMessage.objects.create(title="A3", summary_message="B", detailed_message="C")
models.NotificationMessage.objects.create(title="A4", summary_message="B", detailed_message="C")
models.NotificationMessage.objects.create(title="A5", summary_message="B", detailed_message="C")
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = User.objects.create(username="username", is_superuser=False, is_staff=False)
response = views.server_messages_list(request)
assert response.status_code == 200, "Should be callable"
@pytest.mark.usefixtures("import_default_vendors")
class TestServerMessagesDetail:
URL_NAME = "productdb_config:notification-detail"
def test_anonymous_default(self):
nm = models.NotificationMessage.objects.create(title="A1", summary_message="B", detailed_message="C")
url = reverse(self.URL_NAME, kwargs={"message_id": nm.id})
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.server_message_detail(request, nm.id)
assert response.status_code == 200, "Should be callable"
@pytest.mark.usefixtures("enable_login_only_mode")
def test_anonymous_login_only_mode(self):
nm = models.NotificationMessage.objects.create(title="A1", summary_message="B", detailed_message="C")
url = reverse(self.URL_NAME, kwargs={"message_id": nm.id})
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.server_message_detail(request, nm.id)
assert response.status_code == 302, "Should redirect to login page"
assert response.url == reverse("login") + "?next=" + url, \
"Should contain a next parameter for redirect"
def test_authenticated_user(self):
nm = models.NotificationMessage.objects.create(title="A1", summary_message="B", detailed_message="C")
url = reverse(self.URL_NAME, kwargs={"message_id": nm.id})
request = RequestFactory().get(url)
request.user = User.objects.create(username="username", is_superuser=False, is_staff=False)
response = views.server_message_detail(request, nm.id)
assert response.status_code == 200, "Should be callable"
def test_404(self):
url = reverse(self.URL_NAME, kwargs={"message_id": 9999})
request = RequestFactory().get(url)
request.user = User.objects.create(username="username", is_superuser=False, is_staff=False)
with pytest.raises(Http404):
views.server_message_detail(request, 9999)
@pytest.mark.usefixtures("import_default_vendors")
class TestFlushCache:
URL_NAME = "productdb_config:flush_cache"
def test_anonymous_default(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.flush_cache(request)
assert response.status_code == 302, "Should redirect to login page"
assert response.url == reverse("login") + "?next=" + url, \
"Should contain a next parameter for redirect"
@pytest.mark.usefixtures("enable_login_only_mode")
def test_anonymous_login_only_mode(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = AnonymousUser()
response = views.flush_cache(request)
assert response.status_code == 302, "Should redirect to login page"
assert response.url == reverse("login") + "?next=" + url, \
"Should contain a next parameter for redirect"
def test_authenticated_user(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = User.objects.create(username="username", is_superuser=False, is_staff=False)
with pytest.raises(PermissionDenied):
views.flush_cache(request)
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
def test_superuser(self):
url = reverse(self.URL_NAME)
request = RequestFactory().get(url)
request.user = User.objects.get(username="pdb_admin")
msgs = patch_contrib_messages(request)
response = views.flush_cache(request)
assert response.status_code == 302, "Should redirect to status page"
assert msgs.added_new
assert response.url == reverse("productdb_config:status")
| hoelsner/product-database | app/config/tests/test_config_views.py | Python | mit | 21,046 |
#!/usr/bin/env python
"""
Parser for Gene Ontology annotations files
Usage example::
import Bio.GO.Parsers.oboparser as obo
import Bio.GO.Parsers.annotations as annotations
parser = obo.Parser(open("gene_ontology.1_2.obo"))
ontology = parser.parse()
parser = annotations.Parser(open("gene_association.sgd"))
    annots = parser.parse(ontology)
"""
__author__ = "Tamas Nepusz"
__email__ = "tamas@cs.rhul.ac.uk"
__copyright__ = "Copyright (c) 2010, Tamas Nepusz"
__version__ = "0.1"
__all__ = ["ParseError", "Parser"]
from annotations.GO.annot import Annotation
from annotations.GO.utils import pushback_iterator
class ParseError(Exception):
"""Exception thrown when a parsing error occurred"""
def __init__(self, msg, lineno = 1):
Exception.__init__("%s near line %d" % (msg, lineno))
self.lineno = lineno
class Parser(object):
"""The main attraction, the annotations file parser.
If you want to create a parser that reads an annotations file, do this:
>>> import Bio.GO.Parsers.annotations as annotations
>>> parser = annotations.Parser(open("gene_associations.sgd"))
Only the headers are read when creating the parser. You can
access these right after construction as follows:
>>> parser.headers["gaf-version"]
['1.0']
    To read the annotations in the file, you must iterate over the
    parser as if it were a list. The iterator yields `Annotation`_
    objects. If you are not interested in the individual `Annotation`
    objects and only need the aggregate result, call the `parse()`
    method, which will construct it for you.
"""
# pylint:disable-msg=C0103
def __init__(self, fp):
"""Creates an annotations parser that reads the given file-like object.
"""
self.headers = {}
if isinstance(fp, (str, unicode)):
# not working..
if fp.endswith(".gz"):
# This is a gzipped file
from gzip import GzipFile
fp = GzipFile(fp)
else:
fp = open(fp)
self._line_iterator = pushback_iterator(self._lines(fp))
self._read_headers()
@staticmethod
def _lines(fp):
"""Iterates over the lines of a given file, removing
comments and empty lines"""
for line in fp:
line = line.strip()
if not line:
continue
yield line
def _read_headers(self):
"""Reads the headers from the annotations file"""
for line in self._line_iterator:
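            # Header lines start with '!' and (judging by the parsing below)
            # look roughly like "!gaf-version: 1.0"; the text before the first
            # colon is the key and the remainder is the value.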
if line[0] != '!':
# We have reached the end of headers
self._line_iterator.push_back(line)
return
parts = [part.rstrip() for part in line[1:].split(":", 1)]
if len(parts) < 2:
continue
key, value = parts
if key[0] == ' ':
continue
value = value.strip()
try:
self.headers[key].append(value)
except KeyError:
self.headers[key] = [value]
# pylint:disable-msg=W0142
# W0142: used * or ** magic
    def annotationss(self):
        """Iterates over the annotations in this annotations file,
        yielding an `Annotation`_ object for each annotation."""
for line in self._line_iterator:
if line[0] == '!':
continue
parts = line.strip().split("\t")
yield Annotation(*parts)
def __iter__(self):
return self.annotationss()
    def parse(self, ontology):
        """Parses the file handle given during construction time,
        yielding each `Annotation`_ in the file.
:Parameters:
- `ontology`: the ontology being used to map term IDs to
term names
"""
for annotations in self:
yield annotations
# TODO
pass
| marco-mariotti/selenoprofiles | libraries/annotations/GO/Parsers/annotparser.py | Python | gpl-2.0 | 4,007 |
# -*- coding: utf-8 -*-
from pyfr.integrators.std.base import BaseStdIntegrator
class BaseStdStepper(BaseStdIntegrator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Add kernel cache
self._axnpby_kerns = {}
def collect_stats(self, stats):
super().collect_stats(stats)
stats.set('solver-time-integrator', 'nsteps', self.nsteps)
stats.set('solver-time-integrator', 'nfevals', self._stepper_nfevals)
class StdEulerStepper(BaseStdStepper):
stepper_name = 'euler'
@property
def _stepper_has_errest(self):
return False
@property
def _stepper_nfevals(self):
return self.nsteps
@property
def _stepper_nregs(self):
return 2
@property
def _stepper_order(self):
return 1
def step(self, t, dt):
add, rhs = self._add, self.system.rhs
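        # Note: judging by its usage throughout these steppers,
        # add(a0, x0, a1, x1, ...) computes x0 = a0*x0 + a1*x1 + ... in place
        # (an axnpby-style kernel), while rhs(t, uin, fout) writes -∇·f(uin)
        # evaluated at time t into the register fout.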
ut, f = self._regidx
rhs(t, ut, f)
add(1.0, ut, dt, f)
return ut
class StdTVDRK3Stepper(BaseStdStepper):
stepper_name = 'tvd-rk3'
@property
def _stepper_has_errest(self):
return False
@property
def _stepper_nfevals(self):
return 3*self.nsteps
@property
def _stepper_nregs(self):
return 3
@property
def _stepper_order(self):
return 3
def step(self, t, dt):
add, rhs = self._add, self.system.rhs
# Get the bank indices for each register (n, n+1, rhs)
r0, r1, r2 = self._regidx
# Ensure r0 references the bank containing u(t)
if r0 != self._idxcurr:
r0, r1 = r1, r0
# First stage; r2 = -∇·f(r0); r1 = r0 + dt*r2
rhs(t, r0, r2)
add(0.0, r1, 1.0, r0, dt, r2)
# Second stage; r2 = -∇·f(r1); r1 = 0.75*r0 + 0.25*r1 + 0.25*dt*r2
rhs(t + dt, r1, r2)
add(0.25, r1, 0.75, r0, 0.25*dt, r2)
# Third stage; r2 = -∇·f(r1); r1 = 1.0/3.0*r0 + 2.0/3.0*r1 + 2.0/3.0*dt*r2
rhs(t + 0.5*dt, r1, r2)
add(2.0/3.0, r1, 1.0/3.0, r0, 2.0/3.0*dt, r2)
# Return the index of the bank containing u(t + dt)
return r1
class StdRK4StdStepper(BaseStdStepper):
stepper_name = 'rk4'
@property
def _stepper_has_errest(self):
return False
@property
def _stepper_nfevals(self):
return 4*self.nsteps
@property
def _stepper_nregs(self):
return 3
@property
def _stepper_order(self):
return 4
def step(self, t, dt):
add, rhs = self._add, self.system.rhs
# Get the bank indices for each register
r0, r1, r2 = self._regidx
# Ensure r0 references the bank containing u(t)
if r0 != self._idxcurr:
r0, r1 = r1, r0
# First stage; r1 = -∇·f(r0)
rhs(t, r0, r1)
# Second stage; r2 = r0 + dt/2*r1; r2 = -∇·f(r2)
add(0.0, r2, 1.0, r0, dt/2.0, r1)
rhs(t + dt/2.0, r2, r2)
# As no subsequent stages depend on the first stage we can
# reuse its register to start accumulating the solution with
# r1 = r0 + dt/6*r1 + dt/3*r2
add(dt/6.0, r1, 1.0, r0, dt/3.0, r2)
# Third stage; here we reuse the r2 register
# r2 = r0 + dt/2*r2
# r2 = -∇·f(r2)
add(dt/2.0, r2, 1.0, r0)
rhs(t + dt/2.0, r2, r2)
# Accumulate; r1 = r1 + dt/3*r2
add(1.0, r1, dt/3.0, r2)
# Fourth stage; again we reuse r2
# r2 = r0 + dt*r2
# r2 = -∇·f(r2)
add(dt, r2, 1.0, r0)
rhs(t + dt, r2, r2)
# Final accumulation r1 = r1 + dt/6*r2 = u(t + dt)
add(1.0, r1, dt/6.0, r2)
# Return the index of the bank containing u(t + dt)
return r1
class StdRKVdH2RStepper(BaseStdStepper):
# Coefficients
a = []
b = []
bhat = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Compute the c and error coeffs
self.c = [0.0] + [sum(self.b[:i]) + ai for i, ai in enumerate(self.a)]
self.e = [b - bh for b, bh in zip(self.b, self.bhat)]
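        # For these low-storage (2R) van der Houwen schemes the abscissae are
        # c_1 = 0 and c_k = b_1 + ... + b_{k-2} + a_{k-1}, while e = b - bhat
        # holds the weights of the embedded error estimate.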
self._nstages = len(self.c)
@property
def _stepper_has_errest(self):
return self._controller_needs_errest and len(self.bhat)
@property
def _stepper_nfevals(self):
return len(self.b)*self.nsteps
@property
def _stepper_nregs(self):
return 4 if self._stepper_has_errest else 2
def step(self, t, dt):
add, rhs = self._add, self.system.rhs
errest = self._stepper_has_errest
r1 = self._idxcurr
if errest:
r2, rold, rerr = set(self._regidx) - {r1}
# Save the current solution
add(0.0, rold, 1.0, r1)
else:
r2, = set(self._regidx) - {r1}
# Evaluate the stages in the scheme
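        # Two-register (van der Houwen) update: r2 holds -∇·f of the current
        # stage value; adding a_i*dt*r2 to r1 turns r1 into the next stage
        # value, and r2 is then rebuilt as r1 + (b_i - a_i)*dt*r2, i.e. the
        # b-weighted running solution.  The swap below exchanges the two roles,
        # so the final stage leaves u(t + dt) in the register that is returned.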
for i in range(self._nstages):
# Compute -∇·f
rhs(t + self.c[i]*dt, r2 if i > 0 else r1, r2)
# Accumulate the error term in rerr
if errest:
add(1.0 if i > 0 else 0.0, rerr, self.e[i]*dt, r2)
# Sum (special-casing the final stage)
if i < self._nstages - 1:
add(1.0, r1, self.a[i]*dt, r2)
add((self.b[i] - self.a[i])*dt, r2, 1.0, r1)
else:
add(1.0, r1, self.b[i]*dt, r2)
# Swap
r1, r2 = r2, r1
# Return
return (r2, rold, rerr) if errest else r2
class StdRK34Stepper(StdRKVdH2RStepper):
stepper_name = 'rk34'
a = [
11847461282814 / 36547543011857,
3943225443063 / 7078155732230,
-346793006927 / 4029903576067
]
b = [
1017324711453 / 9774461848756,
8237718856693 / 13685301971492,
57731312506979 / 19404895981398,
-101169746363290 / 37734290219643
]
bhat = [
15763415370699 / 46270243929542,
514528521746 / 5659431552419,
27030193851939 / 9429696342944,
-69544964788955 / 30262026368149
]
@property
def _stepper_order(self):
return 3
class StdRK45Stepper(StdRKVdH2RStepper):
stepper_name = 'rk45'
a = [
970286171893 / 4311952581923,
6584761158862 / 12103376702013,
2251764453980 / 15575788980749,
26877169314380 / 34165994151039
]
b = [
1153189308089 / 22510343858157,
1772645290293 / 4653164025191,
-1672844663538 / 4480602732383,
2114624349019 / 3568978502595,
5198255086312 / 14908931495163
]
bhat = [
1016888040809 / 7410784769900,
11231460423587 / 58533540763752,
-1563879915014 / 6823010717585,
606302364029 / 971179775848,
1097981568119 / 3980877426909
]
@property
def _stepper_order(self):
return 4
| Aerojspark/PyFR | pyfr/integrators/std/steppers.py | Python | bsd-3-clause | 6,858 |
from django.apps import AppConfig
class SssoonConfig(AppConfig):
name = 'sssoon'
| KINGH242/django-sssoon | sssoon/apps.py | Python | bsd-3-clause | 87 |
from __future__ import unicode_literals
import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urljoin, urlparse, urlsplit
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class RedirectCycleError(Exception):
"""
The test client has been asked to follow a redirect loop.
"""
def __init__(self, message, last_response):
super(RedirectCycleError, self).__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload(object):
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in Real Life.
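
    For illustration (an added note, not from the original source):
    FakePayload(b'abcd').read(3) returns b'abc'; a further read(3) would
    trip the "Cannot read more than the available bytes" assertion, since
    only one byte remains.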
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
            raise ValueError("Unable to write a payload after it has been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def closing_iterator_wrapper(iterable, close):
try:
for item in iterable:
yield item
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensures
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes. Uses the WSGI
interface to compose requests, but returns the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# We're emulating a WSGI server; we must call the close method
# on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
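
    For illustration (added note), a single text field renders roughly as::

        --BoUnDaRyStRiNg
        Content-Disposition: form-data; name="key"

        value

    and the whole payload ends with a '--BoUnDaRyStRiNg--' line.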
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, six.string_types) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': str('/'),
'REMOTE_ADDR': str('127.0.0.1'),
'REQUEST_METHOD': str('GET'),
'SCRIPT_NAME': str(''),
'SERVER_NAME': str('testserver'),
'SERVER_PORT': str('80'),
'SERVER_PROTOCOL': str('HTTP/1.1'),
'wsgi.version': (1, 0),
'wsgi.url_scheme': str('http'),
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = force_str(parsed[2])
# If there are parameters, add them
if parsed[3]:
path += str(";") + force_str(parsed[3])
path = uri_to_iri(path).encode(UTF_8)
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. We replicate this behavior here.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode(ISO_8859_1) if six.PY3 else path
def get(self, path, data=None, secure=False, **extra):
"Construct a GET request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"Construct a POST request."
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"Construct a HEAD request."
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"Construct a TRACE request."
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PUT request."
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a PATCH request."
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct a DELETE request."
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Constructs an arbitrary HTTP request."""
parsed = urlparse(force_str(path))
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': str(method),
'SERVER_PORT': str('443') if secure else str('80'),
'wsgi.url_scheme': str('https') if secure else str('http'),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
query_string = force_bytes(parsed[4])
# WSGI requires latin-1 encoded strings. See get_path_info().
if six.PY3:
query_string = query_string.decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
@property
def session(self):
"""
Obtains the current session variables.
"""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
six.reraise(*exc_info)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, secure=secure,
**extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a resource to the server using PATCH.
"""
response = super(Client, self).patch(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data,
content_type=content_type,
secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""
Send a TRACE request to the server.
"""
response = super(Client, self).trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
        Sets the Client to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
if backend is None:
backend = settings.AUTHENTICATION_BACKENDS[0]
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if 'application/json' not in response.get('Content-Type'):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
return json.loads(response.content.decode(), **extra)
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
response = self.get(path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
| darkryder/django | django/test/client.py | Python | bsd-3-clause | 27,492 |
# -*- coding: utf-8 -*-
import os
import collections
import _pickle as pickle
import numpy as np
import pandas as pd
class Dataset(object):
def __init__(self, is_training, utils_dir, data_path, batch_size,
seq_length, vocab, labels):
self.data_path = data_path
self.batch_size = batch_size
self.seq_length = seq_length
self.utils_dir = utils_dir
label_file = os.path.join(utils_dir, 'labels.pkl')
vocab_file = os.path.join(utils_dir, 'vocab.pkl')
if is_training:
corpus_file = os.path.join(data_path, 'data.csv')
train_file = os.path.join(data_path, 'train.csv')
if not os.path.exists(vocab_file):
print('reading corpus and processing data')
self.preprocess(vocab_file, corpus_file, train_file, label_file)
else:
print('loading vocab and processing data')
self.load_preprocessed(vocab_file, train_file, label_file)
elif vocab is not None and labels is not None:
self.vocab = vocab
self.vocab_size = len(vocab) + 1
self.labels = labels
self.label_size = len(self.labels)
train_file = os.path.join(data_path, 'train.csv')
self.load_preprocessed(None, train_file, label_file)
self.reset_batch_pointer()
def transform(self, d):
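        # Map each character of d to its vocabulary id (unknown characters
        # become 0), then truncate or right-pad with 0 so that exactly
        # seq_length ids are returned.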
new_d = list(map(self.vocab.get, d[:self.seq_length]))
new_d = list(map(lambda i: i if i else 0, new_d))
        if len(new_d) >= self.seq_length:
            new_d = new_d[:self.seq_length]
        else:
            new_d = new_d + [0] * (self.seq_length - len(new_d))
return new_d
def preprocess(self, vocab_file, corpus_file, data_path, label_file):
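        # Build the label and character vocabularies from the corpus, persist
        # them as pickles, and turn the training csv into an integer tensor
        # whose last column is the label id.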
corpus = pd.read_csv(corpus_file, encoding='utf8')
labels = corpus['label'].drop_duplicates().values
corpus = corpus['text']
corpus = ''.join(map(lambda i: i.strip(), corpus))
self.labels = dict(zip(labels, range(len(labels))))
self.label_size = len(labels)
with open(label_file, 'wb') as f:
pickle.dump(self.labels, f)
counter = collections.Counter(corpus)
count_pairs = sorted(counter.items(), key=lambda i: -i[1])
self.chars, _ = zip(*count_pairs)
with open(vocab_file, 'wb') as f:
pickle.dump(self.chars, f)
self.vocab_size = len(self.chars) + 1
self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def load_preprocessed(self, vocab_file, data_path, label_file):
if vocab_file is not None:
with open(vocab_file, 'rb') as f:
self.chars = pickle.load(f)
self.vocab_size = len(self.chars) + 1
self.vocab = dict(zip(self.chars, range(1, len(self.chars) + 1)))
if label_file is not None:
with open(label_file, 'rb') as f:
self.labels = pickle.load(f)
self.label_size = len(self.labels)
data = pd.read_csv(data_path, encoding='utf8')
tensor_x = np.array(list(map(self.transform, data['text'])))
tensor_y = np.array(list(map(self.labels.get, data['label'])))
self.tensor = np.c_[tensor_x, tensor_y].astype(int)
def create_batches(self):
self.num_batches = int(self.tensor.shape[0] / self.batch_size)
if self.num_batches == 0:
            assert False, 'Not enough data; make batch_size smaller.'
np.random.shuffle(self.tensor)
tensor = self.tensor[:self.num_batches * self.batch_size]
self.x_batches = np.split(tensor[:, :-1], self.num_batches, 0)
self.y_batches = np.split(tensor[:, -1], self.num_batches, 0)
def next_batch(self):
x = self.x_batches[self.pointer]
y = self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.create_batches()
self.pointer = 0 | koala-ai/tensorflow_nlp | nlp/text_classification/rnn_muticlass/dataset/dataset.py | Python | apache-2.0 | 4,263 |
#! /usr/bin/env python
# coding:utf-8
from numpy import *
from matplotlib.pyplot import *
from scipy.optimize import fmin_bfgs
class ML():
def __init__(self,x=[],y=[]):
self.X=x
self.Y=y
self.Theta=[]
self.Alpha=0.01
self.Iterations=1500
def load(self,fname,d=','):
data=loadtxt(fname,delimiter=d)
self.X=data[:,:-1]
self.Y=data[:,-1:]
def initXY(self,data):
m=data.shape[0]
x=hstack((ones((m,1)),data))
return x,self.Y,m
def Normalization(self,data):
mu=mean(data,0)
sigma=std(data,0)
data_Norm=(data-mu)/sigma
return data_Norm,mu,sigma
def J(self):
pass
def predict(self,x):
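        # Prepend the bias term (1) to the raw feature list before applying
        # the learned parameters.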
return array([1]+x).dot(self.Theta)
def evaluate(self):
pass
def plot(self):
pass
def show(self):
show()
class LogisticRegression(ML):
    def __init__(self,x=[],y=[]):
        # Initialise the shared attributes (X, Y, Theta, Alpha, Iterations)
        ML.__init__(self,x,y)
        self.Lambda=1
self.data_Norm=0
self.mu=0
self.sigma=0
def sigmoid(self,z):
return 1/(1+exp(-z))
#########################################################
# CostFunc:
    # sigmoid() has a problem: when z is very large the returned value
    # saturates to exactly 1, which makes log(1-h) blow up and the cost nan
# Solution:
# normalize the feature first
#########################################################
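    # An alternative remedy (not in the original code) is to clamp h away
    # from 0 and 1 before taking logs, e.g.
    #   h = clip(self.sigmoid(x.dot(theta)), 1e-10, 1 - 1e-10)
    # which keeps log(h) and log(1-h) finite even for badly scaled features.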
def J(self,theta):
x,y,m=self.initXY(self.data_Norm)
theta=theta.reshape(theta.shape[0],1)
h=self.sigmoid(x.dot(theta))
j=(-y.T.dot(log(h))-(1-y).T.dot(log(1-h)))/m
return sum(j)
def gradient(self,theta):
x,y,m=self.initXY(self.data_Norm)
theta=theta.reshape(theta.shape[0],1)
h=self.sigmoid(x.dot(theta))
grad=((h-y).T.dot(x)).T/m
return grad.flatten()
def minJ(self):
initial_theta=zeros((self.X.shape[1]+1))
self.Theta=fmin_bfgs(self.J,initial_theta,fprime=self.gradient)
def featureNormalize(self):
self.data_Norm,self.mu,self.sigma=self.Normalization(self.X)
def plot(self):
pos,neg=where(self.Y==1),where(self.Y==0)
plot(self.X[pos,0].T,self.X[pos,1].T,'b+')
plot(self.X[neg,0].T,self.X[neg,1].T,'ro')
return self
def plotDecisionLine(self):
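        # The decision boundary is where theta0 + theta1*x1 + theta2*x2 = 0,
        # i.e. x2 = -(theta1*x1 + theta0)/theta2; note that it is drawn in the
        # normalized feature space.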
theta=self.Theta
x=self.data_Norm
plot_x=array([min(x[:,0]),max(x[:,0])])
plot_y=(-1/theta[2])*(theta[1]*plot_x+theta[0])
pos,neg=where(self.Y==1),where(self.Y==0)
plot(x[pos,0].T,x[pos,1].T,'b+')
plot(x[neg,0].T,x[neg,1].T,'ro')
plot(plot_x,plot_y)
return self
if __name__=='__main__':
test=LogisticRegression()
test.load('ex2data1.txt')
test.featureNormalize()
test.minJ()
test.plotDecisionLine().show() | Urinx/Machine_Learning | Logistic-Regression/LogisticRegression.py | Python | gpl-2.0 | 2,429 |
"""
Computes the data to display on the Instructor Dashboard
"""
from collections import defaultdict
from util.json_request import JsonResponse
import json
from courseware import models
from django.conf import settings
from django.db.models import Count
from django.utils.translation import ugettext as _
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from instructor_analytics.csvs import create_csv_response
from analyticsclient.client import Client
from analyticsclient.exceptions import NotFoundError
from student.models import CourseAccessRole
from opaque_keys.edx import locator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
# Used to limit the length of list displayed to the screen.
MAX_SCREEN_LIST_LENGTH = 250
PROB_TYPE_LIST = [
'problem',
'lti',
]
# exclude these in Metrics
NON_STUDENT_ROLES = ['instructor', 'staff']
def get_non_student_list(course_key):
"""
Find all user_ids with instructor or staff roles in student_courseaccessrole table
"""
non_students = CourseAccessRole.objects.filter(
course_id=course_key,
role__in=NON_STUDENT_ROLES,
).values('user_id').distinct()
return [non_student['user_id'] for non_student in non_students]
def get_problem_grade_distribution(course_id, enrollment):
"""
Returns the grade distribution per problem for the course
`course_id` the course ID for the course interested in
`enrollment` the number of students enrolled in this course.
Output is 2 dicts:
    'prob_grade_distrib' where the key is the problem 'module_id' and the value is a dict with:
'max_grade' - max grade for this problem
'grade_distrib' - array of tuples (`grade`,`count`).
'total_student_count' where the key is problem 'module_id' and the value is number of students
attempting the problem
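
    Illustrative (hypothetical) shape of the return values::

        prob_grade_distrib[usage_key] == {'max_grade': 10.0,
                                          'grade_distrib': [(5.0, 3), (10.0, 17)]}
        total_student_count[usage_key] == 20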
"""
non_student_list = get_non_student_list(course_id)
prob_grade_distrib = {}
total_student_count = defaultdict(int)
if enrollment <= settings.MAX_ENROLLEES_FOR_METRICS_USING_DB or not settings.ANALYTICS_DATA_URL:
# Aggregate query on studentmodule table for grade data for all problems in course
queryset = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__in=PROB_TYPE_LIST,
).exclude(student_id__in=non_student_list).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
# Loop through resultset building data for each problem
for row in queryset:
curr_problem = course_id.make_usage_key_from_deprecated_string(row['module_state_key'])
# Build set of grade distributions for each problem that has student responses
if curr_problem in prob_grade_distrib:
prob_grade_distrib[curr_problem]['grade_distrib'].append((row['grade'], row['count_grade']))
                if prob_grade_distrib[curr_problem]['max_grade'] < row['max_grade']:
                    prob_grade_distrib[curr_problem]['max_grade'] = row['max_grade']
else:
prob_grade_distrib[curr_problem] = {
'max_grade': row['max_grade'],
'grade_distrib': [(row['grade'], row['count_grade']), ],
}
# Build set of total students attempting each problem
total_student_count[curr_problem] += row['count_grade']
else:
# Retrieve course object down to problems
course = modulestore().get_course(course_id, depth=4)
# Connect to analytics data client
client = Client(base_url=settings.ANALYTICS_DATA_URL, auth_token=settings.ANALYTICS_DATA_TOKEN)
for section in course.get_children():
for subsection in section.get_children():
for unit in subsection.get_children():
for child in unit.get_children():
if child.location.category not in PROB_TYPE_LIST:
continue
problem_id = child.location
problem = client.modules(course_id, problem_id)
try:
grade_distribution = problem.grade_distribution()
except NotFoundError:
grade_distribution = []
for score in grade_distribution:
total_student_count[problem_id] += score['count']
if problem_id in prob_grade_distrib:
if prob_grade_distrib[problem_id]['max_grade'] < score['max_grade']:
prob_grade_distrib[problem_id]['max_grade'] = score['max_grade']
prob_grade_distrib[problem_id]['grade_distrib'].append((score['grade'], score['count']))
else:
prob_grade_distrib[problem_id] = {
'max_grade': score['max_grade'],
'grade_distrib': [(score['grade'], score['count']), ],
}
return prob_grade_distrib, total_student_count
def get_sequential_open_distrib(course_id, enrollment):
"""
Returns the number of students that opened each subsection/sequential of the course
`course_id` the course ID for the course interested in
`enrollment` the number of students enrolled in this course.
Outputs a dict mapping the 'module_id' to the number of students that have opened that subsection/sequential.
"""
sequential_open_distrib = {}
non_student_list = get_non_student_list(course_id)
if enrollment <= settings.MAX_ENROLLEES_FOR_METRICS_USING_DB or not settings.ANALYTICS_DATA_URL:
# Aggregate query on studentmodule table for "opening a subsection" data
queryset = models.StudentModule.objects.filter(
course_id__exact=course_id,
module_type__exact='sequential',
).exclude(student_id__in=non_student_list).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
for row in queryset:
module_id = course_id.make_usage_key_from_deprecated_string(row['module_state_key'])
sequential_open_distrib[module_id] = row['count_sequential']
else:
# Retrieve course object down to subsection
course = modulestore().get_course(course_id, depth=2)
# Connect to analytics data client
client = Client(base_url=settings.ANALYTICS_DATA_URL, auth_token=settings.ANALYTICS_DATA_TOKEN)
for section in course.get_children():
for subsection in section.get_children():
module = client.modules(course_id, subsection.location)
try:
sequential_open = module.sequential_open_distribution()
except NotFoundError:
pass
else:
sequential_open_distrib[subsection.location] = sequential_open[0]['count']
return sequential_open_distrib
def get_problem_set_grade_distrib(course_id, problem_set, enrollment):
"""
Returns the grade distribution for the problems specified in `problem_set`.
`course_id` the course ID for the course interested in
`problem_set` an array of UsageKeys representing problem module_id's.
`enrollment` the number of students enrolled in this course.
    Requests from the database a count of each grade for each problem in the `problem_set`.
Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
'max_grade' - the maximum grade possible for the course
'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
"""
non_student_list = get_non_student_list(course_id)
prob_grade_distrib = {}
if enrollment <= settings.MAX_ENROLLEES_FOR_METRICS_USING_DB or not settings.ANALYTICS_DATA_URL:
# Aggregate query on studentmodule table for grade data for set of problems in course
queryset = models.StudentModule.objects.filter(
course_id__exact=course_id,
grade__isnull=False,
module_type__in=PROB_TYPE_LIST,
module_state_key__in=problem_set,
).exclude(student_id__in=non_student_list).values(
'module_state_key',
'grade',
'max_grade',
).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
# Loop through resultset building data for each problem
for row in queryset:
problem_id = course_id.make_usage_key_from_deprecated_string(row['module_state_key'])
if problem_id not in prob_grade_distrib:
prob_grade_distrib[problem_id] = {
'max_grade': 0,
'grade_distrib': [],
}
curr_grade_distrib = prob_grade_distrib[problem_id]
curr_grade_distrib['grade_distrib'].append((row['grade'], row['count_grade']))
if curr_grade_distrib['max_grade'] < row['max_grade']:
curr_grade_distrib['max_grade'] = row['max_grade']
else:
# Connect to analytics data client
client = Client(base_url=settings.ANALYTICS_DATA_URL, auth_token=settings.ANALYTICS_DATA_TOKEN)
for problem in problem_set:
module = client.modules(course_id, problem)
try:
grade_distribution = module.grade_distribution()
except NotFoundError:
grade_distribution = []
for score in grade_distribution:
if problem in prob_grade_distrib:
if prob_grade_distrib[problem]['max_grade'] < score['max_grade']:
prob_grade_distrib[problem]['max_grade'] = score['max_grade']
prob_grade_distrib[problem]['grade_distrib'].append((score['grade'], score['count']))
else:
prob_grade_distrib[problem] = {
'max_grade': score['max_grade'],
'grade_distrib': [(score['grade'], score['count'])],
}
return prob_grade_distrib
def construct_problem_data(prob_grade_distrib, total_student_count, c_subsection, c_unit, c_problem, component):
"""
Returns dict of problem with student grade data.
`prob_grade_distrib` Dict of grade distribution for all problems in the course.
`total_student_count` Dict of number of students attempting each problem.
`c_subsection` Incremental subsection count.
`c_unit` Incremental unit count.
`c_problem` Incremental problem count.
`component` The component for which data is being returned.
Returns a dict of problem label and data for use in d3 rendering.
"""
c_problem += 1
stack_data = []
# Construct label to display for this problem
label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
if component.location in prob_grade_distrib:
problem_info = prob_grade_distrib[component.location]
# Get problem_name for tooltip
problem_name = own_metadata(component).get('display_name', '')
# Compute percent of this grade over max_grade
max_grade = float(problem_info['max_grade'])
for (grade, count_grade) in problem_info['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
# Compute percent of students with this grade
student_count_percent = 0
if total_student_count[component.location] > 0:
student_count_percent = count_grade * 100 / total_student_count[component.location]
# Tooltip parameters for problem in grade distribution view
tooltip = {
'type': 'problem',
'label': label,
'problem_name': problem_name,
'count_grade': count_grade,
'percent': percent,
'grade': grade,
'max_grade': max_grade,
'student_count_percent': student_count_percent,
}
# Construct data to be sent to d3
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
'module_url': component.location.to_deprecated_string(),
})
problem = {
'xValue': label,
'stackData': stack_data,
}
return problem, c_problem
def get_d3_problem_grade_distrib(course_id, enrollment):
"""
Returns problem grade distribution information for each section, data already in format for d3 function.
`course_id` the course ID for the course interested in
`enrollment` the number of students enrolled in this course.
Returns an array of dicts in the order of the sections. Each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
"""
d3_data = []
prob_grade_distrib, total_student_count = get_problem_grade_distribution(course_id, enrollment)
# Retrieve course object down to problems
course = modulestore().get_course(course_id, depth=4)
# Iterate through sections, subsections, units, problems
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
for subsection in section.get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
# Student data is at the problem level
if child.location.category in PROB_TYPE_LIST:
problem, c_problem = construct_problem_data(
prob_grade_distrib,
total_student_count,
c_subsection,
c_unit,
c_problem,
child
)
data.append(problem)
elif child.location.category in settings.TYPES_WITH_CHILD_PROBLEMS_LIST:
for library_problem in child.get_children():
problem, c_problem = construct_problem_data(
prob_grade_distrib,
total_student_count,
c_subsection,
c_unit,
c_problem,
library_problem
)
data.append(problem)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_sequential_open_distrib(course_id, enrollment):
"""
Returns how many students opened a sequential/subsection for each section, data already in format for d3 function.
`course_id` the course ID for the course interested in
`enrollment` the number of students enrolled in this course.
Returns an array in the order of the sections and each dict has:
'display_name' - display name for the section
'data' - data for the d3_stacked_bar_graph function of how many students opened each sequential/subsection
"""
d3_data = []
# Retrieve course object down to subsection
course = modulestore().get_course(course_id, depth=2)
sequential_open_distrib = get_sequential_open_distrib(course_id, enrollment)
# Iterate through sections, subsections
for section in course.get_children():
curr_section = {}
curr_section['display_name'] = own_metadata(section).get('display_name', '')
data = []
c_subsection = 0
# Construct data for each subsection to be sent to d3
for subsection in section.get_children():
c_subsection += 1
subsection_name = own_metadata(subsection).get('display_name', '')
if subsection.location in sequential_open_distrib:
open_count = sequential_open_distrib[subsection.location]
else:
open_count = 0
stack_data = []
# Tooltip parameters for subsection in open_distribution view
tooltip = {
'type': 'subsection',
'num_students': open_count,
'subsection_num': c_subsection,
'subsection_name': subsection_name,
}
stack_data.append({
'color': 0,
'value': open_count,
'tooltip': tooltip,
'module_url': subsection.location.to_deprecated_string(),
})
subsection = {
'xValue': "SS {0}".format(c_subsection),
'stackData': stack_data,
}
data.append(subsection)
curr_section['data'] = data
d3_data.append(curr_section)
return d3_data
def get_d3_section_grade_distrib(course_id, section, enrollment):
"""
Returns the grade distribution for the problems in the `section` section in a format for the d3 code.
`course_id` a string that is the course's ID.
`section` an int that is a zero-based index into the course's list of sections.
`enrollment` the number of students enrolled in this course.
Navigates to the section specified to find all the problems associated with that section and then finds the grade
    distribution for those problems. Finally returns an object formatted the way d3_stacked_bar_graph.js expects its
    data object to be.
If this is requested multiple times quickly for the same course, it is better to call
get_d3_problem_grade_distrib and pick out the sections of interest.
Returns an array of dicts with the following keys (taken from d3_stacked_bar_graph.js's documentation)
'xValue' - Corresponding value for the x-axis
'stackData' - Array of objects with key, value pairs that represent a bar:
'color' - Defines what "color" the bar will map to
'value' - Maps to the height of the bar, along the y-axis
'tooltip' - (Optional) Text to display on mouse hover
"""
# Retrieve course object down to problems
course = modulestore().get_course(course_id, depth=4)
problem_set = []
problem_info = {}
c_subsection = 0
for subsection in course.get_children()[section].get_children():
c_subsection += 1
c_unit = 0
for unit in subsection.get_children():
c_unit += 1
c_problem = 0
for child in unit.get_children():
if child.location.category in PROB_TYPE_LIST:
c_problem += 1
problem_set.append(child.location)
problem_info[child.location] = {
'id': child.location.to_deprecated_string(),
'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
'display_name': own_metadata(child).get('display_name', ''),
}
elif child.location.category in settings.TYPES_WITH_CHILD_PROBLEMS_LIST:
for library_problem in child.get_children():
c_problem += 1
problem_set.append(library_problem.location)
problem_info[library_problem.location] = {
'id': library_problem.location.to_deprecated_string(),
'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
'display_name': own_metadata(library_problem).get('display_name', ''),
}
grade_distrib = get_problem_set_grade_distrib(course_id, problem_set, enrollment)
d3_data = []
# Construct data for each problem to be sent to d3
for problem in problem_set:
stack_data = []
if problem in grade_distrib:
max_grade = float(grade_distrib[problem]['max_grade'])
for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
percent = 0.0
if max_grade > 0:
percent = round((grade * 100.0) / max_grade, 1)
                # Construct tooltip for problem in grade distribution view
tooltip = {
'type': 'problem',
'problem_info_x': problem_info[problem]['x_value'],
'count_grade': count_grade,
'percent': percent,
'problem_info_n': problem_info[problem]['display_name'],
'grade': grade,
'max_grade': max_grade,
}
stack_data.append({
'color': percent,
'value': count_grade,
'tooltip': tooltip,
})
d3_data.append({
'xValue': problem_info[problem]['x_value'],
'stackData': stack_data,
})
return d3_data
def get_section_display_name(course_id):
"""
Returns an array of the display names for each section in the course.
`course_id` the course ID for the course interested in
The ith string in the array is the display name of the ith section in the course.
"""
course = modulestore().get_course(course_id, depth=4)
section_display_name = [""] * len(course.get_children())
i = 0
for section in course.get_children():
section_display_name[i] = own_metadata(section).get('display_name', '')
i += 1
return section_display_name
def get_array_section_has_problem(course_id):
"""
Returns an array of true/false whether each section has problems.
`course_id` the course ID for the course interested in
The ith value in the array is true if the ith section in the course contains problems and false otherwise.
"""
course = modulestore().get_course(course_id, depth=4)
b_section_has_problem = [False] * len(course.get_children())
i = 0
for section in course.get_children():
for subsection in section.get_children():
for unit in subsection.get_children():
for child in unit.get_children():
if child.location.category in PROB_TYPE_LIST:
b_section_has_problem[i] = True
break # out of child loop
elif child.location.category in settings.TYPES_WITH_CHILD_PROBLEMS_LIST:
for library_problem in child.get_children():
if library_problem.location.category in PROB_TYPE_LIST:
b_section_has_problem[i] = True
break # out of child loop
if b_section_has_problem[i]:
break # out of unit loop
if b_section_has_problem[i]:
break # out of subsection loop
i += 1
return b_section_has_problem
def get_students_opened_subsection(request, csv=False):
"""
Get a list of students that opened a particular subsection.
If 'csv' is False, returns a dict of student's name: username.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames for CSV download.
"""
csv = request.GET.get('csv')
course_id = request.GET.get('course_id')
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
module_state_key = course_key.make_usage_key_from_deprecated_string(request.GET.get('module_id'))
non_student_list = get_non_student_list(course_key)
# Query for "opened a subsection" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key__exact=module_state_key,
module_type__exact='sequential',
).exclude(student_id__in=non_student_list).values('student__id', 'student__username', 'student__profile__name').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
results.append({
'name': student['student__profile__name'],
'username': student['student__username'],
})
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
# Subsection name is everything after 3rd space in tooltip
filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
header = [_("Name").encode('utf-8'), _("Username").encode('utf-8')]
for student in students:
results.append([student['student__profile__name'], student['student__username']])
response = create_csv_response(filename, header, results)
return response
def get_students_problem_grades(request, csv=False):
"""
Get a list of students and grades for a particular problem.
If 'csv' is False, returns a dict of student's name: username: grade: percent.
If 'csv' is True, returns a header array, and an array of arrays in the format:
student names, usernames, grades, percents for CSV download.
"""
csv = request.GET.get('csv')
course_id = request.GET.get('course_id')
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
module_state_key = course_key.make_usage_key_from_deprecated_string(request.GET.get('module_id'))
non_student_list = get_non_student_list(course_key)
# Query for "problem grades" students
students = models.StudentModule.objects.select_related('student').filter(
module_state_key=module_state_key,
module_type__in=PROB_TYPE_LIST,
grade__isnull=False,
).exclude(student_id__in=non_student_list).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')
results = []
if not csv:
# Restrict screen list length
# Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
# without doing another select.
for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
student_dict = {
'name': student['student__profile__name'],
'username': student['student__username'],
'grade': student['grade'],
}
student_dict['percent'] = 0
if student['max_grade'] > 0:
student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
results.append(student_dict)
max_exceeded = False
if len(results) > MAX_SCREEN_LIST_LENGTH:
# Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
del results[-1]
max_exceeded = True
response_payload = {
'results': results,
'max_exceeded': max_exceeded,
}
return JsonResponse(response_payload)
else:
tooltip = request.GET.get('tooltip')
filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
header = [_("Name"), _("Username"), _("Grade"), _("Percent")]
for student in students:
percent = 0
if student['max_grade'] > 0:
percent = round(student['grade'] * 100 / student['max_grade'])
results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
response = create_csv_response(filename, header, results)
return response
def post_metrics_data_csv(request):
"""
Generate a list of opened subsections or problems for the entire course for CSV download.
Returns a header array, and an array of arrays in the format:
section, subsection, count of students for subsections
or section, problem, name, count of students, percent of students, score for problems.
"""
data = json.loads(request.POST['data'])
sections = json.loads(data['sections'])
tooltips = json.loads(data['tooltips'])
course_id = data['course_id']
data_type = data['data_type']
results = []
if data_type == 'subsection':
header = [_("Section"), _("Subsection"), _("Opened by this number of students")]
filename = sanitize_filename(_('subsections') + '_' + course_id)
elif data_type == 'problem':
header = [
_("Section"), _("Problem"), _("Name"), _("Count of Students"),
_("Percent of Students"), _("Score"),
]
filename = sanitize_filename(_('problems') + '_' + course_id)
for index, section in enumerate(sections):
results.append([section])
# tooltips array is array of dicts for subsections and
# array of array of dicts for problems.
if data_type == 'subsection':
for tooltip_dict in tooltips[index]:
num_students = tooltip_dict['num_students']
subsection = tooltip_dict['subsection_name']
# Append to results offsetting 1 column to the right.
results.append(['', subsection, num_students])
elif data_type == 'problem':
for tooltip in tooltips[index]:
for tooltip_dict in tooltip:
label = tooltip_dict['label']
problem_name = tooltip_dict['problem_name']
count_grade = tooltip_dict['count_grade']
student_count_percent = tooltip_dict['student_count_percent']
percent = tooltip_dict['percent']
# Append to results offsetting 1 column to the right.
results.append(['', label, problem_name, count_grade, student_count_percent, percent])
response = create_csv_response(filename, header, results)
return response
def sanitize_filename(filename):
"""
    Utility function: builds a safe CSV filename by replacing spaces with
    underscores, encoding as UTF-8, and truncating the name to 25 characters.
"""
filename = filename.replace(" ", "_")
filename = filename.encode('utf-8')
filename = filename[0:25] + '.csv'
return filename
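# Illustrative example (not part of the original module): sanitize_filename(u'Unit 3 Overview')
# would return 'Unit_3_Overview.csv' under the rules above.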
| caesar2164/edx-platform | lms/djangoapps/class_dashboard/dashboard_data.py | Python | agpl-3.0 | 31,160 |
import random
def absyourdifference(firstNo, secondNo):
return abs(firstNo - secondNo)
def main():
minimum = raw_input("Type in the minimum number:")
maximum = raw_input("Type in the maximum number:")
output = """I'm thinking of a number between {} and {} """.format(minimum, maximum)
print output
number = random.randint(int(minimum), int(maximum))
your_number = raw_input("Guess the number:")
output = """The number was {}.
Your guess was {}.
""".format(number,your_number)
print output
if int(your_number) == int(number):
print "Congratulations, You have guessed the number correctly"
elif int(your_number) > int(number):
print "Your guess was over by" ,str(absyourdifference(int(your_number), int(number)))
elif int(your_number) < int(number) :
print "Your guess was under by" ,str(absyourdifference(int(your_number), int(number)))
main()
#Sample run:
#Type in the minimum number: 5
#Type in the maximum number: 10
#I'm thinking of a number between 5 and 10
#Guess the number: 7
#The number was 9.
#Your guess was 7.
#Your guess was under by 2
| bruno1951/bruno1951-cmis-cs2 | oneguess.py | Python | cc0-1.0 | 1,082 |
#!/usr/bin/env python
from __future__ import print_function
from distutils.core import setup, Command
class TestCommand(Command):
description = "PYorick test/check command"
user_options = []
def get_command_name(self):
return "test"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import pyorick.test_pyorick as testmod
import unittest
for c in [testmod.TestProcess, testmod.TestCodec]:
print("Testing", str(c))
suite = unittest.TestLoader().loadTestsFromTestCase(c)
unittest.TextTestRunner(verbosity=2).run(suite)
    except Exception as e:
      raise NameError("setup.py test: error in test\nException: {0}".format(e))
return
# This package requires the yorick startup file pyorick.i0 to be
# installed as an ordinary file in the same directory as pyorick.py.
# Even if you have no way to install python packages, you can
# make pyorick.py work by creating a directory, copying pyorick.py
# and pyorick.i0 to that directory, and adding the directory to
# your PYTHONPATH environment variable. You can optionally copy
# test_pyorick.py to the same directory, cd there, and run nosetests
# or py.test or python -m unittest -v test_pyorick to test pyorick.
setup(name='pyorick',
version='1.4',
description='python connection to yorick',
long_description=open('README.rst').read(),
author='David Munro and John Field',
author_email='dhmunro@users.sourceforge.net',
url='https://github.com/dhmunro/pyorick',
packages=['pyorick'],
package_data={'pyorick': ['pyorick.i0']},
requires=['numpy'],
license='http://opensource.org/licenses/BSD-2-Clause',
platforms=['Linux', 'MacOS X', 'Unix'],
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Interpreters',
],
cmdclass = {'test': TestCommand},
)
| dhmunro/pyorick | setup.py | Python | bsd-2-clause | 2,231 |
"""
sentry.constants
~~~~~~~~~~~~~~~~
These settings act as the default (base) settings for the Sentry-provided
web-server
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import os.path
from collections import OrderedDict
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
def get_all_languages():
results = []
for path in os.listdir(os.path.join(MODULE_ROOT, 'locale')):
if path.startswith('.'):
continue
if '_' in path:
pre, post = path.split('_', 1)
path = '{}-{}'.format(pre, post.lower())
results.append(path)
return results
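# For example, a locale directory named "zh_CN" is normalized to the language
# code "zh-cn", while a single-segment directory such as "es" is returned as-is.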
MODULE_ROOT = os.path.dirname(__import__('sentry').__file__)
DATA_ROOT = os.path.join(MODULE_ROOT, 'data')
SORT_OPTIONS = OrderedDict((
('priority', _('Priority')),
('date', _('Last Seen')),
('new', _('First Seen')),
('freq', _('Frequency')),
('tottime', _('Total Time Spent')),
('avgtime', _('Average Time Spent')),
))
SEARCH_SORT_OPTIONS = OrderedDict((
('score', _('Score')),
('date', _('Last Seen')),
('new', _('First Seen')),
))
# XXX: Deprecated: use GroupStatus instead
STATUS_UNRESOLVED = 0
STATUS_RESOLVED = 1
STATUS_MUTED = 2
STATUS_CHOICES = {
'resolved': STATUS_RESOLVED,
'unresolved': STATUS_UNRESOLVED,
'muted': STATUS_MUTED,
}
MEMBER_OWNER = 0
MEMBER_ADMIN = 25
MEMBER_USER = 50
MEMBER_SYSTEM = 100
MEMBER_TYPES = (
(MEMBER_OWNER, _('Owner')),
(MEMBER_ADMIN, _('Admin')),
(MEMBER_USER, _('User')),
(MEMBER_SYSTEM, _('System Agent')),
)
# A list of values which represent an unset or empty password on
# a User instance.
EMPTY_PASSWORD_VALUES = ('!', '', '$')
PLATFORM_LIST = (
'csharp',
'connect',
'django',
'express',
'flask',
'go',
'ios',
'java',
'java_log4j',
'java_log4j2',
'java_logback',
'java_logging',
'javascript',
'node.js',
'php',
'pyramid',
'python',
'r',
'ruby',
'rails3',
'rails4',
'sidekiq',
'sinatra',
'tornado',
)
PLATFORM_ROOTS = {
'rails3': 'ruby',
'rails4': 'ruby',
'sinatra': 'ruby',
'sidekiq': 'ruby',
'django': 'python',
'flask': 'python',
'pyramid': 'python',
'tornado': 'python',
'express': 'node.js',
'connect': 'node.js',
'java_log4j': 'java',
'java_log4j2': 'java',
'java_logback': 'java',
'java_logging': 'java',
}
PLATFORM_TITLES = {
'rails3': 'Rails 3 (Ruby)',
'rails4': 'Rails 4 (Ruby)',
'php': 'PHP',
'ios': 'iOS',
'express': 'Express (Node.js)',
'connect': 'Connect (Node.js)',
'django': 'Django (Python)',
'flask': 'Flask (Python)',
'pyramid': 'Pyramid (Python)',
'csharp': 'C#',
'java_log4j': 'Log4j (Java)',
'java_log4j2': 'Log4j 2.x (Java)',
'java_logback': 'Logback (Java)',
'java_logging': 'java.util.logging',
}
# Normalize counts to the 15 minute marker. This value MUST be less than 60. A
# value of 0 would store counts for every minute, and is the lowest level of
# accuracy provided.
MINUTE_NORMALIZATION = 15
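# For example (illustrative), with the 15 minute marker an event at 10:07 would be
# counted in the 10:00 bucket and an event at 10:22 in the 10:15 bucket.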
MAX_TAG_KEY_LENGTH = 32
MAX_TAG_VALUE_LENGTH = 200
MAX_CULPRIT_LENGTH = 200
# Team slugs which may not be used. Generally these are top level URL patterns
# which we don't want to worry about conflicts on.
RESERVED_ORGANIZATION_SLUGS = (
'admin', 'manage', 'login', 'account', 'register', 'api',
'accept', 'organizations', 'teams', 'projects', 'help',
'docs', 'logout', '404', '500', '_static',
)
RESERVED_TEAM_SLUGS = RESERVED_ORGANIZATION_SLUGS
LOG_LEVELS = {
logging.DEBUG: 'debug',
logging.INFO: 'info',
logging.WARNING: 'warning',
logging.ERROR: 'error',
logging.FATAL: 'fatal',
}
DEFAULT_LOG_LEVEL = 'error'
DEFAULT_LOGGER_NAME = ''
# Default alerting threshold values
DEFAULT_ALERT_PROJECT_THRESHOLD = (500, 25) # 500%, 25 events
DEFAULT_ALERT_GROUP_THRESHOLD = (1000, 25) # 1000%, 25 events
# Default paginator value
EVENTS_PER_PAGE = 15
# Default sort option for the group stream
DEFAULT_SORT_OPTION = 'date'
# Setup languages for only available locales
LANGUAGE_MAP = dict(settings.LANGUAGES)
LANGUAGES = [(k, LANGUAGE_MAP[k]) for k in get_all_languages() if k in LANGUAGE_MAP]
# TODO(dcramer): We eventually want to make this user-editable
TAG_LABELS = {
'exc_type': _('Exception Type'),
'sentry:user': _('User'),
'sentry:filename': _('File'),
'sentry:function': _('Function'),
'sentry:release': _('Release'),
'os': _('OS'),
'url': _('URL'),
'server_name': _('Server'),
}
# TODO(dcramer): once this is more flushed out we want this to be extendable
SENTRY_RULES = (
'sentry.rules.actions.notify_event.NotifyEventAction',
'sentry.rules.actions.notify_event_service.NotifyEventServiceAction',
'sentry.rules.conditions.every_event.EveryEventCondition',
'sentry.rules.conditions.first_seen_event.FirstSeenEventCondition',
'sentry.rules.conditions.regression_event.RegressionEventCondition',
'sentry.rules.conditions.tagged_event.TaggedEventCondition',
'sentry.rules.conditions.event_frequency.EventFrequencyCondition',
'sentry.rules.conditions.event_attribute.EventAttributeCondition',
'sentry.rules.conditions.level.LevelCondition',
)
# methods as defined by http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html + PATCH
HTTP_METHODS = ('GET', 'POST', 'PUT', 'OPTIONS', 'HEAD', 'DELETE', 'TRACE', 'CONNECT', 'PATCH')
CLIENT_RESERVED_ATTRS = (
'project',
'event_id',
'message',
'checksum',
'culprit',
'level',
'time_spent',
'logger',
'server_name',
'site',
'timestamp',
'extra',
'modules',
'tags',
'platform',
'release',
)
DEFAULT_SCRUBBED_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
OK_PLUGIN_ENABLED = _("The {name} integration has been enabled.")
OK_PLUGIN_DISABLED = _("The {name} integration has been disabled.")
OK_PLUGIN_SAVED = _('Configuration for the {name} integration has been saved.')
| JTCunning/sentry | src/sentry/constants.py | Python | bsd-3-clause | 6,219 |
# What should we do when the special case of an A-5 ("wheel") straight comes up?
# The function we should modify is card_ranks.
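# For example (illustrative hand), card_ranks(['AS', '2H', '3D', '4C', '5S'])
# should return [5, 4, 3, 2, 1] rather than [14, 5, 4, 3, 2].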
def card_ranks(hand):
"""Return a list of the ranks, sorted with higher first."""
    ranks = ['--23456789TJQKA'.index(r) for r, s in hand]
ranks.sort(reverse=True)
return [5,4,3,2,1] if (ranks == [14,5,4,3,2]) else ranks | napjon/moocs_solution | design-udacity/card_ranks_modified.py | Python | mit | 365 |
"""
Django settings for djmlang project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import os
import sys
PROJECT_DIR = os.path.dirname(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# App/Library Paths
sys.path.append(os.path.join(BASE_DIR, 'apps'))
# Users TO EMAIL Errors to based on LOGGING Settings
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p%af4@vnrnd%os7=i$#3e7$@5nvo*-280en3r*34!ld1psb7+='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# DATABASE Configured by URL
import dj_database_url
DATABASES = {'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))}
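# Example (illustrative) DATABASE_URL value for a local Postgres database:
#   DATABASE_URL=postgres://user:password@localhost:5432/djmlang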
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'modeltranslation',
# Grappelli and Filebrowser Admin - must come before the admin
'grappelli',
'filebrowser',
'django.contrib.admin',
'django_extensions',
'storages', # storages redux
'mptt',
'pages',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djmlang.urls'
WSGI_APPLICATION = 'djmlang.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Upload Media
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'uploads')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'assets'),
)
# Template Settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale')
]
from django.utils.translation import ugettext_lazy as _
LANGUAGES = (
('en', _('English')),
('es', _('Spanish')),
)
# MODEL TRANSLATION STUFF
MODELTRANSLATION_PREPOPULATE_LANGUAGE = 'en'
# SUPPRESS FACTORY BOY LOGGING MESSAGES
import logging
logging.getLogger("factory").setLevel(logging.WARN)
# APP SETTINGS
GRAPPELLI_ADMIN_TITLE = 'Translation Website Admin'
# FILEBROWSER SETTINGS
FILEBROWSER_DEBUG = True
FILEBROWSER_DIRECTORY = ''
FILEBROWSER_NORMALIZE_FILENAME = True
# Allow FileBrowser Extensions
FILEBROWSER_EXTENSIONS = {
'Folder': [''],
'Image': ['.jpg', '.jpeg', '.gif', '.png'],
'Document': ['.pdf', '.txt', '.doc', '.rtf', '.xls'],
'Audio': ['.mp3'],
'Video': ['.mp4']
}
FILEBROWSER_VERSIONS = {
'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop upscale'},
'thumbnail': {'verbose_name': 'Thumbnail (100px) Square', 'width': 100, 'height': '100', 'opts': 'crop'},
'small': {'verbose_name': 'Small (150px Wide)', 'width': 150, 'height': '', 'opts': ''},
'medium': {'verbose_name': 'Medium (300px Wide)', 'width': 300, 'height': '', 'opts': ''},
'big': {'verbose_name': 'Big (500px Wide)', 'width': 500, 'height': '', 'opts': ''},
'large': {'verbose_name': 'Large (700px Wide)', 'width': 700, 'height': '', 'opts': ''},
'x-large': {'verbose_name': 'Extra Large (900px Wide)', 'width': 900, 'height': '', 'opts': ''},
}
# EASY THUMBNAILS
THUMBNAIL_SUBDIR = '_thumbs'
# SWITCH STORAGE TO S3
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# STATIC FILES S3
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# AWS STUFF - Read from env
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
# For Django extensions sync command
AWS_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME
AWS_HEADERS = {
'Expires': 'Thu, 15 Apr 2010 20:00:00 GMT',
'Cache-Control': 'max-age=86400',
}
SYNC_S3_PREFIX = 'uploads'
| waustin/django-multilanguage-testing | djmlang/settings/base.py | Python | apache-2.0 | 7,778 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""linear_testing_utils python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from tensorflow_estimator.python.estimator.canned import linear_testing_utils
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
linear_testing_utils.__all__ = [
s for s in dir(linear_testing_utils) if not s.startswith('__')
]
from tensorflow_estimator.python.estimator.canned.linear_testing_utils import *
| tensorflow/tensorflow | tensorflow/python/estimator/canned/linear_testing_utils.py | Python | apache-2.0 | 1,244 |
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import datetime, timedelta
import time
import six
from cassandra.cqlengine import columns, CQLEngineException, ValidationError, UnicodeMixin
from cassandra.cqlengine import connection
from cassandra.cqlengine.functions import Token, BaseQueryFunction, QueryValue
from cassandra.cqlengine.operators import (InOperator, EqualsOperator, GreaterThanOperator,
GreaterThanOrEqualOperator, LessThanOperator,
LessThanOrEqualOperator, BaseWhereOperator)
# import * ?
from cassandra.cqlengine.statements import (WhereClause, SelectStatement, DeleteStatement,
UpdateStatement, AssignmentClause, InsertStatement,
BaseCQLStatement, MapUpdateClause, MapDeleteClause,
ListUpdateClause, SetUpdateClause, CounterUpdateClause,
TransactionClause)
class QueryException(CQLEngineException):
pass
class IfNotExistsWithCounterColumn(CQLEngineException):
pass
class LWTException(CQLEngineException):
pass
class DoesNotExist(QueryException):
pass
class MultipleObjectsReturned(QueryException):
pass
def check_applied(result):
"""
    Check whether the result contains an '[applied]' column with a false value;
    if it does, the lightweight transaction was not applied to the database and
    an LWTException is raised.
"""
if result and '[applied]' in result[0] and not result[0]['[applied]']:
raise LWTException('')
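# Illustrative example (not part of the original module): a conditional write that
# is rejected typically comes back as a row like {'[applied]': False, ...}; passing
# such a result to check_applied() raises LWTException, otherwise it is a no-op.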
class AbstractQueryableColumn(UnicodeMixin):
"""
exposes cql query operators through pythons
builtin comparator symbols
"""
def _get_column(self):
raise NotImplementedError
def __unicode__(self):
raise NotImplementedError
def _to_database(self, val):
if isinstance(val, QueryValue):
return val
else:
return self._get_column().to_database(val)
def in_(self, item):
"""
Returns an in operator
used where you'd typically want to use python's `in` operator
"""
return WhereClause(six.text_type(self), InOperator(), item)
def __eq__(self, other):
return WhereClause(six.text_type(self), EqualsOperator(), self._to_database(other))
def __gt__(self, other):
return WhereClause(six.text_type(self), GreaterThanOperator(), self._to_database(other))
def __ge__(self, other):
return WhereClause(six.text_type(self), GreaterThanOrEqualOperator(), self._to_database(other))
def __lt__(self, other):
return WhereClause(six.text_type(self), LessThanOperator(), self._to_database(other))
def __le__(self, other):
return WhereClause(six.text_type(self), LessThanOrEqualOperator(), self._to_database(other))
class BatchType(object):
Unlogged = 'UNLOGGED'
Counter = 'COUNTER'
class BatchQuery(object):
"""
Handles the batching of queries
http://www.datastax.com/docs/1.2/cql_cli/cql/BATCH
"""
_consistency = None
def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False,
timeout=connection.NOT_SET):
"""
:param batch_type: (optional) One of batch type values available through BatchType enum
:type batch_type: str or None
:param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied
to the batch transaction.
:type timestamp: datetime or timedelta or None
:param consistency: (optional) One of consistency values ("ANY", "ONE", "QUORUM" etc)
:type consistency: The :class:`.ConsistencyLevel` to be used for the batch query, or None.
:param execute_on_exception: (Defaults to False) Indicates that when the BatchQuery instance is used
as a context manager the queries accumulated within the context must be executed despite
encountering an error within the context. By default, any exception raised from within
the context scope will cause the batched queries not to be executed.
:type execute_on_exception: bool
:param timeout: (optional) Timeout for the entire batch (in seconds), if not specified fallback
to default session timeout
:type timeout: float or None
"""
self.queries = []
self.batch_type = batch_type
if timestamp is not None and not isinstance(timestamp, (datetime, timedelta)):
            raise CQLEngineException('timestamp object must be an instance of datetime or timedelta')
self.timestamp = timestamp
self._consistency = consistency
self._execute_on_exception = execute_on_exception
self._timeout = timeout
self._callbacks = []
def add_query(self, query):
if not isinstance(query, BaseCQLStatement):
raise CQLEngineException('only BaseCQLStatements can be added to a batch query')
self.queries.append(query)
def consistency(self, consistency):
self._consistency = consistency
def _execute_callbacks(self):
for callback, args, kwargs in self._callbacks:
callback(*args, **kwargs)
# trying to clear up the ref counts for objects mentioned in the set
del self._callbacks
def add_callback(self, fn, *args, **kwargs):
"""Add a function and arguments to be passed to it to be executed after the batch executes.
A batch can support multiple callbacks.
Note, that if the batch does not execute, the callbacks are not executed.
A callback, thus, is an "on batch success" handler.
:param fn: Callable object
:type fn: callable
:param *args: Positional arguments to be passed to the callback at the time of execution
:param **kwargs: Named arguments to be passed to the callback at the time of execution
"""
if not callable(fn):
raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn)))
self._callbacks.append((fn, args, kwargs))
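    # Illustrative usage (the callback and its argument are hypothetical):
    #     batch.add_callback(notify, 'batch finished')
    # schedules notify('batch finished') to run only after the batch executes successfully.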
def execute(self):
if len(self.queries) == 0:
# Empty batch is a no-op
# except for callbacks
self._execute_callbacks()
return
opener = 'BEGIN ' + (self.batch_type + ' ' if self.batch_type else '') + ' BATCH'
if self.timestamp:
if isinstance(self.timestamp, six.integer_types):
ts = self.timestamp
elif isinstance(self.timestamp, (datetime, timedelta)):
ts = self.timestamp
if isinstance(self.timestamp, timedelta):
ts += datetime.now() # Apply timedelta
ts = int(time.mktime(ts.timetuple()) * 1e+6 + ts.microsecond)
else:
raise ValueError("Batch expects a long, a timedelta, or a datetime")
opener += ' USING TIMESTAMP {0}'.format(ts)
query_list = [opener]
parameters = {}
ctx_counter = 0
for query in self.queries:
query.update_context_id(ctx_counter)
ctx = query.get_context()
ctx_counter += len(ctx)
query_list.append(' ' + str(query))
parameters.update(ctx)
query_list.append('APPLY BATCH;')
tmp = connection.execute('\n'.join(query_list), parameters, self._consistency, self._timeout)
check_applied(tmp)
self.queries = []
self._execute_callbacks()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# don't execute if there was an exception by default
if exc_type is not None and not self._execute_on_exception:
return
self.execute()
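# Illustrative usage sketch (ExampleModel is a hypothetical model class): statements
# queued inside the context manager are sent to Cassandra as a single batch on exit.
#
#     with BatchQuery() as b:
#         ExampleModel.batch(b).create(id=1, value='x')
#         ExampleModel.batch(b).create(id=2, value='y')
#     # both inserts are applied together once the block exits without an exception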
class AbstractQuerySet(object):
def __init__(self, model):
super(AbstractQuerySet, self).__init__()
self.model = model
# Where clause filters
self._where = []
# Transaction clause filters
self._transaction = []
# ordering arguments
self._order = []
self._allow_filtering = False
# CQL has a default limit of 10000, it's defined here
# because explicit is better than implicit
self._limit = 10000
# see the defer and only methods
self._defer_fields = []
self._only_fields = []
self._values_list = False
self._flat_values_list = False
# results cache
self._result_cache = None
self._result_idx = None
self._batch = None
self._ttl = getattr(model, '__default_ttl__', None)
self._consistency = None
self._timestamp = None
self._if_not_exists = False
self._timeout = connection.NOT_SET
@property
def column_family_name(self):
return self.model.column_family_name()
def _execute(self, q):
if self._batch:
return self._batch.add_query(q)
else:
result = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout)
if self._transaction:
check_applied(result)
return result
def __unicode__(self):
return six.text_type(self._select_query())
def __str__(self):
return str(self.__unicode__())
def __call__(self, *args, **kwargs):
return self.filter(*args, **kwargs)
def __deepcopy__(self, memo):
clone = self.__class__(self.model)
for k, v in self.__dict__.items():
if k in ['_con', '_cur', '_result_cache', '_result_idx']: # don't clone these
clone.__dict__[k] = None
elif k == '_batch':
# we need to keep the same batch instance across
# all queryset clones, otherwise the batched queries
# fly off into other batch instances which are never
# executed, thx @dokai
clone.__dict__[k] = self._batch
elif k == '_timeout':
clone.__dict__[k] = self._timeout
else:
clone.__dict__[k] = copy.deepcopy(v, memo)
return clone
def __len__(self):
self._execute_query()
return len(self._result_cache)
# ----query generation / execution----
def _select_fields(self):
""" returns the fields to select """
return []
def _validate_select_where(self):
""" put select query validation here """
def _select_query(self):
"""
Returns a select clause based on the given filter args
"""
if self._where:
self._validate_select_where()
return SelectStatement(
self.column_family_name,
fields=self._select_fields(),
where=self._where,
order_by=self._order,
limit=self._limit,
allow_filtering=self._allow_filtering
)
# ----Reads------
def _execute_query(self):
if self._batch:
raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
if self._result_cache is None:
self._result_cache = list(self._execute(self._select_query()))
self._construct_result = self._get_result_constructor()
def _fill_result_cache_to_idx(self, idx):
self._execute_query()
if self._result_idx is None:
self._result_idx = -1
qty = idx - self._result_idx
if qty < 1:
return
else:
for idx in range(qty):
self._result_idx += 1
self._result_cache[self._result_idx] = self._construct_result(self._result_cache[self._result_idx])
def __iter__(self):
self._execute_query()
for idx in range(len(self._result_cache)):
instance = self._result_cache[idx]
if isinstance(instance, dict):
self._fill_result_cache_to_idx(idx)
yield self._result_cache[idx]
def __getitem__(self, s):
self._execute_query()
num_results = len(self._result_cache)
if isinstance(s, slice):
# calculate the amount of results that need to be loaded
end = num_results if s.step is None else s.step
if end < 0:
end += num_results
else:
end -= 1
self._fill_result_cache_to_idx(end)
return self._result_cache[s.start:s.stop:s.step]
else:
# return the object at this index
s = int(s)
# handle negative indexing
if s < 0:
s += num_results
if s >= num_results:
raise IndexError
else:
self._fill_result_cache_to_idx(s)
return self._result_cache[s]
def _get_result_constructor(self):
"""
Returns a function that will be used to instantiate query results
"""
raise NotImplementedError
def batch(self, batch_obj):
"""
Set a batch object to run the query on.
Note: running a select query with a batch object will raise an exception
"""
if batch_obj is not None and not isinstance(batch_obj, BatchQuery):
raise CQLEngineException('batch_obj must be a BatchQuery instance or None')
clone = copy.deepcopy(self)
clone._batch = batch_obj
return clone
def first(self):
try:
return six.next(iter(self))
except StopIteration:
return None
def all(self):
"""
Returns a queryset matching all rows
.. code-block:: python
for user in User.objects().all():
print(user)
"""
return copy.deepcopy(self)
def consistency(self, consistency):
"""
Sets the consistency level for the operation. See :class:`.ConsistencyLevel`.
.. code-block:: python
for user in User.objects(id=3).consistency(CL.ONE):
print(user)
"""
clone = copy.deepcopy(self)
clone._consistency = consistency
return clone
def _parse_filter_arg(self, arg):
"""
Parses a filter arg in the format:
<colname>__<op>
:returns: colname, op tuple
"""
statement = arg.rsplit('__', 1)
if len(statement) == 1:
return arg, None
elif len(statement) == 2:
return statement[0], statement[1]
else:
raise QueryException("Can't parse '{0}'".format(arg))
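    # Illustrative examples: 'created_at__gte' parses to ('created_at', 'gte'),
    # while a bare 'created_at' parses to ('created_at', None).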
def iff(self, *args, **kwargs):
"""Adds IF statements to queryset"""
if len([x for x in kwargs.values() if x is None]):
raise CQLEngineException("None values on iff are not allowed")
clone = copy.deepcopy(self)
for operator in args:
if not isinstance(operator, TransactionClause):
raise QueryException('{0} is not a valid query operator'.format(operator))
clone._transaction.append(operator)
for col_name, val in kwargs.items():
exists = False
try:
column = self.model._get_column(col_name)
except KeyError:
if col_name == 'pk__token':
if not isinstance(val, Token):
raise QueryException("Virtual column 'pk__token' may only be compared to Token() values")
column = columns._PartitionKeysToken(self.model)
else:
raise QueryException("Can't resolve column name: '{0}'".format(col_name))
if isinstance(val, Token):
if col_name != 'pk__token':
raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
partition_columns = column.partition_columns
if len(partition_columns) != len(val.value):
raise QueryException(
'Token() received {0} arguments but model has {1} partition keys'.format(
len(val.value), len(partition_columns)))
val.set_columns(partition_columns)
if isinstance(val, BaseQueryFunction) or exists is True:
query_val = val
else:
query_val = column.to_database(val)
clone._transaction.append(TransactionClause(col_name, query_val))
return clone
def filter(self, *args, **kwargs):
"""
Adds WHERE arguments to the queryset, returning a new queryset
See :ref:`retrieving-objects-with-filters`
Returns a QuerySet filtered on the keyword arguments
"""
# add arguments to the where clause filters
if len([x for x in kwargs.values() if x is None]):
raise CQLEngineException("None values on filter are not allowed")
clone = copy.deepcopy(self)
for operator in args:
if not isinstance(operator, WhereClause):
raise QueryException('{0} is not a valid query operator'.format(operator))
clone._where.append(operator)
for arg, val in kwargs.items():
col_name, col_op = self._parse_filter_arg(arg)
quote_field = True
# resolve column and operator
try:
column = self.model._get_column(col_name)
except KeyError:
if col_name == 'pk__token':
if not isinstance(val, Token):
raise QueryException("Virtual column 'pk__token' may only be compared to Token() values")
column = columns._PartitionKeysToken(self.model)
quote_field = False
else:
raise QueryException("Can't resolve column name: '{0}'".format(col_name))
if isinstance(val, Token):
if col_name != 'pk__token':
raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
partition_columns = column.partition_columns
if len(partition_columns) != len(val.value):
raise QueryException(
'Token() received {0} arguments but model has {1} partition keys'.format(
len(val.value), len(partition_columns)))
val.set_columns(partition_columns)
# get query operator, or use equals if not supplied
operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')
operator = operator_class()
if isinstance(operator, InOperator):
if not isinstance(val, (list, tuple)):
raise QueryException('IN queries must use a list/tuple value')
query_val = [column.to_database(v) for v in val]
elif isinstance(val, BaseQueryFunction):
query_val = val
else:
query_val = column.to_database(val)
clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field))
return clone
def get(self, *args, **kwargs):
"""
Returns a single instance matching this query, optionally with additional filter kwargs.
See :ref:`retrieving-objects-with-filters`
Returns a single object matching the QuerySet.
.. code-block:: python
user = User.get(id=1)
If no objects are matched, a :class:`~.DoesNotExist` exception is raised.
If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.
"""
if args or kwargs:
return self.filter(*args, **kwargs).get()
self._execute_query()
if len(self._result_cache) == 0:
raise self.model.DoesNotExist
elif len(self._result_cache) > 1:
raise self.model.MultipleObjectsReturned('{0} objects found'.format(len(self._result_cache)))
else:
return self[0]
def _get_ordering_condition(self, colname):
order_type = 'DESC' if colname.startswith('-') else 'ASC'
colname = colname.replace('-', '')
return colname, order_type
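    # Illustrative examples: '-comment_id' yields ('comment_id', 'DESC') and
    # 'comment_id' yields ('comment_id', 'ASC').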
def order_by(self, *colnames):
"""
Sets the column(s) to be used for ordering
Default order is ascending, prepend a '-' to any column name for descending
*Note: column names must be a clustering key*
.. code-block:: python
from uuid import uuid1,uuid4
class Comment(Model):
photo_id = UUID(primary_key=True)
comment_id = TimeUUID(primary_key=True, default=uuid1) # second primary key component is a clustering key
comment = Text()
sync_table(Comment)
u = uuid4()
for x in range(5):
Comment.create(photo_id=u, comment="test %d" % x)
print("Normal")
for comment in Comment.objects(photo_id=u):
print comment.comment_id
print("Reversed")
for comment in Comment.objects(photo_id=u).order_by("-comment_id"):
print comment.comment_id
"""
if len(colnames) == 0:
clone = copy.deepcopy(self)
clone._order = []
return clone
conditions = []
for colname in colnames:
conditions.append('"{0}" {1}'.format(*self._get_ordering_condition(colname)))
clone = copy.deepcopy(self)
clone._order.extend(conditions)
return clone
def count(self):
"""
Returns the number of rows matched by this query
"""
if self._batch:
raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
if self._result_cache is None:
query = self._select_query()
query.count = True
result = self._execute(query)
return result[0]['count']
else:
return len(self._result_cache)
def limit(self, v):
"""
Limits the number of results returned by Cassandra.
*Note that CQL's default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000*
.. code-block:: python
for user in User.objects().limit(100):
print(user)
"""
if not (v is None or isinstance(v, six.integer_types)):
raise TypeError
if v == self._limit:
return self
if v < 0:
raise QueryException("Negative limit is not allowed")
clone = copy.deepcopy(self)
clone._limit = v
return clone
def allow_filtering(self):
"""
        Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key
"""
clone = copy.deepcopy(self)
clone._allow_filtering = True
return clone
def _only_or_defer(self, action, fields):
clone = copy.deepcopy(self)
if clone._defer_fields or clone._only_fields:
            raise QueryException("QuerySet already has only or defer fields defined")
# check for strange fields
missing_fields = [f for f in fields if f not in self.model._columns.keys()]
if missing_fields:
raise QueryException(
"Can't resolve fields {0} in {1}".format(
', '.join(missing_fields), self.model.__name__))
if action == 'defer':
clone._defer_fields = fields
elif action == 'only':
clone._only_fields = fields
else:
raise ValueError
return clone
def only(self, fields):
""" Load only these fields for the returned query """
return self._only_or_defer('only', fields)
def defer(self, fields):
""" Don't load these fields for the returned query """
return self._only_or_defer('defer', fields)
def create(self, **kwargs):
return self.model(**kwargs).batch(self._batch).ttl(self._ttl).\
consistency(self._consistency).if_not_exists(self._if_not_exists).\
timestamp(self._timestamp).save()
def delete(self):
"""
Deletes the contents of a query
"""
# validate where clause
partition_key = [x for x in self.model._primary_keys.values()][0]
if not any([c.field == partition_key.column_name for c in self._where]):
raise QueryException("The partition key must be defined on delete queries")
dq = DeleteStatement(
self.column_family_name,
where=self._where,
timestamp=self._timestamp
)
self._execute(dq)
def __eq__(self, q):
if len(self._where) == len(q._where):
return all([w in q._where for w in self._where])
return False
def __ne__(self, q):
        return not (self == q)
def timeout(self, timeout):
"""
:param timeout: Timeout for the query (in seconds)
:type timeout: float or None
"""
clone = copy.deepcopy(self)
clone._timeout = timeout
return clone
class ResultObject(dict):
"""
adds attribute access to a dictionary
"""
def __getattr__(self, item):
try:
return self[item]
except KeyError:
raise AttributeError
class SimpleQuerySet(AbstractQuerySet):
    """
    A queryset that returns plain ResultObject dictionaries instead of model instances.
    """
def _get_result_constructor(self):
"""
Returns a function that will be used to instantiate query results
"""
def _construct_instance(values):
return ResultObject(values)
return _construct_instance
class ModelQuerySet(AbstractQuerySet):
    """
    A queryset that constructs model instances from query results.
    """
def _validate_select_where(self):
""" Checks that a filterset will not create invalid select statement """
# check that there's either a = or IN relationship with a primary key or indexed field
equal_ops = [self.model._columns.get(w.field) for w in self._where if isinstance(w.operator, EqualsOperator)]
token_comparison = any([w for w in self._where if isinstance(w.value, Token)])
if not any([w.primary_key or w.index for w in equal_ops]) and not token_comparison and not self._allow_filtering:
raise QueryException('Where clauses require either a "=" or "IN" comparison with either a primary key or indexed field')
if not self._allow_filtering:
# if the query is not on an indexed field
if not any([w.index for w in equal_ops]):
if not any([w.partition_key for w in equal_ops]) and not token_comparison:
                    raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the queryset')
def _select_fields(self):
if self._defer_fields or self._only_fields:
fields = self.model._columns.keys()
if self._defer_fields:
fields = [f for f in fields if f not in self._defer_fields]
elif self._only_fields:
fields = self._only_fields
return [self.model._columns[f].db_field_name for f in fields]
return super(ModelQuerySet, self)._select_fields()
def _get_result_constructor(self):
""" Returns a function that will be used to instantiate query results """
if not self._values_list: # we want models
return lambda rows: self.model._construct_instance(rows)
elif self._flat_values_list: # the user has requested flattened list (1 value per row)
return lambda row: row.popitem()[1]
else:
return lambda row: self._get_row_value_list(self._only_fields, row)
def _get_row_value_list(self, fields, row):
result = []
for x in fields:
result.append(row[x])
return result
def _get_ordering_condition(self, colname):
colname, order_type = super(ModelQuerySet, self)._get_ordering_condition(colname)
column = self.model._columns.get(colname)
if column is None:
raise QueryException("Can't resolve the column name: '{0}'".format(colname))
# validate the column selection
if not column.primary_key:
raise QueryException(
"Can't order on '{0}', can only order on (clustered) primary keys".format(colname))
pks = [v for k, v in self.model._columns.items() if v.primary_key]
if column == pks[0]:
raise QueryException(
"Can't order by the first primary key (partition key), clustering (secondary) keys only")
return column.db_field_name, order_type
    def values_list(self, *fields, **kwargs):
        """ Instructs the query set to return tuples, not model instances """
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self.only(fields)
clone._values_list = True
clone._flat_values_list = flat
return clone
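    # Illustrative usage (SomeModel is hypothetical): SomeModel.objects(id=1).values_list('name', flat=True)
    # yields the bare 'name' value for each matching row instead of a model instance.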
def ttl(self, ttl):
"""
Sets the ttl (in seconds) for modified data.
*Note that running a select query with a ttl value will raise an exception*
"""
clone = copy.deepcopy(self)
clone._ttl = ttl
return clone
def timestamp(self, timestamp):
"""
Allows for custom timestamps to be saved with the record.
"""
clone = copy.deepcopy(self)
clone._timestamp = timestamp
return clone
def if_not_exists(self):
if self.model._has_counter:
            raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns')
clone = copy.deepcopy(self)
clone._if_not_exists = True
return clone
def update(self, **values):
"""
Performs an update on the row selected by the queryset. Include values to update in the
update like so:
.. code-block:: python
Model.objects(key=n).update(value='x')
Passing in updates for columns which are not part of the model will raise a ValidationError.
Per column validation will be performed, but instance level validation will not
(i.e., `Model.validate` is not called). This is sometimes referred to as a blind update.
For example:
.. code-block:: python
class User(Model):
id = Integer(primary_key=True)
name = Text()
setup(["localhost"], "test")
sync_table(User)
u = User.create(id=1, name="jon")
User.objects(id=1).update(name="Steve")
# sets name to null
User.objects(id=1).update(name=None)
Also supported is blindly adding and removing elements from container columns,
without loading a model instance from Cassandra.
        Using the syntax `.update(column_name={x, y, z})` will overwrite the contents of the container, like updating a
        non-container column. However, adding `__<operation>` to the end of the keyword argument makes the update call add
        or remove items from the collection, without overwriting the entire column.
Given the model below, here are the operations that can be performed on the different container columns:
.. code-block:: python
class Row(Model):
row_id = columns.Integer(primary_key=True)
set_column = columns.Set(Integer)
list_column = columns.List(Integer)
map_column = columns.Map(Integer, Integer)
:class:`~cqlengine.columns.Set`
        - `add`: adds the elements of the given set to the column
        - `remove`: removes the elements of the given set from the column
.. code-block:: python
# add elements to a set
Row.objects(row_id=5).update(set_column__add={6})
            # remove elements from a set
Row.objects(row_id=5).update(set_column__remove={4})
:class:`~cqlengine.columns.List`
- `append`: appends the elements of the given list to the end of the column
- `prepend`: prepends the elements of the given list to the beginning of the column
.. code-block:: python
# append items to a list
Row.objects(row_id=5).update(list_column__append=[6, 7])
# prepend items to a list
Row.objects(row_id=5).update(list_column__prepend=[1, 2])
:class:`~cqlengine.columns.Map`
- `update`: adds the given keys/values to the columns, creating new entries if they didn't exist, and overwriting old ones if they did
.. code-block:: python
# add items to a map
Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4})
"""
if not values:
return
nulled_columns = set()
us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl,
timestamp=self._timestamp, transactions=self._transaction)
for name, val in values.items():
col_name, col_op = self._parse_filter_arg(name)
col = self.model._columns.get(col_name)
            # check for nonexistent columns
if col is None:
raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.model.__name__, col_name))
# check for primary key update attempts
if col.is_primary_key:
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(col_name, self.__module__, self.model.__name__))
# we should not provide default values in this use case.
val = col.validate(val)
if val is None:
nulled_columns.add(col_name)
continue
# add the update statements
if isinstance(col, columns.Counter):
# TODO: implement counter updates
raise NotImplementedError
elif isinstance(col, (columns.List, columns.Set, columns.Map)):
if isinstance(col, columns.List):
klass = ListUpdateClause
elif isinstance(col, columns.Set):
klass = SetUpdateClause
elif isinstance(col, columns.Map):
klass = MapUpdateClause
else:
raise RuntimeError
us.add_assignment_clause(klass(col_name, col.to_database(val), operation=col_op))
else:
us.add_assignment_clause(AssignmentClause(
col_name, col.to_database(val)))
if us.assignments:
self._execute(us)
if nulled_columns:
ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where)
self._execute(ds)
class DMLQuery(object):
"""
    A query object used for queries performing inserts, updates, or deletes.
    This is usually instantiated by the model instance to be modified.
    Unlike the read query object, this is mutable.
"""
_ttl = None
_consistency = None
_timestamp = None
_if_not_exists = False
def __init__(self, model, instance=None, batch=None, ttl=None, consistency=None, timestamp=None,
if_not_exists=False, transaction=None, timeout=connection.NOT_SET):
self.model = model
self.column_family_name = self.model.column_family_name()
self.instance = instance
self._batch = batch
self._ttl = ttl
self._consistency = consistency
self._timestamp = timestamp
self._if_not_exists = if_not_exists
self._transaction = transaction
self._timeout = timeout
def _execute(self, q):
if self._batch:
return self._batch.add_query(q)
else:
tmp = connection.execute(q, consistency_level=self._consistency, timeout=self._timeout)
if self._if_not_exists or self._transaction:
check_applied(tmp)
return tmp
def batch(self, batch_obj):
if batch_obj is not None and not isinstance(batch_obj, BatchQuery):
raise CQLEngineException('batch_obj must be a BatchQuery instance or None')
self._batch = batch_obj
return self
def _delete_null_columns(self):
"""
executes a delete query to remove columns that have changed to null
"""
ds = DeleteStatement(self.column_family_name)
deleted_fields = False
for _, v in self.instance._values.items():
col = v.column
if v.deleted:
ds.add_field(col.db_field_name)
deleted_fields = True
elif isinstance(col, columns.Map):
uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)
if uc.get_context_size() > 0:
ds.add_field(uc)
deleted_fields = True
if deleted_fields:
for name, col in self.model._primary_keys.items():
ds.add_where_clause(WhereClause(
col.db_field_name,
EqualsOperator(),
col.to_database(getattr(self.instance, name))
))
self._execute(ds)
def update(self):
"""
updates a row.
This is a blind update call.
All validation and cleaning needs to happen
prior to calling this.
"""
if self.instance is None:
            raise CQLEngineException("DML Query instance attribute is None")
assert type(self.instance) == self.model
null_clustering_key = False if len(self.instance._clustering_keys) == 0 else True
static_changed_only = True
statement = UpdateStatement(self.column_family_name, ttl=self._ttl,
timestamp=self._timestamp, transactions=self._transaction)
for name, col in self.instance._clustering_keys.items():
null_clustering_key = null_clustering_key and col._val_is_null(getattr(self.instance, name, None))
# get defined fields and their column names
for name, col in self.model._columns.items():
# if clustering key is null, don't include non static columns
if null_clustering_key and not col.static and not col.partition_key:
continue
if not col.is_primary_key:
val = getattr(self.instance, name, None)
val_mgr = self.instance._values[name]
# don't update something that is null
if val is None:
continue
# don't update something if it hasn't changed
if not val_mgr.changed and not isinstance(col, columns.Counter):
continue
static_changed_only = static_changed_only and col.static
if isinstance(col, (columns.BaseContainerColumn, columns.Counter)):
# get appropriate clause
if isinstance(col, columns.List):
klass = ListUpdateClause
elif isinstance(col, columns.Map):
klass = MapUpdateClause
elif isinstance(col, columns.Set):
klass = SetUpdateClause
elif isinstance(col, columns.Counter):
klass = CounterUpdateClause
else:
raise RuntimeError
# do the stuff
clause = klass(col.db_field_name, val,
previous=val_mgr.previous_value, column=col)
if clause.get_context_size() > 0:
statement.add_assignment_clause(clause)
else:
statement.add_assignment_clause(AssignmentClause(
col.db_field_name,
col.to_database(val)
))
if statement.get_context_size() > 0 or self.instance._has_counter:
for name, col in self.model._primary_keys.items():
# only include clustering key if clustering key is not null, and non static columns are changed to avoid cql error
if (null_clustering_key or static_changed_only) and (not col.partition_key):
continue
statement.add_where_clause(WhereClause(
col.db_field_name,
EqualsOperator(),
col.to_database(getattr(self.instance, name))
))
self._execute(statement)
if not null_clustering_key:
self._delete_null_columns()
def save(self):
"""
Creates / updates a row.
This is a blind insert call.
All validation and cleaning needs to happen
prior to calling this.
"""
if self.instance is None:
            raise CQLEngineException("DML Query instance attribute is None")
assert type(self.instance) == self.model
nulled_fields = set()
if self.instance._has_counter or self.instance._can_update():
return self.update()
else:
insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists)
static_save_only = False if len(self.instance._clustering_keys) == 0 else True
for name, col in self.instance._clustering_keys.items():
static_save_only = static_save_only and col._val_is_null(getattr(self.instance, name, None))
for name, col in self.instance._columns.items():
if static_save_only and not col.static and not col.partition_key:
continue
val = getattr(self.instance, name, None)
if col._val_is_null(val):
if self.instance._values[name].changed:
nulled_fields.add(col.db_field_name)
continue
insert.add_assignment_clause(AssignmentClause(
col.db_field_name,
col.to_database(getattr(self.instance, name, None))
))
# skip query execution if it's empty
# caused by pointless update queries
if not insert.is_empty:
self._execute(insert)
# delete any nulled columns
if not static_save_only:
self._delete_null_columns()
def delete(self):
""" Deletes one instance """
if self.instance is None:
raise CQLEngineException("DML Query instance attribute is None")
ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp)
for name, col in self.model._primary_keys.items():
if (not col.partition_key) and (getattr(self.instance, name) is None):
continue
ds.add_where_clause(WhereClause(
col.db_field_name,
EqualsOperator(),
col.to_database(getattr(self.instance, name))
))
self._execute(ds)
| bbirand/python-driver | cassandra/cqlengine/query.py | Python | apache-2.0 | 44,467 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
from .logger import logger, ctx
def find_exec(executable):
exec_exists = os.path.exists(executable)
return executable if exec_exists else shutil.which(executable)
# Decorator running a command and returning stdout
class capture_stdout:
def __init__(self, strip=False):
self.strip = strip
def __call__(self, f):
def strip_it(x):
return x.strip() if self.strip else x
def wrapper(*argv, **kwargs):
# Ensure stdout is captured
kwargs["stdout"] = subprocess.PIPE
return strip_it(f(*argv, **kwargs).stdout)
return wrapper
class Command:
""" A runnable command.
    Classes inheriting from Command must provide the bin
    property/attribute.
"""
def run(self, *argv, **kwargs):
assert(hasattr(self, "bin"))
invocation = [find_exec(self.bin)]
invocation.extend(argv)
for key in ["stdout", "stderr"]:
# Preserve caller intention, otherwise silence
if key not in kwargs and ctx.quiet:
kwargs[key] = subprocess.PIPE
# Prefer safe by default
if "check" not in kwargs:
kwargs["check"] = True
logger.debug(f"Executing `{invocation}`")
return subprocess.run(invocation, **kwargs)
def __call__(self, *argv, **kwargs):
self.run(*argv, **kwargs)
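# Illustrative sketch only (not part of the original module): a hypothetical
# subclass showing the expected usage; the "echo" binary name is an assumption.
#
#     class Echo(Command):
#         def __init__(self):
#             self.bin = "echo"
#
#     Echo()("hello")  # runs `echo hello` through subprocess.run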
| majetideepak/arrow | dev/archery/archery/utils/command.py | Python | apache-2.0 | 2,217 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
import json
import os
import pytest
import spack.cmd as cmd
import spack.cmd.find
import spack.environment as ev
import spack.user_environment as uenv
from spack.main import SpackCommand
from spack.spec import Spec
from spack.util.pattern import Bunch
find = SpackCommand('find')
env = SpackCommand('env')
install = SpackCommand('install')
base32_alphabet = 'abcdefghijklmnopqrstuvwxyz234567'
@pytest.fixture(scope='module')
def parser():
"""Returns the parser for the module command"""
prs = argparse.ArgumentParser()
spack.cmd.find.setup_parser(prs)
return prs
@pytest.fixture()
def specs():
s = []
return s
@pytest.fixture()
def mock_display(monkeypatch, specs):
"""Monkeypatches the display function to return its first argument"""
def display(x, *args, **kwargs):
specs.extend(x)
monkeypatch.setattr(spack.cmd, 'display_specs', display)
def test_query_arguments():
query_arguments = spack.cmd.find.query_arguments
# Default arguments
args = Bunch(
only_missing=False,
missing=False,
only_deprecated=False,
deprecated=False,
unknown=False,
explicit=False,
implicit=False,
start_date="2018-02-23",
end_date=None
)
q_args = query_arguments(args)
assert 'installed' in q_args
assert 'known' in q_args
assert 'explicit' in q_args
assert q_args['installed'] == ['installed']
assert q_args['known'] is any
assert q_args['explicit'] is any
assert 'start_date' in q_args
assert 'end_date' not in q_args
# Check that explicit works correctly
args.explicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is True
args.explicit = False
args.implicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is False
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag1(parser, specs):
args = parser.parse_args(['--tag', 'tag1'])
spack.cmd.find.find(parser, args)
assert len(specs) == 2
assert 'mpich' in [x.name for x in specs]
assert 'mpich2' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2(parser, specs):
args = parser.parse_args(['--tag', 'tag2'])
spack.cmd.find.find(parser, args)
assert len(specs) == 1
assert 'mpich' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2_tag3(parser, specs):
args = parser.parse_args(['--tag', 'tag2', '--tag', 'tag3'])
spack.cmd.find.find(parser, args)
assert len(specs) == 0
@pytest.mark.db
def test_namespaces_shown_correctly(database):
out = find()
assert 'builtin.mock.zmpi' not in out
out = find('--namespace')
assert 'builtin.mock.zmpi' in out
def _check_json_output(spec_list):
assert len(spec_list) == 3
assert all(spec["name"] == "mpileaks" for spec in spec_list)
assert all(spec["hash"] for spec in spec_list)
deps = [spec["dependencies"] for spec in spec_list]
assert sum(["zmpi" in [node["name"] for d in deps for node in d]]) == 1
assert sum(["mpich" in [node["name"] for d in deps for node in d]]) == 1
assert sum(["mpich2" in [node["name"] for d in deps for node in d]]) == 1
def _check_json_output_deps(spec_list):
assert len(spec_list) == 13
names = [spec["name"] for spec in spec_list]
assert names.count("mpileaks") == 3
assert names.count("callpath") == 3
assert names.count("zmpi") == 1
assert names.count("mpich") == 1
assert names.count("mpich2") == 1
assert names.count("fake") == 1
assert names.count("dyninst") == 1
assert names.count("libdwarf") == 1
assert names.count("libelf") == 1
@pytest.mark.db
def test_find_json(database):
output = find('--json', 'mpileaks')
spec_list = json.loads(output)
_check_json_output(spec_list)
@pytest.mark.db
def test_find_json_deps(database):
output = find('-d', '--json', 'mpileaks')
spec_list = json.loads(output)
_check_json_output_deps(spec_list)
@pytest.mark.db
def test_display_json(database, capsys):
specs = [Spec(s).concretized() for s in [
"mpileaks ^zmpi",
"mpileaks ^mpich",
"mpileaks ^mpich2",
]]
cmd.display_specs_as_json(specs)
spec_list = json.loads(capsys.readouterr()[0])
_check_json_output(spec_list)
cmd.display_specs_as_json(specs + specs + specs)
spec_list = json.loads(capsys.readouterr()[0])
_check_json_output(spec_list)
@pytest.mark.db
def test_display_json_deps(database, capsys):
specs = [Spec(s).concretized() for s in [
"mpileaks ^zmpi",
"mpileaks ^mpich",
"mpileaks ^mpich2",
]]
cmd.display_specs_as_json(specs, deps=True)
spec_list = json.loads(capsys.readouterr()[0])
_check_json_output_deps(spec_list)
cmd.display_specs_as_json(specs + specs + specs, deps=True)
spec_list = json.loads(capsys.readouterr()[0])
_check_json_output_deps(spec_list)
@pytest.mark.db
def test_find_format(database, config):
output = find('--format', '{name}-{^mpi.name}', 'mpileaks')
assert set(output.strip().split('\n')) == set([
"mpileaks-zmpi",
"mpileaks-mpich",
"mpileaks-mpich2",
])
output = find('--format', '{name}-{version}-{compiler.name}-{^mpi.name}',
'mpileaks')
assert "installed package" not in output
assert set(output.strip().split('\n')) == set([
"mpileaks-2.3-gcc-zmpi",
"mpileaks-2.3-gcc-mpich",
"mpileaks-2.3-gcc-mpich2",
])
output = find('--format', '{name}-{^mpi.name}-{hash:7}',
'mpileaks')
elements = output.strip().split('\n')
assert set(e[:-7] for e in elements) == set([
"mpileaks-zmpi-",
"mpileaks-mpich-",
"mpileaks-mpich2-",
])
# hashes are in base32
for e in elements:
for c in e[-7:]:
assert c in base32_alphabet
@pytest.mark.db
def test_find_format_deps(database, config):
output = find('-d', '--format', '{name}-{version}', 'mpileaks', '^zmpi')
assert output == """\
mpileaks-2.3
callpath-1.0
dyninst-8.2
libdwarf-20130729
libelf-0.8.13
zmpi-1.0
fake-1.0
"""
@pytest.mark.db
def test_find_format_deps_paths(database, config):
output = find('-dp', '--format', '{name}-{version}', 'mpileaks', '^zmpi')
spec = Spec("mpileaks ^zmpi").concretized()
prefixes = [s.prefix for s in spec.traverse()]
assert output == """\
mpileaks-2.3 {0}
callpath-1.0 {1}
dyninst-8.2 {2}
libdwarf-20130729 {3}
libelf-0.8.13 {4}
zmpi-1.0 {5}
fake-1.0 {6}
""".format(*prefixes)
@pytest.mark.db
def test_find_very_long(database, config):
output = find('-L', '--no-groups', "mpileaks")
specs = [Spec(s).concretized() for s in [
"mpileaks ^zmpi",
"mpileaks ^mpich",
"mpileaks ^mpich2",
]]
assert set(output.strip().split("\n")) == set([
("%s mpileaks@2.3" % s.dag_hash()) for s in specs
])
@pytest.mark.db
def test_find_show_compiler(database, config):
output = find('--no-groups', '--show-full-compiler', "mpileaks")
assert "mpileaks@2.3%gcc@4.5.0" in output
@pytest.mark.db
def test_find_not_found(database, config, capsys):
with capsys.disabled():
output = find("foobarbaz", fail_on_error=False)
assert "No package matches the query: foobarbaz" in output
assert find.returncode == 1
@pytest.mark.db
def test_find_no_sections(database, config):
output = find()
assert "-----------" in output
output = find("--no-groups")
assert "-----------" not in output
assert "==>" not in output
@pytest.mark.db
def test_find_command_basic_usage(database):
output = find()
assert 'mpileaks' in output
@pytest.mark.regression('9875')
def test_find_prefix_in_env(mutable_mock_env_path, install_mockery, mock_fetch,
mock_packages, mock_archive, config):
"""Test `find` formats requiring concrete specs work in environments."""
env('create', 'test')
with ev.read('test'):
install('mpileaks')
find('-p')
find('-l')
find('-L')
# Would throw error on regression
def test_find_loaded(database, working_env):
output = find('--loaded', '--group')
assert output == ''
os.environ[uenv.spack_loaded_hashes_var] = ':'.join(
[x.dag_hash() for x in spack.store.db.query()])
output = find('--loaded')
expected = find()
assert output == expected
| LLNL/spack | lib/spack/spack/test/cmd/find.py | Python | lgpl-2.1 | 8,978 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AvroDataset"""
import sys
import uuid
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class _AvroIODatasetFunction:
def __init__(self, function, resource, component, shape, dtype):
self._function = function
self._resource = resource
self._component = component
self._shape = tf.TensorShape([None]).concatenate(shape[1:])
self._dtype = dtype
def __call__(self, start, stop):
return self._function(
self._resource,
start=start,
stop=stop,
component=self._component,
shape=self._shape,
dtype=self._dtype,
)
class AvroIODataset(tf.compat.v2.data.Dataset):
"""AvroIODataset"""
def __init__(self, filename, schema, columns=None, internal=True):
"""AvroIODataset."""
if not internal:
raise ValueError(
"AvroIODataset constructor is private; please use one "
"of the factory methods instead (e.g., "
"IODataset.from_avro())"
)
with tf.name_scope("AvroIODataset") as scope:
capacity = 4096
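            # number of records pulled per read; also used below as the step of the
            # index range so the file is consumed in fixed-size chunks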
metadata = ["schema: %s" % schema]
resource, columns_v = core_ops.io_avro_readable_init(
filename,
metadata=metadata,
container=scope,
shared_name=f"{filename}/{uuid.uuid4().hex}",
)
columns = columns if columns is not None else columns_v.numpy()
columns_dataset = []
columns_function = []
for column in columns:
shape, dtype = core_ops.io_avro_readable_spec(resource, column)
shape = tf.TensorShape([None if e < 0 else e for e in shape.numpy()])
dtype = tf.as_dtype(dtype.numpy())
function = _AvroIODatasetFunction(
core_ops.io_avro_readable_read, resource, column, shape, dtype
)
columns_function.append(function)
for (column, function) in zip(columns, columns_function):
column_dataset = tf.compat.v2.data.Dataset.range(
0, sys.maxsize, capacity
)
column_dataset = column_dataset.map(
lambda index: function(index, index + capacity)
)
column_dataset = column_dataset.apply(
tf.data.experimental.take_while(
lambda v: tf.greater(tf.shape(v)[0], 0)
)
)
columns_dataset.append(column_dataset)
if len(columns_dataset) == 1:
dataset = columns_dataset[0]
else:
dataset = tf.compat.v2.data.Dataset.zip(tuple(columns_dataset))
dataset = dataset.unbatch()
self._function = columns_function
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
| tensorflow/io | tensorflow_io/python/ops/avro_dataset_ops.py | Python | apache-2.0 | 3,905 |
import tempfile
import unittest
import PIL.Image
import pillowfight
class TestGaussian(unittest.TestCase):
def test_gaussian(self):
with tempfile.NamedTemporaryFile(suffix='.jpg') as tmpfile:
in_img = PIL.Image.open("tests/data/crappy_background.jpg")
out_img = pillowfight.gaussian(in_img, sigma=20.0, nb_stddev=10)
in_img.close()
# beware of JPG compression
self.assertEqual(out_img.mode, "RGB")
out_img.save(tmpfile.name)
out_img.close()
out_img = PIL.Image.open(tmpfile.name)
expected_img = PIL.Image.open(
"tests/data/crappy_background_gaussian.jpg"
)
self.assertEqual(out_img.tobytes(), expected_img.tobytes())
expected_img.close()
| jflesch/libpillowfight | tests/tests_gaussian.py | Python | gpl-2.0 | 796 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.db import migrations, models
def migrate_data_forwards(apps, schema_editor):
EmailMarketingConfiguration = apps.get_model('email_marketing', 'EmailMarketingConfiguration')
EmailMarketingConfiguration.objects.all().update(
sailthru_welcome_template=models.F('sailthru_activation_template')
)
def migrate_data_backwards(apps, schema_editor):
# Just copying old field's value to new one in forward migration, so nothing needed here.
pass
class Migration(migrations.Migration):
dependencies = [
('email_marketing', '0007_auto_20170809_0653'),
]
operations = [
migrations.RunPython(migrate_data_forwards, migrate_data_backwards)
]
| ESOedX/edx-platform | lms/djangoapps/email_marketing/migrations/0008_auto_20170809_0539.py | Python | agpl-3.0 | 788 |
import json
import logging
from typing import Optional, Tuple
from urllib.error import HTTPError
from urllib.request import urlopen
from yomeka.classic.no_such_omeka_classic_collection_exception import NoSuchOmekaClassicCollectionException
from yomeka.classic.no_such_omeka_classic_item_exception import NoSuchOmekaClassicItemException
from yomeka.classic.omeka_classic_collection import OmekaClassicCollection
from yomeka.classic.omeka_classic_file import OmekaClassicFile
from yomeka.classic.omeka_classic_item import OmekaClassicItem
from yomeka.classic.omeka_classic_json_parser import OmekaClassicJsonParser
class OmekaClassicRestApiClient:
def __init__(self, api_key, endpoint_url):
self.__api_key = api_key
if not endpoint_url.endswith('/'):
endpoint_url = endpoint_url + '/'
self.__endpoint_url = endpoint_url
self.__parser = OmekaClassicJsonParser()
self.__logger = logging.getLogger(self.__class__.__name__)
def get_all_collections(self, **kwds):
return self.__get_all_pages(self.get_collections, **kwds)
def get_all_files(self, **kwds):
return self.__get_all_pages(self.get_files, **kwds)
def get_all_items(self, **kwds):
return self.__get_all_pages(self.get_items, **kwds)
def __get_all_pages(self, get_method, **kwds):
page = 1
per_page = 50
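        # keep fetching pages until a short page signals that no results remain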
while True:
objects = get_method(page=page, per_page=per_page, **kwds)
yield from objects
if len(objects) < per_page:
return
page = page + 1
def get_collection(self, id: int) -> OmekaClassicCollection: # @ReservedAssignment
url = self.__endpoint_url + 'api/collections/%d?key=' % id + self.__api_key
try:
collection_dict = json.loads(self.__get_url(url))
except HTTPError as e:
if e.code == 404:
raise NoSuchOmekaClassicCollectionException
else:
raise
if collection_dict.get('message') == 'Invalid record. Record not found.':
raise NoSuchOmekaClassicCollectionException
return self.__parser.parse_collection_dict(collection_dict)
def get_collections(self, *, page: Optional[int] = None, per_page: Optional[int] = None, **kwds) -> Tuple[OmekaClassicCollection, ...]:
url = self.__endpoint_url + 'api/collections?key=' + self.__api_key
if page is not None:
kwds["page"] = page
if per_page is not None:
kwds["per_page"] = per_page
for key, value in kwds.items():
if value is None:
continue
url = url + "&%(key)s=%(value)s" % locals()
return self.__parser.parse_collection_dicts(json.loads(self.__get_url(url)))
def get_files(self, *, item: Optional[int] = None, page: Optional[int] = None, per_page: Optional[int] = None, **kwds) -> Tuple[OmekaClassicFile, ...]:
url = self.__endpoint_url + 'api/files?key=' + self.__api_key
if item is not None:
kwds["item"] = item
if page is not None:
kwds["page"] = page
if per_page is not None:
kwds["per_page"] = per_page
for key, value in kwds.items():
if value is None:
continue
url = url + "&%(key)s=%(value)s" % locals()
return self.__parser.parse_file_dicts(json.loads(self.__get_url(url)))
def get_item(self, id: int) -> OmekaClassicItem: # @ReservedAssignment
url = self.__endpoint_url + 'api/items/%d?key=' % id + self.__api_key
try:
item_dict = json.loads(self.__get_url(url))
except HTTPError as e:
if e.code == 404:
raise NoSuchOmekaClassicItemException
else:
raise
if item_dict.get('message') == 'Invalid record. Record not found.':
raise NoSuchOmekaClassicItemException
return self.__parser.parse_item_dict(item_dict)
def get_items(self, collection: Optional[int] = None, page: Optional[int] = None, per_page: Optional[int] = None, **kwds) -> Tuple[OmekaClassicItem, ...]:
url = self.__endpoint_url + 'api/items?key=' + self.__api_key
if collection is not None:
kwds["collection"] = collection
if page is not None:
kwds["page"] = page
if per_page is not None:
kwds["per_page"] = per_page
for key, value in kwds.items():
if value is None:
continue
url = url + "&%(key)s=%(value)s" % locals()
return self.__parser.parse_item_dicts(json.loads(self.__get_url(url)))
def __get_url(self, url) -> str:
self.__logger.debug("getting URL %s", url)
url = urlopen(url)
try:
return url.read()
finally:
url.close()
| minorg/yomeka | yomeka/classic/omeka_classic_rest_api_client.py | Python | bsd-2-clause | 4,875 |
# -*- encoding: utf-8 -*-
import datetime, csv
from base import ReportGenerator
from geraldo.utils import get_attr_value, calculate_size
from geraldo.widgets import Widget, Label, SystemField, ObjectValue
from geraldo.graphics import Graphic, RoundRect, Rect, Line, Circle, Arc,\
Ellipse, Image
from geraldo.exceptions import AbortEvent
class CSVGenerator(ReportGenerator):
"""This is a generator to output data in CSV format. This format can be imported as a
spreadsheet to Excel, OpenOffice Calc, Google Docs Spreadsheet, and others.
Attributes:
* 'filename' - is the file path you can inform optionally to save text to.
* 'writer' - is csv.writer function you can inform manually to make it customizable.
This function must expects a first argument to receive a file object and
returns a csv.writer object.
"""
writer = None
writer_function = csv.writer
first_row_with_column_names = True
mimetype = 'text/csv'
def __init__(self, report, cache_enabled=None, writer=None, first_row_with_column_names=None, **kwargs):
super(CSVGenerator, self).__init__(report, **kwargs)
# Cache enabled
if cache_enabled is not None:
self.cache_enabled = cache_enabled
elif self.cache_enabled is None:
self.cache_enabled = bool(self.report.cache_status)
# Sets the writer function
self.writer = writer or self.writer
        # Sets whether to append a first row with column names (ObjectValue name/attribute_name/expression)
if first_row_with_column_names is not None:
self.first_row_with_column_names = first_row_with_column_names
# Additional attributes
for k,v in kwargs.items():
setattr(self, k, v)
def start_writer(self, filename=None):
if self.writer:
return
filename = filename or self.filename
if isinstance(filename, basestring):
filename = file(filename, 'w')
# Default writer uses comma as separator and quotes only when necessary
self.writer = self.writer_function(filename, quoting=csv.QUOTE_MINIMAL)
def execute(self):
super(CSVGenerator, self).execute()
# Calls the before_print event
self.report.do_before_print(generator=self)
# Write the CSV output
self.generate_csv()
# Calls the after_print event
self.report.do_after_print(generator=self)
def get_hash_key(self, objects):
"""Appends pdf extension to the hash_key"""
return super(CSVGenerator, self).get_hash_key(objects) + '.csv'
# METHODS THAT ARE TOTALLY SPECIFIC TO THIS GENERATOR AND MUST
# OVERRIDE THE SUPERCLASS EQUIVALENT ONES
def generate_csv(self):
"""Generates the CSV output"""
self._current_object_index = 0
objects = self.report.get_objects_list()
self.start_writer()
# Make a sorted list of columns
columns = [el for el in self.report.band_detail.elements or (self.report.band_detail.child_bands and self.report.band_detail.child_bands[0].elements or [] ) if isinstance(el, ObjectValue)]
columns.sort(lambda a,b: cmp(a.left, b.left) or cmp(a.width, b.width))
# First row with column names
if self.first_row_with_column_names:
cells = [(col.attribute_name or getattr(col,'name',False) or col.expression) for col in columns]
self.writer.writerow(cells)
while self._current_object_index < len(objects):
# Get current object from list
self._current_object = objects[self._current_object_index]
cells = []
for element in columns:
widget = element.clone()
# Set widget colors
widget.font_color = self.report.default_font_color
# Set widget basic attributes
widget.instance = self._current_object
widget.generator = self
widget.report = self.report
widget.band = self.report.band_detail
widget.page = None
cells.append(widget.text)
# Next object
self._current_object_index += 1
self.writer.writerow(cells)
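# Illustrative usage sketch (the report class and file path below are assumptions,
# not part of this module):
#
#     report = MyGeraldoReport(queryset=MyModel.objects.all())
#     report.generate_by(CSVGenerator, filename='/tmp/report.csv')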
| titasakgm/brc-stock | openerp/addons/report_geraldo/lib/geraldo/geraldo/generators/csvgen.py | Python | agpl-3.0 | 4,331 |
# Copyright (C) 2008-2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from gi.repository import Gtk
from xl import (
covers,
settings,
xdg
)
from xl.nls import gettext as _
from xlgui import icons
from xlgui.preferences import widgets
name = _('Covers')
icon = 'image-x-generic'
ui = xdg.get_data_path('ui', 'preferences', 'cover.ui')
class TagCoverFetching(widgets.CheckPreference):
default = True
name = 'covers/use_tags'
class LocalCoverFetching(widgets.CheckPreference):
default = True
name = 'covers/use_localfile'
class LocalFilePreferredNamesPreference(widgets.Preference, widgets.CheckConditional):
default = ['front', 'cover', 'album']
name = 'covers/localfile/preferred_names'
condition_preference_name = 'covers/use_localfile'
def __init__(self, preferences, widget):
widgets.Preference.__init__(self, preferences, widget)
widgets.CheckConditional.__init__(self)
def _get_value(self):
"""
Converts the string value to a list
"""
return [v.strip() for v in widgets.Preference._get_value(self).split(',')]
def _set_value(self):
"""
Converts the list to a string value
"""
self.widget.set_text(', '.join(settings.get_option(
self.name, self.default)))
class CoverOrderPreference(widgets.OrderListPreference):
"""
This little preference item shows kind of a complicated preference
widget in action. The defaults are dynamic.
"""
name = 'covers/preferred_order'
def __init__(self, preferences, widget):
self.default = covers.MANAGER._get_methods()
widgets.OrderListPreference.__init__(self, preferences, widget)
def _set_value(self):
self.model.clear()
for item in self.default:
self.model.append([item.name])
def apply(self):
if widgets.OrderListPreference.apply(self):
covers.MANAGER.set_preferred_order(
self._get_value())
return True
class AutomaticCoverFetching(widgets.CheckPreference):
default = True
name = 'covers/automatic_fetching'
| virtuald/exaile | xlgui/preferences/cover.py | Python | gpl-2.0 | 3,320 |
import requests
import six.moves.urllib.parse as urlparse
import json
import os
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Client(object):
"""
Gym client to interface with gym_http_server
"""
def __init__(self, remote_base):
self.remote_base = remote_base
self.session = requests.Session()
self.session.headers.update({'Content-type': 'application/json'})
def _parse_server_error_or_raise_for_status(self, resp):
j = {}
try:
j = resp.json()
except:
# Most likely json parse failed because of network error, not server error (server
# sends its errors in json). Don't let parse exception go up, but rather raise default
# error.
resp.raise_for_status()
if resp.status_code != 200 and "message" in j: # descriptive message from server side
raise ServerError(message=j["message"], status_code=resp.status_code)
resp.raise_for_status()
return j
def _post_request(self, route, data):
url = urlparse.urljoin(self.remote_base, route)
logger.info("POST {}\n{}".format(url, json.dumps(data)))
resp = self.session.post(urlparse.urljoin(self.remote_base, route),
data=json.dumps(data))
return self._parse_server_error_or_raise_for_status(resp)
def _get_request(self, route):
url = urlparse.urljoin(self.remote_base, route)
logger.info("GET {}".format(url))
resp = self.session.get(url)
return self._parse_server_error_or_raise_for_status(resp)
def env_create(self, env_id):
route = '/v1/envs/'
data = {'env_id': env_id}
resp = self._post_request(route, data)
instance_id = resp['instance_id']
return instance_id
def env_list_all(self):
route = '/v1/envs/'
resp = self._get_request(route)
all_envs = resp['all_envs']
return all_envs
def env_reset(self, instance_id):
route = '/v1/envs/{}/reset/'.format(instance_id)
resp = self._post_request(route, None)
observation = resp['observation']
return observation
def env_step(self, instance_id, action, render=False):
route = '/v1/envs/{}/step/'.format(instance_id)
data = {'action': action, 'render': render}
resp = self._post_request(route, data)
observation = resp['observation']
reward = resp['reward']
done = resp['done']
info = resp['info']
return [observation, reward, done, info]
def env_action_space_info(self, instance_id):
route = '/v1/envs/{}/action_space/'.format(instance_id)
resp = self._get_request(route)
info = resp['info']
return info
def env_action_space_sample(self, instance_id):
route = '/v1/envs/{}/action_space/sample'.format(instance_id)
resp = self._get_request(route)
action = resp['action']
return action
def env_action_space_contains(self, instance_id, x):
route = '/v1/envs/{}/action_space/contains/{}'.format(instance_id, x)
resp = self._get_request(route)
member = resp['member']
return member
def env_observation_space_info(self, instance_id):
route = '/v1/envs/{}/observation_space/'.format(instance_id)
resp = self._get_request(route)
info = resp['info']
return info
def env_monitor_start(self, instance_id, directory,
force=False, resume=False, video_callable=False):
route = '/v1/envs/{}/monitor/start/'.format(instance_id)
data = {'directory': directory,
'force': force,
'resume': resume,
'video_callable': video_callable}
self._post_request(route, data)
def env_monitor_close(self, instance_id):
route = '/v1/envs/{}/monitor/close/'.format(instance_id)
self._post_request(route, None)
def env_close(self, instance_id):
route = '/v1/envs/{}/close/'.format(instance_id)
self._post_request(route, None)
def upload(self, training_dir, algorithm_id=None, api_key=None):
if not api_key:
api_key = os.environ.get('OPENAI_GYM_API_KEY')
route = '/v1/upload/'
data = {'training_dir': training_dir,
'algorithm_id': algorithm_id,
'api_key': api_key}
self._post_request(route, data)
def shutdown_server(self):
route = '/v1/shutdown/'
self._post_request(route, None)
class ServerError(Exception):
def __init__(self, message, status_code=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
if __name__ == '__main__':
remote_base = 'http://127.0.0.1:5000'
client = Client(remote_base)
# Create environment
env_id = 'CartPole-v0'
instance_id = client.env_create(env_id)
# Check properties
all_envs = client.env_list_all()
action_info = client.env_action_space_info(instance_id)
obs_info = client.env_observation_space_info(instance_id)
# Run a single step
client.env_monitor_start(instance_id, directory='tmp', force=True)
init_obs = client.env_reset(instance_id)
[observation, reward, done, info] = client.env_step(instance_id, 1, True)
client.env_monitor_close(instance_id)
client.upload(training_dir='tmp')
| Lucsanszky/gym-http-api | gym_http_client.py | Python | mit | 5,544 |
#!/usr/bin/env python
# Install the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# To set up environmental variables, see http://twil.io/secure
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
client = Client(ACCOUNT_SID, AUTH_TOKEN)
notification = client.notify.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.notifications.create(
identity='00000001',
body='Hello Bob',
tag='preferred_device')
print(notification.sid)
| TwilioDevEd/api-snippets | notifications/register/send-notification-2/send-notification-2.7.x.py | Python | mit | 550 |
from rest_framework import serializers
from models.place import Category, Place, Like
from models.country import Country
from appls.login.models import BaseUser
from appls.login.serializers import UserSerializer
from appls.points.models.place import Place, Category
class CategorySerializer(serializers.Serializer):
"""
Serializer for category model
"""
pk = serializers.IntegerField(read_only=True)
name = serializers.CharField(required=True, max_length=100)
#places = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
def create(self, validated_data):
return Category.objects.create(**validated_data)
class PlaceSerializer(serializers.ModelSerializer):
"""
Serializer for place model
"""
pk = serializers.IntegerField(read_only=True)
title = serializers.CharField(required=True, max_length=100)
description = serializers.CharField(required=False)
price = serializers.DecimalField(max_digits=5, decimal_places=4)
class Meta:
model = Place
fields = ('pk', 'title', 'description', 'author', 'category', 'price',
'latitude', 'langtitude', 'address')
def create(self, validated_data):
return Place.objects.create(**validated_data)
class LikeSerializer(serializers.Serializer):
"""
LikeSerializer
"""
place = serializers.ReadOnlyField(source='place.id')
user = serializers.ReadOnlyField(source='user.user.username')
class Meta:
model = Like
fields = ('place', 'user')
def perform_create(self, serializer):
user = BaseUser.objects.get(user = self.request.user.id)
serializer.save(user=user)
def create(self, validated_data):
return Like.objects.create(**validated_data)
class CountrySerializer(serializers.ModelSerializer):
"""
Country Serializer
"""
class Meta():
"""
"""
model = Country
fields = ('id', 'name')
| pasha369/Points | points/appls/points/serializers.py | Python | mit | 2,008 |
from future import standard_library
standard_library.install_aliases()
from builtins import str
from configparser import ConfigParser
import errno
import logging
import os
import sys
import textwrap
try:
from cryptography.fernet import Fernet
except:
pass
def generate_fernet_key():
try:
FERNET_KEY = Fernet.generate_key().decode()
except NameError:
FERNET_KEY = "cryptography_not_found_storing_passwords_in_plain_text"
return FERNET_KEY
def expand_env_var(env_var):
"""
Expands (potentially nested) env vars by repeatedly applying
`expandvars` and `expanduser` until interpolation stops having
any effect.
"""
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
class AirflowConfigException(Exception):
pass
defaults = {
'core': {
'unit_test_mode': False,
'parallelism': 32,
'load_examples': True,
'plugins_folder': None,
},
'webserver': {
'base_url': 'http://localhost:8080',
'web_server_host': '0.0.0.0',
'web_server_port': '8080',
'authenticate': False,
'filter_by_owner': False,
'demo_mode': False,
'secret_key': 'airflowified',
'expose_config': False,
},
'scheduler': {
'statsd_on': False,
'statsd_host': 'localhost',
'statsd_port': 8125,
'statsd_prefix': 'airflow',
'job_heartbeat_sec': 5,
'scheduler_heartbeat_sec': 60,
'authenticate': False,
},
'celery': {
'default_queue': 'default',
'flower_port': '5555'
},
'smtp': {
'smtp_starttls': True,
},
}
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
base_log_folder = {AIRFLOW_HOME}/logs
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engines; more information
# on their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# Secret key used to run your flask app
secret_key = temporary_key
# Expose the configuration file in the web server
expose_config = true
# Set to true to turn on authentication : http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
[smtp]
# If you want airflow to send emails on retries, failure, and you want to
# the airflow.utils.send_email function, you have to configure an smtp
# server here
smtp_host = localhost
smtp_starttls = True
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# and visible from the main web server so it can connect to the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that worker listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
"""
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {AIRFLOW_HOME}/dags
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
"""
class ConfigParserWithDefaults(ConfigParser):
def __init__(self, defaults, *args, **kwargs):
self.defaults = defaults
ConfigParser.__init__(self, *args, **kwargs)
def get(self, section, key):
section = str(section).lower()
key = str(key).lower()
d = self.defaults
# environment variables get precedence
        # must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
# ...then the config file
elif self.has_option(section, key):
return expand_env_var(ConfigParser.get(self, section, key))
# ...then the defaults
elif section in d and key in d[section]:
return expand_env_var(d[section][key])
else:
raise AirflowConfigException(
"section/key [{section}/{key}] not found "
"in config".format(**locals()))
def getboolean(self, section, key):
val = str(self.get(section, key)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val == "true":
return True
elif val == "false":
return False
else:
raise AirflowConfigException("Not a boolean.")
def getint(self, section, key):
return int(self.get(section, key))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise AirflowConfigException('Had trouble creating a directory')
"""
Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
"~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
"""
if 'AIRFLOW_HOME' not in os.environ:
AIRFLOW_HOME = expand_env_var('~/airflow')
else:
AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
if 'AIRFLOW_CONFIG' not in os.environ:
if os.path.isfile(expand_env_var('~/airflow.cfg')):
AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
else:
AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
if not os.path.isfile(AIRFLOW_CONFIG):
"""
These configuration options are used to generate a default configuration
when it is missing. The right way to change your configuration is to alter
your configuration file, not this code.
"""
FERNET_KEY = generate_fernet_key()
logging.info("Creating new config file in: " + AIRFLOW_CONFIG)
f = open(AIRFLOW_CONFIG, 'w')
f.write(DEFAULT_CONFIG.format(**locals()))
f.close()
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
logging.info("Creating new config file in: " + TEST_CONFIG_FILE)
f = open(TEST_CONFIG_FILE, 'w')
f.write(TEST_CONFIG.format(**locals()))
f.close()
logging.info("Reading the config from " + AIRFLOW_CONFIG)
def test_mode():
conf = ConfigParserWithDefaults(defaults)
    conf.read(TEST_CONFIG_FILE)
conf = ConfigParserWithDefaults(defaults)
conf.read(AIRFLOW_CONFIG)
if 'cryptography' in sys.modules and not conf.has_option('core', 'fernet_key'):
logging.warning(textwrap.dedent("""
Your system supports encrypted passwords for Airflow connections but is
currently storing them in plaintext! To turn on encryption, add a
"fernet_key" option to the "core" section of your airflow.cfg file,
like this:
[core]
fernet_key = <YOUR FERNET KEY>
Your airflow.cfg file is located at: {cfg}.
If you need to generate a fernet key, you can run this code:
from airflow.configuration import generate_fernet_key
generate_fernet_key()
""".format(cfg=AIRFLOW_CONFIG)))
| briceburg/airflow | airflow/configuration.py | Python | apache-2.0 | 10,960 |