code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M)
---|---|---|---|---|---
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('log', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='avg_speed',
field=models.FloatField(null=True, blank=True),
),
migrations.AlterField(
model_name='entry',
name='calories',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='entry',
name='distance',
field=models.FloatField(null=True, blank=True),
),
migrations.AlterField(
model_name='entry',
name='elevation_gain',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='entry',
name='equipment',
field=models.ForeignKey(null=True, to='log.Equipment', blank=True),
),
migrations.AlterField(
model_name='entry',
name='event',
field=models.ForeignKey(null=True, to='log.Event', blank=True),
),
migrations.AlterField(
model_name='entry',
name='max_speed',
field=models.FloatField(null=True, blank=True),
),
migrations.AlterField(
model_name='entry',
name='time',
field=models.TimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='equipment',
name='cost',
field=models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True),
),
migrations.AlterField(
model_name='equipment',
name='disposal_date',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='equipment',
name='disposal_proceeds',
field=models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True),
),
migrations.AlterField(
model_name='equipment',
name='expected_lifespan',
field=models.DurationField(null=True, blank=True),
),
migrations.AlterField(
model_name='equipmentmaintenance',
name='cost',
field=models.DecimalField(null=True, max_digits=8, decimal_places=2, blank=True),
),
migrations.AlterField(
model_name='event',
name='bib_number',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='category',
field=models.CharField(null=True, max_length=10, blank=True),
),
migrations.AlterField(
model_name='event',
name='event_type',
field=models.ForeignKey(null=True, to='log.EventType', blank=True),
),
migrations.AlterField(
model_name='event',
name='finish_age_group',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finish_gender',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finish_handicapped',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finish_overall',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finishers_age_group',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finishers_gender',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='finishers_overall',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='official_time',
field=models.TimeField(null=True, blank=True),
),
migrations.AlterField(
model_name='event',
name='results_url',
field=models.URLField(null=True, blank=True),
),
migrations.AlterField(
model_name='measurements',
name='weight',
field=models.FloatField(null=True, blank=True),
),
]
| L1NT/django-training-log | log/migrations/0002_auto_20151129_1307.py | Python | gpl-2.0 | 4,784 |
from api.service import ApiService
from api.soap.message import WebServiceDocument
from api.errors import ValidationError
from api.utils import dict_search
from lxml import etree
from lxml.builder import ElementMaker
from django.http import HttpResponse
from namespaces import DEFAULT_NAMESPACES, SOAP_ENV, XSI
from containers import WebServiceFunction
class WebService(ApiService):
class Meta(ApiService):
wrapper = WebServiceFunction
def __init__(self, target_ns=None, namespaces=DEFAULT_NAMESPACES, ns_name=None, *args, **kwargs):
super(WebService, self).__init__(*args, **kwargs)
self.target_ns = target_ns
self.namespaces = namespaces
self.add_tns_entry(ns_name, self.target_ns)
self.wsdl = WebServiceDocument(
namespaces=self.namespaces,
target_ns=self.target_ns,
service_name=self.service_name,
service_url=self.service_url
)
def add_tns_entry(self, tns_name, tns_namespace):
counter = None
tns_name = '%s%s' % (tns_name or 'ns', counter or '')
while self.namespaces.search(tns_name) not in [tns_namespace, None]:
counter = (counter or 0) + 1
tns_name = '%s%s' % (tns_name or 'ns', counter or '')
self.namespaces.update({tns_name: tns_namespace})
def add_method_hook(self, fn):
self.wsdl.functions = list(self.functions.values())
def generate_wsdl(self, request):
return HttpResponse(str(self.wsdl), content_type='text/xml')
def get_function(self, function):
name = function.tag.replace('{%s}' % function.nsmap[function.prefix], '')
arg_elements = function.xpath('%s:*' % self.namespaces.search(value=self.target_ns), namespaces=self.namespaces)
args = dict([(arg.tag.replace('{%s}' % arg.nsmap[arg.prefix], ''), arg.text) for arg in arg_elements])
return super(WebService, self).get_function(name, **args)
def validate_request(self, request, accepted=['POST']):
return request.method in accepted
def parse_request(self, request):
message = etree.fromstring(request.raw_post_data)
header = message.xpath('%s:Header' % SOAP_ENV, namespaces=self.namespaces)[0]
body = message.xpath('%s:Body' % SOAP_ENV, namespaces=self.namespaces)[0]
if header is None or body is None:
raise ValidationError('Not a SOAP request')
if len(header) == 0 and len(body) == 0:
raise ValidationError('Empty SOAP envelope')
if len(body) > 1:
raise ValidationError('Too many requested functions')
functions = body.xpath('%s:*' % self.namespaces.search(value=self.target_ns), namespaces=self.namespaces)
return functions[0]
def process(self, request, parsed_data):
function = parsed_data
wsf, args = self.get_function(function)
result = wsf.dispatch(request, **args)
return result, wsf
def package(self, request, response, function=None):
E = ElementMaker(namespace=self.namespaces.search(SOAP_ENV), nsmap=self.namespaces)
wsf = function
envelope = E.Envelope(
E.Header(),
E.Body(
E('{%s}%sResponse' % (self.target_ns, wsf.function_name),
E('{%s}%sResult' % (self.target_ns, wsf.function_name),
response,
**{'{%s}type' % self.namespaces.search(XSI): '%s:%s' % (self.namespaces.search(value=wsf.outtype.Meta.namespace), wsf.outtype.Meta.name)}
)
)
)
)
return HttpResponse(etree.tostring(envelope), content_type='text/xml')
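# Illustrative sketch of a request that parse_request() accepts: a SOAP
# envelope whose Body holds exactly one function element in the service's
# target namespace, with one child element per argument (the element text
# becomes the argument value).  The target namespace, function name and
# argument names below are hypothetical; the envelope URI is the standard
# SOAP 1.1 one.
def _example_soap_request():
    from lxml import etree
    from lxml.builder import ElementMaker

    SOAP_NS = "http://schemas.xmlsoap.org/soap/envelope/"
    TNS = "http://example.com/my-service"  # hypothetical target namespace

    SOAP = ElementMaker(namespace=SOAP_NS, nsmap={"soapenv": SOAP_NS})
    FN = ElementMaker(namespace=TNS, nsmap={"tns": TNS})

    envelope = SOAP.Envelope(
        SOAP.Header(),
        SOAP.Body(
            FN.Add(         # the single requested function
                FN.a("1"),  # argument elements: tag is the name, text the value
                FN.b("2"),
            )
        ),
    )
    return etree.tostring(envelope, xml_declaration=True, encoding="utf-8")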
| allanlei/django-apipy | api/soap/service.py | Python | bsd-3-clause | 3,873 |
#!/usr/bin/python3
"""
Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
Strings consist of lowercase English letters only and the length of both strings s and p will not be larger than 20,100.
The order of output does not matter.
"""
from collections import Counter
class Solution:
def findAnagrams(self, s, target):
"""
Brute force: O(|target|) * O(cmp) * O(|s|)
Counter: O(cmp) * O(|s|)
where O(cmp) = 26, the size of the alphabet
:type s: str
:type target: str
:rtype: List[int]
"""
ret = []
counter_target = Counter(target)
counter_cur = Counter(s[:len(target)])
if counter_cur == counter_target:
ret.append(0)
for idx in range(len(target), len(s)):
head = s[idx - len(target)]
tail = s[idx]
counter_cur[tail] += 1
counter_cur[head] -= 1
if counter_cur[head] == 0:
del counter_cur[head] # required for comparison
if counter_cur == counter_target:
# idx is the ending index, find the starting
ret.append(idx - len(target) + 1)
return ret
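# Illustrative cross-check: the brute-force variant mentioned in the docstring,
# rebuilding a Counter for every window instead of updating it incrementally.
# Handy for validating the sliding-window version on small inputs; this helper
# is a hypothetical addition, not part of the original solution.
def find_anagrams_brute_force(s, target):
    counter_target = Counter(target)
    return [
        i
        for i in range(len(s) - len(target) + 1)
        if Counter(s[i:i + len(target)]) == counter_target
    ]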
if __name__ == "__main__":
assert Solution().findAnagrams("cbaebabacd", "abc") == [0, 6]
| algorhythms/LeetCode | 438 Find All Anagrams in a String.py | Python | mit | 1,329 |
import asyncio
import filecmp
import logging
import os
import pickle
import tempfile
import warnings
import re
from asyncio import AbstractEventLoop
from pathlib import Path
from typing import (
Text,
Any,
Union,
List,
Type,
Callable,
TYPE_CHECKING,
Pattern,
)
from typing_extensions import Protocol
import rasa.shared.constants
import rasa.shared.utils.io
if TYPE_CHECKING:
from prompt_toolkit.validation import Validator
class WriteRow(Protocol):
"""Describes a csv writer supporting a `writerow` method (workaround for typing)."""
def writerow(self, row: List[Text]) -> None:
"""Write the given row.
Args:
row: the entries of a row as a list of strings
"""
...
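# Illustrative usage: any object exposing writerow(list-of-str) satisfies
# WriteRow; the writer returned by csv.writer() is the typical case.  The
# helper and file name below are hypothetical examples, not part of the module.
def _example_write_header(writer: "WriteRow") -> None:
    """Write a sample header row through any WriteRow-compatible writer."""
    writer.writerow(["intent", "text"])
# e.g.:
#   import csv
#   with open("examples.csv", "w", newline="") as f:
#       _example_write_header(csv.writer(f))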
def configure_colored_logging(loglevel: Text) -> None:
"""Configures coloredlogs library for specified loglevel.
Args:
loglevel: The loglevel to configure the library for
"""
import coloredlogs
loglevel = loglevel or os.environ.get(
rasa.shared.constants.ENV_LOG_LEVEL, rasa.shared.constants.DEFAULT_LOG_LEVEL
)
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {}
coloredlogs.install(
level=loglevel,
use_chroot=False,
fmt="%(asctime)s %(levelname)-8s %(name)s - %(message)s",
level_styles=level_styles,
field_styles=field_styles,
)
def enable_async_loop_debugging(
event_loop: AbstractEventLoop, slow_callback_duration: float = 0.1
) -> AbstractEventLoop:
"""Enables debugging on an event loop.
Args:
event_loop: The event loop to enable debugging on
slow_callback_duration: The threshold at which a callback should be
alerted as slow.
"""
logging.info(
"Enabling coroutine debugging. Loop id {}.".format(id(asyncio.get_event_loop()))
)
# Enable debugging
event_loop.set_debug(True)
# Make the threshold for "slow" tasks very very small for
# illustration. The default is 0.1 (= 100 milliseconds).
event_loop.slow_callback_duration = slow_callback_duration
# Report all mistakes managing asynchronous resources.
warnings.simplefilter("always", ResourceWarning)
return event_loop
def pickle_dump(filename: Union[Text, Path], obj: Any) -> None:
"""Saves object to file.
Args:
filename: the filename to save the object to
obj: the object to store
"""
with open(filename, "wb") as f:
pickle.dump(obj, f)
def pickle_load(filename: Union[Text, Path]) -> Any:
"""Loads an object from a file.
Args:
filename: the filename to load the object from
Returns: the loaded object
"""
with open(filename, "rb") as f:
return pickle.load(f)
def create_temporary_file(data: Any, suffix: Text = "", mode: Text = "w+") -> Text:
"""Creates a tempfile.NamedTemporaryFile object for data."""
encoding = None if "b" in mode else rasa.shared.utils.io.DEFAULT_ENCODING
f = tempfile.NamedTemporaryFile(
mode=mode, suffix=suffix, delete=False, encoding=encoding
)
f.write(data)
f.close()
return f.name
def create_temporary_directory() -> Text:
"""Creates a tempfile.TemporaryDirectory."""
f = tempfile.TemporaryDirectory()
return f.name
def create_path(file_path: Text) -> None:
"""Makes sure all directories in the 'file_path' exists."""
parent_dir = os.path.dirname(os.path.abspath(file_path))
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
def file_type_validator(
valid_file_types: List[Text], error_message: Text
) -> Type["Validator"]:
"""Creates a `Validator` class which can be used with `questionary` to validate
file paths.
"""
def is_valid(path: Text) -> bool:
return path is not None and any(
[path.endswith(file_type) for file_type in valid_file_types]
)
return create_validator(is_valid, error_message)
def not_empty_validator(error_message: Text) -> Type["Validator"]:
"""Creates a `Validator` class which can be used with `questionary` to validate
that the user entered something other than whitespace.
"""
def is_valid(input: Text) -> bool:
return input is not None and input.strip() != ""
return create_validator(is_valid, error_message)
def create_validator(
function: Callable[[Text], bool], error_message: Text
) -> Type["Validator"]:
"""Helper method to create `Validator` classes from callable functions. Should be
removed when questionary supports `Validator` objects."""
from prompt_toolkit.validation import Validator, ValidationError
from prompt_toolkit.document import Document
class FunctionValidator(Validator):
@staticmethod
def validate(document: Document) -> None:
is_valid = function(document.text)
if not is_valid:
raise ValidationError(message=error_message)
return FunctionValidator
def json_unpickle(
file_name: Union[Text, Path], encode_non_string_keys: bool = False
) -> Any:
"""Unpickle an object from file using json.
Args:
file_name: the file to load the object from
encode_non_string_keys: If set to `True` then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
Returns: the object
"""
import jsonpickle.ext.numpy as jsonpickle_numpy
import jsonpickle
jsonpickle_numpy.register_handlers()
file_content = rasa.shared.utils.io.read_file(file_name)
return jsonpickle.loads(file_content, keys=encode_non_string_keys)
def json_pickle(
file_name: Union[Text, Path], obj: Any, encode_non_string_keys: bool = False
) -> None:
"""Pickle an object to a file using json.
Args:
file_name: the file to store the object to
obj: the object to store
encode_non_string_keys: If set to `True` then jsonpickle will encode non-string
dictionary keys instead of coercing them into strings via `repr()`.
"""
import jsonpickle.ext.numpy as jsonpickle_numpy
import jsonpickle
jsonpickle_numpy.register_handlers()
rasa.shared.utils.io.write_text_file(
jsonpickle.dumps(obj, keys=encode_non_string_keys), file_name
)
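# Illustrative round trip, assuming jsonpickle and the rasa.shared.utils.io
# helpers used above are available: json_pickle() writes a jsonpickle
# representation to disk and json_unpickle() restores the original object.
# The helper below is a hypothetical example, not part of the module.
def _example_json_roundtrip() -> None:
    import tempfile

    data = {"labels": ["greet", "bye"], "threshold": 0.7}
    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as f:
        target = f.name
    json_pickle(target, data)
    assert json_unpickle(target) == data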
def get_emoji_regex() -> Pattern:
"""Returns regex to identify emojis."""
return re.compile(
"["
"\U0001F600-\U0001F64F" # emoticons
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U00002702-\U000027B0"
"\U000024C2-\U0001F251"
"\u200d" # zero width joiner
"\u200c" # zero width non-joiner
"]+",
flags=re.UNICODE,
)
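# Illustrative usage: strip emojis before further text processing, e.g.
#   get_emoji_regex().sub("", "thanks \U0001F44D") == "thanks "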
def are_directories_equal(dir1: Path, dir2: Path) -> bool:
"""Compares two directories recursively.
Files in each directory are
assumed to be equal if their names and contents are equal.
Args:
dir1: The first directory.
dir2: The second directory.
Returns:
`True` if they are equal, `False` otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if dirs_cmp.left_only or dirs_cmp.right_only:
return False
(_, mismatches, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False
)
if mismatches or errors:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = Path(dir1, common_dir)
new_dir2 = Path(dir2, common_dir)
is_equal = are_directories_equal(new_dir1, new_dir2)
if not is_equal:
return False
return True
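# Illustrative usage: two directories compare equal when their file names and
# contents match recursively.  The paths and file content below are created
# only for this hypothetical example.
def _example_compare_dirs() -> bool:
    import tempfile

    dir1, dir2 = Path(tempfile.mkdtemp()), Path(tempfile.mkdtemp())
    for directory in (dir1, dir2):
        (directory / "model.txt").write_text("weights", encoding="utf-8")
    return are_directories_equal(dir1, dir2)  # True for identical trees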
| RasaHQ/rasa_nlu | rasa/utils/io.py | Python | apache-2.0 | 7,868 |
#!/usr/bin/env python
# This file is part of MSMBuilder.
#
# Copyright 2011 Stanford University
#
# MSMBuilder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
# Imports
##############################################################################
import os
import logging
import numpy as np
import scipy.io
from msmbuilder import arglib
from mdtraj import io
from msmbuilder import MSMLib
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
##############################################################################
# globals
##############################################################################
logger = logging.getLogger('msmbuilder.scripts.BuildMSM')
parser = arglib.ArgumentParser(description=
"""Estimates the counts and transition matrices from an
Assignments.h5 file. Reversible models can be calculated either from naive
symmetrization or estimation of the most likely reversible matrices (MLE,
recommended). Also calculates the equilibrium populations for the model
produced. Outputs will be saved in the directory of your input Assignments.h5
file.
\nOutput: tCounts.mtx, tProb.mtx, Populations.dat, Mapping.dat,
Assignments.Fixed.h5""")
parser.add_argument('assignments')
parser.add_argument('symmetrize', help="""Method by which to estimate a
symmetric counts matrix. Symmetrization ensures reversibility, but may skew
dynamics. We recommend maximum likelihood estimation (MLE) when tractable,
else try Transpose. It is strongly recommended you read the documentation
surrounding this choice.""", default='MLE',
choices=['MLE', 'Transpose', 'None'])
parser.add_argument('lagtime', help='''Lag time to use in model (in
number of snapshots. EG, if you have snapshots every 200ps, and set the
lagtime=50, you'll get a model with a lagtime of 10ns)''', type=int)
parser.add_argument('mapping', help='''Mapping, EG from microstates to macrostates.
If given, this mapping will be applied to the specified assignments before
creating an MSM.''', default="None")
parser.add_argument('trim', help="""Whether or not to apply an ergodic trim.
If true, keeps only the largest observed ergodic subset of the data, if
false, keeps everything. Default: True.""", default=True, type=str2bool)
parser.add_argument('output_dir')
##############################################################################
# Code
##############################################################################
def run(lagtime, assignments, symmetrize='MLE', input_mapping="None", trim=True, out_dir="./Data/"):
# set the filenames for output
FnTProb = os.path.join(out_dir, "tProb.mtx")
FnTCounts = os.path.join(out_dir, "tCounts.mtx")
FnMap = os.path.join(out_dir, "Mapping.dat")
FnAss = os.path.join(out_dir, "Assignments.Fixed.h5")
FnPops = os.path.join(out_dir, "Populations.dat")
# make sure none are taken
outputlist = [FnTProb, FnTCounts, FnMap, FnAss, FnPops]
arglib.die_if_path_exists(outputlist)
# Check for valid lag time
assert lagtime > 0, 'Please specify a positive lag time.'
# if given, apply mapping to assignments
if input_mapping != "None":
MSMLib.apply_mapping_to_assignments(assignments, input_mapping)
n_assigns_before_trim = len(np.where(assignments.flatten() != -1)[0])
counts = MSMLib.get_count_matrix_from_assignments(assignments, lag_time=lagtime, sliding_window=True)
rev_counts, t_matrix, populations, mapping = MSMLib.build_msm(counts, symmetrize=symmetrize, ergodic_trimming=trim)
if trim:
MSMLib.apply_mapping_to_assignments(assignments, mapping)
n_assigns_after_trim = len(np.where(assignments.flatten() != -1)[0])
# if had input mapping, then update it
if input_mapping != "None":
mapping = mapping[input_mapping]
# Print a statement showing how much data was discarded in trimming
percent = (1.0 - float(n_assigns_after_trim) / float(n_assigns_before_trim)) * 100.0
logger.warning("Ergodic trimming discarded: %f percent of your data", percent)
else:
logger.warning("No ergodic trimming applied")
# Save all output
np.savetxt(FnPops, populations)
np.savetxt(FnMap, mapping, "%d")
scipy.io.mmwrite(str(FnTProb), t_matrix)
scipy.io.mmwrite(str(FnTCounts), rev_counts)
io.saveh(FnAss, assignments)
for output in outputlist:
logger.info("Wrote: %s", output)
return
def entry_point():
args = parser.parse_args()
try:
assignments = io.loadh(args.assignments, 'arr_0')
except KeyError:
assignments = io.loadh(args.assignments, 'Data')
if args.mapping != "None":
args.mapping = np.array(np.loadtxt(args.mapping), dtype=int)
run(args.lagtime, assignments, args.symmetrize, args.mapping, args.trim, args.output_dir)
if __name__ == "__main__":
entry_point()
| mpharrigan/msmbuilder | scripts/BuildMSM.py | Python | gpl-2.0 | 5,662 |
from PyQt4 import QtGui
from PyQt4.QtGui import QInputDialog
def askForConfirmation(parent, message):
confirmationBox = QtGui.QMessageBox(parent=parent, text=message)
confirmationBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
confirmationBox.setWindowTitle("File transfer")
return confirmationBox.exec() == QtGui.QMessageBox.Yes
def askForInput(parent, message):
response = QInputDialog.getText(parent, "File transfer", message)
if response[1]:
return response[0]
else:
return None
def showMessageBox(parent, messageText):
messageBox = QtGui.QMessageBox(parent=parent, text=(messageText))
messageBox.setWindowTitle("File transfer")
messageBox.show()
| nidzo732/FileTransfer | Python/dialogboxes.py | Python | gpl-2.0 | 734 |
from test import support
from test.support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest
import unittest
import operator
import sys
import functools
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, but don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEquals or similar. It's a
# lengthy operation and the error message will be utterly useless due to
# its size. To make sure whether a result has the right contents, better
# to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - While the bigmemtest decorator speaks of 'minsize', all tests will
# actually be called with a much smaller number too, in the normal
# test run (5Kb currently.) This is so the tests themselves get frequent
# testing. Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
# BEWARE: it seems that one failing test can cause subsequent tests to
# fail as well. I do not know whether it is due to memory fragmentation
# issues, or other specifics of the platform malloc() routine.
character_size = 4 if sys.maxunicode > 0xFFFF else 2
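# A minimal sketch of a test written to the houserules above (illustrative
# only, not part of the original suite): every allocation is based on the
# passed-in 'size', the old object is deleted before the next large
# allocation, and results are checked with count()/slicing instead of
# comparing the whole large object.
class _ExampleHouseRulesTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=2)
    def test_count_and_slice(self, size):
        s = '.' * size
        self.assertEquals(s.count('.'), size)
        self.assertEquals(s[:3], '...')
        del s  # release before the next large allocation
        s = '-' * (size + 3)
        self.assertEquals(s[-3:], '---')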
class BaseStrTest:
@bigmemtest(minsize=_2G, memuse=2)
def test_capitalize(self, size):
_ = self.from_latin1
SUBSTR = self.from_latin1(' abc def ghi')
s = _('-') * size + SUBSTR
caps = s.capitalize()
self.assertEquals(caps[-len(SUBSTR):],
SUBSTR.capitalize())
self.assertEquals(caps.lstrip(_('-')), SUBSTR)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_center(self, size):
SUBSTR = self.from_latin1(' abc def ghi')
s = SUBSTR.center(size)
self.assertEquals(len(s), size)
lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
if len(s) % 2:
lpadsize += 1
self.assertEquals(s[lpadsize:-rpadsize], SUBSTR)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G, memuse=2)
def test_count(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('.') * size + SUBSTR
self.assertEquals(s.count(_('.')), size)
s += _('.')
self.assertEquals(s.count(_('.')), size + 1)
self.assertEquals(s.count(_(' ')), 3)
self.assertEquals(s.count(_('i')), 1)
self.assertEquals(s.count(_('j')), 0)
@bigmemtest(minsize=_2G, memuse=2)
def test_endswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.endswith(SUBSTR))
self.assertTrue(s.endswith(s))
s2 = _('...') + s
self.assertTrue(s2.endswith(s))
self.assertFalse(s.endswith(_('a') + SUBSTR))
self.assertFalse(SUBSTR.endswith(s))
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_expandtabs(self, size):
_ = self.from_latin1
s = _('-') * size
tabsize = 8
self.assertEquals(s.expandtabs(), s)
del s
slen, remainder = divmod(size, tabsize)
s = _(' \t') * slen
s = s.expandtabs(tabsize)
self.assertEquals(len(s), size - remainder)
self.assertEquals(len(s.strip(_(' '))), 0)
@bigmemtest(minsize=_2G, memuse=2)
def test_find(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEquals(s.find(_(' ')), 0)
self.assertEquals(s.find(SUBSTR), 0)
self.assertEquals(s.find(_(' '), sublen), sublen + size)
self.assertEquals(s.find(SUBSTR, len(SUBSTR)), sublen + size)
self.assertEquals(s.find(_('i')), SUBSTR.find(_('i')))
self.assertEquals(s.find(_('i'), sublen),
sublen + size + SUBSTR.find(_('i')))
self.assertEquals(s.find(_('i'), size),
sublen + size + SUBSTR.find(_('i')))
self.assertEquals(s.find(_('j')), -1)
@bigmemtest(minsize=_2G, memuse=2)
def test_index(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEquals(s.index(_(' ')), 0)
self.assertEquals(s.index(SUBSTR), 0)
self.assertEquals(s.index(_(' '), sublen), sublen + size)
self.assertEquals(s.index(SUBSTR, sublen), sublen + size)
self.assertEquals(s.index(_('i')), SUBSTR.index(_('i')))
self.assertEquals(s.index(_('i'), sublen),
sublen + size + SUBSTR.index(_('i')))
self.assertEquals(s.index(_('i'), size),
sublen + size + SUBSTR.index(_('i')))
self.assertRaises(ValueError, s.index, _('j'))
@bigmemtest(minsize=_2G, memuse=2)
def test_isalnum(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalnum())
s += _('.')
self.assertFalse(s.isalnum())
@bigmemtest(minsize=_2G, memuse=2)
def test_isalpha(self, size):
_ = self.from_latin1
SUBSTR = _('zzzzzzz')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalpha())
s += _('.')
self.assertFalse(s.isalpha())
@bigmemtest(minsize=_2G, memuse=2)
def test_isdigit(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('9') * size + SUBSTR
self.assertTrue(s.isdigit())
s += _('z')
self.assertFalse(s.isdigit())
@bigmemtest(minsize=_2G, memuse=2)
def test_islower(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).isupper()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.islower())
s += _('A')
self.assertFalse(s.islower())
@bigmemtest(minsize=_2G, memuse=2)
def test_isspace(self, size):
_ = self.from_latin1
whitespace = _(' \f\n\r\t\v')
repeats = size // len(whitespace) + 2
s = whitespace * repeats
self.assertTrue(s.isspace())
s += _('j')
self.assertFalse(s.isspace())
@bigmemtest(minsize=_2G, memuse=2)
def test_istitle(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('').join([_('A'), _('a') * size, SUBSTR])
self.assertTrue(s.istitle())
s += _('A')
self.assertTrue(s.istitle())
s += _('aA')
self.assertFalse(s.istitle())
@bigmemtest(minsize=_2G, memuse=2)
def test_isupper(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).islower()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.isupper())
s += _('a')
self.assertFalse(s.isupper())
@bigmemtest(minsize=_2G, memuse=2)
def test_join(self, size):
_ = self.from_latin1
s = _('A') * size
x = s.join([_('aaaaa'), _('bbbbb')])
self.assertEquals(x.count(_('a')), 5)
self.assertEquals(x.count(_('b')), 5)
self.assertTrue(x.startswith(_('aaaaaA')))
self.assertTrue(x.endswith(_('Abbbbb')))
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_ljust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertTrue(s.startswith(SUBSTR + _(' ')))
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_lower(self, size):
_ = self.from_latin1
s = _('A') * size
s = s.lower()
self.assertEquals(len(s), size)
self.assertEquals(s.count(_('a')), size)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_lstrip(self, size):
_ = self.from_latin1
SUBSTR = _('abc def ghi')
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.lstrip(), SUBSTR.lstrip())
del s
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.lstrip()
self.assertTrue(stripped is s)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_replace(self, size):
_ = self.from_latin1
replacement = _('a')
s = _(' ') * size
s = s.replace(_(' '), replacement)
self.assertEquals(len(s), size)
self.assertEquals(s.count(replacement), size)
s = s.replace(replacement, _(' '), size - 4)
self.assertEquals(len(s), size)
self.assertEquals(s.count(replacement), 4)
self.assertEquals(s[-10:], _(' aaaa'))
@bigmemtest(minsize=_2G, memuse=2)
def test_rfind(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEquals(s.rfind(_(' ')), sublen + size + SUBSTR.rfind(_(' ')))
self.assertEquals(s.rfind(SUBSTR), sublen + size)
self.assertEquals(s.rfind(_(' '), 0, size), SUBSTR.rfind(_(' ')))
self.assertEquals(s.rfind(SUBSTR, 0, sublen + size), 0)
self.assertEquals(s.rfind(_('i')), sublen + size + SUBSTR.rfind(_('i')))
self.assertEquals(s.rfind(_('i'), 0, sublen), SUBSTR.rfind(_('i')))
self.assertEquals(s.rfind(_('i'), 0, sublen + size),
SUBSTR.rfind(_('i')))
self.assertEquals(s.rfind(_('j')), -1)
@bigmemtest(minsize=_2G, memuse=2)
def test_rindex(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEquals(s.rindex(_(' ')),
sublen + size + SUBSTR.rindex(_(' ')))
self.assertEquals(s.rindex(SUBSTR), sublen + size)
self.assertEquals(s.rindex(_(' '), 0, sublen + size - 1),
SUBSTR.rindex(_(' ')))
self.assertEquals(s.rindex(SUBSTR, 0, sublen + size), 0)
self.assertEquals(s.rindex(_('i')),
sublen + size + SUBSTR.rindex(_('i')))
self.assertEquals(s.rindex(_('i'), 0, sublen), SUBSTR.rindex(_('i')))
self.assertEquals(s.rindex(_('i'), 0, sublen + size),
SUBSTR.rindex(_('i')))
self.assertRaises(ValueError, s.rindex, _('j'))
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rjust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertTrue(s.startswith(SUBSTR + _(' ')))
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rstrip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.rstrip(), SUBSTR.rstrip())
del s
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.rstrip()
self.assertTrue(stripped is s)
# The test takes about size bytes to build a string, and then about
# sqrt(size) substrings of sqrt(size) in size and a list to
# hold sqrt(size) items. It's close but just over 2x size.
@bigmemtest(minsize=_2G, memuse=2.1)
def test_split_small(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2)
SUBSTR = _('a') + _(' ') * chunksize
s = SUBSTR * chunksize
l = s.split()
self.assertEquals(len(l), chunksize)
expected = _('a')
for item in l:
self.assertEquals(item, expected)
del l
l = s.split(_('a'))
self.assertEquals(len(l), chunksize + 1)
expected = _(' ') * chunksize
for item in filter(None, l):
self.assertEquals(item, expected)
# Allocates a string of twice size (and briefly two) and a list of
# size. Because of internal affairs, the s.split() call produces a
# list of size times the same one-character string, so we only
# suffer for the list size. (Otherwise, it'd cost another 48 times
# size in bytes!) Nevertheless, a list of size takes
# 8*size bytes.
@bigmemtest(minsize=_2G + 5, memuse=10)
def test_split_large(self, size):
_ = self.from_latin1
s = _(' a') * size + _(' ')
l = s.split()
self.assertEquals(len(l), size)
self.assertEquals(set(l), set([_('a')]))
del l
l = s.split(_('a'))
self.assertEquals(len(l), size + 1)
self.assertEquals(set(l), set([_(' ')]))
@bigmemtest(minsize=_2G, memuse=2.1)
def test_splitlines(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2) // 2
SUBSTR = _(' ') * chunksize + _('\n') + _(' ') * chunksize + _('\r\n')
s = SUBSTR * chunksize
l = s.splitlines()
self.assertEquals(len(l), chunksize * 2)
expected = _(' ') * chunksize
for item in l:
self.assertEquals(item, expected)
@bigmemtest(minsize=_2G, memuse=2)
def test_startswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.startswith(s))
self.assertTrue(s.startswith(_('-') * size))
self.assertFalse(s.startswith(SUBSTR))
@bigmemtest(minsize=_2G, memuse=1)
def test_strip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi ')
s = SUBSTR.rjust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
del s
s = SUBSTR.ljust(size)
self.assertEquals(len(s), size)
self.assertEquals(s.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G, memuse=2)
def test_swapcase(self, size):
_ = self.from_latin1
SUBSTR = _("aBcDeFG12.'\xa9\x00")
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.swapcase()
self.assertEquals(len(s), sublen * repeats)
self.assertEquals(s[:sublen * 3], SUBSTR.swapcase() * 3)
self.assertEquals(s[-sublen * 3:], SUBSTR.swapcase() * 3)
@bigmemtest(minsize=_2G, memuse=2)
def test_title(self, size):
_ = self.from_latin1
SUBSTR = _('SpaaHAaaAaham')
s = SUBSTR * (size // len(SUBSTR) + 2)
s = s.title()
self.assertTrue(s.startswith((SUBSTR * 3).title()))
self.assertTrue(s.endswith(SUBSTR.lower() * 3))
@bigmemtest(minsize=_2G, memuse=2)
def test_translate(self, size):
_ = self.from_latin1
SUBSTR = _('aZz.z.Aaz.')
if isinstance(SUBSTR, str):
trans = {
ord(_('.')): _('-'),
ord(_('a')): _('!'),
ord(_('Z')): _('$'),
}
else:
trans = bytes.maketrans(b'.aZ', b'-!$')
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEquals(len(s), repeats * sublen)
self.assertEquals(s[:sublen], SUBSTR.translate(trans))
self.assertEquals(s[-sublen:], SUBSTR.translate(trans))
self.assertEquals(s.count(_('.')), 0)
self.assertEquals(s.count(_('!')), repeats * 2)
self.assertEquals(s.count(_('z')), repeats * 3)
@bigmemtest(minsize=_2G + 5, memuse=2)
def test_upper(self, size):
_ = self.from_latin1
s = _('a') * size
s = s.upper()
self.assertEquals(len(s), size)
self.assertEquals(s.count(_('A')), size)
@bigmemtest(minsize=_2G + 20, memuse=1)
def test_zfill(self, size):
_ = self.from_latin1
SUBSTR = _('-568324723598234')
s = SUBSTR.zfill(size)
self.assertTrue(s.endswith(_('0') + SUBSTR[1:]))
self.assertTrue(s.startswith(_('-0')))
self.assertEquals(len(s), size)
self.assertEquals(s.count(_('0')), size - len(SUBSTR))
# This test is meaningful even with size < 2G, as long as the
# doubled string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_concat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEquals(len(s), size)
s = s + s
self.assertEquals(len(s), size * 2)
self.assertEquals(s.count(_('.')), size * 2)
# This test is meaningful even with size < 2G, as long as the
# repeated string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_repeat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEquals(len(s), size)
s = s * 2
self.assertEquals(len(s), size * 2)
self.assertEquals(s.count(_('.')), size * 2)
@bigmemtest(minsize=_2G + 20, memuse=2)
def test_slice_and_getitem(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
sublen = len(SUBSTR)
s = SUBSTR * (size // sublen)
stepsize = len(s) // 100
stepsize = stepsize - (stepsize % sublen)
for i in range(0, len(s) - stepsize, stepsize):
self.assertEquals(s[i], SUBSTR[0])
self.assertEquals(s[i:i + sublen], SUBSTR)
self.assertEquals(s[i:i + sublen:2], SUBSTR[::2])
if i > 0:
self.assertEquals(s[i + sublen - 1:i - 1:-3],
SUBSTR[sublen::-3])
# Make sure we do some slicing and indexing near the end of the
# string, too.
self.assertEquals(s[len(s) - 1], SUBSTR[-1])
self.assertEquals(s[-1], SUBSTR[-1])
self.assertEquals(s[len(s) - 10], SUBSTR[0])
self.assertEquals(s[-sublen], SUBSTR[0])
self.assertEquals(s[len(s):], _(''))
self.assertEquals(s[len(s) - 1:], SUBSTR[-1:])
self.assertEquals(s[-1:], SUBSTR[-1:])
self.assertEquals(s[len(s) - sublen:], SUBSTR)
self.assertEquals(s[-sublen:], SUBSTR)
self.assertEquals(len(s[:]), len(s))
self.assertEquals(len(s[:len(s) - 5]), len(s) - 5)
self.assertEquals(len(s[5:-5]), len(s) - 10)
self.assertRaises(IndexError, operator.getitem, s, len(s))
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)
@bigmemtest(minsize=_2G, memuse=2)
def test_contains(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
edge = _('-') * (size // 2)
s = _('').join([edge, SUBSTR, edge])
del edge
self.assertTrue(SUBSTR in s)
self.assertFalse(SUBSTR * 2 in s)
self.assertTrue(_('-') in s)
self.assertFalse(_('a') in s)
s += _('a')
self.assertTrue(_('a') in s)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_compare(self, size):
_ = self.from_latin1
s1 = _('-') * size
s2 = _('-') * size
self.assertEqual(s1, s2)
del s2
s2 = s1 + _('a')
self.assertFalse(s1 == s2)
del s2
s2 = _('.') * size
self.assertFalse(s1 == s2)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_hash(self, size):
# Not sure if we can do any meaningful tests here... Even if we
# start relying on the exact algorithm used, the result will be
# different depending on the size of the C 'long int'. Even this
# test is dodgy (there's no *guarantee* that the two things should
# have a different hash, even if they, in the current
# implementation, almost always do.)
_ = self.from_latin1
s = _('\x00') * size
h1 = hash(s)
del s
s = _('\x00') * (size + 1)
self.assertFalse(h1 == hash(s))
class StrTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s
def basic_encode_test(self, size, enc, c='.', expectedsize=None):
if expectedsize is None:
expectedsize = size
s = c * size
self.assertEquals(len(s.encode(enc)), expectedsize)
def setUp(self):
# HACK: adjust memory use of tests inherited from BaseStrTest
# according to character size.
self._adjusted = {}
for name in dir(BaseStrTest):
if not name.startswith('test_'):
continue
meth = getattr(type(self), name)
try:
memuse = meth.memuse
except AttributeError:
continue
meth.memuse = character_size * memuse
self._adjusted[name] = memuse
def tearDown(self):
for name, memuse in self._adjusted.items():
getattr(type(self), name).memuse = memuse
@bigmemtest(minsize=_2G + 2, memuse=character_size + 1)
def test_encode(self, size):
return self.basic_encode_test(size, 'utf-8')
@precisionbigmemtest(size=_4G // 6 + 2, memuse=character_size + 1)
def test_encode_raw_unicode_escape(self, size):
try:
return self.basic_encode_test(size, 'raw_unicode_escape')
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_4G // 5 + 70, memuse=character_size + 1)
def test_encode_utf7(self, size):
try:
return self.basic_encode_test(size, 'utf7')
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_4G // 4 + 5, memuse=character_size + 4)
def test_encode_utf32(self, size):
try:
return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
except MemoryError:
pass # acceptable on 32-bit
@precisionbigmemtest(size=_2G - 1, memuse=character_size + 1)
def test_encode_ascii(self, size):
return self.basic_encode_test(size, 'ascii', c='A')
@precisionbigmemtest(size=_4G // 5, memuse=character_size * (6 + 1))
def test_unicode_repr_overflow(self, size):
try:
s = "\uAAAA"*size
r = repr(s)
except MemoryError:
pass # acceptable on 32-bit
else:
self.assertTrue(s == eval(r))
@bigmemtest(minsize=_2G + 10, memuse=character_size * 2)
def test_format(self, size):
s = '-' * size
sf = '%s' % (s,)
self.assertEqual(s, sf)
del sf
sf = '..%s..' % (s,)
self.assertEquals(len(sf), len(s) + 4)
self.assertTrue(sf.startswith('..-'))
self.assertTrue(sf.endswith('-..'))
del s, sf
size //= 2
edge = '-' * size
s = ''.join([edge, '%s', edge])
del edge
s = s % '...'
self.assertEquals(len(s), size * 2 + 3)
self.assertEquals(s.count('.'), 3)
self.assertEquals(s.count('-'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=character_size * 2)
def test_repr_small(self, size):
s = '-' * size
s = repr(s)
self.assertEquals(len(s), size + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('-'), size)
del s
# repr() will create a string four times as large as this 'binary
# string', but we don't want to allocate much more than twice
# size in total. (We do extra testing in test_repr_large())
size = size // 5 * 2
s = '\x00' * size
s = repr(s)
self.assertEquals(len(s), size * 4 + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('\\'), size)
self.assertEquals(s.count('0'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=character_size * 5)
def test_repr_large(self, size):
s = '\x00' * size
s = repr(s)
self.assertEquals(len(s), size * 4 + 2)
self.assertEquals(s[0], "'")
self.assertEquals(s[-1], "'")
self.assertEquals(s.count('\\'), size)
self.assertEquals(s.count('0'), size * 2)
@bigmemtest(minsize=2**32 / 5, memuse=character_size * 7)
def test_unicode_repr(self, size):
s = "\uAAAA" * size
for f in (repr, ascii):
r = f(s)
self.assertTrue(len(r) > size)
self.assertTrue(r.endswith(r"\uaaaa'"), r[-10:])
del r
# The character takes 4 bytes even in UCS-2 builds because it will
# be decomposed into surrogates.
@bigmemtest(minsize=2**32 / 5, memuse=4 + character_size * 9)
def test_unicode_repr_wide(self, size):
s = "\U0001AAAA" * size
for f in (repr, ascii):
r = f(s)
self.assertTrue(len(r) > size)
self.assertTrue(r.endswith(r"\U0001aaaa'"), r[-12:])
del r
class BytesTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s.encode("latin1")
@bigmemtest(minsize=_2G + 2, memuse=1 + character_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEquals(len(s.decode('utf-8')), size)
class BytearrayTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return bytearray(s.encode("latin1"))
@bigmemtest(minsize=_2G + 2, memuse=1 + character_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEquals(len(s.decode('utf-8')), size)
test_hash = None
test_split_large = None
class TupleTest(unittest.TestCase):
# Tuples have a small, fixed-sized head and an array of pointers to
# data. Since we're testing 64-bit addressing, we can assume that the
# pointers are 8 bytes, and thus that the tuples take up 8 bytes
# per size.
# As a side-effect of testing long tuples, these tests happen to test
# having more than 2<<31 references to any given object. Hence the
# use of different types of objects as contents in different tests.
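# For example, test_compare below builds two tuples of 'size' elements each,
# at 8 bytes of pointer apiece, i.e. roughly 2 * 8 = 16 bytes per unit of
# size, which is where its memuse=16 comes from.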
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
t1 = ('',) * size
t2 = ('',) * size
self.assertEqual(t1, t2)
del t2
t2 = ('',) * (size + 1)
self.assertFalse(t1 == t2)
del t2
t2 = (1,) * size
self.assertFalse(t1 == t2)
# Test concatenating into a single tuple of more than 2G in length,
# and concatenating a tuple of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_concat_test(self, size):
t = ((),) * size
self.assertEquals(len(t), size)
t = t + t
self.assertEquals(len(t), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_concat_test(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_concat_test(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
t = (1, 2, 3, 4, 5) * size
self.assertEquals(len(t), size * 5)
self.assertTrue(5 in t)
self.assertFalse((1, 2, 3, 4, 5) in t)
self.assertFalse(0 in t)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.assertFalse(h1 == hash(t2))
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEquals(len(t), size)
self.assertEquals(t[-1], None)
self.assertEquals(t[5], None)
self.assertEquals(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEquals(t[:5], (None,) * 5)
self.assertEquals(t[-5:], (None,) * 5)
self.assertEquals(t[20:25], (None,) * 5)
self.assertEquals(t[-25:-20], (None,) * 5)
self.assertEquals(t[size - 5:], (None,) * 5)
self.assertEquals(t[size - 5:size], (None,) * 5)
self.assertEquals(t[size - 6:size - 2], (None,) * 4)
self.assertEquals(t[size:size], ())
self.assertEquals(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEquals(len(t), size)
t = t * 2
self.assertEquals(len(t), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@precisionbigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
try:
t = tuple(range(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
@precisionbigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
try:
t = tuple(range(size))
count = 0
for item in t:
self.assertEquals(item, count)
count += 1
self.assertEquals(count, size)
except MemoryError:
pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
t = (0,) * size
s = repr(t)
# The repr of a tuple of 0's is exactly three times the tuple length.
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '(0, 0')
self.assertEquals(s[-5:], '0, 0)')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [''] * size
l2 = [''] * size
self.assertEqual(l1, l2)
del l2
l2 = [''] * (size + 1)
self.assertFalse(l1 == l2)
del l2
l2 = [2] * size
self.assertFalse(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEquals(len(l), size)
l = l + l
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEquals(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(len(l), size * 5)
self.assertTrue(5 in l)
self.assertFalse([1, 2, 3, 4, 5] in l)
self.assertFalse(0 in l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.assertRaises(TypeError, hash, l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEquals(len(l), size)
self.assertEquals(l[-1], None)
self.assertEquals(l[5], None)
self.assertEquals(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEquals(l[:5], [None] * 5)
self.assertEquals(l[-5:], [None] * 5)
self.assertEquals(l[20:25], [None] * 5)
self.assertEquals(l[-25:-20], [None] * 5)
self.assertEquals(l[size - 5:], [None] * 5)
self.assertEquals(l[size - 5:size], [None] * 5)
self.assertEquals(l[size - 6:size - 2], [None] * 4)
self.assertEquals(l[size:size], [])
self.assertEquals(l[size:size+5], [])
l[size - 2] = 5
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [None, 5, None])
self.assertEquals(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEquals(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 4)
del l[-2:]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[-1], 2)
del l[0]
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[0], 2)
del l[:2]
size -= 2
self.assertEquals(len(l), size)
self.assertEquals(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.assertFalse(l)
l = [''] * size
self.assertEquals(len(l), size)
l = l * 2
self.assertEquals(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEquals(len(l), size)
self.assertTrue(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEquals(len(l), size * 2)
self.assertTrue(l[size - 1] is l[-1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
self.assertEquals(len(s), size * 3)
self.assertEquals(s[:5], '[0, 0')
self.assertEquals(s[-5:], '0, 0]')
self.assertEquals(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
@bigmemtest(minsize=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEquals(len(l), size+1)
self.assertTrue(l[-3] is l[-2])
self.assertFalse(l[-2] is l[-1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEquals(l.count(1), size)
self.assertEquals(l.count("1"), 0)
def basic_test_extend(self, size):
l = [object] * size
l.extend(l)
self.assertEquals(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1, 2, 3, 4, 5] * size
size *= 5
self.assertEquals(l.index(1), 0)
self.assertEquals(l.index(5, size - 5), size - 1)
self.assertEquals(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6)
# This tests suffers from overallocation, just like test_append.
@bigmemtest(minsize=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[:3], [1.0, "C", 1.0])
self.assertEquals(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = ["a", "b", "c", "d", "e"] * size
size *= 5
self.assertEquals(len(l), size)
item = l.pop()
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, "e")
self.assertEquals(l[-2:], ["c", "d"])
item = l.pop(0)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, "a")
self.assertEquals(l[:2], ["b", "c"])
item = l.pop(size - 2)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(item, "c")
self.assertEquals(l[-2:], ["b", "d"])
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEquals(len(l), size)
l.remove(10)
size -= 1
self.assertEquals(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEquals(len(l), size)
self.assertEquals(l[-2:], [10, 10])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEquals(len(l), size * 5)
self.assertEquals(l[-5:], [5, 4, 3, 2, 1])
self.assertEquals(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
l = [1, 2, 3, 4, 5] * size
l.sort()
self.assertEquals(len(l), size * 5)
self.assertEquals(l.count(1), size)
self.assertEquals(l[:10], [1] * 10)
self.assertEquals(l[-10:], [5] * 10)
def test_main():
support.run_unittest(StrTest, BytesTest, BytearrayTest,
TupleTest, ListTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
support.set_memlimit(sys.argv[1])
test_main()
| mancoast/CPythonPyc_test | fail/312_test_bigmem.py | Python | gpl-3.0 | 41,777 |
import pygame
from . import *
class TutorialScene(BaseScene):
def __init__(self, context):
# Create scene and make transparent box over the 'x'
BaseScene.__init__(self, context)
self.btn = pygame.Surface((50,50), pygame.SRCALPHA, 32)
self.btn.convert_alpha()
context.screen.blit(self.context.tutorial, (0,0))
self.b = context.screen.blit(self.btn, (1120,25))
def handle_inputs(self, events):
for event in events:
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.context.scene = TitleScene(self.context)
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pos = pygame.mouse.get_pos()
if self.b.collidepoint(pos):
self.context.scene = TitleScene(self.context)
def render_scene(self):
pygame.display.flip()
| theheckle/rapid-pie-movement | game/scenes/tutorial_scene.py | Python | mit | 1,005 |
#!/usr/bin/env python
import sys, getopt, argparse
from kazoo.client import KazooClient
import json
def loadZookeeperOptions(opts,zk):
node = "/all_clients/"+opts['client']+"/offline/semvec"
if zk.exists(node):
data, stat = zk.get(node)
jStr = data.decode("utf-8")
print "Found zookeeper configuration:",jStr
j = json.loads(jStr)
for key in j:
opts[key] = j[key]
def activateModel(args,folder,zk):
node = "/all_clients/"+args.client+"/svtext"
print "Activating model in zookeper at node ",node," with data ",folder
if zk.exists(node):
zk.set(node,folder)
else:
zk.create(node,folder,makepath=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='set-client-config')
parser.add_argument('-z', '--zookeeper', help='zookeeper hosts', required=True)
parser.add_argument('--clientVariable', help='client variable name', default="$CLIENT")
args = parser.parse_args()
opts = vars(args)
zk = KazooClient(hosts=args.zookeeper)
zk.start()
for line in sys.stdin:
line = line.rstrip()
parts = line.split()
if len(parts) == 3 and not line.startswith("#"):
clients = parts[0].split(',')
node = parts[1]
value = parts[2]
print "--------------------------"
print parts[0],node,"->",value
for client in clients:
nodeClient = node.replace(args.clientVariable,client)
valueClient = value.replace(args.clientVariable,client)
print "----"
print nodeClient
print valueClient
if zk.exists(nodeClient):
zk.set(nodeClient,valueClient)
else:
zk.create(nodeClient,valueClient,makepath=True)
zk.stop()
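# Illustrative input sketch (not part of the original script): each non-comment
# stdin line is expected to carry three whitespace-separated fields (a
# comma-separated list of clients, a zookeeper node path, a value), with the
# --clientVariable token (default "$CLIENT") substituted per client, e.g.
#
#   clientA,clientB /all_clients/$CLIENT/some_node some_value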
| Snazz2001/seldon-server | scripts/zookeeper/set-client-config.py | Python | apache-2.0 | 1,898 |
#! /usr/bin/python
from __future__ import division
from pytronica import *
#adj = 0.1
#def osc(p):
#def osc1(p):
#return Saw(p2f(p))
#os = [osc1(p+x) for x in [0, 12.03, 7-.03]]
#return Layer(os)
a = .5
saw = lambda p: Saw(p2f(p))
osc = lambda p: Pan(saw(p), -a) + Pan(saw(p+.1), a)
def synth(p):
return osc(p) * ExpDecay(1)
s = .25 * synth(note('C4'))
s.play()
| chriswatrous/pytronica | songs/2.py | Python | gpl-2.0 | 390 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import glob
import sys
#VERSION="2.1dev4"
VERSION="2.6dev5"
# Taken from kennethreitz/requests/setup.py
package_directory = os.path.realpath(os.path.dirname(__file__))
def get_file_contents(file_path):
"""Get the context of the file using full path name."""
content = ""
try:
full_path = os.path.join(package_directory, file_path)
content = open(full_path, 'r').read()
except:
print >> sys.stderr, "### could not open file %r" % file_path
return content
setup(
name='privacyIDEA',
version=VERSION,
description='privacyIDEA: identity, multifactor authentication (OTP), '
'authorization, audit',
author='privacyidea.org',
license='AGPLv3',
author_email='cornelius@privacyidea.org',
url='http://www.privacyidea.org',
keywords='OTP, two factor authentication, management, security',
packages=find_packages(),
scripts=['pi-manage.py',
'tools/privacyidea-convert-token',
'tools/privacyidea-create-pwidresolver-user',
'tools/privacyidea-create-sqlidresolver-user',
'tools/privacyidea-pip-update',
'tools/privacyidea-create-certificate',
'tools/privacyidea-fix-access-rights',
'tools/privacyidea-create-ad-users',
'tools/privacyidea-fetchssh.sh',
'tools/privacyidea-create-userdb.sh'
],
extras_require={
'dev': ["Sphinx>=1.3.1",
"sphinxcontrib-httpdomain>=1.3.0"],
'test': ["coverage>=3.7.1",
"mock>=1.0.1",
"nose>=1.3.4",
"responses>=0.4.0",
"six>=1.8.0"],
},
install_requires=["Flask>=0.10.1",
"Flask-Cache>=0.13.1",
"Flask-Migrate>=1.2.0",
"Flask-SQLAlchemy>=2.0",
"Flask-Script>=2.0.5",
"Jinja2>=2.7.3",
"Mako>=0.9.1",
"MarkupSafe>=0.23",
"MySQL-python>=1.2.5",
"Pillow>=2.6.1",
"PyJWT>=1.3.0",
"PyYAML>=3.11",
"Pygments>=2.0.2",
"SQLAlchemy>=1.0.5",
"Werkzeug>=0.10.4",
"alembic>=0.6.7",
"argparse>=1.2.1",
"bcrypt>=1.1.0",
"beautifulsoup4>=4.3.2",
"cffi>=0.8.6",
"configobj>=5.0.6",
"docutils>=0.12",
"funcparserlib>=0.3.6",
"itsdangerous>=0.24",
"ldap3>=0.9.8.4",
"netaddr>=0.7.12",
"passlib>=1.6.2",
"pyasn1>=0.1.7",
"pyOpenSSL>=0.15.1",
"pycparser>=2.10",
"pycrypto>=2.6.1",
"pyrad>=2.0",
"pyusb>=1.0.0b2",
"qrcode>=5.1",
"requests>=2.7.0",
"sqlsoup>=0.9.0",
"wsgiref>=0.1.2"
],
include_package_data=True,
data_files=[('etc/privacyidea/',
['deploy/apache/privacyideaapp.wsgi',
'deploy/privacyidea/dictionary',
'deploy/privacyidea/enckey',
'deploy/privacyidea/private.pem',
'deploy/privacyidea/public.pem']),
('share/man/man1',
["tools/privacyidea-convert-token.1",
"tools/privacyidea-create-pwidresolver-user.1",
"tools/privacyidea-create-sqlidresolver-user.1",
"tools/privacyidea-pip-update.1",
"tools/privacyidea-create-certificate.1",
"tools/privacyidea-fix-access-rights.1"
]),
('lib/privacyidea/authmodules/FreeRADIUS',
["authmodules/FreeRADIUS/LICENSE",
"authmodules/FreeRADIUS/privacyidea_radius.pm"]),
('lib/privacyidea/authmodules/OTRS',
["authmodules/OTRS/privacyIDEA.pm"]),
('lib/privacyidea/migrations',
["migrations/alembic.ini",
"migrations/env.py",
"migrations/README",
"migrations/script.py.mako"]),
('lib/privacyidea/migrations/versions',
["migrations/versions/2551ee982544_.py",
"migrations/versions/4f32a4e1bf33_.py",
"migrations/versions/2181294eed0b_.py",
"migrations/versions/e5cbeb7c177_.py",
"migrations/versions/4d9178fa8336_.py",
"migrations/versions/20969b4cbf06_.py"])
],
classifiers=["Framework :: Flask",
"License :: OSI Approved :: "
"GNU Affero General Public License v3",
"Programming Language :: Python",
"Development Status :: 5 - Production/Stable",
"Topic :: Internet",
"Topic :: Security",
"Topic :: System ::"
" Systems Administration :: Authentication/Directory"
],
#message_extractors={'privacyidea': [
# ('**.py', 'python', None),
# ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
zip_safe=False,
long_description=get_file_contents('README.md')
)
| woddx/privacyidea | setup.py | Python | agpl-3.0 | 5,648 |
# -*- coding: iso-8859-15 -*-
from xml.etree.ElementTree import *
from descriptionparserxml import *
from descriptionparserflatfile import *
class DescriptionParserFactory:
@classmethod
def getParser(self, descParseInstruction):
fp = open(descParseInstruction, 'r')
tree = fromstring(fp.read())
fp.close()
del fp
grammarNode = tree.find('GameGrammar')
del tree
if(grammarNode == None):
print "no valid parserConfig"
return None
attributes = grammarNode.attrib
parserType = attributes.get('type')
del attributes
if(parserType == 'multiline'):
return DescriptionParserFlatFile(grammarNode)
elif(parserType == 'xml'):
return DescriptionParserXml(grammarNode)
else:
print "Unknown parser: " +parserType
return None
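# Illustrative usage sketch (not part of the original module); the config file
# name is a placeholder assumption and the downstream parser API is elided:
#
#   parser = DescriptionParserFactory.getParser('parserConfig.xml')
#   if parser is None:
#       # no <GameGrammar> node or an unknown parser type was configured
#       pass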
| azumimuo/family-xbmc-addon | script.games.rom.collection.browser/resources/lib/pyscraper/descriptionparserfactory.py | Python | gpl-2.0 | 793 |
# The absolute import feature is required so that we get the root celery
# module rather than `amo.celery`.
from __future__ import absolute_import
from collections import namedtuple
from inspect import isclass
from django.utils.translation import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
action_class = None
class CREATE_ADDON(_LOG):
id = 1
action_class = 'add'
format = _(u'{addon} was created.')
keep = True
class EDIT_PROPERTIES(_LOG):
""" Expects: addon """
id = 2
action_class = 'edit'
format = _(u'{addon} properties edited.')
class EDIT_DESCRIPTIONS(_LOG):
id = 3
action_class = 'edit'
format = _(u'{addon} description edited.')
class EDIT_CATEGORIES(_LOG):
id = 4
action_class = 'edit'
format = _(u'Categories edited for {addon}.')
class ADD_USER_WITH_ROLE(_LOG):
id = 5
action_class = 'add'
format = _(u'{0.name} ({1}) added to {addon}.')
keep = True
class REMOVE_USER_WITH_ROLE(_LOG):
id = 6
action_class = 'delete'
# L10n: {0} is the user being removed, {1} is their role.
format = _(u'{0.name} ({1}) removed from {addon}.')
keep = True
class EDIT_CONTRIBUTIONS(_LOG):
id = 7
action_class = 'edit'
format = _(u'Contributions for {addon}.')
class USER_DISABLE(_LOG):
id = 8
format = _(u'{addon} disabled.')
keep = True
class USER_ENABLE(_LOG):
id = 9
format = _(u'{addon} enabled.')
keep = True
class CHANGE_STATUS(_LOG):
id = 12
# L10n: {status} is the status
format = _(u'{addon} status changed to {status}.')
keep = True
class ADD_VERSION(_LOG):
id = 16
action_class = 'add'
format = _(u'{version} added to {addon}.')
keep = True
class EDIT_VERSION(_LOG):
id = 17
action_class = 'edit'
format = _(u'{version} edited for {addon}.')
class DELETE_VERSION(_LOG):
id = 18
action_class = 'delete'
# Note, {0} is a string not a version since the version is deleted.
# L10n: {0} is the version number
format = _(u'Version {0} deleted from {addon}.')
keep = True
class ADD_FILE_TO_VERSION(_LOG):
id = 19
action_class = 'add'
format = _(u'File {0.name} added to {version} of {addon}.')
class DELETE_FILE_FROM_VERSION(_LOG):
"""
Expecting: addon, filename, version
Because the file is being deleted, filename and version
    should be strings and not the objects.
"""
id = 20
action_class = 'delete'
format = _(u'File {0} deleted from {version} of {addon}.')
class APPROVE_VERSION(_LOG):
id = 21
action_class = 'approve'
format = _(u'{addon} {version} approved.')
short = _(u'Approved')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class PRELIMINARY_VERSION(_LOG):
id = 42
action_class = 'approve'
format = _(u'{addon} {version} given preliminary review.')
short = _(u'Preliminarily approved')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class REJECT_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 43
action_class = 'reject'
format = _(u'{addon} {version} rejected.')
short = _(u'Rejected')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class RETAIN_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 22
format = _(u'{addon} {version} retained.')
short = _(u'Retained')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class ESCALATE_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 23
format = _(u'{addon} {version} escalated.')
short = _(u'Super review requested')
keep = True
review_email_user = True
review_queue = True
hide_developer = True
class REQUEST_VERSION(_LOG):
# takes add-on, version, reviewtype
id = 24
format = _(u'{addon} {version} review requested.')
short = _(u'Review requested')
keep = True
review_email_user = True
review_queue = True
class REQUEST_INFORMATION(_LOG):
id = 44
format = _(u'{addon} {version} more information requested.')
short = _(u'More information requested')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class REQUEST_SUPER_REVIEW(_LOG):
id = 45
format = _(u'{addon} {version} super review requested.')
short = _(u'Super review requested')
keep = True
review_queue = True
sanitize = _(u'The addon has been flagged for Admin Review. It\'s still '
u'in our review queue, but it will need to be checked by one '
u'of our admin reviewers. The review might take longer than '
u'usual.')
reviewer_review_action = True
class COMMENT_VERSION(_LOG):
id = 49
format = _(u'Comment on {addon} {version}.')
short = _(u'Commented')
keep = True
review_queue = True
hide_developer = True
reviewer_review_action = True
class ADD_TAG(_LOG):
id = 25
action_class = 'tag'
format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
id = 26
action_class = 'tag'
format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
id = 27
action_class = 'collection'
format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
id = 28
action_class = 'collection'
format = _(u'{addon} removed from {collection}.')
class ADD_RATING(_LOG):
id = 29
action_class = 'review'
format = _(u'{rating} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
id = 31
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
id = 32
action_class = 'edit'
# L10n: {0} is a category name.
format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
id = 33
format = _(u'{addon} is now featured.')
keep = True
class REMOVE_RECOMMENDED(_LOG):
id = 34
format = _(u'{addon} is no longer featured.')
keep = True
class ADD_APPVERSION(_LOG):
id = 35
action_class = 'add'
# L10n: {0} is the application, {1} is the version of the app
format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
""" Expects: author.user, role, addon """
id = 36
# L10n: {0} is a user, {1} is their role
format = _(u'{0.name} role changed to {1} for {addon}.')
keep = True
class CHANGE_LICENSE(_LOG):
""" Expects: license, addon """
id = 37
action_class = 'edit'
format = _(u'{addon} is now licensed under {0}.')
class CHANGE_POLICY(_LOG):
id = 38
action_class = 'edit'
format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
id = 39
action_class = 'edit'
format = _(u'{addon} icon changed.')
class APPROVE_RATING(_LOG):
id = 40
action_class = 'approve'
format = _(u'{rating} for {addon} approved.')
reviewer_format = _(u'{user} approved {rating} for {addon}.')
keep = True
reviewer_event = True
class DELETE_RATING(_LOG):
"""Requires rating.id and add-on objects."""
id = 41
action_class = 'review'
format = _(u'Review {rating} for {addon} deleted.')
reviewer_format = _(u'{user} deleted {rating} for {addon}.')
keep = True
reviewer_event = True
class MAX_APPVERSION_UPDATED(_LOG):
id = 46
format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
id = 47
format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
id = 130
format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
id = 48
format = _(u'Password changed.')
class APPROVE_VERSION_WAITING(_LOG):
id = 53
action_class = 'approve'
format = _(u'{addon} {version} approved but waiting to be made public.')
short = _(u'Approved but waiting')
keep = True
review_email_user = True
review_queue = True
class USER_EDITED(_LOG):
id = 60
format = _(u'Account updated.')
class CUSTOM_TEXT(_LOG):
id = 98
format = '{0}'
class CUSTOM_HTML(_LOG):
id = 99
format = '{0}'
class OBJECT_ADDED(_LOG):
id = 100
format = _(u'Created: {0}.')
admin_event = True
class OBJECT_EDITED(_LOG):
id = 101
format = _(u'Edited field: {2} set to: {0}.')
admin_event = True
class OBJECT_DELETED(_LOG):
id = 102
format = _(u'Deleted: {1}.')
admin_event = True
class ADMIN_USER_EDITED(_LOG):
id = 103
format = _(u'User {user} edited, reason: {1}')
admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
id = 104
format = _(u'User {user} anonymized.')
admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
id = 105
format = _(u'User {user} restricted.')
admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
id = 106
format = _(u'Admin {0} viewed activity log for {user}.')
admin_event = True
class EDIT_RATING(_LOG):
id = 107
action_class = 'review'
format = _(u'{rating} for {addon} updated.')
class THEME_REVIEW(_LOG):
id = 108
action_class = 'review'
format = _(u'{addon} reviewed.')
class ADMIN_USER_BANNED(_LOG):
id = 109
format = _(u'User {user} banned.')
admin_event = True
class ADMIN_USER_PICTURE_DELETED(_LOG):
id = 110
format = _(u'User {user} picture deleted.')
admin_event = True
class GROUP_USER_ADDED(_LOG):
id = 120
action_class = 'access'
format = _(u'User {0.name} added to {group}.')
keep = True
admin_event = True
class GROUP_USER_REMOVED(_LOG):
id = 121
action_class = 'access'
format = _(u'User {0.name} removed from {group}.')
keep = True
admin_event = True
class ADDON_UNLISTED(_LOG):
id = 128
format = _(u'{addon} unlisted.')
keep = True
class BETA_SIGNED(_LOG):
id = 131
format = _(u'{file} was signed.')
keep = True
# Obsolete, we don't care about validation results on beta files.
class BETA_SIGNED_VALIDATION_FAILED(_LOG):
id = 132
format = _(u'{file} was signed.')
keep = True
class DELETE_ADDON(_LOG):
id = 133
action_class = 'delete'
# L10n: {0} is the add-on GUID.
format = _(u'Addon id {0} with GUID {1} has been deleted')
keep = True
class EXPERIMENT_SIGNED(_LOG):
id = 134
format = _(u'{file} was signed.')
keep = True
class UNLISTED_SIGNED(_LOG):
id = 135
format = _(u'{file} was signed.')
keep = True
# Obsolete, we don't care about validation results on unlisted files anymore.
class UNLISTED_SIGNED_VALIDATION_FAILED(_LOG):
id = 136
format = _(u'{file} was signed.')
keep = True
# Obsolete, we don't care about validation results on unlisted files anymore,
# and the distinction for sideloading add-ons is gone as well.
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED(_LOG):
id = 137
format = _(u'{file} was signed.')
keep = True
# Obsolete, we don't care about validation results on unlisted files anymore,
# and the distinction for sideloading add-ons is gone as well.
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED(_LOG):
id = 138
format = _(u'{file} was signed.')
keep = True
class PRELIMINARY_ADDON_MIGRATED(_LOG):
id = 139
format = _(u'{addon} migrated from preliminary.')
keep = True
review_queue = True
class DEVELOPER_REPLY_VERSION(_LOG):
id = 140
format = _(u'Reply by developer on {addon} {version}.')
short = _(u'Developer Reply')
keep = True
review_queue = True
class REVIEWER_REPLY_VERSION(_LOG):
id = 141
format = _(u'Reply by reviewer on {addon} {version}.')
short = _(u'Reviewer Reply')
keep = True
review_queue = True
class APPROVAL_NOTES_CHANGED(_LOG):
id = 142
format = _(u'Approval notes changed for {addon} {version}.')
short = _(u'Approval notes changed')
keep = True
review_queue = True
class SOURCE_CODE_UPLOADED(_LOG):
id = 143
format = _(u'Source code uploaded for {addon} {version}.')
short = _(u'Source code uploaded')
keep = True
review_queue = True
class CONFIRM_AUTO_APPROVED(_LOG):
id = 144
format = _(u'Auto-Approval confirmed for {addon} {version}.')
short = _(u'Auto-Approval confirmed')
keep = True
reviewer_review_action = True
review_queue = True
hide_developer = True
class ENABLE_VERSION(_LOG):
id = 145
format = _(u'{addon} {version} re-enabled.')
class DISABLE_VERSION(_LOG):
id = 146
format = _(u'{addon} {version} disabled.')
class APPROVE_CONTENT(_LOG):
id = 147
format = _(u'{addon} {version} content approved.')
short = _(u'Content approved')
keep = True
reviewer_review_action = True
review_queue = True
hide_developer = True
class REJECT_CONTENT(_LOG):
id = 148
action_class = 'reject'
format = _(u'{addon} {version} content rejected.')
short = _(u'Content rejected')
keep = True
review_email_user = True
review_queue = True
reviewer_review_action = True
class ADMIN_ALTER_INFO_REQUEST(_LOG):
id = 149
format = _(u'{addon} information request altered or removed by admin.')
short = _(u'Information request altered')
keep = True
reviewer_review_action = True
review_queue = True
class DEVELOPER_CLEAR_INFO_REQUEST(_LOG):
id = 150
format = _(u'Information request cleared by developer on '
u'{addon} {version}.')
short = _(u'Information request removed')
keep = True
review_queue = True
class REQUEST_ADMIN_REVIEW_CODE(_LOG):
id = 151
format = _(u'{addon} {version} admin add-on-review requested.')
short = _(u'Admin add-on-review requested')
keep = True
review_queue = True
reviewer_review_action = True
class REQUEST_ADMIN_REVIEW_CONTENT(_LOG):
id = 152
format = _(u'{addon} {version} admin content-review requested.')
short = _(u'Admin content-review requested')
keep = True
review_queue = True
reviewer_review_action = True
class REQUEST_ADMIN_REVIEW_THEME(_LOG):
id = 153
format = _(u'{addon} {version} admin theme-review requested.')
short = _(u'Admin theme-review requested')
keep = True
review_queue = True
reviewer_review_action = True
class CREATE_STATICTHEME_FROM_PERSONA(_LOG):
id = 154
action_class = 'add'
format = _(u'{addon} was migrated from a lightweight theme.')
keep = True
class ADMIN_API_KEY_RESET(_LOG):
id = 155
format = _(u'User {user} api key reset.')
admin_event = True
LOGS = [x for x in vars().values()
if isclass(x) and issubclass(x, _LOG) and x != _LOG]
# Make sure there's no duplicate IDs.
assert len(LOGS) == len(set(log.id for log in LOGS))
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = namedtuple('LogTuple', [l.__name__ for l in LOGS])(*[l for l in LOGS])
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_RATING_MODERATION = [l.id for l in LOGS if hasattr(l, 'reviewer_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]
LOG_REVIEWER_REVIEW_ACTION = [
l.id for l in LOGS if hasattr(l, 'reviewer_review_action')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
if (getattr(l, 'hide_developer', False) or
l.id in LOG_ADMINS)]
# Review Queue logs to show to developer (i.e. hiding admin/private)
LOG_REVIEW_QUEUE_DEVELOPER = list(set(LOG_REVIEW_QUEUE) -
set(LOG_HIDE_DEVELOPER))
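# Illustrative lookup sketch (not part of the original module): an action can
# be referenced through the LOG namedtuple or resolved back from its numeric
# id, and the derived lists group actions by their flags, e.g.
#
#   LOG.CREATE_ADDON is LOG_BY_ID[1]      # True
#   LOG.CREATE_ADDON.id in LOG_KEEP       # True, CREATE_ADDON sets keep = True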
| psiinon/addons-server | src/olympia/constants/activity.py | Python | bsd-3-clause | 16,067 |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba Python tests."""
import ldb
import os
import samba
from samba.tests import TestCase, TestCaseInTempDir
class SubstituteVarTestCase(TestCase):
def test_empty(self):
self.assertEquals("", samba.substitute_var("", {}))
def test_nothing(self):
self.assertEquals("foo bar",
samba.substitute_var("foo bar", {"bar": "bla"}))
def test_replace(self):
self.assertEquals("foo bla",
samba.substitute_var("foo ${bar}", {"bar": "bla"}))
def test_broken(self):
self.assertEquals("foo ${bdkjfhsdkfh sdkfh ",
samba.substitute_var("foo ${bdkjfhsdkfh sdkfh ", {"bar": "bla"}))
def test_unknown_var(self):
self.assertEquals("foo ${bla} gsff",
samba.substitute_var("foo ${bla} gsff", {"bar": "bla"}))
def test_check_all_substituted(self):
samba.check_all_substituted("nothing to see here")
self.assertRaises(Exception, samba.check_all_substituted,
"Not subsituted: ${FOOBAR}")
class LdbExtensionTests(TestCaseInTempDir):
def test_searchone(self):
path = self.tempdir + "/searchone.ldb"
l = samba.Ldb(path)
try:
l.add({"dn": "foo=dc", "bar": "bla"})
self.assertEquals("bla",
l.searchone(basedn=ldb.Dn(l, "foo=dc"), attribute="bar"))
finally:
del l
os.unlink(path)
| wimberosa/samba | source4/scripting/python/samba/tests/core.py | Python | gpl-3.0 | 2,175 |
import array
import struct
import socket
from odict import OrderedDict as OD
class NLRI:
def __init__(self, afi, safi, val):
self.afi = afi
self.safi = safi
self.val = val
def encode(self):
return self.val
class vpnv4(NLRI):
def __init__(self, labels, rd, prefix):
self.labels = labels
self.rd = rd
self.prefix = prefix
def __repr__(self):
if self.labels:
l = ','.join([str(l) for l in self.labels])
else:
l = 'none'
return '<vpnv4 label %s rd %s prefix %s>' % (l, self.rd, self.prefix)
def __str__(self):
return '%s:%s' % (self.rd, self.prefix)
def __cmp__(self, other):
if isinstance(other, vpnv4):
return cmp(
(self.labels, self.rd, self.prefix),
(other.labels, other.rd, other.prefix),
)
return -1
def encode(self):
plen = 0
v = ''
labels = self.labels[:]
if not labels:
return '\0'
labels = [l<<4 for l in labels]
labels[-1] |= 1
for l in labels:
lo = l & 0xff
hi = (l & 0xffff00) >> 8
v += struct.pack('>HB', hi, lo)
plen += 24
l, r = self.rd.split(':')
if '.' in l:
ip = socket.inet_aton(l)
rd = struct.pack('!H4sH', 1, ip, int(r))
else:
rd = struct.pack('!HHI', 0, int(l), int(r))
v += rd
plen += 64
ip, masklen = self.prefix.split('/')
ip = socket.inet_aton(ip)
masklen = int(masklen)
plen += masklen
if masklen > 24:
v += ip
elif masklen > 16:
v += ip[:3]
elif masklen > 8:
v += ip[:2]
elif masklen > 0:
v += ip[:1]
else:
pass
return struct.pack('B', plen) + v
@classmethod
def from_bytes(cls, plen, val):
if plen==0:
            # zero-length NLRI (plen == 0): nothing to decode, return an empty default
return cls([], '0:0', '0.0.0.0/0')
idx = 0
# plen is the length, in bits, of all the MPLS labels, plus the 8-byte RD, plus the IP prefix
labels = []
while True:
ls, = struct.unpack_from('3s', val, idx)
idx += 3
plen -= 24
if ls=='\x80\x00\x00':
# special null label for vpnv4 withdraws
labels = None
break
label, = struct.unpack_from('!I', '\x00'+ls)
bottom = label & 1
labels.append(label >> 4)
if bottom:
break
rdtype, rd = struct.unpack_from('!H6s', val, idx)
if rdtype==1:
rdip, num = struct.unpack('!4sH', rd)
rdip = socket.inet_ntoa(rdip)
rd = '%s:%s' % (rdip, num)
else:
num1, num2 = struct.unpack('!HI', rd)
rd = '%s:%s' % (num1, num2)
idx += 8
plen -= 64
ipl = pb(plen)
ip = val[idx:idx+ipl]
idx += ipl
prefix = pip(ip, plen)
return cls(labels, rd, prefix)
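# Illustrative note (not part of the original module): the route distinguisher
# string is accepted either as "ASN:nn" (encoded as type 0) or as "a.b.c.d:nn"
# (encoded as type 1), e.g.
#
#   vpnv4([100], '65000:1', '10.1.1.0/24')
#   vpnv4([100], '192.0.2.1:1', '10.1.1.0/24')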
class ipv4(NLRI):
def __init__(self, prefix):
self.prefix = prefix
def __cmp__(self, other):
if isinstance(other, ipv4):
aip, alen = self.prefix.split('/')
alen = int(alen)
aip = socket.inet_aton(aip)
bip, blen = other.prefix.split('/')
blen = int(blen)
bip = socket.inet_aton(bip)
return cmp((aip,alen),(bip,blen))
return -1
def encode(self):
plen = 0
v = ''
ip, masklen = self.prefix.split('/')
ip = socket.inet_aton(ip)
masklen = int(masklen)
plen += masklen
if masklen > 24:
v += ip
elif masklen > 16:
v += ip[:3]
elif masklen > 8:
v += ip[:2]
elif masklen > 0:
v += ip[:1]
else:
pass
return struct.pack('B', plen) + v
def __repr__(self):
return '<ipv4 %s>' % (self.prefix,)
def __str__(self):
return self.prefix
@classmethod
def from_bytes(cls, plen, val):
return cls(pip(val, plen))
def pb(masklen):
if masklen > 24:
return 4
elif masklen > 16:
return 3
elif masklen > 8:
return 2
elif masklen > 0:
return 1
return 0
def pip(pi, masklen):
pi += '\x00\x00\x00\x00'
return '%s/%s' % (socket.inet_ntoa(pi[:4]), masklen)
def parse(bytes, afi=1, safi=0):
rv = []
if afi==1 and safi==128:
klass = vpnv4
else:
klass = ipv4
idx = 0
while idx < len(bytes):
plen, = struct.unpack_from('B', bytes, idx)
idx += 1
nbytes, rest = divmod(plen, 8)
if rest:
nbytes += 1
val = bytes[idx:idx+nbytes]
idx += nbytes
rv.append(klass.from_bytes(plen, val))
return rv
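# Illustrative round-trip sketch (not part of the original module); the values
# follow directly from encode() and parse() above:
#
#   ipv4('10.0.0.0/8').encode()          # -> '\x08\n'
#   parse(ipv4('10.0.0.0/8').encode())   # -> [<ipv4 10.0.0.0/8>]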
| plajjan/pybgp | pybgp/nlri.py | Python | mit | 5,029 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"DeviceEnum",},
)
class DeviceEnum(proto.Message):
r"""Container for enumeration of Google Ads devices available for
targeting.
"""
class Device(proto.Enum):
r"""Enumerates Google Ads devices available for targeting."""
UNSPECIFIED = 0
UNKNOWN = 1
MOBILE = 2
TABLET = 3
DESKTOP = 4
CONNECTED_TV = 6
OTHER = 5
__all__ = tuple(sorted(__protobuf__.manifest))
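# Illustrative usage sketch (not part of the generated module): members are
# referenced through the nested enum, e.g.
#
#   DeviceEnum.Device.MOBILE   # enum member with value 2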
| googleads/google-ads-python | google/ads/googleads/v9/enums/types/device.py | Python | apache-2.0 | 1,200 |
# dialog.py -- Tkinter interface to the tk_dialog script.
from tkinter import *
from tkinter import _cnfmerge
if TkVersion <= 3.6:
DIALOG_ICON = 'warning'
else:
DIALOG_ICON = 'questhead'
class Dialog(Widget):
def __init__(self, master=None, cnf={}, **kw):
cnf = _cnfmerge((cnf, kw))
self.widgetName = '__dialog__'
Widget._setup(self, master, cnf)
self.num = self.tk.getint(
self.tk.call(
'tk_dialog', self._w,
cnf['title'], cnf['text'],
cnf['bitmap'], cnf['default'],
*cnf['strings']))
try: Widget.destroy(self)
except TclError: pass
def destroy(self): pass
def _test():
d = Dialog(None, {'title': 'File Modified',
'text':
'File "Python.h" has been modified'
' since the last time it was saved.'
' Do you want to save it before'
' exiting the application.',
'bitmap': DIALOG_ICON,
'default': 0,
'strings': ('Save File',
'Discard Changes',
'Return to Editor')})
print(d.num)
if __name__ == '__main__':
t = Button(None, {'text': 'Test',
'command': _test,
Pack: {}})
q = Button(None, {'text': 'Quit',
'command': t.quit,
Pack: {}})
t.mainloop()
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/tkinter/dialog.py | Python | gpl-3.0 | 1,568 |
from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
def test_del_contact_from_group(app):
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    # ensure at least one group exists
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="test"))
group = random.choice(orm.get_group_list()) # choose random group from list
if len(orm.get_contacts_in_group(Group(id=group.id))) == 0:
if len(orm.get_contacts_not_in_group(Group(id=group.id))) == 0:
app.contact.create(Contact(firstname="Ivan"))
contact_not_in_group = random.choice(orm.get_contacts_not_in_group(Group(id=group.id)))
app.contact.add_contact_to_group_by_id(contact_not_in_group.id, group.id)
old_contacts_in_group = orm.get_contacts_in_group(Group(id=group.id))
contact_in_group = random.choice(old_contacts_in_group) # choose random contact from list
app.contact.delete_contact_from_group_by_id(contact_in_group.id, group.id)
new_contacts_in_group = orm.get_contacts_in_group(Group(id=group.id))
old_contacts_in_group.remove(contact_in_group)
assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contacts_in_group, key=Contact.id_or_max)
| Lana-Pa/Python-training | test/test_delete_contact_from_group.py | Python | apache-2.0 | 1,311 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import mock
import operator
import os
import unittest
import six.moves.cPickle as pickle
from array import array
from collections import defaultdict
from math import ceil
from tempfile import mkdtemp
from shutil import rmtree
import warnings
from six.moves import range
from swift.common import exceptions
from swift.common import ring
from swift.common.ring.builder import MAX_BALANCE, RingValidationWarning
class TestRingBuilder(unittest.TestCase):
def setUp(self):
self.testdir = mkdtemp()
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _partition_counts(self, builder, key='id'):
"""
Returns a dictionary mapping the given device key to (number of
        partitions assigned to that key).
"""
counts = defaultdict(int)
for part2dev_id in builder._replica2part2dev:
for dev_id in part2dev_id:
counts[builder.devs[dev_id][key]] += 1
return counts
def _get_population_by_region(self, builder):
"""
Returns a dictionary mapping region to number of partitions in that
region.
"""
return self._partition_counts(builder, key='region')
def test_init(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEquals(rb.part_power, 8)
self.assertEquals(rb.replicas, 3)
self.assertEquals(rb.min_part_hours, 1)
self.assertEquals(rb.parts, 2 ** 8)
self.assertEquals(rb.devs, [])
self.assertEquals(rb.devs_changed, False)
self.assertEquals(rb.version, 0)
def test_overlarge_part_powers(self):
ring.RingBuilder(32, 3, 1) # passes by not crashing
self.assertRaises(ValueError, ring.RingBuilder, 33, 3, 1)
def test_insufficient_replicas(self):
ring.RingBuilder(8, 1.0, 1) # passes by not crashing
self.assertRaises(ValueError, ring.RingBuilder, 8, 0.999, 1)
def test_negative_min_part_hours(self):
ring.RingBuilder(8, 3, 0) # passes by not crashing
self.assertRaises(ValueError, ring.RingBuilder, 8, 3, -1)
def test_deepcopy(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdb1'})
# more devices in zone #1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdc1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdd1'})
rb.rebalance()
rb_copy = copy.deepcopy(rb)
self.assertEqual(rb.to_dict(), rb_copy.to_dict())
self.assertTrue(rb.devs is not rb_copy.devs)
self.assertTrue(rb._replica2part2dev is not rb_copy._replica2part2dev)
self.assertTrue(rb._last_part_moves is not rb_copy._last_part_moves)
self.assertTrue(rb._remove_devs is not rb_copy._remove_devs)
self.assertTrue(rb._dispersion_graph is not rb_copy._dispersion_graph)
def test_get_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.remove_dev(1)
rb.rebalance()
r = rb.get_ring()
self.assertTrue(isinstance(r, ring.RingData))
r2 = rb.get_ring()
self.assertTrue(r is r2)
rb.rebalance()
r3 = rb.get_ring()
self.assertTrue(r3 is not r2)
r4 = rb.get_ring()
self.assertTrue(r3 is r4)
def test_rebalance_with_seed(self):
devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)]
ring_builders = []
for n in range(3):
rb = ring.RingBuilder(8, 3, 1)
idx = 0
for zone, port in devs:
for d in ('sda1', 'sdb1'):
rb.add_dev({'id': idx, 'region': 0, 'zone': zone,
'ip': '127.0.0.1', 'port': port,
'device': d, 'weight': 1})
idx += 1
ring_builders.append(rb)
rb0 = ring_builders[0]
rb1 = ring_builders[1]
rb2 = ring_builders[2]
r0 = rb0.get_ring()
self.assertTrue(rb0.get_ring() is r0)
rb0.rebalance() # NO SEED
rb1.rebalance(seed=10)
rb2.rebalance(seed=10)
r1 = rb1.get_ring()
r2 = rb2.get_ring()
self.assertFalse(rb0.get_ring() is r0)
self.assertNotEquals(r0.to_dict(), r1.to_dict())
self.assertEquals(r1.to_dict(), r2.to_dict())
def test_rebalance_part_on_deleted_other_part_on_drained(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.rebalance(seed=1)
# We want a partition where 1 replica is on a removed device, 1
# replica is on a 0-weight device, and 1 on a normal device. To
# guarantee we have one, we see where partition 123 is, then
# manipulate its devices accordingly.
zero_weight_dev_id = rb._replica2part2dev[1][123]
delete_dev_id = rb._replica2part2dev[2][123]
rb.set_dev_weight(zero_weight_dev_id, 0.0)
rb.remove_dev(delete_dev_id)
rb.rebalance()
def test_set_replicas(self):
rb = ring.RingBuilder(8, 3.2, 1)
rb.devs_changed = False
rb.set_replicas(3.25)
self.assertTrue(rb.devs_changed)
rb.devs_changed = False
rb.set_replicas(3.2500001)
self.assertFalse(rb.devs_changed)
def test_add_dev(self):
rb = ring.RingBuilder(8, 3, 1)
dev = {'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000}
dev_id = rb.add_dev(dev)
self.assertRaises(exceptions.DuplicateDeviceError, rb.add_dev, dev)
self.assertEqual(dev_id, 0)
rb = ring.RingBuilder(8, 3, 1)
# test add new dev with no id
dev_id = rb.add_dev({'zone': 0, 'region': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 6000})
self.assertEquals(rb.devs[0]['id'], 0)
self.assertEqual(dev_id, 0)
# test add another dev with no id
dev_id = rb.add_dev({'zone': 3, 'region': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 6000})
self.assertEquals(rb.devs[1]['id'], 1)
self.assertEqual(dev_id, 1)
def test_set_dev_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 128, 1: 128, 2: 256, 3: 256})
rb.set_dev_weight(0, 0.75)
rb.set_dev_weight(1, 0.25)
rb.pretend_min_part_hours_passed()
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 192, 1: 64, 2: 256, 3: 256})
def test_remove_dev(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 192, 1: 192, 2: 192, 3: 192})
rb.remove_dev(1)
rb.pretend_min_part_hours_passed()
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 256, 2: 256, 3: 256})
def test_remove_a_lot(self):
rb = ring.RingBuilder(3, 3, 1)
rb.add_dev({'id': 0, 'device': 'd0', 'ip': '10.0.0.1',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.add_dev({'id': 1, 'device': 'd1', 'ip': '10.0.0.2',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 2})
rb.add_dev({'id': 2, 'device': 'd2', 'ip': '10.0.0.3',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 3})
rb.add_dev({'id': 3, 'device': 'd3', 'ip': '10.0.0.1',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 1})
rb.add_dev({'id': 4, 'device': 'd4', 'ip': '10.0.0.2',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 2})
rb.add_dev({'id': 5, 'device': 'd5', 'ip': '10.0.0.3',
'port': 6002, 'weight': 1000.0, 'region': 0, 'zone': 3})
rb.rebalance()
rb.validate()
# this has to put more than 1/3 of the partitions in the
# cluster on removed devices in order to ensure that at least
# one partition has multiple replicas that need to move.
#
# (for an N-replica ring, it's more than 1/N of the
# partitions, of course)
rb.remove_dev(3)
rb.remove_dev(4)
rb.remove_dev(5)
rb.rebalance()
rb.validate()
def test_shuffled_gather(self):
if self._shuffled_gather_helper() and \
self._shuffled_gather_helper():
raise AssertionError('It is highly likely the ring is no '
'longer shuffling the set of partitions '
'to reassign on a rebalance.')
def _shuffled_gather_helper(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.rebalance()
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
parts = rb._gather_reassign_parts()
max_run = 0
run = 0
last_part = 0
for part, _ in parts:
if part > last_part:
run += 1
else:
if run > max_run:
max_run = run
run = 0
last_part = part
if run > max_run:
max_run = run
return max_run > len(parts) / 2
def test_initial_balance(self):
# 2 boxes, 2 drives each in zone 1
# 1 box, 2 drives in zone 2
#
# This is balanceable, but there used to be some nondeterminism in
# rebalance() that would sometimes give you an imbalanced ring.
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 1, 'weight': 4000.0,
'ip': '10.1.1.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0,
'ip': '10.1.1.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'region': 1, 'zone': 2, 'weight': 4000.0,
'ip': '10.1.1.3', 'port': 10000, 'device': 'sdb'})
_, balance = rb.rebalance(seed=2)
# maybe not *perfect*, but should be close
self.assertTrue(balance <= 1)
def test_multitier_partial(self):
# Multitier test, nothing full
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 2, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 3, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['region'][dev['region']] += 1
counts['zone'][dev['zone']] += 1
if any(c > 1 for c in counts['region'].values()):
raise AssertionError(
"Partition %d not evenly region-distributed (got %r)" %
(part, counts['region']))
if any(c > 1 for c in counts['zone'].values()):
raise AssertionError(
"Partition %d not evenly zone-distributed (got %r)" %
(part, counts['zone']))
# Multitier test, zones full, nodes not full
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdi'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
if counts['zone'] != {0: 2, 1: 2, 2: 2}:
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count > 1:
raise AssertionError(
"Partition %d is on device %d more than once (%r)" %
(part, dev_id, counts['dev_id']))
def test_multitier_full(self):
# Multitier test, #replicas == #devs
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
if counts['zone'] != {0: 2, 1: 2, 2: 2}:
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count != 1:
raise AssertionError(
"Partition %d is on device %d %d times, not 1 (%r)" %
(part, dev_id, replica_count, counts['dev_id']))
def test_multitier_overfull(self):
# Multitier test, #replicas > #zones (to prove even distribution)
rb = ring.RingBuilder(8, 8, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = defaultdict(lambda: defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
self.assertEquals(8, sum(counts['zone'].values()))
for zone, replica_count in counts['zone'].items():
if replica_count not in (2, 3):
raise AssertionError(
"Partition %d not evenly distributed (got %r)" %
(part, counts['zone']))
for dev_id, replica_count in counts['dev_id'].items():
if replica_count not in (1, 2):
raise AssertionError(
"Partition %d is on device %d %d times, "
"not 1 or 2 (%r)" %
(part, dev_id, replica_count, counts['dev_id']))
def test_multitier_expansion_more_devices(self):
rb = ring.RingBuilder(8, 6, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.rebalance()
rb.validate()
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
for _ in range(5):
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
for part in range(rb.parts):
counts = dict(zone=defaultdict(int),
dev_id=defaultdict(int))
for replica in range(rb.replicas):
dev = rb.devs[rb._replica2part2dev[replica][part]]
counts['zone'][dev['zone']] += 1
counts['dev_id'][dev['id']] += 1
self.assertEquals({0: 2, 1: 2, 2: 2}, dict(counts['zone']))
# each part is assigned once to six unique devices
self.assertEqual((counts['dev_id'].values()), [1] * 6)
self.assertEqual(len(set(counts['dev_id'].keys())), 6)
def test_multitier_part_moves_with_0_min_part_hours(self):
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.rebalance()
rb.validate()
# min_part_hours is 0, so we're clear to move 2 replicas to
# new devs
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
devs = set()
for replica in range(rb.replicas):
devs.add(rb._replica2part2dev[replica][part])
if len(devs) != 3:
raise AssertionError(
"Partition %d not on 3 devs (got %r)" % (part, devs))
def test_multitier_part_moves_with_positive_min_part_hours(self):
rb = ring.RingBuilder(8, 3, 99)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.rebalance()
rb.validate()
# min_part_hours is >0, so we'll only be able to move 1
# replica to a new home
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
for part in range(rb.parts):
devs = set()
for replica in range(rb.replicas):
devs.add(rb._replica2part2dev[replica][part])
if not any(rb.devs[dev_id]['zone'] == 1 for dev_id in devs):
raise AssertionError(
"Partition %d did not move (got %r)" % (part, devs))
def test_multitier_dont_move_too_many_replicas(self):
rb = ring.RingBuilder(8, 3, 0)
# there'll be at least one replica in z0 and z1
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.rebalance()
rb.validate()
# only 1 replica should move
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'})
rb.rebalance()
rb.validate()
for part in range(rb.parts):
zones = set()
for replica in range(rb.replicas):
zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone'])
if len(zones) != 3:
raise AssertionError(
"Partition %d not in 3 zones (got %r)" % (part, zones))
if 0 not in zones or 1 not in zones:
raise AssertionError(
"Partition %d not in zones 0 and 1 (got %r)" %
(part, zones))
def test_rerebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 256, 1: 256, 2: 256})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 192, 1: 192, 2: 192, 3: 192})
rb.set_dev_weight(3, 100)
rb.rebalance()
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts[3], 256)
def test_add_rebalance_add_rebalance_delete_rebalance(self):
# Test for https://bugs.launchpad.net/swift/+bug/845952
# min_part of 0 to allow for rapid rebalancing
rb = ring.RingBuilder(8, 3, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.remove_dev(1)
# well now we have only one device in z0
rb.set_overload(0.5)
rb.rebalance()
rb.validate()
def test_remove_last_partition_from_zero_weight(self):
rb = ring.RingBuilder(4, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 1, 'weight': 1.0,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 2, 'weight': 1.0,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 1.0,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 0.5,
'ip': '127.0.0.3', 'port': 10001, 'device': 'zero'})
zero_weight_dev = 3
rb.rebalance()
# We want at least one partition with replicas only in zone 2 and 3
# due to device weights. It would *like* to spread out into zone 1,
# but can't, due to device weight.
#
# Also, we want such a partition to have a replica on device 3,
# which we will then reduce to zero weight. This should cause the
# removal of the replica from device 3.
#
# Getting this to happen by chance is hard, so let's just set up a
# builder so that it's in the state we want. This is a synthetic
# example; while the bug has happened on a real cluster, that
# builder file had a part_power of 16, so its contents are much too
# big to include here.
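        # Each array('H', ...) below maps partition index -> device id for one
        # replica of that partition.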
rb._replica2part2dev = [
# these are the relevant ones
# | | |
# v v v
array('H', [2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2]),
array('H', [1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4]),
array('H', [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 5, 6, 2, 5, 6])]
rb.set_dev_weight(zero_weight_dev, 0.0)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=1)
node_counts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
node_counts[dev_id] += 1
self.assertEqual(node_counts[zero_weight_dev], 0)
# it's as balanced as it gets, so nothing moves anymore
rb.pretend_min_part_hours_passed()
parts_moved, _balance = rb.rebalance(seed=1)
self.assertEqual(parts_moved, 0)
def test_region_fullness_with_balanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 2, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 2, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10006, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 3, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10007, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 3, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10008, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region,
{0: 192, 1: 192, 2: 192, 3: 192})
def test_region_fullness_with_unbalanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 2,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region, {0: 512, 1: 256})
def test_adding_region_slowly_with_unbalanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 0.5,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'})
rb.rebalance(seed=2)
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 0.25,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 0.25,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
changed_parts, _balance = rb.rebalance(seed=2)
# there's not enough room in r1 for every partition to have a replica
        # in it, so only 86 assignments occur in r1 (that's ~1/9 of the total,
        # since r1 has 1/9 of the weight).
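        # (r1 weight 0.5 vs. 4.0 in r0: 0.5 / 4.5 = 1/9, and 768 / 9 ~= 85.3 -> 86)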
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region, {0: 682, 1: 86})
# only 86 parts *should* move (to the new region) but randomly some
# parts will flop around devices in the original region too
self.assertEqual(90, changed_parts)
# and since there's not enough room, subsequent rebalances will not
# cause additional assignments to r1
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region, {0: 682, 1: 86})
# after you add more weight, more partition assignments move
rb.set_dev_weight(2, 0.5)
rb.set_dev_weight(3, 0.5)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region, {0: 614, 1: 154})
rb.set_dev_weight(2, 1.0)
rb.set_dev_weight(3, 1.0)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=2)
rb.validate()
population_by_region = self._get_population_by_region(rb)
self.assertEquals(population_by_region, {0: 512, 1: 256})
def test_avoid_tier_change_new_region(self):
rb = ring.RingBuilder(8, 3, 1)
for i in range(5):
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': i, 'device': 'sda1'})
rb.rebalance(seed=2)
# Add a new device in new region to a balanced ring
rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 0,
'ip': '127.0.0.5', 'port': 10000, 'device': 'sda1'})
# Increase the weight of region 1 slowly
moved_partitions = []
for weight in range(0, 101, 10):
rb.set_dev_weight(5, weight)
rb.pretend_min_part_hours_passed()
changed_parts, _balance = rb.rebalance(seed=2)
rb.validate()
moved_partitions.append(changed_parts)
# Ensure that the second region has enough partitions
# Otherwise there will be replicas at risk
min_parts_for_r1 = ceil(weight / (500.0 + weight) * 768)
parts_for_r1 = self._get_population_by_region(rb).get(1, 0)
self.assertEqual(min_parts_for_r1, parts_for_r1)
# Number of partitions moved on each rebalance
# 10/510 * 768 ~ 15.06 -> move at least 15 partitions in first step
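        # ... and by the last step r1 holds 100/600 * 768 = 128 partitions in total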
ref = [0, 17, 16, 17, 13, 15, 13, 12, 11, 13, 13]
self.assertEqual(ref, moved_partitions)
def test_set_replicas_increase(self):
rb = ring.RingBuilder(8, 2, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 2.1
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 25])
rb.replicas = 2.2
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 51])
def test_set_replicas_decrease(self):
rb = ring.RingBuilder(4, 5, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 4.9
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 16, 16, 14])
# cross a couple of integer thresholds (4 and 3)
rb.replicas = 2.5
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 8])
def test_fractional_replicas_rebalance(self):
rb = ring.RingBuilder(8, 2.5, 0)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance() # passes by not crashing
rb.validate() # also passes by not crashing
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 128])
def test_create_add_dev_add_replica_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.set_replicas(4)
rb.rebalance() # this would crash since parts_wanted was not set
rb.validate()
def test_rebalance_post_upgrade(self):
rb = ring.RingBuilder(8, 3, 1)
# 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
# which forces some rounding to happen.
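        # (3 * 2^8 = 768 partitions / 5 devices = 153.6 per device)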
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.rebalance()
rb.validate()
# Older versions of the ring builder code would round down when
# computing parts_wanted, while the new code rounds up. Make sure we
# can handle a ring built by the old method.
#
# This code mimics the old _set_parts_wanted.
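        # With five equal devices, int() turns the ideal 153.6 parts into 153
        # below, one fewer than the rounded-up value the new code would use.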
weight_of_one_part = rb.weight_of_one_part()
for dev in rb._iter_devs():
if not dev['weight']:
dev['parts_wanted'] = -rb.parts * rb.replicas
else:
dev['parts_wanted'] = (
int(weight_of_one_part * dev['weight']) -
dev['parts'])
rb.pretend_min_part_hours_passed()
rb.rebalance() # this crashes unless rebalance resets parts_wanted
rb.validate()
def test_add_replicas_then_rebalance_respects_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'})
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 96, 1: 96,
2: 32, 3: 32,
4: 96, 5: 96,
6: 32, 7: 32,
8: 96, 9: 96,
10: 32, 11: 32})
rb.replicas *= 2
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEquals(counts, {0: 192, 1: 192,
2: 64, 3: 64,
4: 192, 5: 192,
6: 64, 7: 64,
8: 192, 9: 192,
10: 64, 11: 64})
def test_overload(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'})
rb.rebalance(seed=12345)
rb.validate()
        # sanity check: with the default overload of 0, placement simply
        # follows the device weights
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 192)
self.assertEqual(part_counts[1], 192)
self.assertEqual(part_counts[2], 384)
        # With overload set to 10% (0.1), zones 0 and 1 each take roughly 10%
        # more than their fair share by weight (fair share 192 -> 212 parts).
rb.set_overload(0.1)
for _ in range(2):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 212)
self.assertEqual(part_counts[1], 212)
self.assertEqual(part_counts[2], 344)
        # With 50% overload allowed, zones 0 and 1 can take enough extra parts
        # (256 each vs. a fair share of 192) to disperse replicas fully.
rb.set_overload(0.5)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 256)
self.assertEqual(part_counts[1], 256)
self.assertEqual(part_counts[2], 256)
        # Zones 0 and 1 may now take up to 75% over their fair share, but the
        # placement algorithm only needs enough overload to spread replicas
        # evenly, so the counts stay at 256.
rb.set_overload(0.75)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 256)
self.assertEqual(part_counts[1], 256)
self.assertEqual(part_counts[2], 256)
def test_unoverload(self):
# Start off needing overload to balance, then add capacity until we
# don't need overload any more and see that things still balance.
# Overload doesn't prevent optimal balancing.
rb = ring.RingBuilder(8, 3, 1)
rb.set_overload(0.125)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 10, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 11, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.rebalance(seed=12345)
# sanity check: our overload is big enough to balance things
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 216)
self.assertEqual(part_counts['127.0.0.2'], 216)
self.assertEqual(part_counts['127.0.0.3'], 336)
# Add some weight: balance improves
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 1.5)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 236)
self.assertEqual(part_counts['127.0.0.2'], 236)
self.assertEqual(part_counts['127.0.0.3'], 296)
# Even out the weights: balance becomes perfect
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 2)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 256)
self.assertEqual(part_counts['127.0.0.2'], 256)
self.assertEqual(part_counts['127.0.0.3'], 256)
# Add a new server: balance stays optimal
rb.add_dev({'id': 12, 'region': 0, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 13, 'region': 0, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 14, 'region': 0, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 15, 'region': 0, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
# we're moving more than 1/3 of the replicas but fewer than 2/3, so
# we have to do this twice
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 192)
self.assertEqual(part_counts['127.0.0.2'], 192)
self.assertEqual(part_counts['127.0.0.3'], 192)
self.assertEqual(part_counts['127.0.0.4'], 192)
def test_overload_keeps_balanceable_things_balanced_initially(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_overload_keeps_balanceable_things_balanced_on_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=123)
part_counts = self._partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
# swap weights between 10.0.0.1 and 10.0.0.2
rb.set_dev_weight(0, 4)
rb.set_dev_weight(1, 4)
rb.set_dev_weight(2, 8)
        rb.set_dev_weight(3, 8)
rb.rebalance(seed=456)
part_counts = self._partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_server_per_port(self):
# 3 servers, 3 disks each, with each disk on its own port
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'})
rb.rebalance(seed=1)
rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'})
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=1)
poorly_dispersed = []
for part in range(rb.parts):
on_nodes = set()
for replica in range(rb.replicas):
dev_id = rb._replica2part2dev[replica][part]
on_nodes.add(rb.devs[dev_id]['ip'])
if len(on_nodes) < rb.replicas:
poorly_dispersed.append(part)
self.assertEqual(poorly_dispersed, [])
def test_load(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
real_pickle = pickle.load
fake_open = mock.mock_open()
io_error_not_found = IOError()
io_error_not_found.errno = errno.ENOENT
io_error_no_perm = IOError()
io_error_no_perm.errno = errno.EPERM
io_error_generic = IOError()
io_error_generic.errno = errno.EOPNOTSUPP
try:
# test a legit builder
fake_pickle = mock.Mock(return_value=rb)
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
self.assertEquals(fake_pickle.call_count, 1)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEquals(builder, rb)
fake_pickle.reset_mock()
# test old style builder
fake_pickle.return_value = rb.to_dict()
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEquals(builder.devs, rb.devs)
fake_pickle.reset_mock()
# test old devs but no meta
no_meta_builder = rb
for dev in no_meta_builder.devs:
                del dev['meta']
fake_pickle.return_value = no_meta_builder
pickle.load = fake_pickle
builder = ring.RingBuilder.load('fake.builder', open=fake_open)
fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
self.assertEquals(builder.devs, rb.devs)
# test an empty builder
fake_pickle.side_effect = EOFError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
# test a corrupted builder
fake_pickle.side_effect = pickle.UnpicklingError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
# test some error
fake_pickle.side_effect = AttributeError
pickle.load = fake_pickle
self.assertRaises(exceptions.UnPicklingError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
finally:
pickle.load = real_pickle
# test non existent builder file
fake_open.side_effect = io_error_not_found
self.assertRaises(exceptions.FileNotFoundError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
# test non accessible builder file
fake_open.side_effect = io_error_no_perm
self.assertRaises(exceptions.PermissionError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
        # test an error other than ENOENT and EPERM
fake_open.side_effect = io_error_generic
self.assertRaises(IOError,
ring.RingBuilder.load, 'fake.builder',
open=fake_open)
def test_save_load(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000,
'replication_ip': '127.0.0.0', 'replication_port': 10000,
'device': 'sda1', 'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001,
'replication_ip': '127.0.0.1', 'replication_port': 10001,
'device': 'sdb1', 'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002,
'replication_ip': '127.0.0.2', 'replication_port': 10002,
'device': 'sdc1', 'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003,
'replication_ip': '127.0.0.3', 'replication_port': 10003,
'device': 'sdd1', 'meta': ''}]
rb.set_overload(3.14159)
for d in devs:
rb.add_dev(d)
rb.rebalance()
builder_file = os.path.join(self.testdir, 'test_save.builder')
rb.save(builder_file)
loaded_rb = ring.RingBuilder.load(builder_file)
self.maxDiff = None
self.assertEquals(loaded_rb.to_dict(), rb.to_dict())
self.assertEquals(loaded_rb.overload, 3.14159)
@mock.patch('six.moves.builtins.open', autospec=True)
@mock.patch('swift.common.ring.builder.pickle.dump', autospec=True)
def test_save(self, mock_pickle_dump, mock_open):
mock_open.return_value = mock_fh = mock.MagicMock()
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
rb.save('some.builder')
mock_open.assert_called_once_with('some.builder', 'wb')
mock_pickle_dump.assert_called_once_with(rb.to_dict(),
mock_fh.__enter__(),
protocol=2)
def test_search_devs(self):
rb = ring.RingBuilder(8, 3, 1)
devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
'meta': 'meta0'},
{'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
'meta': 'meta1'},
{'id': 2, 'region': 1, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
'meta': 'meta2'},
{'id': 3, 'region': 1, 'zone': 3, 'weight': 2,
'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1',
'meta': 'meta3'},
{'id': 4, 'region': 2, 'zone': 4, 'weight': 1,
'ip': '127.0.0.4', 'port': 10004, 'device': 'sde1',
'meta': 'meta4', 'replication_ip': '127.0.0.10',
'replication_port': 20000},
{'id': 5, 'region': 2, 'zone': 5, 'weight': 2,
'ip': '127.0.0.5', 'port': 10005, 'device': 'sdf1',
'meta': 'meta5', 'replication_ip': '127.0.0.11',
'replication_port': 20001},
{'id': 6, 'region': 2, 'zone': 6, 'weight': 2,
'ip': '127.0.0.6', 'port': 10006, 'device': 'sdg1',
'meta': 'meta6', 'replication_ip': '127.0.0.12',
'replication_port': 20002}]
for d in devs:
rb.add_dev(d)
rb.rebalance()
res = rb.search_devs({'region': 0})
self.assertEquals(res, [devs[0], devs[1]])
res = rb.search_devs({'region': 1})
self.assertEquals(res, [devs[2], devs[3]])
res = rb.search_devs({'region': 1, 'zone': 2})
self.assertEquals(res, [devs[2]])
res = rb.search_devs({'id': 1})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'zone': 1})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'ip': '127.0.0.1'})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'ip': '127.0.0.1', 'port': 10001})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'port': 10001})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'replication_ip': '127.0.0.10'})
self.assertEquals(res, [devs[4]])
res = rb.search_devs({'replication_ip': '127.0.0.10',
'replication_port': 20000})
self.assertEquals(res, [devs[4]])
res = rb.search_devs({'replication_port': 20000})
self.assertEquals(res, [devs[4]])
res = rb.search_devs({'device': 'sdb1'})
self.assertEquals(res, [devs[1]])
res = rb.search_devs({'meta': 'meta1'})
self.assertEquals(res, [devs[1]])
def test_validate(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 12, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 13, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 14, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 15, 'region': 0, 'zone': 3, 'weight': 2,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
# Degenerate case: devices added but not rebalanced yet
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.rebalance()
counts = self._partition_counts(rb, key='zone')
self.assertEquals(counts, {0: 128, 1: 128, 2: 256, 3: 256})
dev_usage, worst = rb.validate()
self.assertTrue(dev_usage is None)
self.assertTrue(worst is None)
dev_usage, worst = rb.validate(stats=True)
self.assertEquals(list(dev_usage), [32, 32, 64, 64,
32, 32, 32, # added zone0
32, 32, 32, # added zone1
64, 64, 64, # added zone2
64, 64, 64, # added zone3
])
self.assertEquals(int(worst), 0)
rb.set_dev_weight(2, 0)
rb.rebalance()
self.assertEquals(rb.validate(stats=True)[1], MAX_BALANCE)
# Test not all partitions doubly accounted for
rb.devs[1]['parts'] -= 1
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.devs[1]['parts'] += 1
# Test non-numeric port
rb.devs[1]['port'] = '10001'
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb.devs[1]['port'] = 10001
# Test partition on nonexistent device
rb.pretend_min_part_hours_passed()
orig_dev_id = rb._replica2part2dev[0][0]
rb._replica2part2dev[0][0] = len(rb.devs)
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb._replica2part2dev[0][0] = orig_dev_id
# Tests that validate can handle 'holes' in .devs
rb.remove_dev(2)
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate(stats=True)
# Test partition assigned to a hole
if rb.devs[2]:
rb.remove_dev(2)
rb.pretend_min_part_hours_passed()
orig_dev_id = rb._replica2part2dev[0][0]
rb._replica2part2dev[0][0] = 2
self.assertRaises(exceptions.RingValidationError, rb.validate)
rb._replica2part2dev[0][0] = orig_dev_id
        # Validate that zero weight devices with no partitions don't count
        # toward the 'worst' value.
self.assertNotEquals(rb.validate(stats=True)[1], MAX_BALANCE)
rb.add_dev({'id': 16, 'region': 0, 'zone': 0, 'weight': 0,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.pretend_min_part_hours_passed()
rb.rebalance()
self.assertNotEquals(rb.validate(stats=True)[1], MAX_BALANCE)
def test_validate_partial_replica(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
self.assertEqual(len(rb._replica2part2dev[0]), 256)
self.assertEqual(len(rb._replica2part2dev[1]), 256)
self.assertEqual(len(rb._replica2part2dev[2]), 128)
# now swap partial replica part maps
rb._replica2part2dev[1], rb._replica2part2dev[2] = \
rb._replica2part2dev[2], rb._replica2part2dev[1]
self.assertRaises(exceptions.RingValidationError, rb.validate)
def test_validate_duplicate_part_assignment(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
# now double up a device assignment
rb._replica2part2dev[1][200] = rb._replica2part2dev[2][200]
class SubStringMatcher(object):
def __init__(self, substr):
self.substr = substr
def __eq__(self, other):
return self.substr in other
with warnings.catch_warnings():
# we're firing the warning twice in this test and resetwarnings
# doesn't work - https://bugs.python.org/issue4180
warnings.simplefilter('always')
# by default things will work, but log a warning
with mock.patch('sys.stderr') as mock_stderr:
rb.validate()
expected = SubStringMatcher(
'RingValidationWarning: The partition 200 has been assigned '
'to duplicate devices')
# ... but the warning is written to stderr
self.assertEqual(mock_stderr.method_calls,
[mock.call.write(expected)])
# if you make warnings errors it blows up
with warnings.catch_warnings():
warnings.filterwarnings('error')
self.assertRaises(RingValidationWarning, rb.validate)
def test_get_part_devices(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEqual(rb.get_part_devices(0), [])
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
part_devs = sorted(rb.get_part_devices(0),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[0], rb.devs[1], rb.devs[2]])
def test_get_part_devices_partial_replicas(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance(seed=9)
# note: partition 255 will only have 2 replicas
part_devs = sorted(rb.get_part_devices(255),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[0], rb.devs[1]])
def test_dispersion_with_zero_weight_devices(self):
rb = ring.RingBuilder(8, 3.0, 0)
# add two devices to a single server in a single zone
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
# and a zero weight device
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance()
self.assertEqual(rb.dispersion, 0.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 2): [0, 256, 0, 0],
})
def test_dispersion_with_zero_weight_devices_with_parts(self):
rb = ring.RingBuilder(8, 3.0, 1)
# add four devices to a single server in a single zone
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.rebalance(seed=1)
self.assertEqual(rb.dispersion, 0.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
})
        # now mark device 2 for decommissioning by dropping its weight to zero
rb.set_dev_weight(2, 0.0)
# we'll rebalance but can't move any parts
rb.rebalance(seed=1)
# zero weight tier has one copy of 1/4 part-replica
self.assertEqual(rb.dispersion, 75.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
(0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
})
# unlock the stuck parts
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=3)
self.assertEqual(rb.dispersion, 0.0)
self.assertEqual(rb._dispersion_graph, {
(0,): [0, 0, 0, 256],
(0, 0): [0, 0, 0, 256],
(0, 0, '127.0.0.1'): [0, 0, 0, 256],
(0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
(0, 0, '127.0.0.1', 3): [0, 256, 0, 0],
})
def test_effective_overload(self):
rb = ring.RingBuilder(8, 3, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
# z2
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
# this ring requires overload
required = rb.get_required_overload()
self.assertGreater(required, 0.1)
# and we'll use a little bit
rb.set_overload(0.1)
rb.rebalance(seed=7)
rb.validate()
        # but without enough overload we're not fully dispersed
self.assertGreater(rb.dispersion, 0)
# add the other dev to z2
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
        # but also fail another device in the same zone!
rb.remove_dev(6)
# we still require overload
required = rb.get_required_overload()
self.assertGreater(required, 0.1)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=7)
rb.validate()
        # ... and without enough overload we're still not fully dispersed
self.assertGreater(rb.dispersion, 0)
# ok, let's fix z2's weight for real
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
# ... technically, we no longer require overload
self.assertEqual(rb.get_required_overload(), 0.0)
# so let's rebalance w/o resetting min_part_hours
rb.rebalance(seed=7)
rb.validate()
# ok, we didn't quite disperse
self.assertGreater(rb.dispersion, 0)
# ... but let's unlock some parts
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=7)
rb.validate()
# ... and that got it!
self.assertEqual(rb.dispersion, 0)
def strawman_test(self):
"""
This test demonstrates a trivial failure of part-replica placement.
If you turn warnings into errors this will fail.
i.e.
export PYTHONWARNINGS=error:::swift.common.ring.builder
N.B. try not to get *too* hung up on doing something silly to make
this particular case pass w/o warnings - it's trivial to write up a
dozen more.
"""
rb = ring.RingBuilder(8, 3, 1)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
# z1
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
# z2
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 200,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
with warnings.catch_warnings(record=True) as w:
rb.rebalance(seed=7)
rb.validate()
self.assertEqual(len(w), 65)
class TestGetRequiredOverload(unittest.TestCase):
def assertApproximately(self, a, b, error=1e-6):
self.assertTrue(abs(a - b) < error,
"%f and %f differ by more than %f" % (a, b, error))
def test_none_needed(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
# 4 equal-weight devs and 3 replicas: this can be balanced without
# resorting to overload at all
self.assertApproximately(rb.get_required_overload(), 0)
# 3 equal-weight devs and 3 replicas: this can also be balanced
rb.remove_dev(3)
self.assertApproximately(rb.get_required_overload(), 0)
def test_small_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 4,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
# Zone 2 has 7/8 of the capacity of the other two zones, so an
# overload of 1/7 will allow things to balance out.
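        # (zones 0 and 1 have weight 8 each; 7 * (1 + 1/7) = 8 brings zone 2 level)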
self.assertApproximately(rb.get_required_overload(), 1.0 / 7)
def test_big_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
        # Zone 0 has weight 200, while zones 1, 2, and 3 together have only
        # 360. The small zones would need to go from 360 to 400 to balance
        # out zone 0, for an overload of 40/360 = 1/9.
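        # (zone 0 can hold at most one replica of each partition, so the other
        # zones together need an effective weight of 2 * 200 = 400)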
self.assertApproximately(rb.get_required_overload(), 1.0 / 9)
def test_enormous_zone(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1000,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1000,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # Zone 0 has weight 2000, while zones 1, 2, and 3 together have only
        # 360. The small zones would need to go from 360 to 4000 to balance
        # out zone 0, for an overload of 3640/360.
self.assertApproximately(rb.get_required_overload(), 3640.0 / 360)
def test_two_big_two_small(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 45,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 45,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 35,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 35,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # Zones 0 and 1 each have weight 200, while zones 2 and 3 together
        # have only 160. The small zones would need to go from 160 to 200 to
        # balance out the big zones, for an overload of 40/160 = 1/4.
self.assertApproximately(rb.get_required_overload(), 1.0 / 4)
def test_multiple_replicas_each(self):
rb = ring.RingBuilder(8, 7, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
# Zone 0 has more than 4/7 of the weight, so we'll need to bring
# zone 1 up to a total of 150 so it can take 3 replicas, so the
# overload should be 10/140.
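        # (7 replicas split 4/3 between the zones, so zone 1 needs an effective
        # weight of (3/4) * 200 = 150, and (150 - 140) / 140 = 10/140)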
self.assertApproximately(rb.get_required_overload(), 10.0 / 140)
if __name__ == '__main__':
unittest.main()
| hbhdytf/mac | test/unit/common/ring/test_builder.py | Python | apache-2.0 | 100,538 |
import unittest, os
from explainshell import manager, config, store, errors
class test_manager(unittest.TestCase):
def setUp(self):
store.store('explainshell_tests').drop(True)
def _getmanager(self, names, **kwargs):
l = []
for n in names:
l.append(os.path.join(config.MANPAGEDIR, '1', n))
m = manager.manager(config.MONGO_URI, 'explainshell_tests', l, **kwargs)
return m
def test(self):
m = self._getmanager(['tar.1.gz'])
m.run()
self.assertRaises(errors.ProgramDoesNotExist, m.store.findmanpage, 'tar.2')
mp = m.store.findmanpage('tar')[0]
self.assertEquals(mp.source, 'tar.1.gz')
self.assertEquals(mp.name, 'tar')
self.assertEquals(mp.aliases, [('tar', 10)])
self.assertEquals(len(mp.paragraphs), 154)
self.assertEquals(len(mp.options), 134)
self.assertTrue(mp.find_option('-v'))
self.assertEquals(mp.synopsis, 'The GNU version of the tar archiving utility')
self.assertTrue(mp.partialmatch) # fixer is working
self.assertEquals(m.run()[0], [])
def test_verify(self):
m = self._getmanager(['tar.1.gz'])
s = m.store
# invalid mapping
s.addmapping('foo', 'bar', 1)
ok, unreachable, notfound = s.verify()
self.assertFalse(ok)
self.assertEquals(list(notfound), ['bar'])
s.mapping.drop()
m.run()
ok, unreachable, notfound = s.verify()
self.assertTrue(ok)
s.mapping.drop()
ok, unreachable, notfound = s.verify()
self.assertEquals(list(unreachable), ['tar'])
s.addmapping('foo', 'bar', 1)
ok, unreachable, notfound = s.verify()
self.assertEquals(list(notfound), ['bar'])
self.assertEquals(list(unreachable), ['tar'])
def test_aliases(self):
m = self._getmanager(['lsbcpp.1.gz', 'tar.1.gz', 'bsdtar.1.gz', 'basket.1.gz'])
m.run()
mp = m.store.findmanpage('lsbcpp')
self.assertTrue('lsbcc' in m.store)
self.assertTrue('lsbc++' in m.store)
self.assertTrue('lsbcpp' in m.store)
self.assertEquals(len(mp), 1)
mp = m.store.findmanpage('tar')
self.assertEquals(len(mp), 2)
self.assertEquals(mp[0].source, 'tar.1.gz')
self.assertEquals(mp[1].source, 'bsdtar.1.gz')
def test_overwrite(self):
m = self._getmanager(['tar.1.gz'], overwrite=False)
self.assertEquals(len(list(m.store)), 0)
a, e = m.run()
self.assertTrue(a)
self.assertFalse(e)
self.assertEquals(m.store.mapping.count(), 1)
self.assertEquals(len(list(m.store)), 1)
a, e = m.run()
self.assertFalse(a)
self.assertTrue(e)
self.assertEquals(m.store.mapping.count(), 1)
self.assertEquals(len(list(m.store)), 1)
m = manager.manager(config.MONGO_URI, 'explainshell_tests', [os.path.join(config.MANPAGEDIR, '1', 'tar.1.gz')], overwrite=True)
a, e = m.run()
self.assertTrue(a)
self.assertFalse(e)
self.assertEquals(m.store.mapping.count(), 1)
self.assertEquals(len(list(m.store)), 1)
m.store.verify()
def test_multicommand(self):
m = self._getmanager(['git.1.gz', 'git-rebase.1.gz'])
m.run()
self.assertTrue(m.store.findmanpage('git')[0].multicommand)
self.assertTrue('git rebase' in m.store)
def test_edit(self):
m = self._getmanager(['tar.1.gz'], overwrite=False)
self.assertEquals(len(list(m.store)), 0)
a, e = m.run()
mp = a[0]
mp.synopsis = 'foo'
m.edit(mp)
mp = m.store.findmanpage('tar')[0]
self.assertEquals(mp.synopsis, 'foo')
self.assertTrue(m.store.verify())
mp.aliases.append(('foo', 1))
m.edit(mp)
self.assertTrue('foo' in m.store)
self.assertEquals(m.store.findmanpage('tar')[0].paragraphs,
m.store.findmanpage('foo')[0].paragraphs)
self.assertTrue(m.store.verify()[0])
def test_samename(self):
pages = [os.path.join(config.MANPAGEDIR, '1', 'node.1.gz'), os.path.join(config.MANPAGEDIR, '8', 'node.8.gz')]
m = manager.manager(config.MONGO_URI, 'explainshell_tests', pages)
a, e = m.run()
self.assertEquals(len(a), 2)
self.assertEquals(len(m.store.findmanpage('node')), 2)
mps = m.store.findmanpage('node.8')
self.assertEquals(len(mps), 2)
self.assertEquals(mps[0].section, '8')
def test_samename_samesection(self):
m = self._getmanager(['xargs.1.gz', 'xargs.1posix.gz'])
a, e = m.run()
self.assertEquals(len(a), 2)
self.assertEquals(len(m.store.findmanpage('xargs')), 2)
mps = m.store.findmanpage('xargs.1posix')
self.assertEquals(len(mps), 2)
self.assertEquals(mps[0].section, '1posix')
| idank/explainshell | tests/test-manager.py | Python | gpl-3.0 | 4,940 |
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic.list import ListView
from solrinspector import api as solr_api
from getenv import env
SOLR_URL = env("SOLR_URL", "http://localhost:8983/solr/")
# Create your views here.
def home(request):
"""docstring for home"""
field_list = ','.join( [ f.name for f in solr_api.list_fields(SOLR_URL)])
return HttpResponse("Solr Url: %s" % field_list)
class FieldListView(ListView):
template_name = "inspectsolr/field_list.html"
def get_queryset(self):
return solr_api.list_fields(SOLR_URL)
class FieldFacetListView(ListView):
template_name = "inspectsolr/facet_list.html"
def get_queryset(self):
return solr_api.get_facets(SOLR_URL, self.kwargs['facet_field'])
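
# Example URL wiring (illustrative only -- the project's urls.py is not part
# of this file): the facet view expects a 'facet_field' kwarg, e.g.
#     url(r'^fields/$', FieldListView.as_view(), name='field_list'),
#     url(r'^facets/(?P<facet_field>[-\w]+)/$', FieldFacetListView.as_view(),
#         name='facet_list'),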
| Brown-University-Library/django-inspectsolr | views.py | Python | mit | 799 |
"""
Embedded tweet plugin for Pelican
=================================
This plugin allows you to embed Twitter tweets into your articles.
It also turns plain @username mentions into links to that user's Twitter page.
For example,
@username
will be replaced by a link to the user's Twitter page, and
@username/status/tweetid
will be replaced by an `Embedded-tweet`_.
.. _Embedded-tweet: https://dev.twitter.com/docs/embedded-tweets
"""
from pelican import signals
import re
def embed_tweet(content):
content._content = re.sub(
r'(^|[^@\w])@(\w{1,15})\b',
'\\1<a href="https://twitter.com/\\2">@\\2</a>',
re.sub(
r'(^|[^@\w])@(\w{1,15})/status/(\d+)\b',
'\\1<blockquote class="twitter-tweet" align="center"><a href="https://twitter.com/\\2/status/\\3">Tweet of \\2/\\3</a></blockquote>',
content._content
)
) + '<script src="//platform.twitter.com/widgets.js" charset="utf-8"></script>'
def register():
signals.content_object_init.connect(embed_tweet)
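# The sketch below is not part of the plugin; it only illustrates the two
# substitutions documented in the module docstring. The _Demo class and the
# sample text are made-up stand-ins for a Pelican content object, and running
# this file directly still requires pelican to be importable (see the import
# at the top).
if __name__ == "__main__":
    class _Demo(object):
        _content = "Thanks @jack, see @jack/status/20 for the first tweet."
    demo = _Demo()
    embed_tweet(demo)
    print(demo._content)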
| flurischt/pelican-embed-tweet | embed_tweet.py | Python | mit | 1,037 |
"""Constants used in the test suite. """
SORTED_COUNTRIES = [
("AF", "Afghanistan"),
("AX", "\xc5land Islands"),
("AL", "Albania"),
("DZ", "Algeria"),
("AS", "American Samoa"),
("AD", "Andorra"),
("AO", "Angola"),
("AI", "Anguilla"),
("AQ", "Antarctica"),
("AG", "Antigua and Barbuda"),
("AR", "Argentina"),
("AM", "Armenia"),
("AW", "Aruba"),
("AU", "Australia"),
("AT", "Austria"),
("AZ", "Azerbaijan"),
("BS", "Bahamas"),
("BH", "Bahrain"),
("BD", "Bangladesh"),
("BB", "Barbados"),
("BY", "Belarus"),
("BE", "Belgium"),
("BZ", "Belize"),
("BJ", "Benin"),
("BM", "Bermuda"),
("BT", "Bhutan"),
("BO", "Bolivia"),
("BQ", "Bonaire, Sint Eustatius and Saba"),
("BA", "Bosnia and Herzegovina"),
("BW", "Botswana"),
("BV", "Bouvet Island"),
("BR", "Brazil"),
("IO", "British Indian Ocean Territory"),
("BN", "Brunei"),
("BG", "Bulgaria"),
("BF", "Burkina Faso"),
("BI", "Burundi"),
("CV", "Cabo Verde"),
("KH", "Cambodia"),
("CM", "Cameroon"),
("CA", "Canada"),
("KY", "Cayman Islands"),
("CF", "Central African Republic"),
("TD", "Chad"),
("CL", "Chile"),
("CN", "China"),
("CX", "Christmas Island"),
("CC", "Cocos (Keeling) Islands"),
("CO", "Colombia"),
("KM", "Comoros"),
("CG", "Congo"),
("CD", "Congo (the Democratic Republic of the)"),
("CK", "Cook Islands"),
("CR", "Costa Rica"),
("CI", "C\xf4te d'Ivoire"),
("HR", "Croatia"),
("CU", "Cuba"),
("CW", "Cura\xe7ao"),
("CY", "Cyprus"),
("CZ", "Czechia"),
("DK", "Denmark"),
("DJ", "Djibouti"),
("DM", "Dominica"),
("DO", "Dominican Republic"),
("EC", "Ecuador"),
("EG", "Egypt"),
("SV", "El Salvador"),
("GQ", "Equatorial Guinea"),
("ER", "Eritrea"),
("EE", "Estonia"),
("SZ", "Eswatini"),
("ET", "Ethiopia"),
("FK", "Falkland Islands (Malvinas)"),
("FO", "Faroe Islands"),
("FJ", "Fiji"),
("FI", "Finland"),
("FR", "France"),
("GF", "French Guiana"),
("PF", "French Polynesia"),
("TF", "French Southern Territories"),
("GA", "Gabon"),
("GM", "Gambia"),
("GE", "Georgia"),
("DE", "Germany"),
("GH", "Ghana"),
("GI", "Gibraltar"),
("GR", "Greece"),
("GL", "Greenland"),
("GD", "Grenada"),
("GP", "Guadeloupe"),
("GU", "Guam"),
("GT", "Guatemala"),
("GG", "Guernsey"),
("GN", "Guinea"),
("GW", "Guinea-Bissau"),
("GY", "Guyana"),
("HT", "Haiti"),
("HM", "Heard Island and McDonald Islands"),
("VA", "Holy See"),
("HN", "Honduras"),
("HK", "Hong Kong"),
("HU", "Hungary"),
("IS", "Iceland"),
("IN", "India"),
("ID", "Indonesia"),
("IR", "Iran"),
("IQ", "Iraq"),
("IE", "Ireland"),
("IM", "Isle of Man"),
("IL", "Israel"),
("IT", "Italy"),
("JM", "Jamaica"),
("JP", "Japan"),
("JE", "Jersey"),
("JO", "Jordan"),
("KZ", "Kazakhstan"),
("KE", "Kenya"),
("KI", "Kiribati"),
("XK", "Kosovo"),
("KW", "Kuwait"),
("KG", "Kyrgyzstan"),
("LA", "Laos"),
("LV", "Latvia"),
("LB", "Lebanon"),
("LS", "Lesotho"),
("LR", "Liberia"),
("LY", "Libya"),
("LI", "Liechtenstein"),
("LT", "Lithuania"),
("LU", "Luxembourg"),
("MO", "Macao"),
("MG", "Madagascar"),
("MW", "Malawi"),
("MY", "Malaysia"),
("MV", "Maldives"),
("ML", "Mali"),
("MT", "Malta"),
("MH", "Marshall Islands"),
("MQ", "Martinique"),
("MR", "Mauritania"),
("MU", "Mauritius"),
("YT", "Mayotte"),
("MX", "Mexico"),
("FM", "Micronesia (Federated States of)"),
("MD", "Moldova"),
("MC", "Monaco"),
("MN", "Mongolia"),
("ME", "Montenegro"),
("MS", "Montserrat"),
("MA", "Morocco"),
("MZ", "Mozambique"),
("MM", "Myanmar"),
("NA", "Namibia"),
("NR", "Nauru"),
("NP", "Nepal"),
("NL", "Netherlands"),
("NC", "New Caledonia"),
("NZ", "New Zealand"),
("NI", "Nicaragua"),
("NE", "Niger"),
("NG", "Nigeria"),
("NU", "Niue"),
("NF", "Norfolk Island"),
("KP", "North Korea"),
("MK", "North Macedonia"),
("MP", "Northern Mariana Islands"),
("NO", "Norway"),
("OM", "Oman"),
("PK", "Pakistan"),
("PW", "Palau"),
("PS", "Palestine, State of"),
("PA", "Panama"),
("PG", "Papua New Guinea"),
("PY", "Paraguay"),
("PE", "Peru"),
("PH", "Philippines"),
("PN", "Pitcairn"),
("PL", "Poland"),
("PT", "Portugal"),
("PR", "Puerto Rico"),
("QA", "Qatar"),
("RE", "R\xe9union"),
("RO", "Romania"),
("RU", "Russia"),
("RW", "Rwanda"),
("BL", "Saint Barth\xe9lemy"),
("SH", "Saint Helena, Ascension and Tristan da Cunha"),
("KN", "Saint Kitts and Nevis"),
("LC", "Saint Lucia"),
("MF", "Saint Martin (French part)"),
("PM", "Saint Pierre and Miquelon"),
("VC", "Saint Vincent and the Grenadines"),
("WS", "Samoa"),
("SM", "San Marino"),
("ST", "Sao Tome and Principe"),
("SA", "Saudi Arabia"),
("SN", "Senegal"),
("RS", "Serbia"),
("SC", "Seychelles"),
("SL", "Sierra Leone"),
("SG", "Singapore"),
("SX", "Sint Maarten (Dutch part)"),
("SK", "Slovakia"),
("SI", "Slovenia"),
("SB", "Solomon Islands"),
("SO", "Somalia"),
("ZA", "South Africa"),
("GS", "South Georgia and the South Sandwich Islands"),
("KR", "South Korea"),
("SS", "South Sudan"),
("ES", "Spain"),
("LK", "Sri Lanka"),
("SD", "Sudan"),
("SR", "Suriname"),
("SJ", "Svalbard and Jan Mayen"),
("SE", "Sweden"),
("CH", "Switzerland"),
("SY", "Syria"),
("TW", "Taiwan"),
("TJ", "Tajikistan"),
("TZ", "Tanzania"),
("TH", "Thailand"),
("TL", "Timor-Leste"),
("TG", "Togo"),
("TK", "Tokelau"),
("TO", "Tonga"),
("TT", "Trinidad and Tobago"),
("TN", "Tunisia"),
("TR", "Turkey"),
("TM", "Turkmenistan"),
("TC", "Turks and Caicos Islands"),
("TV", "Tuvalu"),
("UG", "Uganda"),
("UA", "Ukraine"),
("AE", "United Arab Emirates"),
("GB", "United Kingdom"),
("UM", "United States Minor Outlying Islands"),
("US", "United States of America"),
("UY", "Uruguay"),
("UZ", "Uzbekistan"),
("VU", "Vanuatu"),
("VE", "Venezuela"),
("VN", "Vietnam"),
("VG", "Virgin Islands (British)"),
("VI", "Virgin Islands (U.S.)"),
("WF", "Wallis and Futuna"),
("EH", "Western Sahara"),
("YE", "Yemen"),
("ZM", "Zambia"),
("ZW", "Zimbabwe")
]
| edx/edx-platform | openedx/core/djangoapps/user_api/tests/test_constants.py | Python | agpl-3.0 | 6,648 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import projects.models
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0004_auto_20140912_1719'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=30)),
('image', models.ImageField(upload_to=projects.models.Project.image_upload_to, null=True)),
('blurb', models.TextField(null=True)),
('description', models.TextField(null=True)),
('website', models.URLField(null=True)),
('leaders', models.ManyToManyField(related_name='u+', to='robocrm.RoboUser')),
],
options={
},
bases=(models.Model,),
),
]
| sreidy/roboticsclub.org | projects/migrations/0001_initial.py | Python | mit | 1,003 |
import os
from os import path
from getpass import getpass
from .environment import env
from .exceptions import InvalidPasswordError
# make sure WindowsError is available
import __builtin__
if not hasattr(__builtin__, 'WindowsError'):
class WindowsError(OSError):
pass
try:
# For testing replacement routines for older python compatibility
# raise ImportError()
import subprocess
from subprocess import call as _call_command
def _capture_command(argv):
return subprocess.Popen(argv, stdout=subprocess.PIPE).communicate()[0]
except ImportError:
# this section is for python older than 2.4 - basically for CentOS 4
# when we have to use it
def _capture_command(argv):
command = ' '.join(argv)
# print "(_capture_command) Executing: %s" % command
fd = os.popen(command)
output = fd.read()
fd.close()
return output
# older python - shell arg is ignored, but is legal
def _call_command(argv, stdin=None, stdout=None, shell=True):
argv = [i.replace('"', '\"') for i in argv]
argv = ['"%s"' % i for i in argv]
command = " ".join(argv)
if stdin is not None:
command += " < " + stdin.name
if stdout is not None:
command += " > " + stdout.name
# sys.stderr.write("(_call_command) Executing: %s\n" % command)
return os.system(command)
try:
from subprocess import CalledProcessError
except ImportError:
# the Error does not exist in python 2.4
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() returns
a non-zero exit status. The exit status will be stored in the
returncode attribute."""
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % \
(self.cmd, self.returncode)
def _call_wrapper(argv, **kwargs):
if env['verbose']:
if hasattr(argv, '__iter__'):
command = ' '.join(argv)
else:
command = argv
print "Executing command: %s" % command
return _call_command(argv, **kwargs)
def _check_call_wrapper(argv, accepted_returncode_list=[0], **kwargs):
try:
returncode = _call_wrapper(argv, **kwargs)
if returncode not in accepted_returncode_list:
raise CalledProcessError(returncode, argv)
except WindowsError:
raise CalledProcessError("Unknown", argv)
def _create_dir_if_not_exists(dir_path, world_writeable=False, owner=None):
if not path.exists(dir_path):
_check_call_wrapper(['mkdir', '-p', dir_path])
if world_writeable:
_check_call_wrapper(['chmod', '-R', '777', dir_path])
if owner:
_check_call_wrapper(['chown', '-R', owner, dir_path])
def _rm_all_pyc():
"""Remove all pyc files, to be sure"""
_call_wrapper('find . -name \*.pyc -print0 | xargs -0 rm', shell=True,
cwd=env['vcs_root_dir'])
def _ask_for_password(prompt, test_fn=None, max_attempts=3):
"""Get password from user.
prompt is the text for the password prompt
test_fn is a function to test the password. It should return True if
the password works, or False otherwise.
"""
password = None
attempts = 0
while password is None:
if attempts < max_attempts:
attempts += 1
password = getpass(prompt)
if test_fn and not test_fn(password):
print "Sorry, invalid password"
password = None
else:
raise InvalidPasswordError("None of your passwords worked")
return password
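# Illustrative usage only (the prompt text and the password check below are
# assumptions, not something this module provides):
#
#   password = _ask_for_password(
#       "Enter the database password: ",
#       test_fn=lambda pw: _call_wrapper(['mysql', '-u', 'root',
#                                         '--password=%s' % pw,
#                                         '-e', 'SELECT 1']) == 0,
#       max_attempts=3)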
def _get_file_contents(file_path, sudo=False):
if sudo:
try:
# we use this rather than file exists so that the script doesn't
# have to be run as root
file_exists = _call_wrapper(['sudo', 'test', '-f', file_path])
except (WindowsError, CalledProcessError):
return None
if file_exists != 0:
return None
# note this requires sudoers to work with this - jenkins particularly
contents = _capture_command(["sudo", "cat", file_path])
else:
if not path.isfile(file_path):
return None
contents = open(file_path).read()
return contents.rstrip()
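# Illustrative usage only (the path is a hypothetical example): reading a
# root-owned file without the whole script running as root:
#   contents = _get_file_contents('/etc/default/locale', sudo=True)
# returns the stripped file contents (read via 'sudo cat'), or None if the
# file does not exist.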
| qris/mailer-dye | dye/tasklib/util.py | Python | gpl-3.0 | 4,469 |
"""
OpenGapps Copy Files
This takes the sources directory, folder and platform level as input, and produces
to the standard output a FILE:system/FILE structure that will be used by the
AOSP build system PRODUCT_COPY_FILES.
"""
import os
import re
import subprocess
import sys
def main():
"""
Main
"""
if len(sys.argv) != 4:
print("expect {} [opengapps sources] [folder] [platform level]".format(sys.argv[0]))
sys.exit(-1)
opengapps_sources = sys.argv[1]
if not opengapps_sources.endswith('/'):
opengapps_sources += '/'
folder = sys.argv[2]
max_api_level = int(sys.argv[3])
files = {}
pattern = re.compile(r'(\d{2})/.*')
for dp, dn, found_files in os.walk(opengapps_sources):
for line in found_files:
line = os.path.join(dp, line)
file_name = line.strip('\n').replace(opengapps_sources, '')
if not file_name.startswith(folder):
continue
# Remove folder name, we'll prepend this later
file_name = file_name.replace('{}/'.format(folder), '')
match = pattern.match(file_name)
if match and int(match.group(1)) <= max_api_level:
version = int(match.group(1))
file_name = file_name.replace('{}/'.format(version), '')
if file_name in files and int(files[file_name]) > version:
continue
files[file_name] = version
elif not match:
files[file_name] = None
for key in files:
version = files[key]
if version:
print("{0}{1}/{2}/{3}:system/{1}/{3}".format(opengapps_sources, folder, version, key))
else:
print("{0}{1}/{2}:system/{1}/{2}".format(opengapps_sources, folder, key))
if __name__ == "__main__":
main()
| opengapps/aosp_build | core/copy_files.py | Python | gpl-3.0 | 1,852 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# # Ballistic calculator
import argparse
import csv
UtilName = 'Ballistic tool: Add bullet to bulletDB'
__author__ = 'DarkHaisam'
__version__ = '1.2 Beta'
parser = argparse.ArgumentParser(description=UtilName + ' ' + __version__)
parser.add_argument('-m', '--metric', action='store_true', help='Use metric units')
parser.add_argument('-b', '--bulletname', help='Use bullet parameters from the given bullet name in bulletDB')
parser.add_argument('-v', '--verbose', help='Verbose output')
parser.add_argument('-D', '--dbullet', type=float, help='Bullet diameter')
parser.add_argument('-L', '--lbullet', type=float, help='Bullet length')
parser.add_argument('-SG', '--stability', type=float, help='Gyroscopic stability factor')
parser.add_argument('-M', '--mass', type=float, help='Bullet mass')
args = parser.parse_args()
if not args.stability:
args.stability = ''
with open('bulletsDB', 'a') as f:
writer = csv.writer(f, delimiter=';', quoting=csv.QUOTE_NONE)
BulletsParam = args.bulletname, args.dbullet, args.lbullet, args.mass, args.stability
writer.writerow(BulletsParam)
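# Example invocation (all values are made up, shown only to illustrate the
# flags defined above):
#   python badd.py -b "308Win-175gr" -D 7.82 -L 31.2 -M 11.34 -SG 1.5
# This appends one ';'-separated row (name, diameter, length, mass, stability
# factor) to the 'bulletsDB' file.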
| darkhaisam/ballistic | experemental/badd.py | Python | gpl-2.0 | 1,160 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gn_flavor
"""Utils for running under Valgrind."""
class ValgrindFlavorUtils(gn_flavor.GNFlavorUtils):
def __init__(self, m):
super(ValgrindFlavorUtils, self).__init__(m)
self._suppressions_file = self.m.vars.skia_dir.join(
'tools', 'valgrind.supp')
def step(self, name, cmd, **kwargs):
new_cmd = ['valgrind', '--gen-suppressions=all', '--leak-check=full',
'--track-origins=yes', '--error-exitcode=1', '--num-callers=40',
'--suppressions=%s' % self._suppressions_file]
path_to_app = self.out_dir.join(cmd[0])
new_cmd.append(path_to_app)
new_cmd.extend(cmd[1:])
return self.m.run(self.m.step, name, cmd=new_cmd,
**kwargs)
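# For illustration: a hypothetical cmd of ['dm', '--some-flag'] would be run
# roughly as
#   valgrind --gen-suppressions=all --leak-check=full --track-origins=yes \
#            --error-exitcode=1 --num-callers=40 \
#            --suppressions=<skia>/tools/valgrind.supp <out_dir>/dm --some-flag
# ('dm' and '--some-flag' are placeholders, not something this module assumes).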
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/skia/infra/bots/recipe_modules/flavor/valgrind_flavor.py | Python | gpl-3.0 | 895 |
# Copyright (C) 2017-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from django_js_reverse.views import urls_js
from django.conf import settings
from django.conf.urls import (
handler400,
handler403,
handler404,
handler500,
include,
url,
)
from django.contrib.auth.views import LogoutView
from django.contrib.staticfiles.views import serve
from django.shortcuts import render
from django.views.generic.base import RedirectView
from swh.web.browse.identifiers import swhid_browse
from swh.web.common.exc import (
swh_handle400,
swh_handle403,
swh_handle404,
swh_handle500,
)
from swh.web.config import get_config
swh_web_config = get_config()
favicon_view = RedirectView.as_view(
url="/static/img/icons/swh-logo-32x32.png", permanent=True
)
def _default_view(request):
return render(request, "homepage.html")
urlpatterns = [
url(r"^admin/", include("swh.web.admin.urls")),
url(r"^favicon\.ico$", favicon_view),
url(r"^api/", include("swh.web.api.urls")),
url(r"^browse/", include("swh.web.browse.urls")),
url(r"^$", _default_view, name="swh-web-homepage"),
url(r"^jsreverse/$", urls_js, name="js_reverse"),
# keep legacy SWHID resolving URL with trailing slash for backward compatibility
url(
r"^(?P<swhid>(swh|SWH):[0-9]+:[A-Za-z]+:[0-9A-Fa-f]+.*)/$",
swhid_browse,
name="browse-swhid-legacy",
),
url(
r"^(?P<swhid>(swh|SWH):[0-9]+:[A-Za-z]+:[0-9A-Fa-f]+.*)$",
swhid_browse,
name="browse-swhid",
),
url(r"^", include("swh.web.misc.urls")),
url(r"^", include("swh.web.auth.views")),
url(r"^logout/$", LogoutView.as_view(template_name="logout.html"), name="logout"),
]
# allow to serve assets through django staticfiles
# even if settings.DEBUG is False
def insecure_serve(request, path, **kwargs):
return serve(request, path, insecure=True, **kwargs)
# enable to serve compressed assets through django development server
if swh_web_config["serve_assets"]:
static_pattern = r"^%s(?P<path>.*)$" % settings.STATIC_URL[1:]
urlpatterns.append(url(static_pattern, insecure_serve))
handler400 = swh_handle400 # noqa
handler403 = swh_handle403 # noqa
handler404 = swh_handle404 # noqa
handler500 = swh_handle500 # noqa
| SoftwareHeritage/swh-web-ui | swh/web/urls.py | Python | agpl-3.0 | 2,484 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import numpy
import pni.io.nx.h5 as nexus
f = nexus.create_file("test_string2.nxs",True);
d = f.root().create_group("scan_1","NXentry").\
create_group("detector","NXdetector")
sa= d.create_field("ListofStrings","string",shape=(3,2))
sa[0,0]="safdfdsffdsfd"
sa[1,0]="safdsfsfdsffdsfd"
sa[2,0]="safdfsfd"
print(sa[0,0])
print(sa[1,0])
print(sa[2,0])
print(sa[...])
f.close()
| pni-libraries/python-pni | doc/examples/old_examples/test_string2.py | Python | gpl-2.0 | 494 |
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.sql import SparkSession, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type
from pyspark.tests import ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode
d = [Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _verify_type
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
def check_datatype(datatype):
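            # Round-trip the type through pickle, and through its JSON form as
            # parsed by the JVM-side parser, then check both equal the original.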
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._wrapped._ssql_ctx.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_verify_type(ExamplePoint(1.0, 2.0), ExamplePointUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], ExamplePointUDT()))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_verify_type(PythonOnlyPoint(1.0, 2.0), PythonOnlyUDT())
self.assertRaises(ValueError, lambda: _verify_type([1.0, 2.0], PythonOnlyUDT()))
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(), cs.startswith('a'), cs.endswith('a')
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i) for i in range(10)]).toDF()
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertTrue(row[1], 1)
self.assertTrue(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
return sum(diff) == len(a)
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
with self.assertRaises(ValueError):
struct1 = StructType().add("name")
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
with self.assertRaises(KeyError):
not_a_field = struct1["f9"]
with self.assertRaises(IndexError):
not_a_field = struct1[9]
with self.assertRaises(TypeError):
not_a_field = struct1[9.9]
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger_takes_keyword_args(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
# should throw error
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
        # saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
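        # _infer_type maps Python integers to LongType regardless of magnitude,
        # including values that no longer fit in a 64-bit long (e.g. 2**71).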
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame([(u'Alice', None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with string
row = self.spark.createDataFrame([(None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
        # fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.join(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT)")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT)")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT)")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
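        # expected rows: (value, key, max, min, count, row_number, rank, dense_rank, ntile)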
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
| DataReplyUK/datareplyuk | GenesAssociation/spark-2.0.0-bin-hadoop2.7/python/pyspark/sql/tests.py | Python | apache-2.0 | 78,990 |
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns
urlpatterns = patterns('components.rights.views',
(r'^$', 'ingest_rights_list'),
(r'add/$', 'ingest_rights_edit'),
(r'delete/(?P<id>\d+)/$', 'ingest_rights_delete'),
(r'grants/(?P<id>\d+)/delete/$', 'ingest_rights_grant_delete'),
(r'grants/(?P<id>\d+)/$', 'ingest_rights_grants_edit'),
(r'(?P<id>\d+)/$', 'ingest_rights_edit')
)
| michal-ruzicka/archivematica | src/dashboard/src/components/rights/ingest_urls.py | Python | agpl-3.0 | 1,151 |
# -*- coding:Utf-8 -*-
#####################################################################
#This file is part of Network and RGPA.
#PyLayers is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#PyLayers is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with PyLayers. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------
#authors :
#Nicolas AMIOT : nicolas.amiot@univ-rennes1.fr
#Bernard UGUEN : bernard.uguen@univ-rennes1.fr
#Mohamed LAARAIEDH : mohamed.laaraiedh@univ-rennes1.fr
#####################################################################
import numpy as np
import scipy as sp
from pylayers.util.project import *
import pdb
import doctest
class PLSmodel(PyLayers):
""" Path Loss Shadowing model
Attributes
----------
f : float
frequency in GHz
rssnp : float
path loss exponent
d0 : float
PL0 distance
sigrss : float
shadowing variance
    method : str
        model used for the RSS <-> range conversion ('mode', 'median', 'mean' or 'OneSlope')
"""
def __init__(self,f=3.0,rssnp=2.64,d0=1.0,sigrss=3.0,method='mode'):
self.f = f
self.d0 = d0
self.rssnp = rssnp
self.sigrss = sigrss
self.getPL0()
self.method = method
self.param = dict(f=self.f,
d0=self.d0,
rssnp=self.rssnp,
sigrss=self.sigrss,
PL0=self.PL0,
method=self.method)
def __repr__(self):
st = 'frequency (GHz) : ' +str(self.f)+'\n'
st = st + 'path loss exponent (n) : '+str(self.rssnp)+'\n'
st = st + 'PL0 (dB): '+str(self.PL0)
return(st)
def getPL0(self,Gt=0,Gr=0):
""" get Path Loss at reference distance d0
Parameters
----------
        Gt : float
            transmitting antenna gain in dB (default 0 dB)
        Gr : float
            receiving antenna gain in dB (default 0 dB)
Examples
--------
>>> from pylayers.network.model import *
>>> plm = PLSmodel()
>>> plm.getPL0()
"""
Gt = 10**(Gt/10.)
Gr = 10**(Gr/10.)
ld = 0.3/self.f
self.PL0 = -20*np.log10(ld/(4.0*np.pi*self.d0))
def OneSlope(self,r):
""" OneSlope model : give Power Level from distance with OneSlope method
Parameters
----------
r : range (meters)
Returns
-------
PL : float
path loss values
"""
        try:
            PL = self.PL0+10*self.rssnp*np.log10(r)
        except AttributeError:
            self.getPL0()
            PL = self.PL0+10*self.rssnp*np.log10(r)
return(PL)
def iOneSlope(self,PL):
""" goes from PL to estimated distance
inverse OneSlope model : give distance from Power Level with OneSlope method
Parameters
----------
PL :
path loss in dB
Returns
-------
r : range array
"""
        try:
            r = 10**((PL-self.PL0)/(10*self.rssnp))
        except AttributeError:
            self.getPL0()
            r = 10**((PL-self.PL0)/(10*self.rssnp))
return(r)
def getPLmean(self, d):
""" compute PL mean
Notes
-----
$$\bar{PL}=PL_0 - 10 n_p \log_{10}{\frac{d}{d_0}}$$
"""
PLmean = self.PL0-10*self.rssnp*np.log10(d/self.d0)
return PLmean
def getPL(self,r,RSSStd):
""" Get Power Level from a given distance
Parameters
----------
        r : float
            range (distance) in meters
        RSSStd : float
            standard deviation of the RSS in dB
Examples
--------
>>> M = PLSmodel(f=0.3,rssnp=2.64,d0=1,sigrss=3,method='mode')
>>> PL = M.getPL(16,1)
"""
if self.method =='OneSlope':
PL=self.OneSlope(r)
elif self.method == 'mode' or self.method == 'median' or self.method == 'mean':
PLmean = self.getPLmean(r)
try:
shPLmean = np.shape(PLmean)
Xrand = RSSStd*sp.randn(shPLmean[0])
except:
Xrand = RSSStd*sp.randn()
PL = PLmean+Xrand
else :
raise NameError('Pathloss method name')
return(PL)
def getRange(self,RSS,RSSStd):
""" Get distance from a given Power Level
Parameters
----------
        RSS : float
            received power level (dB)
        RSSStd : float
            standard deviation of the RSS (dB)
        Returns
        -------
        r : float
            estimated range in meters
"""
        if self.method == 'OneSlope':
r = self.iOneSlope(RSS)
elif self.method == 'mode':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.exp(M-S**2)
elif self.method == 'median':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.exp(M)
elif self.method == 'mean':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.exp(M+0.5*S**2)
else :
raise NameError('invalid Pathloss method name for range computation')
return(r)
def getRangeStd(self, RSS, RSSStd):
"""Compute Ranges std associated to "Rest" estimator
Parameters
----------
RSS :
RSSStd :
"""
if self.method == 'mode':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.sqrt((np.exp(2*M-2*S**2))*(-np.exp(-S**2)+1))
elif self.method == 'median':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.sqrt((np.exp(2*M+S**2))*(np.exp(S**2)-1))
elif self.method == 'mean':
S = -(np.log(10)/10)* RSSStd/self.rssnp # STD of ranges distribution
M = (np.log(10)/10)*(self.PL0-RSS)/self.rssnp + np.log(self.d0) # Mean of ranges distribution
r = np.sqrt((np.exp(2*M+3*S**2))*(np.exp(S**2)-1))
else :
raise NameError('invalid Pathloss method name STD range computation')
return (r)
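# Minimal usage sketch (added for illustration; the parameter values below are
# arbitrary examples, not recommended defaults).
def _example_rss_to_range():
    """ Illustrative only: simulate an RSS at 10 m and invert it back to a range """
    model = PLSmodel(f=3.0, rssnp=2.64, d0=1.0, sigrss=3.0, method='mode')
    rss = model.getPL(10.0, model.sigrss)         # simulated power level at 10 m
    r_est = model.getRange(rss, model.sigrss)     # most probable range (m)
    r_std = model.getRangeStd(rss, model.sigrss)  # spread of that range estimate
    return r_est, r_std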
if __name__=='__main__':
doctest.testmod()
| pylayers/pylayers | pylayers/network/model.py | Python | mit | 7,416 |
class aMSNConfig:
def __init__(self):
self._config = {}
    def get_key(self, key, default=None):
        """
        Get an existing config key, or the given default value if the key does not exist.
@type key: str
@param key: name of the config key.
@type default: Any
@param default: default value to return if key doesn't exist.
@rtype: Any
@return: config key value.
"""
try:
return self._config[key]
except KeyError:
return default
def set_key(self, key, value):
"""
Set a key value
@type key: str
@param key: name of the config key.
@type value: Any
@param value: value of the key to be set.
"""
self._config[key] = value
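# Minimal illustrative usage (not part of the original class):
#   cfg = aMSNConfig()
#   cfg.set_key('nickname', 'alice')
#   cfg.get_key('nickname')            # -> 'alice'
#   cfg.get_key('missing', 'default')  # -> 'default'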
| kakaroto/amsn2 | amsn2/core/config.py | Python | gpl-2.0 | 797 |
# Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .workspace import Workspace
help = 'Clean generated projects'
def run(args):
workspace = Workspace(args.file, os.getcwd())
if args.project:
workspace.clean_project(args.project, args.tool)
else:
workspace.clean_projects(args.tool)
def setup(subparser):
subparser.add_argument("-f", "--file", help="YAML projects file", default='projects.yaml')
subparser.add_argument("-p", "--project", help="Specify which project to be removed")
subparser.add_argument(
"-t", "--tool", help="Clean project files for specified tool (uvision by default)")
| sg-/project_generator | project_generator/clean.py | Python | apache-2.0 | 1,182 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.domain import exp_services
from core.domain import exp_services_test
from core.domain import rights_manager
from core.domain import summary_services
import feconf
class ExplorationDisplayableSummaries(
exp_services_test.ExplorationServicesUnitTests):
"""Test functions for getting displayable exploration summary dicts."""
ALBERT_EMAIL = 'albert@example.com'
BOB_EMAIL = 'bob@example.com'
ALBERT_NAME = 'albert'
BOB_NAME = 'bob'
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXP_ID_3 = 'eid3'
EXPECTED_VERSION_1 = 4
EXPECTED_VERSION_2 = 2
def setUp(self):
"""Populate the database of explorations and their summaries.
The sequence of events is:
- (1) Albert creates EXP_ID_1.
- (2) Bob edits the title of EXP_ID_1.
- (3) Albert creates EXP_ID_2.
- (4) Albert edits the title of EXP_ID_1.
- (5) Albert edits the title of EXP_ID_2.
- (6) Bob reverts Albert's last edit to EXP_ID_1.
- Bob tries to publish EXP_ID_2, and is denied access.
- (7) Albert publishes EXP_ID_2.
- (8) Albert creates EXP_ID_3
- (9) Albert publishes EXP_ID_3
- (10) Albert deletes EXP_ID_3
"""
super(ExplorationDisplayableSummaries, self).setUp()
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.signup(self.BOB_EMAIL, self.BOB_NAME)
self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id)
exp_services.update_exploration(
self.bob_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 title'
}], 'Changed title.')
self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id)
exp_services.update_exploration(
self.albert_id, self.EXP_ID_1, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}], 'Changed title to Albert1 title.')
exp_services.update_exploration(
self.albert_id, self.EXP_ID_2, [{
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Exploration 2 Albert title'
}], 'Changed title to Albert2 title.')
exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2)
with self.assertRaisesRegexp(
Exception, 'This exploration cannot be published'
):
rights_manager.publish_exploration(self.bob_id, self.EXP_ID_2)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_2)
self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id)
rights_manager.publish_exploration(self.albert_id, self.EXP_ID_3)
exp_services.delete_exploration(self.albert_id, self.EXP_ID_3)
def test_get_displayable_exp_summary_dicts_matching_ids(self):
# A list of exp_id's are passed in:
# EXP_ID_1 -- private exploration
        # EXP_ID_2 -- published exploration
# EXP_ID_3 -- deleted exploration
# Should only return [EXP_ID_2]
displayable_summaries = (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
[self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3]))
self.assertEqual(len(displayable_summaries), 1)
self.assertEqual(
displayable_summaries[0]['id'], self.EXP_ID_2)
self.assertEqual(
displayable_summaries[0]['status'],
rights_manager.ACTIVITY_STATUS_PUBLIC)
self.assertEqual(
displayable_summaries[0]['community_owned'], False)
self.assertEqual(
displayable_summaries[0]['language_code'],
feconf.DEFAULT_LANGUAGE_CODE)
self.assertEqual(
displayable_summaries[0]['category'], 'A category')
self.assertEqual(
displayable_summaries[0]['ratings'], feconf.get_empty_ratings())
self.assertEqual(
displayable_summaries[0]['title'], 'Exploration 2 Albert title')
self.assertEqual(
displayable_summaries[0]['contributor_names'], [self.ALBERT_NAME])
self.assertEqual(
displayable_summaries[0]['objective'], 'An objective')
self.assertEqual(displayable_summaries[0]['num_views'], 0)
self.assertIn('last_updated_msec', displayable_summaries[0])
| amitdeutsch/oppia | core/domain/summary_services_test.py | Python | apache-2.0 | 5,280 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.template import Context, Template
class WebdesignTest(unittest.TestCase):
def test_lorem_tag(self):
t = Template("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
'lorem ipsum dolor')
| doismellburning/django | django/contrib/webdesign/tests.py | Python | bsd-3-clause | 355 |
from bloom_freqmap import BloomFreqMap
from pybloomfilter import BloomFilter
from time import time
from numpy import inf, log
from sklearn.metrics import f1_score
from code import interact
import sanity_naive_bayes
class MultinomiamNaiveBayes(object):
def __init__(self, base, alpha, initial_capacity, error_rate, cache_size):
self.initial_capacity = initial_capacity
self.error_rate = error_rate
self.alpha = alpha
self.base = base
#Tracks count | class for p(x|c)
self.class_conditional_counts = BloomFreqMap(base)
#Tracks count all tokens | class for p(x|c)
self.tokens_per_class = {}
#Tracks count(class) for p(c)
self.class_freqs = {}
#Counts vocab size for smoothing
self.token_type_bf = BloomFilter(capacity=initial_capacity, error_rate=error_rate)
self.vocab_sizes = {}
#Tracks the tokens in each class so that we can penalize unseen tokens
#self.class_to_toks_bf = {}
self.N = 0 #instance count
def makeTokenFreqmap(self, tokens):
f = {}
get = f.get
for token in tokens:
f[token] = get(token, 0) + 1
return f
def fit(self, tokens, class_label):
#if class_label not in self.class_to_toks_bf:
# self.class_to_toks_bf[class_label] = BloomFilter(capacity=self.initial_capacity, error_rate=self.error_rate)
if class_label not in self.vocab_sizes:
self.vocab_sizes[class_label] = BloomFilter(capacity=self.initial_capacity, error_rate=self.error_rate)
self.tokens_per_class[class_label] = self.tokens_per_class.get(class_label, 0) + len(tokens)
tok_freqs = self.makeTokenFreqmap(tokens)
for token, token_freq in tok_freqs.iteritems():
#self.class_to_toks_bf[class_label].add(token)
self.token_type_bf.add(token)
#conditional_counts_bf[token+'_'+class_label] += token_freq
self.class_conditional_counts[token+'_'+class_label] += token_freq
self.vocab_sizes[class_label].add(token)
self.class_freqs[class_label] = self.class_freqs.get(class_label, 0) + 1
self.N += 1
def predict(self, tokens, tie_breaker='highest_freq', use_class_prior=True):
N = self.N
max_class, max_score = None, -inf
tok_freqs = self.makeTokenFreqmap(tokens)
num_instances = sum((item[1] for item in self.class_freqs.iteritems()))
for c, cf in self.class_freqs.iteritems():
this_score = log(cf) - log(N) if use_class_prior else 0.0
f_t_c = self.tokens_per_class[c]
num_unseen = 0
V = len(self.vocab_sizes[c])
theta_denominator = log(f_t_c + V)
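            # per-token contribution below: freq * (log(count(token, c) + alpha) - log(tokens in c + |vocab(c)|))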
for token, freq in tok_freqs.iteritems():
count_in_c = self.class_conditional_counts[token+'_'+c]
if count_in_c == 0:
num_unseen += freq
continue
this_score += freq*(log(count_in_c + self.alpha) - theta_denominator)
#Penalize unseen tokens
            this_score += num_unseen*(log(self.alpha) - theta_denominator)
max_score, max_class = max((max_score, max_class), (this_score, c))
return max_class, max_score
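# Minimal usage sketch (added for illustration, not part of the original API):
# `base` is simply forwarded to BloomFreqMap and `cache_size` appears unused by
# this class, so the values below are placeholders only.
def _example_usage():
    clf = MultinomiamNaiveBayes(base=2, alpha=0.5, initial_capacity=10000,
                                error_rate=0.001, cache_size=0)
    clf.fit(['spam', 'buy', 'now'], 'spam')
    clf.fit(['meeting', 'agenda', 'notes'], 'ham')
    return clf.predict(['buy', 'now'])  # -> (most likely class, its log-score)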
class TextToId(object):
def __init__(self):
self.str_to_id = {}
self.id_to_str = {}
def fit(self, l):
for i in l:
if i not in self.str_to_id:
self.str_to_id[i] = len(self.str_to_id)
self.id_to_str[len(self.str_to_id)-1] = i
def transform(self, l):
try:
out = [self.str_to_id[s] for s in l]
        except KeyError:
            raise KeyError('transform() was given a label that was never seen by fit()')
return out
def inverse(self, l):
for id in l:
            yield self.id_to_str[id]
def fit_transform(self, l):
self.fit(l)
return self.transform(l)
def test_nb():
from os import walk
print 'Testing nb'
full_filenames = []
#Get filenames
for root, dirs, files in walk('data/smaller'):
for filename in files:
full_filenames.append(root + '/' + filename)
print len(full_filenames)
from random import shuffle
shuffle(full_filenames)
from re import findall
training_time = 0
labels = []
docs = []
for filename in full_filenames:
docs.append(findall('[A-Za-z]{3,}', open(filename).read()))
label = filename.rsplit('/', 2)[-2]
labels.append(label)
#Do some CV
from sklearn.cross_validation import StratifiedKFold
from numpy import array
from random import shuffle
print 'Let us cross validate'
le = TextToId()
Y = le.fit_transform(labels)
X = array(docs)
Y = array(Y)
cv = StratifiedKFold(Y)
scores = []
total_trained = 0
for train, test in cv:
X_train, X_test = X[train], X[test]
Y_train, Y_test = Y[train], Y[test]
total_trained += X_train.shape[0]
        clf = MultinomiamNaiveBayes(2, 0.5, 500000, 0.001, 1000)  # base and cache_size are placeholder values
#clf = sanity_naive_bayes.MNB(0.5)
for x, y in zip(X_train, Y_train):
t0 = time()
# interact(local=locals())
clf.fit(x, y)
t1 = time()
training_time += t1-t0
        pred = [clf.predict(x)[0] for x in X_test]
scores.append(f1_score(Y_test, pred, pos_label=None, average='macro'))
print scores[-1]
scores = array(scores)
print 'Average macro F1:', scores.mean()
print 'Standard deviation across folds:', scores.std()
print 'Total trained:', total_trained
print 'Training time:', training_time
print 'Done'
interact(local=locals())
if __name__ == '__main__':
test_nb() | AWNystrom/BloomML | multinomial_naive_bayes.py | Python | apache-2.0 | 5,066 |
import re
import os.path
import glob
import tempfile
import shutil
# For backwards compatibility, remove in bright and shiny future.
def detect_version(problemdir, problemtex):
# Check for 0.1 - lack of \problemname
if open(problemtex).read().find(r'\problemname') < 0:
return '0.1'
return '' # Current
class Template:
def __init__(self, problemdir, language='',
title='Problem Title', force_copy_cls=False):
if not os.path.isdir(problemdir):
raise Exception('%s is not a directory' % problemdir)
if problemdir[-1] == '/':
problemdir = problemdir[:-1]
stmtdir = os.path.join(problemdir, 'problem_statement')
langs = []
if glob.glob(os.path.join(stmtdir, 'problem.tex')):
langs.append('')
for f in glob.glob(os.path.join(stmtdir, 'problem.[a-z][a-z].tex')):
langs.append(re.search("problem.([a-z][a-z]).tex$", f).group(1))
if len(langs) == 0:
raise Exception('No problem statements available')
dotlang = ''
# If language unspec., use first available one (will be
# problem.tex if exists)
if language == '':
language = langs[0]
if language != '':
if len(language) != 2 or not language.isalpha():
raise Exception('Invalid language code "%s"' % language)
if language not in langs:
raise Exception('No problem statement for language "%s" available' % language)
dotlang = '.' + language
# Used in the template.tex variable substitution.
self.language = dotlang
problemtex = os.path.join(stmtdir, 'problem' + dotlang + '.tex')
if not os.path.isfile(problemtex):
raise Exception('Unable to find problem statement, was looking for "%s"' % problemtex)
self.templatefile = 'template.tex'
self.clsfile = 'problemset.cls'
timelim = 1 # Legacy for compatibility with v0.1
version = detect_version(problemdir, problemtex)
if version != '':
print('Note: problem is in an old version (%s) of problem format, you should consider updating it' % version)
self.templatefile = 'template_%s.tex' % version
self.clsfile = 'problemset_%s.cls' % version
templatepaths = [os.path.join(os.path.dirname(__file__), 'templates/latex'),
os.path.join(os.path.dirname(__file__), '../templates/latex'),
'/usr/lib/problemtools/templates/latex']
self.templatepath = next((p for p in templatepaths
if os.path.isdir(p) and os.path.isfile(os.path.join(p, self.templatefile))),
None)
if self.templatepath is None:
raise Exception('Could not find directory with latex template "%s"' % self.templatefile)
self.basedir = os.path.dirname(problemdir)
self.shortname = os.path.basename(problemdir)
sample_dir = os.path.join(problemdir, 'data', 'sample')
self.samples = sorted(set([os.path.splitext(os.path.basename(f))[0]
for f in (glob.glob(os.path.join(sample_dir, '*.in')) +
glob.glob(os.path.join(sample_dir, '*.interaction')))]))
self.problemset_cls = os.path.join(self.basedir, 'problemset.cls')
self.copy_cls = True
if os.path.isfile(self.problemset_cls) and not force_copy_cls:
print('%s exists, will not copy it -- in case of weirdness this is likely culprit' % self.problemset_cls)
self.copy_cls = False
def __enter__(self):
if self.copy_cls:
shutil.copyfile(os.path.join(self.templatepath, self.clsfile), self.problemset_cls)
(templfd, self.filename) = tempfile.mkstemp(suffix='.tex', dir=self.basedir)
templout = os.fdopen(templfd, 'w')
templin = open(os.path.join(self.templatepath, self.templatefile))
data = {'language': self.language,
'shortname': self.shortname}
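        # Each line is rendered with %-substitution; lines that reference the
        # per-sample placeholder fail with a KeyError and are instead re-rendered
        # once per sample file in the except branch below.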
for line in templin:
try:
templout.write(line % data)
except:
# This is a bit ugly I guess
for sample in self.samples:
data['sample'] = sample
templout.write(line % data)
if self.samples:
del data['sample']
templout.close()
templin.close()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.problemset_cls is not None and self.copy_cls and os.path.isfile(self.problemset_cls):
os.remove(self.problemset_cls)
if self.filename is not None:
for f in glob.glob(os.path.splitext(self.filename)[0] + '.*'):
if os.path.isfile(f):
os.remove(f)
def get_file_name(self):
assert os.path.isfile(self.filename)
return self.filename
| Kattis/problemtools | problemtools/template.py | Python | mit | 5,068 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-16 08:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('argus', '0012_argusadsl_lira'),
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(default='N/A', max_length=512)),
('hostname', models.CharField(default='None', max_length=512)),
('ne_ip', models.GenericIPAddressField(default='0.0.0.0', protocol='IPv4')),
('tel_num', models.CharField(db_index=True, default='', max_length=16)),
('address', models.CharField(default='N/A', max_length=512)),
('fio', models.CharField(default='N/A', max_length=512)),
('room', models.CharField(default='N/A', max_length=512)),
('iptv_login', models.CharField(db_index=True, max_length=512)),
('inet_login', models.CharField(db_index=True, max_length=512)),
('slot', models.CharField(default='N/A', max_length=512)),
('port', models.CharField(default='N/A', max_length=512)),
('lira', models.CharField(default='N/A', max_length=32)),
('tech', models.CharField(default='N/A', max_length=32)),
],
),
]
| dehu4ka/lna | argus/migrations/0013_client.py | Python | mit | 1,537 |
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
from castellan.common.objects import symmetric_key as key
import mock
from oslo_concurrency import processutils
import uuid
from nova.tests.unit.volume.encryptors import test_cryptsetup
from nova.volume.encryptors import luks
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
def _create(self, connection_info):
return luks.LuksEncryptor(connection_info)
@mock.patch('nova.utils.execute')
def test_is_luks(self, mock_execute):
luks.is_luks(self.dev_path)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.volume.encryptors.luks.LOG')
@mock.patch('nova.utils.execute')
def test_is_luks_with_error(self, mock_execute, mock_log):
error_msg = "Device %s is not a valid LUKS device." % self.dev_path
mock_execute.side_effect = \
processutils.ProcessExecutionError(exit_code=1,
stderr=error_msg)
luks.is_luks(self.dev_path)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
self.assertEqual(1, mock_log.warning.call_count) # warning logged
@mock.patch('nova.utils.execute')
def test__format_volume(self, mock_execute):
self.encryptor._format_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path,
process_input='passphrase',
run_as_root=True, check_exit_code=True, attempts=3),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__open_volume(self, mock_execute):
self.encryptor._open_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input='passphrase',
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume_not_formatted(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=1), # luksOpen
processutils.ProcessExecutionError(exit_code=1), # isLuks
mock.DEFAULT, # luksFormat
mock.DEFAULT, # luksOpen
mock.DEFAULT, # ln
]
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', '--batch-mode', 'luksFormat',
'--key-file=-', self.dev_path, process_input=fake_key,
run_as_root=True, check_exit_code=True, attempts=3),
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(5, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume_fail(self, mock_execute):
fake_key = uuid.uuid4().hex
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = test_cryptsetup.fake__get_key(
None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=1), # luksOpen
mock.DEFAULT, # isLuks
]
self.assertRaises(processutils.ProcessExecutionError,
self.encryptor.attach_volume, None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__close_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_detach_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksClose', self.dev_name,
attempts=3, run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
def test_get_mangled_passphrase(self):
# Confirm that a mangled passphrase is provided as per bug#1633518
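        # The "mangled" form drops the leading zero of each key byte
        # (0x07 -> '7', 0x0b -> 'b'), so '0725230b' becomes '72523b' below.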
unmangled_raw_key = bytes(binascii.unhexlify('0725230b'))
symmetric_key = key.SymmetricKey('AES', len(unmangled_raw_key) * 8,
unmangled_raw_key)
unmangled_encoded_key = symmetric_key.get_encoded()
encryptor = luks.LuksEncryptor(self.connection_info)
self.assertEqual(encryptor._get_mangled_passphrase(
unmangled_encoded_key), '72523b')
@mock.patch('nova.utils.execute')
def test_attach_volume_unmangle_passphrase(self, mock_execute):
fake_key = '0725230b'
fake_key_mangled = '72523b'
self.encryptor._get_key = mock.MagicMock(name='mock_execute')
self.encryptor._get_key.return_value = \
test_cryptsetup.fake__get_key(None, fake_key)
mock_execute.side_effect = [
processutils.ProcessExecutionError(exit_code=2), # luksOpen
mock.DEFAULT, # luksOpen
mock.DEFAULT, # luksClose
mock.DEFAULT, # luksAddKey
mock.DEFAULT, # luksOpen
mock.DEFAULT, # luksClose
mock.DEFAULT, # luksRemoveKey
mock.DEFAULT, # luksOpen
mock.DEFAULT, # ln
]
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key_mangled,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'luksClose', self.dev_name,
run_as_root=True, check_exit_code=True, attempts=3),
mock.call('cryptsetup', 'luksAddKey', self.dev_path,
process_input=''.join([fake_key_mangled,
'\n', fake_key,
'\n', fake_key]),
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('cryptsetup', 'luksClose', self.dev_name,
run_as_root=True, check_exit_code=True, attempts=3),
mock.call('cryptsetup', 'luksRemoveKey', self.dev_path,
process_input=fake_key_mangled, run_as_root=True,
check_exit_code=True),
mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
self.dev_name, process_input=fake_key,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
], any_order=False)
self.assertEqual(9, mock_execute.call_count)
| hanlind/nova | nova/tests/unit/volume/encryptors/test_luks.py | Python | apache-2.0 | 10,988 |
import math
from collections import defaultdict
# calculate the entropy for options with the given probabilities
# eg. for a fair coin, entropy([0.5,0.5]) = 1
def entropy(probabilities):
entropy = 0
# entropy = sum[P*log2(P)]
for P in probabilities:
        # log is undefined for non-positive numbers (so let's treat the contribution as zero)
if P > 0:
entropy -= (P * math.log(P, 2))
return entropy
# get the probabilities of each class from a list of counts of class elements
# eg. for a fair coin, probs_from_counts([1,1]) = [0.5,0.5]
def probs_from_counts(counts):
total = sum(counts)
return [(1.0 * count) / total for count in counts]
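# quick sanity check (illustrative, not from the original file): the weather
# data defined at the bottom of this file has 9 "yes" and 5 "no" instances, so
# entropy(probs_from_counts([9, 5])) comes out at roughly 0.940 bits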
# a data point that has a feature (attribute) vector and a class
class Instance:
def __init__(self, vector = {}, c = 0):
self.vector = vector
self.c = c
def getclass(self):
return self.c
def getvalue(self, attribute):
return self.vector[attribute]
# nodes should take the attributes so far and the data points
class Node:
def __init__(self, attributes_remaining, instances):
self.attributes_remaining = attributes_remaining
self.instances = instances
# a dictionary of value -> node for the attribute we're splitting on
self.attribute = ''
self.children = {}
# the node can't be split anymore if all the instances have the same class
def finished(self):
# if there are no instances, then we're done
if len(self.instances) == 0:
return True
# check the class of all the instances
        majority = self.instances[0].getclass()
for instance in self.instances[1:]:
# if an instance has a different class, then we're not finished
if instance.getclass() != majority:
return False
# otherwise, we're done
return True
    # get the number of instances in each class among this node's instances
def getcountdistribution(self, instances = None):
# default is the distribution of all of this node's instances
if instances == None:
instances = self.instances
# work out the count of each class
counts = defaultdict(int)
for instance in instances:
counts[instance.getclass()] += 1
return counts
# get the entropy at this node
def entropy(self, instances = None):
# default is the entropy of all of this node's instances
if instances == None:
instances = self.instances
# return the entropy for a list of probabilities for each class
return entropy(probs_from_counts(\
self.getcountdistribution(instances).values()\
))
# get the majority class at this node
def majorityclass(self):
# get the distribution of class memberships
distribution = self.getcountdistribution()
        # go through all the class counts to find the majority
majorityclass = None
majoritycount = 0
for c in distribution.keys():
# if the current class beats the current majority
if distribution[c] > majoritycount:
# replace the majority with this one
majorityclass = c
majoritycount = distribution[c]
return majorityclass
# a testing function
def DFS(self, parents = []):
# perform DFS on its children
for value in self.children.keys():
self.children[value].DFS(parents + [self.attribute + '=' + value])
# if it's a leaf node; print its majority class and entropy
if len(self.children) == 0:
print ', '.join(parents)
print 'distribution: [' +\
','.join(\
[str(c) for c in self.getcountdistribution().values()]\
) +\
']'
print 'majority class: ' + self.majorityclass()
print 'entropy: ' + str(self.entropy())
# get the value distribution for if we split on the given attribute
def getdistributionforattribute(self, attribute):
# { value1 -> { class1 -> 5, class2 -> 3, ... }, ... }
values = {}
# for each instance in the current node
for instance in self.instances:
# get its value for this attribute
value = instance.getvalue(attribute)
# initialize this value's dictionary if needed
if value not in values:
values[value] = defaultdict(int)
# increment the count of this class
values[value][instance.getclass()] += 1
# return the distribution we found
return values
# get the mean information that the given attribute would have
def meaninfo(self, attribute):
mean = 0
# get the distribution of values for this attribute
distribution = self.getdistributionforattribute(attribute)
# the number of this node's instances
total = len(self.instances)
# for each value in the distribution
for value in distribution.keys():
# find the instances that have that value
i = [instance for instance in self.instances \
if instance.getvalue(attribute) == value]
# mean = Sum[Entropy * Probability] for all values
E = self.entropy(i)
            P = (1.0 * len(i)) / total  # avoid Python 2 integer division
mean += E * P
# return the mean information
return mean
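    # worked example (illustrative): for the weather data at the bottom of this
    # file the root entropy is about 0.940 and splitting on 'outlook' gives a
    # mean information of about 0.694, so its information gain is roughly
    # 0.247 -- the largest of the four attributes, which is why ID3 chooses
    # 'outlook' for the root split.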
# perform the ID3 algorithm on this node
def ID3(self):
# if we're satisfied with what we have; stop
if self.finished():
return
# work out the entropy at this node
E = self.entropy()
# initially, we don't know what the best attribute is
best_attribute = None
best_infogain = 0
# for each attribute
for attribute in self.attributes_remaining:
# find its information gain
IG = E - self.meaninfo(attribute)
# if it beats what we have, keep it
if IG > best_infogain:
best_attribute = attribute
best_infogain = IG
# if we haven't found an attribute that gives an info gain; stop
if best_attribute == None:
return
# since we're splitting on that attribute, take it out of the new list
# of attributes remaining
attributes_remaining = [att for att in self.attributes_remaining \
if att != best_attribute]
# remember the attribute we're splitting on
self.attribute = best_attribute
# group the instances by their value of that attribute
# { value1 -> [instance1, instance2, ...], ... }
values = {}
for instance in self.instances:
# get the value of each instance
value = instance.getvalue(best_attribute)
# init a list for that value if needed
if value not in values:
values[value] = []
# add the instance to that value's list
values[value].append(instance)
# branch into child nodes
for value in values.keys():
# create a node for it
valueN = Node(attributes_remaining, values[value])
# add the new node to this node's children
self.children[value] = valueN
# run ID3 on it
valueN.ID3()
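# A small helper, not part of the original algorithm: a sketch that assumes the
# tree was built by Node.ID3() above. It walks a grown tree for a new feature
# vector (a plain attribute -> value dict) and falls back to the majority class
# of the deepest node reached when a value was never seen during training.
def classify(node, vector):
    # keep descending while the current node has been split on an attribute
    while node.children:
        value = vector.get(node.attribute)
        if value not in node.children:
            break
        node = node.children[value]
    return node.majorityclass()
# e.g. classify(tree, {'outlook': 'sunny', 'humidity': 'high'}) should give 'no'
# for the tree grown from the weather data below.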
# weather data
instances = [\
Instance({ 'outlook' : 'sunny', 'temperature' : 'hot', 'humidity' : 'high', 'windy' : 'false' }, 'no'),\
Instance({ 'outlook' : 'sunny', 'temperature' : 'hot', 'humidity' : 'high', 'windy' : 'true' }, 'no'),\
\
Instance({ 'outlook' : 'overcast', 'temperature' : 'hot', 'humidity' : 'high', 'windy' : 'false' }, 'yes'),\
\
Instance({ 'outlook' : 'rainy', 'temperature' : 'mild', 'humidity' : 'high', 'windy' : 'false' }, 'yes'),\
Instance({ 'outlook' : 'rainy', 'temperature' : 'cool', 'humidity' : 'normal', 'windy' : 'false' }, 'yes'),\
Instance({ 'outlook' : 'rainy', 'temperature' : 'cool', 'humidity' : 'normal', 'windy' : 'true' }, 'no'),\
\
Instance({ 'outlook' : 'overcast', 'temperature' : 'cool', 'humidity' : 'normal', 'windy' : 'true' }, 'yes'),\
\
Instance({ 'outlook' : 'sunny', 'temperature' : 'mild', 'humidity' : 'high', 'windy' : 'false' }, 'no'),\
Instance({ 'outlook' : 'sunny', 'temperature' : 'cool', 'humidity' : 'normal', 'windy' : 'false' }, 'yes'),\
\
Instance({ 'outlook' : 'rainy', 'temperature' : 'mild', 'humidity' : 'normal', 'windy' : 'false' }, 'yes'),\
\
Instance({ 'outlook' : 'sunny', 'temperature' : 'mild', 'humidity' : 'normal', 'windy' : 'true' }, 'yes'),\
\
Instance({ 'outlook' : 'overcast', 'temperature' : 'mild', 'humidity' : 'high', 'windy' : 'true' }, 'yes'),\
Instance({ 'outlook' : 'overcast', 'temperature' : 'hot', 'humidity' : 'normal', 'windy' : 'false' }, 'yes'),\
\
Instance({ 'outlook' : 'rainy', 'temperature' : 'mild', 'humidity' : 'high', 'windy' : 'true' }, 'no'),\
]
attributes = ['outlook', 'temperature', 'humidity', 'windy']
# test it out
tree = Node(attributes, instances)
tree.ID3()
tree.DFS()
| pmansour/algorithms | machine-learning/ID3.py | Python | mit | 9495 |
from phystricks import *
def SuiteInverseAlterne():
def suite(i):
return SR((-1)**i)/i
pspict,fig = SinglePicture("SuiteInverseAlterne")
n=10
for i in range(1,n+1):
P=Point(i,suite(i))
P.put_mark(0.3,(-1)**i*90,"$%s$"%(repr(suite(i))),automatic_place=pspict)
pspict.DrawGraph(P)
pspict.axes.no_graduation()
pspict.DrawDefaultAxes()
pspict.dilatation_Y(3)
fig.conclude()
fig.write_the_file()
| Naereen/mazhe | phystricksSuiteInverseAlterne.py | Python | gpl-3.0 | 414 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.description'
db.add_column('rcal_event', 'description',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Event.description'
db.delete_column('rcal_event', 'description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'rcal.event': {
'Meta': {'object_name': 'Event'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rcal.Resource']"}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'rcal.resource': {
'Meta': {'object_name': 'Resource'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'rcal.rule': {
'Meta': {'ordering': "('id',)", 'object_name': 'Rule'},
'check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rule_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rcal.RuleGroup']"})
},
'rcal.rulegroup': {
'Meta': {'object_name': 'RuleGroup'},
'action': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
    complete_apps = ['rcal']
| apollo13/django-rcal | rcal/migrations/0004_add_field_event_description.py | Python | bsd-3-clause | 5,814 |
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2015
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.choiceboxext import ChoiceBoxExt
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
from Plugins.Extensions.MediaPortal.resources.pininputext import PinInputExt
from Plugins.Extensions.MediaPortal.additions.porn.x2search4porn import toSearchForPorn
config.mediaportal.sharedir_size = ConfigText(default="", fixed_size=False)
config.mediaportal.sharedir_sort = ConfigText(default="", fixed_size=False)
config.mediaportal.sharedir_hoster = ConfigText(default="all Hosters;0", fixed_size=False)
config.mediaportal.sharedir_type = ConfigText(default="Video", fixed_size=False)
hosters =[]
def sharedirListEntry(entry):
return [entry,
(eListboxPythonMultiContent.TYPE_TEXT, 20, 0, 860, mp_globals.fontsize + 2 * mp_globals.sizefactor, 0, RT_HALIGN_CENTER | RT_VALIGN_CENTER, entry[0])
]
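# Each result row rendered by sharedirMultiListEntry below shows three columns
# taken from the filmliste tuple: the file title (entry[0]), the file extension
# (entry[3]) and the hoster name (entry[2]); when premiumize support is active
# and the hoster is on the premium list, the hoster column is drawn in the
# configured premium colour.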
def sharedirMultiListEntry(entry):
if config.mediaportal.premiumize_use.value and re.search(mp_globals.premium_hosters, entry[2], re.S|re.I):
premiumFarbe = int(config.mediaportal.premium_color.value, 0)
return [entry,
(eListboxPythonMultiContent.TYPE_TEXT, 5, 0, 600, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[0]),
(eListboxPythonMultiContent.TYPE_TEXT, 650, 0, 65, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[3]),
(eListboxPythonMultiContent.TYPE_TEXT, 720, 0, 175, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[2], premiumFarbe)
]
else:
return [entry,
(eListboxPythonMultiContent.TYPE_TEXT, 5, 0, 600, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[0]),
(eListboxPythonMultiContent.TYPE_TEXT, 650, 0, 65, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[3]),
(eListboxPythonMultiContent.TYPE_TEXT, 720, 0, 175, mp_globals.fontsize, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[2])
]
def sharedirKeywordEntry(entry):
return [entry,
(eListboxPythonMultiContent.TYPE_TEXT, 20, 0, 860, mp_globals.fontsize + 2 * mp_globals.sizefactor, 0, RT_HALIGN_CENTER | RT_VALIGN_CENTER, entry[0])
]
class sharedirHelper():
def keyType(self):
if self.keyLocked:
return
if re.match(".*?2Search4Porn", self.Name):
return
if self.type != "Audio":
self.type = "Audio"
else:
self.type = "Video"
config.mediaportal.sharedir_type.value = self.type
config.mediaportal.sharedir_type.save()
configfile.save()
self['F4'].setText(self.type)
self.loadFirstPage()
def keySize(self):
if self.keyLocked:
return
rangelist = [['any Size', ''], ['Less than 50MB', '1'], ['50 MB to 500 MB', '2'], ['500 MB to 1 GB', '3'], ['Bigger than 1 GB', '4'], ['Bigger than 2 GB', '2048MB-99999MB'], ['Bigger than 3 GB', '3072MB-99999MB'], ['Bigger than 4 GB', '4096MB-99999MB'], ['Bigger than 5 GB', '5120MB-99999MB'], ['Bigger than 6 GB', '6144MB-99999MB'], ['Bigger than 7 GB', '7168MB-99999MB'], ['Bigger than 8 GB', '8192MB-99999MB']]
self.session.openWithCallback(self.returnSize, ChoiceBoxExt, title=_('Select Size'), list = rangelist)
def returnSize(self, data):
if data:
self.size = data[1]
config.mediaportal.sharedir_size.value = self.size
config.mediaportal.sharedir_size.save()
            configfile.save()
self['F3'].setText(data[0])
self.loadFirstPage()
def keySort(self):
if self.keyLocked:
return
rangelist = [['Relevance', ''], ['Date +', 'da'], ['Date -', 'dd'], ['Size +', 'sa'], ['Size -', 'sd'], ['Filename +', 'na'], ['Filename -', 'nd']]
self.session.openWithCallback(self.returnSort, ChoiceBoxExt, title=_('Select Sort order'), list = rangelist)
def returnSort(self,data):
if data:
self.sort = data[1]
config.mediaportal.sharedir_sort.value = self.sort
config.mediaportal.sharedir_sort.save()
configfile.save()
self['F2'].setText(data[0])
self.loadFirstPage()
def keyHoster(self):
if self.keyLocked:
return
rangelist =[]
for hoster, id in self.hosters:
rangelist.append([hoster, id])
rangelist.sort()
rangelist.insert(0, (['all Hosters', '0']))
self.session.openWithCallback(self.returnHoster, ChoiceBoxExt, title=_('Select Hoster'), list = rangelist)
def returnHoster(self, data):
if data:
self.hoster = data[1]
config.mediaportal.sharedir_hoster.value = data[0]+";"+data[1]
config.mediaportal.sharedir_hoster.save()
configfile.save()
self['F1'].setText(data[0])
self.loadFirstPage()
def loadFirstPage(self):
try:
self.page = 1
self.filmliste = []
self.loadPage()
except:
pass
def errCancelDeferreds(self, error):
myerror = error.getErrorMessage()
if myerror:
raise error
def dataError(self, error):
printl(error,self,"E")
self.keyLocked = False
def cancelSetValue(self):
self.hoster = config.mediaportal.sharedir_hoster.value.split(";")[1]
self.sort = config.mediaportal.sharedir_sort.value
self.size = config.mediaportal.sharedir_size.value
self['F1'].setText(config.mediaportal.sharedir_hoster.value.split(";")[0])
rangelist = [['Relevance', ''], ['Date +', 'da'], ['Date -', 'dd'], ['Size +', 'sa'], ['Size -', 'sd'], ['Filename +', 'na'], ['Filename -', 'nd']]
for item in rangelist:
if item[1] == self.sort:
self['F2'].setText(item[0])
rangelist = [['any Size', ''], ['Less than 50MB', '1'], ['50 MB to 500 MB', '2'], ['500 MB to 1 GB', '3'], ['Bigger than 1 GB', '4'], ['Bigger than 2 GB', '2048MB-99999MB'], ['Bigger than 3 GB', '3072MB-99999MB'], ['Bigger than 4 GB', '4096MB-99999MB'], ['Bigger than 5 GB', '5120MB-99999MB'], ['Bigger than 6 GB', '6144MB-99999MB'], ['Bigger than 7 GB', '7168MB-99999MB'], ['Bigger than 8 GB', '8192MB-99999MB']]
for item in rangelist:
if item[1] == self.size:
self['F3'].setText(item[0])
self['F4'].setText(config.mediaportal.sharedir_type.value)
class sharedirMenueScreen(sharedirHelper, MPScreen):
def __init__(self, session):
self.Name = "--- Multi Search Engine ---"
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"0" : self.closeAll,
"up" : self.keyUp,
"down" : self.keyDown,
"red" : self.keyHoster,
"green" : self.keySort,
"yellow" : self.keySize,
"blue" : self.keyType
}, -1)
self.hoster = config.mediaportal.sharedir_hoster.value.split(";")[1]
self.sort = config.mediaportal.sharedir_sort.value
self.size = config.mediaportal.sharedir_size.value
self.type = config.mediaportal.sharedir_type.value
self['title'] = Label("ShareDir (BETA)")
self['ContentTitle'] = Label("%s" % self.Name)
self['F1'] = Label(config.mediaportal.sharedir_hoster.value.split(";")[0])
self['F4'] = Label(self.type)
self.keyLocked = True
self.suchString = ''
self.hosters = []
self.pin = False
self.genreliste = []
self.chooseMenuList = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self.chooseMenuList.l.setFont(0, gFont('mediaportal', mp_globals.fontsize))
self.chooseMenuList.l.setItemHeight(mp_globals.fontsize + 2 * mp_globals.sizefactor)
self['liste'] = self.chooseMenuList
self.onLayoutFinish.append(self.getHosters)
def getHosters(self):
self.cancelSetValue()
url = "http://sharedir.com"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadHosters).addErrback(self.dataError)
def loadHosters(self, data):
hosterdata = re.findall('<input\stype="checkbox"\sname="dh_\d+"\sid="dh_\d+"\svalue="(\d+)".*?<label\sfor="dh_\d+">(.*?)</label>', data, re.S)
if hosterdata:
for (id, hostername) in hosterdata:
if isSupportedHoster(hostername, True):
self.hosters.append((hostername, id))
global hosters
hosters = self.hosters
self.genreData()
def genreData(self):
self.genreliste.append(("--- Search ---", "callSuchen"))
self.genreliste.append(("Search using Keyword List", "callKeywordList"))
if config.mediaportal.showporn.value and config.mediaportal.show2search4porn.value:
self.genreliste.append(("Search using 2Search4Porn List", "call2SearchList"))
self.chooseMenuList.setList(map(sharedirListEntry, self.genreliste))
self.chooseMenuList.moveToIndex(0)
self.keyLocked = False
def keyOK(self):
if self.keyLocked:
return
Pick = self['liste'].getCurrent()[0][1]
if config.mediaportal.pornpin.value and not self.pin:
self.pincheck()
else:
if Pick == "callSuchen":
self.type = config.mediaportal.sharedir_type.value
self.suchen()
elif Pick == "callKeywordList":
self.session.openWithCallback(self.cancelSetValue, sharedirKeyword, self.type)
else:
self.session.openWithCallback(self.cancelSetValue, call2SearchList)
def SuchenCallback(self, callback = None, entry = None):
Name = self['liste'].getCurrent()[0][0]
Pick = self['liste'].getCurrent()[0][1]
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '+')
self.session.openWithCallback(self.cancelSetValue, sharedirListScreen, self.suchString, Name, self.hoster, self.type, self.size, self.sort)
def pincheck(self):
self.session.openWithCallback(self.pincheckok, PinInputExt, pinList = [(config.mediaportal.pincode.value)], triesEntry = self.getTriesEntry(), title = _("Please enter the correct pin code"), windowTitle = _("Enter pin code"))
def getTriesEntry(self):
return config.ParentalControl.retries.setuppin
def pincheckok(self, pincode):
if pincode:
self.pin = True
self.keyOK()
class call2SearchList(toSearchForPorn):
def keyOK(self):
if self.keyLocked:
return
if len(self.genreliste) > 0:
search = self['liste'].getCurrent()[0][0].rstrip()
Name = "2Search4Porn ShareDir"
self.type = "Video"
self.session.open(sharedirListScreen, search, Name, config.mediaportal.sharedir_hoster.value.split(";")[1], self.type , config.mediaportal.sharedir_size.value, config.mediaportal.sharedir_sort.value)
class sharedirKeyword(MPScreen):
def __init__(self, session, type):
self.type = type
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreenCover.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreenCover.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"red" : self.keyRed,
"green" : self.keyGreen,
"yellow" : self.keyYellow
}, -1)
self['title'] = Label("ShareDir")
self['name'] = Label("Your Search Requests")
self['ContentTitle'] = Label("Annoyed, typing in your search-words again and again?")
self['F1'] = Label(_("Delete"))
self['F2'] = Label(_("Add"))
self['F3'] = Label(_("Edit"))
self.keyLocked = True
self.suchString = ''
self.chooseMenuList = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self.chooseMenuList.l.setFont(0, gFont('mediaportal', mp_globals.fontsize))
self.chooseMenuList.l.setItemHeight(mp_globals.fontsize + 2 * mp_globals.sizefactor)
self['liste'] = self.chooseMenuList
self.onLayoutFinish.append(self.Searches)
def Searches(self):
self.genreliste = []
self['liste'] = self.chooseMenuList
if not fileExists(config.mediaportal.watchlistpath.value+"mp_keywords"):
open(config.mediaportal.watchlistpath.value+"mp_keywords","w").close()
if fileExists(config.mediaportal.watchlistpath.value+"mp_keywords"):
fobj = open(config.mediaportal.watchlistpath.value+"mp_keywords","r")
for line in fobj:
self.genreliste.append((line, None))
fobj.close()
self.chooseMenuList.setList(map(sharedirKeywordEntry, self.genreliste))
self.keyLocked = False
def SearchAdd(self):
suchString = ""
self.session.openWithCallback(self.SearchAdd1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)
def SearchAdd1(self, suchString):
if suchString is not None and suchString != "":
self.genreliste.append((suchString,None))
self.chooseMenuList.setList(map(sharedirKeywordEntry, self.genreliste))
def SearchEdit(self):
if len(self.genreliste) > 0:
suchString = self['liste'].getCurrent()[0][0].rstrip()
self.session.openWithCallback(self.SearchEdit1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)
def SearchEdit1(self, suchString):
if suchString is not None and suchString != "":
pos = self['liste'].getSelectedIndex()
self.genreliste.pop(pos)
self.genreliste.insert(pos,(suchString,None))
self.chooseMenuList.setList(map(self._defaultlistcenter, self.genreliste))
def keyOK(self):
if self.keyLocked:
return
if len(self.genreliste) > 0:
search = self['liste'].getCurrent()[0][0].rstrip()
Name = "Keywords ShareDir"
self.session.open(sharedirListScreen, search, Name, config.mediaportal.sharedir_hoster.value.split(";")[1], self.type , config.mediaportal.sharedir_size.value, config.mediaportal.sharedir_sort.value)
def keyRed(self):
if self.keyLocked:
return
if len(self.genreliste) > 0:
self.genreliste.pop(self['liste'].getSelectedIndex())
self.chooseMenuList.setList(map(self._defaultlistcenter, self.genreliste))
def keyGreen(self):
if self.keyLocked:
return
self.SearchAdd()
def keyYellow(self):
if self.keyLocked:
return
self.SearchEdit()
def keyCancel(self):
if self.keyLocked:
return
self.genreliste.sort(key=lambda t : t[0].lower())
fobj_out = open(config.mediaportal.watchlistpath.value+"mp_keywords","w")
x = len(self.genreliste)
if x > 0:
for c in range(x):
writeback = self.genreliste[c][0].rstrip()+"\n"
fobj_out.write(writeback)
fobj_out.close()
else:
os.remove(config.mediaportal.watchlistpath.value+"mp_keywords")
self.close()
class sharedirListScreen(sharedirHelper, MPScreen):
def __init__(self, session, suchString, Name, hoster, type, size, sort):
self.suchString = suchString
self.Name = Name
self.type = type
self.sort = sort
self.size = size
self.hoster = hoster
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"0" : self.closeAll,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"red" : self.keyHoster,
"green" : self.keySort,
"yellow" : self.keySize,
"blue" : self.keyType
}, -1)
self['title'] = Label("ShareDir")
self['ContentTitle'] = Label("%s / Search for: %s" % (self.Name, self.suchString))
self['Page'] = Label(_("Page:"))
self['F1'] = Label(config.mediaportal.sharedir_hoster.value.split(";")[0])
self['F4'] = Label(self.type)
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.hosters = hosters
self.filmliste = []
self.Cover = ''
self.chooseMenuList = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self.chooseMenuList.l.setFont(0, gFont('mediaportal', mp_globals.fontsize - 4 * mp_globals.sizefactor))
self.chooseMenuList.l.setItemHeight(mp_globals.fontsize)
self['liste'] = self.chooseMenuList
self.deferreds = []
self.ds = defer.DeferredSemaphore(tokens=1)
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.cancelSetValue()
if re.match(".*?2Search4Porn", self.Name):
self['F4'].setText("")
if self.hoster == '0':
for items in self.hosters:
self.hoster = self.hoster + ",%s" % items[1]
if self.hoster[0] == ',':
self.hoster = self.hoster[1:]
self.keyLocked = True
self.filmliste = []
self.chooseMenuList.setList(map(sharedirMultiListEntry, self.filmliste))
self['handlung'].setText('')
self['name'].setText(_('Please wait...'))
Url = "%s" % self.suchString.replace(" ", "+")
if self.sort != '':
Url = "%s&sort=%s" % (Url, self.sort)
if self.size != '':
Url = "%s&size=%s" % (Url, self.size)
if Url:
if Url[0] == '+':
Url = Url[1:]
if self.type == "Audio":
ftype = "3"
else:
ftype = "4"
for items in self.deferreds:
items.cancel()
dsUrl = "http://sharedir.com/index.php?s=%s&start=%s&ftype=%s&stype=%s" % (Url, self.page, ftype, self.hoster)
d = self.ds.run(getPage, dsUrl, agent=std_headers, timeout=5, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadPageData).addErrback(self.dataError)
self.deferreds.append(d)
def loadPageData(self, data):
self.getLastPage(data, 'id="page_links"(.*?)</div>', '.*>\[{0,1}\s{0,1}(\d+)[\]{0,1}\s{0,1}|<]')
preparse = re.search('class="sp_header">(.*?)id="footer', data, re.S)
if preparse:
Movies = re.findall('class="big"\stitle="(.*?)"\shref="(.*?)".*?class="irow".*?<div>(.*?)</div>.*?extension:\s<b>(.*?)</b>.*?size:\s<b>(.*?)</div>.*?date:\s(.*?)</div>.*?class="info_in">(.*?)\/', preparse.group(1), re.S)
if Movies:
for Title, Url, Hostername, Ext, Size, Date, Source in Movies:
Url = "http://sharedir.com%s" % Url
if isSupportedHoster(Hostername, True):
Size = stripAllTags(Size).strip()
self.filmliste.append((decodeHtml(Title), Url, Hostername, Ext, Size, Date, Source.strip("www.")))
if len(self.filmliste) == 0:
            self.filmliste.append((_("No Files found!"), None, '', '', '', '', ''))  # pad to the 7 fields showInfos expects
self.chooseMenuList.setList(map(sharedirMultiListEntry, self.filmliste))
self.keyLocked = False
self.showInfos()
def showInfos(self):
Title = self['liste'].getCurrent()[0][0]
Hoster = self['liste'].getCurrent()[0][2]
Ext = self['liste'].getCurrent()[0][3]
Size = self['liste'].getCurrent()[0][4]
Date = self['liste'].getCurrent()[0][5]
Source = self['liste'].getCurrent()[0][6]
Handlung = "Extension: %s Date: %s Size: %s Hoster: %s Source: %s\n%s" % (Ext, Date, Size, Hoster, Source, Title)
self['name'].setText(Title)
self['handlung'].setText(Handlung)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
if Link == None:
return
self.keyLocked = True
getPage(Link, agent=std_headers, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getHosterLink).addErrback(self.noVideoError).addErrback(self.dataError)
def getHosterLink(self, data):
streams = re.search('<pre\sid="dirlinks"\sclass="dl_normal">(.*?).</pre>', data, re.S)
if streams:
Hoster = self['liste'].getCurrent()[0][2]
self.get_redirect(streams.group(1))
self.keyLocked = False
def noVideoError(self, error):
try:
if error.value.status == '404':
message = self.session.open(MessageBoxExt, _("No link found."), MessageBoxExt.TYPE_INFO, timeout=3)
except:
pass
self.keyLocked = False
raise error
def keyCancel(self):
for items in self.deferreds:
items.cancel()
self.deferreds = []
self.close()
def get_redirect(self, url):
get_stream_link(self.session).check_link(url, self.got_link)
def got_link(self, stream_url):
self.keyLocked = False
if stream_url == None:
message = self.session.open(MessageBoxExt, _("Stream not found, try another Stream Hoster."), MessageBoxExt.TYPE_INFO, timeout=3)
else:
Title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(Title, stream_url)], showPlaylist=False, ltype='sharedir')
| n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/grauzone/sharedir.py | Python | gpl-2.0 | 21,955 |
"""Base classes for multi-valued assignments in methodgraphs"""
from method import Method, MethodGraph
class MultiVariable:
"""For representing multi-valued variables
"""
def __init__(self, name=None):
self.name = name
def __repr__(self):
return str(self)
def __str__(self):
if self.name == None:
return "MultiVariable#"+str(id(self))
else:
return "MultiVariable("+self.name+")"
class MultiMethod(Method):
"""A Method that is executed for multiple alternative inputs, resulting
in multiple output values.
Input may optionally contain MultiVariable instances.
    There must be a single MultiVariable output variable.
    Subclasses should implement the 'multi_execute' method, not override the 'execute' method.
This method is called for every permutation of values of multi-valued input variables.
Any input variables that are instances of MultiVariable will be replaced by their
shadowed counterpart in the input map for multi_execute.
The 'multi_execute' method must return a list of possible values for the output variable.
    The output values returned by subsequent calls to multi_execute are collected and stored in the
output MultiVariable.
    Note that a set of values for the output variable is stored, so that equivalent values are only stored once.
"""
def __init__(self):
"""Call this initialize after _inputs and _outputs has been set"""
self._multi_inputs = []
for variable in self._inputs:
if isinstance(variable, MultiVariable):
self._multi_inputs.append(variable)
if len(self._outputs) != 1:
raise StandardError, "requires exactly one output"
if not isinstance(self._outputs[0], MultiVariable):
raise StandardError, "requires a MultiVariable output"
def execute(self, inmap):
"""calls multi_execute for each permutation of multi-valued input variables and collects
result in multi-valued ouput variables. Subclasses should implement multi_execute."""
base_inmap = {}
for variable in self._inputs:
if variable not in self._multi_inputs:
value = inmap[variable]
base_inmap[variable] = value
outvar = self._outputs[0]
values = self._recurse_execute(inmap, base_inmap, self._multi_inputs)
return {outvar:values}
def _recurse_execute(self, inmap, base_inmap, multi_inputs):
if len(multi_inputs) > 0:
mvar = multi_inputs[0]
values = inmap[mvar]
output = set()
for value in values:
base_inmap[mvar] = value
output.update(self._recurse_execute(inmap, base_inmap, multi_inputs[1:]))
return output
else:
return self.multi_execute(base_inmap)
#####
class SumProdMethod(MultiMethod):
"""A MultiMethod that assigns the sum and product of its input to it's ouput MultiVariable"""
def __init__(self, a,b,c):
self._inputs = [a,b]
self._outputs = [c]
MultiMethod.__init__(self)
def multi_execute(self, inmap):
#print str(inmap)
a = inmap[self._inputs[0]]
b = inmap[self._inputs[1]]
result = [a+b, a*b]
#print result
return result
def test():
graph = MethodGraph()
graph.add_variable('a', 1)
graph.add_variable('b', 2)
mv_x = MultiVariable('x')
graph.add_variable(mv_x)
graph.add_method(SumProdMethod('a','b', mv_x))
graph.add_variable('p', 3)
graph.add_variable('q', 4)
mv_y = MultiVariable('y')
graph.add_variable(mv_y)
graph.add_method(SumProdMethod('p','q', mv_y))
mv_z = MultiVariable('z')
graph.add_variable(mv_z)
graph.add_method(SumProdMethod(mv_x,mv_y,mv_z))
print graph.get(mv_z)
graph.set('a', 100)
print graph.get(mv_z)
if __name__== '__main__': test()
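# For reference (a rough sanity check, not part of the original file): with the
# inputs above the first print should show the set of all sums and products of
# {3, 2} x {7, 12}, i.e. {9, 10, 14, 15, 21, 24, 36}, and after a is set to 100
# the second print should show {109, 114, 207, 212, 714, 1224, 1400, 2400}.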
| philetus/geosolver | geosolver/multimethod.py | Python | gpl-3.0 | 4,069 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTestMore(PerlPackage):
"""Test2 is a new testing framework produced by forking Test::Builder,
completely refactoring it, adding many new features and capabilities."""
homepage = "https://github.com/Test-More/test-more"
url = "https://github.com/Test-More/test-more/archive/v1.302183.tar.gz"
version('1.302183', sha256='1356ec24c5ab3f7ad8327091ddc6ace164a27767be10325776bf9743360ab4f7')
version('1.302182', sha256='60727db9435cb244f6dcf4ca598c8ef39ac3035a0c36fd5c9c5b89be4f138366')
version('1.302181', sha256='acb3c990d646928e7571c140510d7424716d3281c4064b1787294e72b39f61ce')
version('1.302180', sha256='4dbed31e9434d74427b41a17ca3e0511a81ee5cb1240408c7f439c6449672a20')
| LLNL/spack | var/spack/repos/builtin/packages/perl-test-more/package.py | Python | lgpl-2.1 | 940 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarballs (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.10 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
import subprocess
import sys
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' variables were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = ""
versionfile_source = "src/gitissuebot/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded variables.
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
except NameError:
return default
return (versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
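# Typical results (illustrative only): from a tagged git checkout this returns
# something like {"version": "1.0", "full": "<40-char commit sha>"}; when no
# version information can be recovered, the `default` argument is returned
# unchanged.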
| foosel/GitIssueBot | src/gitissuebot/_version.py | Python | agpl-3.0 | 7,318 |
"""Tests for the Entity Registry."""
import asyncio
from unittest.mock import patch
import asynctest
import pytest
from homeassistant.core import valid_entity_id, callback
from homeassistant.helpers import entity_registry
from tests.common import mock_registry, flush_store
YAML__OPEN_PATH = "homeassistant.util.yaml.loader.open"
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, async_capture)
return events
async def test_get_or_create_returns_same_entry(hass, registry, update_events):
"""Make sure we do not duplicate entries."""
entry = registry.async_get_or_create("light", "hue", "1234")
entry2 = registry.async_get_or_create("light", "hue", "1234")
await hass.async_block_till_done()
assert len(registry.entities) == 1
assert entry is entry2
assert entry.entity_id == "light.hue_1234"
assert len(update_events) == 1
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
def test_get_or_create_suggested_object_id(registry):
"""Test that suggested_object_id works."""
entry = registry.async_get_or_create(
"light", "hue", "1234", suggested_object_id="beer"
)
assert entry.entity_id == "light.beer"
def test_get_or_create_suggested_object_id_conflict_register(registry):
"""Test that we don't generate an entity id that is already registered."""
entry = registry.async_get_or_create(
"light", "hue", "1234", suggested_object_id="beer"
)
entry2 = registry.async_get_or_create(
"light", "hue", "5678", suggested_object_id="beer"
)
assert entry.entity_id == "light.beer"
assert entry2.entity_id == "light.beer_2"
def test_get_or_create_suggested_object_id_conflict_existing(hass, registry):
"""Test that we don't generate an entity id that currently exists."""
hass.states.async_set("light.hue_1234", "on")
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234_2"
def test_create_triggers_save(hass, registry):
"""Test that registering entry triggers a save."""
with patch.object(registry, "async_schedule_save") as mock_schedule_save:
registry.async_get_or_create("light", "hue", "1234")
assert len(mock_schedule_save.mock_calls) == 1
async def test_loading_saving_data(hass, registry):
"""Test that we load/save data correctly."""
orig_entry1 = registry.async_get_or_create("light", "hue", "1234")
orig_entry2 = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id"
)
assert len(registry.entities) == 2
# Now load written data in new registry
registry2 = entity_registry.EntityRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
# Ensure same order
assert list(registry.entities) == list(registry2.entities)
new_entry1 = registry.async_get_or_create("light", "hue", "1234")
new_entry2 = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id"
)
assert orig_entry1 == new_entry1
assert orig_entry2 == new_entry2
def test_generate_entity_considers_registered_entities(registry):
"""Test that we don't create entity id that are already registered."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234"
assert registry.async_generate_entity_id("light", "hue_1234") == "light.hue_1234_2"
def test_generate_entity_considers_existing_entities(hass, registry):
"""Test that we don't create entity id that currently exists."""
hass.states.async_set("light.kitchen", "on")
assert registry.async_generate_entity_id("light", "kitchen") == "light.kitchen_2"
def test_is_registered(registry):
"""Test that is_registered works."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert registry.async_is_registered(entry.entity_id)
assert not registry.async_is_registered("light.non_existing")
async def test_loading_extra_values(hass, hass_storage):
"""Test we load extra data from the registry."""
hass_storage[entity_registry.STORAGE_KEY] = {
"version": entity_registry.STORAGE_VERSION,
"data": {
"entities": [
{
"entity_id": "test.named",
"platform": "super_platform",
"unique_id": "with-name",
"name": "registry override",
},
{
"entity_id": "test.no_name",
"platform": "super_platform",
"unique_id": "without-name",
},
{
"entity_id": "test.disabled_user",
"platform": "super_platform",
"unique_id": "disabled-user",
"disabled_by": "user",
},
{
"entity_id": "test.disabled_hass",
"platform": "super_platform",
"unique_id": "disabled-hass",
"disabled_by": "hass",
},
]
},
}
registry = await entity_registry.async_get_registry(hass)
entry_with_name = registry.async_get_or_create(
"test", "super_platform", "with-name"
)
entry_without_name = registry.async_get_or_create(
"test", "super_platform", "without-name"
)
assert entry_with_name.name == "registry override"
assert entry_without_name.name is None
assert not entry_with_name.disabled
entry_disabled_hass = registry.async_get_or_create(
"test", "super_platform", "disabled-hass"
)
entry_disabled_user = registry.async_get_or_create(
"test", "super_platform", "disabled-user"
)
assert entry_disabled_hass.disabled
assert entry_disabled_hass.disabled_by == entity_registry.DISABLED_HASS
assert entry_disabled_user.disabled
assert entry_disabled_user.disabled_by == entity_registry.DISABLED_USER
def test_async_get_entity_id(registry):
"""Test that entity_id is returned."""
entry = registry.async_get_or_create("light", "hue", "1234")
assert entry.entity_id == "light.hue_1234"
assert registry.async_get_entity_id("light", "hue", "1234") == "light.hue_1234"
assert registry.async_get_entity_id("light", "hue", "123") is None
async def test_updating_config_entry_id(hass, registry, update_events):
"""Test that we update config entry id in registry."""
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id-1"
)
entry2 = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id-2"
)
assert entry.entity_id == entry2.entity_id
assert entry2.config_entry_id == "mock-id-2"
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
assert update_events[1]["action"] == "update"
assert update_events[1]["entity_id"] == entry.entity_id
async def test_removing_config_entry_id(hass, registry, update_events):
"""Test that we update config entry id in registry."""
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id-1"
)
assert entry.config_entry_id == "mock-id-1"
registry.async_clear_config_entry("mock-id-1")
assert not registry.entities
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["entity_id"] == entry.entity_id
assert update_events[1]["action"] == "remove"
assert update_events[1]["entity_id"] == entry.entity_id
async def test_migration(hass):
"""Test migration from old data to new."""
old_conf = {
"light.kitchen": {
"config_entry_id": "test-config-id",
"unique_id": "test-unique",
"platform": "test-platform",
"name": "Test Name",
"disabled_by": "hass",
}
}
with patch("os.path.isfile", return_value=True), patch("os.remove"), patch(
"homeassistant.helpers.entity_registry.load_yaml", return_value=old_conf
):
registry = await entity_registry.async_get_registry(hass)
assert registry.async_is_registered("light.kitchen")
entry = registry.async_get_or_create(
domain="light",
platform="test-platform",
unique_id="test-unique",
config_entry_id="test-config-id",
)
assert entry.name == "Test Name"
assert entry.disabled_by == "hass"
assert entry.config_entry_id == "test-config-id"
async def test_loading_invalid_entity_id(hass, hass_storage):
"""Test we autofix invalid entity IDs."""
hass_storage[entity_registry.STORAGE_KEY] = {
"version": entity_registry.STORAGE_VERSION,
"data": {
"entities": [
{
"entity_id": "test.invalid__middle",
"platform": "super_platform",
"unique_id": "id-invalid-middle",
"name": "registry override",
},
{
"entity_id": "test.invalid_end_",
"platform": "super_platform",
"unique_id": "id-invalid-end",
},
{
"entity_id": "test._invalid_start",
"platform": "super_platform",
"unique_id": "id-invalid-start",
},
]
},
}
registry = await entity_registry.async_get_registry(hass)
entity_invalid_middle = registry.async_get_or_create(
"test", "super_platform", "id-invalid-middle"
)
assert valid_entity_id(entity_invalid_middle.entity_id)
entity_invalid_end = registry.async_get_or_create(
"test", "super_platform", "id-invalid-end"
)
assert valid_entity_id(entity_invalid_end.entity_id)
entity_invalid_start = registry.async_get_or_create(
"test", "super_platform", "id-invalid-start"
)
assert valid_entity_id(entity_invalid_start.entity_id)
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with asynctest.patch(
"homeassistant.helpers.entity_registry.EntityRegistry.async_load"
) as mock_load:
results = await asyncio.gather(
entity_registry.async_get_registry(hass),
entity_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
async def test_update_entity_unique_id(registry):
"""Test entity's unique_id is updated."""
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id-1"
)
new_unique_id = "1234"
with patch.object(registry, "async_schedule_save") as mock_schedule_save:
updated_entry = registry.async_update_entity(
entry.entity_id, new_unique_id=new_unique_id
)
assert updated_entry != entry
assert updated_entry.unique_id == new_unique_id
assert mock_schedule_save.call_count == 1
async def test_update_entity_unique_id_conflict(registry):
"""Test migration raises when unique_id already in use."""
entry = registry.async_get_or_create(
"light", "hue", "5678", config_entry_id="mock-id-1"
)
entry2 = registry.async_get_or_create(
"light", "hue", "1234", config_entry_id="mock-id-1"
)
with patch.object(
registry, "async_schedule_save"
) as mock_schedule_save, pytest.raises(ValueError):
registry.async_update_entity(entry.entity_id, new_unique_id=entry2.unique_id)
assert mock_schedule_save.call_count == 0
| fbradyirl/home-assistant | tests/helpers/test_entity_registry.py | Python | apache-2.0 | 12,247 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RXvector(RPackage):
"""Memory efficient S4 classes for storing sequences "externally" (behind
an R external pointer, or on disk)."""
homepage = "https://bioconductor.org/packages/XVector/"
git = "https://git.bioconductor.org/packages/XVector.git"
version('0.16.0', commit='54615888e1a559da4a81de33e934fc0f1c3ad99f')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@0.16.0')
| krafczyk/spack | var/spack/repos/builtin/packages/r-xvector/package.py | Python | lgpl-2.1 | 1,870 |
## -*- coding: utf-8 -*-
##
## util.py
##
## Author: Toke Høiland-Jørgensen (toke@toke.dk)
## Date: 16 October 2012
## Copyright (c) 2012, Toke Høiland-Jørgensen
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import math, os, gzip, io, socket, re
from bisect import bisect_left
from datetime import datetime
try:
import configparser
except ImportError:
import ConfigParser as configparser
def uscore_to_camel(s):
"""Turn a underscore style string (org_table) into a CamelCase style string
(OrgTable) for class names."""
return ''.join(x.capitalize() for x in s.split("_"))
def classname(s, suffix=''):
return uscore_to_camel(s)+suffix
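# Illustrative usage (not part of the original module; the suffix is arbitrary):
#   uscore_to_camel("org_table")     -> "OrgTable"
#   classname("org_table", "Runner") -> "OrgTableRunner"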
def parse_date(timestring):
try:
return datetime.strptime(timestring, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
return datetime.strptime(timestring, "%Y-%m-%dT%H:%M:%S")
def clean_path(path, allow_dirs=False):
if allow_dirs:
return re.sub("[^A-Za-z0-9_/-]", "_", path)
else:
return re.sub("[^A-Za-z0-9_-]", "_", path)
def long_substr(data, prefix_only=False):
"""Find the longest common substring between a list of strings.
Optionally limit search to prefixes only.
Brute force approach (i.e. not very efficient...).
Based on https://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python"""
substr = ''
if len(data) > 1 and len(data[0]) > 0:
if prefix_only:
start_pos = [0]
else:
start_pos = range(len(data[0]))
for i in start_pos:
for j in range(len(data[0])-i+1):
if j > len(substr) and all(data[0][i:i+j] in x for x in data):
substr = data[0][i:i+j]
return substr
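# Example (hypothetical inputs): long_substr(["test_one", "test_two"]) returns
# "test_", the longest substring shared by both strings; with prefix_only=True
# only common prefixes are considered.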
# Calculate discrete cdf function using bisect_left.
def cum_prob(data, val, size):
return bisect_left(data, val)/size
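# Example (hypothetical values): for sorted data [1, 2, 3, 4], cum_prob(data, 3, 4.0)
# gives bisect_left(...) == 2, i.e. a CDF value of 2/4.0 = 0.5. Passing size as a
# float avoids integer division on Python 2.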
# from http://code.activestate.com/recipes/66472/
def frange(limit1, limit2 = None, increment = 1.):
"""
Range function that accepts floats (and integers).
Usage:
frange(-2, 2, 0.1)
frange(10)
frange(10, increment = 0.5)
The returned value is an iterator. Use list(frange) for a list.
"""
if limit2 is None:
limit2, limit1 = limit1, 0.
else:
limit1 = float(limit1)
count = int(math.ceil((limit2 - limit1)/increment))
return (limit1 + n*increment for n in range(count))
def is_executable(filename):
return os.path.isfile(filename) and os.access(filename, os.X_OK)
def which(executable, fail=False):
pathname, filename = os.path.split(executable)
if pathname:
if is_executable(executable):
return executable
else:
for path in [i.strip('""') for i in os.environ["PATH"].split(os.pathsep)]:
filename = os.path.join(path, executable)
if is_executable(filename):
return filename
if fail:
raise RuntimeError("No %s binary found in PATH." % executable)
return None
def lookup_host(hostname, version=None):
if version == 4:
version = socket.AF_INET
elif version == 6:
version = socket.AF_INET6
else:
version = socket.AF_UNSPEC
hostnames = socket.getaddrinfo(hostname, None, version,
socket.SOCK_STREAM)
if not hostnames:
raise RuntimeError("Found no hostnames on lookup of %s" % h)
return hostnames[0]
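# Note: the value returned above is the first socket.getaddrinfo() tuple, i.e.
# (family, socktype, proto, canonname, sockaddr); for example the resolved
# address string is available as lookup_host(hostname)[4][0].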
# In Python 2.6, the GzipFile object does not have a 'closed' property, which makes
# the io module blow up when trying to close it. This tidbit tries to detect that
# and substitute a subclass that does have the property, while not touching
# anything if the property is already present.
if hasattr(gzip.GzipFile, "closed"):
_gzip_open = gzip.open
else:
class GzipFile(gzip.GzipFile):
def get_closed(self):
return self.fileobj is None
# Setter needed for python3.1-compatibility
def set_closed(self, closed):
self._closed = closed
closed = property(get_closed, set_closed)
_gzip_open = GzipFile
def gzip_open(filename, mode="rb"):
"""Compatibility layer for gzip to work in Python 3.1 and 3.2."""
wrap_text = False
if "t" in mode:
wrap_text = True
mode = mode.replace("t", "")
binary_file = _gzip_open(filename, mode)
if wrap_text:
# monkey-patching required to make gzip object compatible with TextIOWrapper
# in Python 3.1.
if not hasattr(binary_file, "readable"):
def readable():
return binary_file.mode == gzip.READ
binary_file.readable = readable
if not hasattr(binary_file, "writable"):
def writable():
return binary_file.mode == gzip.WRITE
binary_file.writable = writable
if not hasattr(binary_file, "seekable"):
def seekable():
return True
binary_file.seekable = seekable
# This wrapping is done by the builtin gzip module in python 3.3.
return io.TextIOWrapper(binary_file)
else:
return binary_file
class DefaultConfigParser(configparser.ConfigParser):
class _NoDefault(object):
pass
def get(self, section, option, default=_NoDefault):
try:
return configparser.ConfigParser.get(self, section, option)
except configparser.NoOptionError:
if default==self._NoDefault:
raise
else:
return default
def getint(self, section, option, default=_NoDefault):
try:
return configparser.ConfigParser.getint(self, section, option)
except configparser.NoOptionError:
if default==self._NoDefault:
raise
else:
return default
def getfloat(self, section, option, default=_NoDefault):
try:
return configparser.ConfigParser.getfloat(self, section, option)
except configparser.NoOptionError:
if default==self._NoDefault:
raise
else:
return default
def getboolean(self, section, option, default=_NoDefault):
try:
return configparser.ConfigParser.getboolean(self, section, option)
except configparser.NoOptionError:
if default==self._NoDefault:
raise
else:
return default
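# Illustrative usage sketch (file, section and option names are hypothetical):
#   cfg = DefaultConfigParser()
#   cfg.read("netperf-wrapper.ini")
#   cfg.getint("global", "length", default=60)  # falls back to 60 if the option
#                                               # is missing; a missing section
#                                               # still raises as usual.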
| tohoanglinh/study_bufferbloat | p_bb_os_netperf_wrapper/netperf-wrapper/build/lib.linux-i686-2.7/netperf_wrapper/util.py | Python | gpl-3.0 | 7,125 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ToughPathRenderingCasesPage(page_module.Page):
def RunPageInteractions(self, action_runner):
with action_runner.CreateInteraction('ClickStart'):
action_runner.Wait(10)
class ChalkboardPage(page_module.Page):
def RunPageInteractions(self, action_runner):
with action_runner.CreateInteraction('ClickStart'):
action_runner.EvaluateJavaScript(
'document.getElementById("StartButton").click()')
action_runner.Wait(20)
class ToughPathRenderingCasesPageSet(story.StorySet):
"""
Description: Self-driven path rendering examples
"""
def __init__(self):
super(ToughPathRenderingCasesPageSet, self).__init__(
archive_data_file='data/tough_path_rendering_cases.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
urls_list = [
'http://www.craftymind.com/factory/guimark2/HTML5ChartingTest.html'
]
for url in urls_list:
self.AddStory(ToughPathRenderingCasesPage(url, self))
# Chalkboard content linked from
# http://ie.microsoft.com/testdrive/Performance/Chalkboard/.
self.AddStory(ChalkboardPage(
'https://testdrive-archive.azurewebsites.net/performance/chalkboard/', self))
| danakj/chromium | tools/perf/page_sets/tough_path_rendering_cases.py | Python | bsd-3-clause | 1,424 |
import os
import sys
import tempfile
import unittest
from datetime import date
from copy import deepcopy
from pyprint.ConsolePrinter import ConsolePrinter
from coalib.output.ConfWriter import ConfWriter
from coala_quickstart.coala_quickstart import _get_arg_parser
from coala_quickstart.generation.Settings import write_info, generate_settings
from coala_quickstart.generation.Bears import filter_relevant_bears
from coala_quickstart.generation.Project import get_used_languages
class SettingsTest(unittest.TestCase):
def setUp(self):
self.project_dir = os.getcwd()
self.printer = ConsolePrinter()
self.coafile = os.path.join(tempfile.gettempdir(), '.coafile')
self.writer = ConfWriter(self.coafile)
self.arg_parser = _get_arg_parser()
self.old_argv = deepcopy(sys.argv)
del sys.argv[1:]
def tearDown(self):
self.writer.close()
os.remove(self.coafile)
sys.argv = self.old_argv
def test_write_info(self):
result_date = date.today().strftime('%d %b %Y')
result_comment = ('# Generated by coala-quickstart on '
'{date}.\n'.format(date=result_date))
write_info(self.writer)
self.writer.close()
with open(self.coafile, 'r') as f:
line = f.readline()
self.assertEqual(result_comment, line)
def test_allow_complete_section_mode(self):
project_dir = '/repo'
project_files = ['/repo/hello.html']
ignore_globs = []
used_languages = list(get_used_languages(project_files))
relevant_bears = filter_relevant_bears(
used_languages, self.printer, self.arg_parser, {})
res = generate_settings(
project_dir, project_files, ignore_globs, relevant_bears, {}, True)
bears_list = res['all.HTML']['bears'].value.replace(' ', '').split(',')
files_list = res['all.HTML']['files'].value.replace(' ', '').split(',')
        # Note: list.sort() returns None, so comparing the results of .sort()
        # would always pass; compare sorted copies instead.
        self.assertEqual(
            sorted(['HTMLLintBear', 'coalaBear', 'BootLintBear',
                    'LicenseCheckBear', 'SpaceConsistencyBear', 'KeywordBear',
                    'LineLengthBear', 'DuplicateFileBear']),
            sorted(bears_list))
self.assertEqual(['**.html'], files_list)
| MalkmusT/coala-quickstart | tests/generation/SettingsTest.py | Python | agpl-3.0 | 2,273 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import re
import sys
import numpy as np
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.training import py_checkpoint_reader
FLAGS = None
def _count_total_params(reader, count_exclude_pattern=""):
"""Count total number of variables."""
var_to_shape_map = reader.get_variable_to_shape_map()
# Filter out tensors that we don't want to count
if count_exclude_pattern:
regex_pattern = re.compile(count_exclude_pattern)
new_var_to_shape_map = {}
exclude_num_tensors = 0
exclude_num_params = 0
for v in var_to_shape_map:
if regex_pattern.search(v):
exclude_num_tensors += 1
exclude_num_params += np.prod(var_to_shape_map[v])
else:
new_var_to_shape_map[v] = var_to_shape_map[v]
var_to_shape_map = new_var_to_shape_map
print("# Excluding %d tensors (%d params) that match %s when counting." % (
exclude_num_tensors, exclude_num_params, count_exclude_pattern))
var_sizes = [np.prod(var_to_shape_map[v]) for v in var_to_shape_map]
return np.sum(var_sizes, dtype=int)
def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors,
all_tensor_names=False,
count_exclude_pattern=""):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
all_tensors: Boolean indicating whether to print all tensors.
all_tensor_names: Boolean indicating whether to print all tensor names.
count_exclude_pattern: Regex string, pattern to exclude tensors when count.
"""
try:
reader = py_checkpoint_reader.NewCheckpointReader(file_name)
if all_tensors or all_tensor_names:
var_to_shape_map = reader.get_variable_to_shape_map()
var_to_dtype_map = reader.get_variable_to_dtype_map()
for key, value in sorted(var_to_shape_map.items()):
print("tensor: %s (%s) %s" % (key, var_to_dtype_map[key].name, value))
if all_tensors:
print(reader.get_tensor(key))
elif not tensor_name:
print(reader.debug_string().decode("utf-8", errors="ignore"))
else:
if not reader.has_tensor(tensor_name):
print("Tensor %s not found in checkpoint" % tensor_name)
return
var_to_shape_map = reader.get_variable_to_shape_map()
var_to_dtype_map = reader.get_variable_to_dtype_map()
print("tensor: %s (%s) %s" %
(tensor_name, var_to_dtype_map[tensor_name].name,
var_to_shape_map[tensor_name]))
print(reader.get_tensor(tensor_name))
# Count total number of parameters
print("# Total number of params: %d" % _count_total_params(
reader, count_exclude_pattern=count_exclude_pattern))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
if ("Data loss" in str(e) and
any(e in file_name for e in [".index", ".meta", ".data"])):
proposed_file = ".".join(file_name.split(".")[0:-1])
v2_file_error_template = """
It's likely that this is a V2 checkpoint and you need to provide the filename
*prefix*. Try removing the '.' and extension. Try:
inspect checkpoint --file_name = {}"""
print(v2_file_error_template.format(proposed_file))
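# Illustrative call (the checkpoint prefix is hypothetical): list all tensor
# names and shapes without printing their values:
#   print_tensors_in_checkpoint_file(
#       "model.ckpt", tensor_name="", all_tensors=False, all_tensor_names=True)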
def parse_numpy_printoption(kv_str):
"""Sets a single numpy printoption from a string of the form 'x=y'.
See documentation on numpy.set_printoptions() for details about what values
x and y can take. x can be any option listed there other than 'formatter'.
Args:
kv_str: A string of the form 'x=y', such as 'threshold=100000'
Raises:
argparse.ArgumentTypeError: If the string couldn't be used to set any
        numpy printoption.
"""
k_v_str = kv_str.split("=", 1)
if len(k_v_str) != 2 or not k_v_str[0]:
raise argparse.ArgumentTypeError("'%s' is not in the form k=v." % kv_str)
k, v_str = k_v_str
printoptions = np.get_printoptions()
if k not in printoptions:
raise argparse.ArgumentTypeError("'%s' is not a valid printoption." % k)
v_type = type(printoptions[k])
if v_type is type(None):
raise argparse.ArgumentTypeError(
"Setting '%s' from the command line is not supported." % k)
try:
v = (
v_type(v_str)
if v_type is not bool else flags.BooleanParser().parse(v_str))
except ValueError as e:
    # ValueError instances have no .message attribute on Python 3; use str(e).
    raise argparse.ArgumentTypeError(str(e))
np.set_printoptions(**{k: v})
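# Example: parse_numpy_printoption("precision=3") coerces "3" to int and calls
# np.set_printoptions(precision=3); unknown keys raise ArgumentTypeError.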
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=checkpoint_file_name "
"[--tensor_name=tensor_to_print] "
"[--all_tensors] "
"[--all_tensor_names] "
"[--printoptions]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(
FLAGS.file_name, FLAGS.tensor_name,
FLAGS.all_tensors, FLAGS.all_tensor_names,
count_exclude_pattern=FLAGS.count_exclude_pattern)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--file_name",
type=str,
default="",
help="Checkpoint filename. "
"Note, if using Checkpoint V2 format, file_name is the "
"shared prefix between all files in the checkpoint.")
parser.add_argument(
"--tensor_name",
type=str,
default="",
help="Name of the tensor to inspect")
parser.add_argument(
"--count_exclude_pattern",
type=str,
default="",
help="Pattern to exclude tensors, e.g., from optimizers, when counting.")
parser.add_argument(
"--all_tensors",
nargs="?",
const=True,
type="bool",
default=False,
help="If True, print the names and values of all the tensors.")
parser.add_argument(
"--all_tensor_names",
nargs="?",
const=True,
type="bool",
default=False,
help="If True, print the names of all the tensors.")
parser.add_argument(
"--printoptions",
nargs="*",
type=parse_numpy_printoption,
help="Argument for numpy.set_printoptions(), in the form 'k=v'.")
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| karllessard/tensorflow | tensorflow/python/tools/inspect_checkpoint.py | Python | apache-2.0 | 7,468 |
from django.db import models
class ActiveManager(models.Manager):
def get_query_set(self):
        return super(ActiveManager, self).get_query_set().filter(date_deleted__isnull=True)
 | guilleJB/fabric-bolt | src/fabric_bolt/projects/model_managers.py | Python | mit | 188
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields
from osv import osv
import base64
from tools.translate import _
import addons
class base_report_designer_installer(osv.osv_memory):
_name = 'base_report_designer.installer'
_inherit = 'res.config.installer'
def default_get(self, cr, uid, fields, context=None):
data = super(base_report_designer_installer, self).default_get(cr, uid, fields, context=context)
plugin_file = open(addons.get_module_resource('base_report_designer','plugin', 'openerp_report_designer.zip'),'rb')
data['plugin_file'] = base64.encodestring(plugin_file.read())
return data
_columns = {
'name':fields.char('File name', size=34),
'plugin_file':fields.binary('OpenObject Report Designer Plug-in', readonly=True, help="OpenObject Report Designer plug-in file. Save as this file and install this plug-in in OpenOffice."),
'description':fields.text('Description', readonly=True)
}
_defaults = {
'name' : 'openerp_report_designer.zip',
'description' : """
* Save the OpenERP Report Designer plug-in.
* Follow these steps to install the plug-in.
   1. Open the Extension Manager from the OpenOffice Writer menu bar: Tools > Extension Manager.
   2. Click the "Add" button.
   3. Select the path where openerp_report_designer.zip is located.
   4. Once the package has been added, it appears in the Extension Manager and its status becomes 'Enabled'.
   5. Restart OpenOffice Writer.
* Follow these steps to configure the OpenERP Report Designer plug-in in OpenOffice Writer.
   1. Connect to the OpenERP server from the menu bar: OpenERP Report Designer > Server parameter.
   2. Select the server URL and database, and provide your user name and password.
   3. Click "Connect".
   4. If the connection succeeds, a message like 'You can start creating your report in current document.' appears.
"""
}
base_report_designer_installer()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Johnzero/erp | openerp/addons/base_report_designer/installer.py | Python | agpl-3.0 | 3,097 |
import json
import types
from copy import copy
import six
from django import template
from django.forms import CheckboxInput, CheckboxSelectMultiple, RadioSelect
from django.utils.html import escapejs
from django.utils.safestring import mark_safe
from six.moves import range, zip
from oioioi.contests.scores import IntegerScore
from oioioi.pa.score import PAScore
register = template.Library()
@register.filter
def is_checkbox(field):
return isinstance(field.field.widget, CheckboxInput)
@register.filter
def is_checkbox_select_multiple(field):
return isinstance(field.field.widget, CheckboxSelectMultiple)
@register.filter
def is_radioselect(field):
return isinstance(field.field.widget, RadioSelect)
@register.filter
def lookup(d, key):
"""
Lookup value from dictionary
Example:
{% load simple_filters %}
{{ dict|lookup:key }}
"""
return d[key]
@register.filter
def multival_lookup(d, key):
"""
Returns a value list corresponding to a key from Django's MultiValueDict
"""
return d.getlist(key)
@register.filter(name='indent')
def indent_string(value, num_spaces=4):
"""
Adds ``num_spaces`` spaces at the
beginning of every line in value.
"""
return ' ' * num_spaces + value.replace('\n', '\n' + ' ' * num_spaces)
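# Example: indent_string("a\nb") returns "    a\n    b" (every line, including
# the first, is prefixed with num_spaces spaces).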
def _append_attr(field, attribute, value):
# adapted from 'django-widget-tweaks'
field = copy(field)
# decorate field.as_widget method with updated attributes
old_as_widget = field.as_widget
def as_widget(self, widget=None, attrs=None, only_initial=False):
widget = widget or self.field.widget
attrs = attrs or {}
custom_append_attr = getattr(widget, "append_attr", None)
if not (custom_append_attr and custom_append_attr(attribute, value)):
if attrs.get(attribute):
attrs[attribute] += " " + value
elif widget.attrs.get(attribute):
attrs[attribute] = widget.attrs[attribute] + " " + value
else:
attrs[attribute] = value
if attribute == "type": # change the Input type
self.field.widget.input_type = value
del attrs["type"]
html = old_as_widget(widget, attrs, only_initial)
self.as_widget = old_as_widget
return html
field.as_widget = types.MethodType(as_widget, field)
return field
@register.filter(name='add_class')
def add_class(field, css_class):
"""
Adds css class to a django form field
:param field: form field
:param css_class: css class
:return: field with added class
Example usage
# my_app/forms.py
```python
class MyForm(Form):
        my_field = forms.CharField(max_length=100)
    ```
    # my_app/views.py
```python
def get_form(request):
my_form = MyForm()
        return render(request, 'my_form.html', {'form': my_form})
```
# my_app/templates/my_form.html
```html
{{ form.field|add_class:"my-class" }}
```
would generate
```html
<input class="my-class" id="my_field" name="my_field" />
```
"""
return _append_attr(field, "class", css_class)
@register.filter
def partition(thelist, n):
"""
From: http://djangosnippets.org/snippets/6/
Break a list into ``n`` pieces. If ``n`` is not a divisor of the
length of the list, then first pieces are one element longer
then the last ones. That is::
>>> l = range(10)
>>> partition(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> partition(l, 3)
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> partition(l, 4)
[[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
>>> partition(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
You can use the filter in the following way:
{% load simple_filters %}
    {% for sublist in mylist|partition:"3" %}
{% for item in sublist %}
do something with {{ item }}
{% endfor %}
{% endfor %}
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
p = len(thelist) // n
num_longer = len(thelist) - p * n
return [thelist[((p + 1) * i) : ((p + 1) * (i + 1))] for i in range(num_longer)] + [
thelist[(p * i + num_longer) : (p * (i + 1) + num_longer)]
for i in range(num_longer, n)
]
@register.filter
def cyclic_lookup(thelist, index):
return thelist[index % len(thelist)]
@register.filter(name='zip')
def zip_lists(a, b):
return list(zip(a, b))
@register.filter
def jsonify(value):
"""
Be careful when using it directly in js! Code like that:
<script>
var x = {{ some_user_data|jsonify }};
</script>
contains an XSS vulnerability. That's because browsers
    will interpret a </script> tag even inside a JS string.
"""
return mark_safe(json.dumps(value))
@register.filter
def json_parse(value):
"""
This is a correct way of embedding json inside js in an HTML template.
"""
return mark_safe('JSON.parse(\'%s\')' % escapejs(json.dumps(value)))
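# Illustrative template usage (the variable name is hypothetical):
#   <script>
#     var x = {{ some_user_data|json_parse }};
#   </script>
# avoids the XSS issue described for jsonify above.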
@register.filter
def latex_escape(x):
r"""
Escape string for generating LaTeX report.
Usage:
{{ malicious|latex_escape }}
Remember: when generating LaTeX report, you should always check
whether \write18 is disabled!
http://www.texdev.net/2009/10/06/what-does-write18-mean/
"""
res = six.text_type(x)
# Braces + backslashes
res = res.replace('\\', '\\textbackslash\\q{}')
res = res.replace('{', '\\{')
res = res.replace('}', '\\}')
res = res.replace('\\q\\{\\}', '\\q{}')
# then everything followed by empty space
repls = [
('#', '\\#'),
('$', '\\$'),
('%', '\\%'),
('_', '\\_'),
('<', '\\textless{}'),
('>', '\\textgreater{}'),
('&', '\\ampersand{}'),
('~', '\\textasciitilde{}'),
('^', '\\textasciicircum{}'),
('"', '\\doublequote{}'),
('\'', '\\singlequote{}'),
]
for key, value in repls:
res = res.replace(key, value)
return res
@register.filter
def result_color_class(raw_score):
if raw_score in [None, '']:
return ''
if callable(getattr(raw_score, 'to_int', None)):
score = raw_score.to_int()
else:
score = int(raw_score)
if isinstance(raw_score, IntegerScore):
score_max_value = 100
elif isinstance(raw_score, PAScore):
score_max_value = 10
else:
        # There should be a method to get the maximum points for a
        # contest; for now, only the cases above are supported.
return ''
if score == 0:
return 'submission--WA'
score_color_threshold = 25
buckets_count = 4
points_per_bucket = score_max_value / float(buckets_count)
# Round down to multiple of $score_color_threshold.
bucket = int(score / points_per_bucket) * score_color_threshold
return 'submission--OK{}'.format(bucket)
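# Examples (assuming a 100-point IntegerScore): a score of 0 maps to
# 'submission--WA', 60 maps to 'submission--OK50' and 100 maps to
# 'submission--OK100'.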
| sio2project/oioioi | oioioi/base/templatetags/simple_filters.py | Python | gpl-3.0 | 6,994 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
# In this file we mostly add translate=True to existing fields that we now want to be translated.
class account_account_template(osv.osv):
_inherit = 'account.account.template'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_account(osv.osv):
_inherit = 'account.account'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_tax(osv.osv):
_inherit = 'account.tax'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_tax_template(osv.osv):
_inherit = 'account.tax.template'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_chart_template(osv.osv):
_inherit = 'account.chart.template'
_columns={
'name': fields.char('Name', required=True, translate=True),
'spoken_languages': fields.char('Spoken Languages', help="State here the languages for which the translations of templates could be loaded at the time of installation of this localization module and copied in the final object when generating them from templates. You must provide the language codes separated by ';'"),
}
_order = 'name'
class account_fiscal_position(osv.osv):
_inherit = 'account.fiscal.position'
_columns = {
'name': fields.char('Fiscal Position', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_fiscal_position_template(osv.osv):
_inherit = 'account.fiscal.position.template'
_columns = {
'name': fields.char('Fiscal Position Template', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_journal(osv.osv):
_inherit = 'account.journal'
_columns = {
'name': fields.char('Journal Name', required=True, translate=True),
}
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_columns = {
'name': fields.char('Account Name', required=True, translate=True),
}
| vileopratama/vitech | src/addons/l10n_multilang/account.py | Python | mit | 2,364 |
#!/usr/bin/env python3
import os, re, sqlite3
from bs4 import BeautifulSoup, NavigableString, Tag
import sys
import os.path
res_dir = sys.argv[1]
doc_id = "gettext"
db = sqlite3.connect("{}/docSet.dsidx".format(res_dir))
cur = db.cursor()
try: cur.execute('DROP TABLE searchIndex;')
except: pass
cur.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
pages = { "Program" : "Command",
"PO-Mode" : "Command",
"Variable" : "Variable",
"Autoconf-Macro" : "Macro"
}
sql = 'INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)'
doc_dir = "{}/Documents/{}".format(res_dir, doc_id)
for page in pages.keys():
print("Processing {}".format(page))
soup = BeautifulSoup(open("{}/{}-Index.html".format(doc_dir, page)))
for tag in soup.find_all('a'):
for ct in tag.contents:
if ct.name == "code":
path = doc_id + "/" + tag['href']
if len(ct.contents) == 1:
obj_name = ct.string
else:
obj_name = ct.contents[0].string + ct.contents[1].string
print("{}:{}->{}".format(page, obj_name, tag['href']))
cur.execute(sql, (obj_name, pages[page], path))
print("Processing Option")
soup = BeautifulSoup(open("{}/Option-Index.html".format(doc_dir)))
for a in soup.find_all('a'):
obj_name = ''
for c1 in a.find_all('code'):
#print(c1.contents)
obj_name += c1.contents[0].string + ', '
for c2 in c1.find_all('code'):
#print(c2.contents)
obj_name += c2.contents[0].string
print(obj_name)
cur.execute(sql, (obj_name, "Option", doc_id + "/" + a['href']))
soup = BeautifulSoup(open("{}/index.html".format(doc_dir)))
for tag in soup.find_all('tr'):
for td in tag.find_all('td'):
for a in td.find_all('a'):
print(a['href'], a.string)
cur.execute(sql, (a.string, "Guide", doc_id + "/" + a['href']))
db.commit()
db.close()
| pekingduck/dash-tools | docsets/GNU_Gettext/gen_gettext_doc.py | Python | gpl-3.0 | 1,995 |
# -*- coding: UTF-8 -*-
# Authors: Thomas Hartmann <thomas.hartmann@th-ht.de>
# Dirk Gütlin <dirk.guetlin@stud.sbg.ac.at>
#
# License: BSD (3-clause)
import numpy as np
from ..meas_info import create_info
from ...transforms import rotation3d_align_z_axis
from ...channels import DigMontage
from ..constants import FIFF
from ...utils import warn, _check_pandas_installed
from ..pick import pick_info
_supported_megs = ['neuromag306']
_unit_dict = {'m': 1,
'cm': 1e-2,
'mm': 1e-3,
'V': 1,
'mV': 1e-3,
'uV': 1e-6,
'T': 1,
'T/m': 1,
'T/cm': 1e2}
NOINFO_WARNING = 'Importing FieldTrip data without an info dict from the ' \
'original file. Channel locations, orientations and types ' \
'will be incorrect. The imported data cannot be used for ' \
'source analysis, channel interpolation etc.'
def _create_info(ft_struct, raw_info):
"""Create MNE info structure from a FieldTrip structure."""
if raw_info is None:
warn(NOINFO_WARNING)
sfreq = _set_sfreq(ft_struct)
ch_names = ft_struct['label']
if raw_info:
info = raw_info.copy()
missing_channels = set(ch_names) - set(info['ch_names'])
if missing_channels:
warn('The following channels are present in the FieldTrip data '
'but cannot be found in the provided info: %s.\n'
'These channels will be removed from the resulting data!'
% (str(missing_channels), ))
missing_chan_idx = [ch_names.index(ch) for ch in missing_channels]
new_chs = [ch for ch in ch_names if ch not in missing_channels]
ch_names = new_chs
ft_struct['label'] = ch_names
if ft_struct['trial'].ndim == 2:
ft_struct['trial'] = np.delete(ft_struct['trial'],
missing_chan_idx,
axis=0)
info['sfreq'] = sfreq
ch_idx = [info['ch_names'].index(ch) for ch in ch_names]
pick_info(info, ch_idx, copy=False)
else:
montage = _create_montage(ft_struct)
info = create_info(ch_names, sfreq, montage=montage)
chs = _create_info_chs(ft_struct)
info['chs'] = chs
info._update_redundant()
return info
def _create_info_chs(ft_struct):
"""Create the chs info field from the FieldTrip structure."""
all_channels = ft_struct['label']
ch_defaults = dict(coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1.0,
range=1.0,
unit_mul=FIFF.FIFF_UNITM_NONE,
loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]),
unit=FIFF.FIFF_UNIT_V)
try:
elec = ft_struct['elec']
except KeyError:
elec = None
try:
grad = ft_struct['grad']
except KeyError:
grad = None
if elec is None and grad is None:
warn('The supplied FieldTrip structure does not have an elec or grad '
             'field. No channel locations will be extracted and the kind of '
'channel might be inaccurate.')
chs = list()
for idx_chan, cur_channel_label in enumerate(all_channels):
cur_ch = ch_defaults.copy()
cur_ch['ch_name'] = cur_channel_label
cur_ch['logno'] = idx_chan + 1
cur_ch['scanno'] = idx_chan + 1
if elec and cur_channel_label in elec['label']:
cur_ch = _process_channel_eeg(cur_ch, elec)
elif grad and cur_channel_label in grad['label']:
cur_ch = _process_channel_meg(cur_ch, grad)
else:
if cur_channel_label.startswith('EOG'):
cur_ch['kind'] = FIFF.FIFFV_EOG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
elif cur_channel_label.startswith('ECG'):
cur_ch['kind'] = FIFF.FIFFV_ECG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
elif cur_channel_label.startswith('STI'):
cur_ch['kind'] = FIFF.FIFFV_STIM_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
else:
warn('Cannot guess the correct type of channel %s. Making '
'it a MISC channel.' % (cur_channel_label,))
cur_ch['kind'] = FIFF.FIFFV_MISC_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
chs.append(cur_ch)
return chs
def _create_montage(ft_struct):
"""Create a montage from the FieldTrip data."""
# try to create a montage
montage_pos, montage_ch_names = list(), list()
for cur_ch_type in ('grad', 'elec'):
if cur_ch_type in ft_struct:
cur_ch_struct = ft_struct[cur_ch_type]
available_channels = np.where(np.in1d(cur_ch_struct['label'],
ft_struct['label']))[0]
tmp_labels = cur_ch_struct['label']
if not isinstance(tmp_labels, list):
tmp_labels = [tmp_labels]
cur_labels = np.asanyarray(tmp_labels)
montage_ch_names.extend(
cur_labels[available_channels])
montage_pos.extend(
cur_ch_struct['chanpos'][available_channels])
montage = None
if (len(montage_ch_names) > 0 and len(montage_pos) > 0 and
len(montage_ch_names) == len(montage_pos)):
montage = DigMontage(
dig_ch_pos=dict(zip(montage_ch_names, montage_pos)))
return montage
def _set_sfreq(ft_struct):
"""Set the sample frequency."""
try:
sfreq = ft_struct['fsample']
except KeyError:
try:
t1 = ft_struct['time'][0]
t2 = ft_struct['time'][1]
difference = abs(t1 - t2)
sfreq = 1 / difference
except KeyError:
raise ValueError('No Source for sfreq found')
return sfreq
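# Example of the fallback above (hypothetical numbers): if the first two time
# samples are 0.000 and 0.001 s, the difference is 1 ms and the inferred
# sampling frequency is 1 / 0.001 = 1000 Hz.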
def _set_tmin(ft_struct):
"""Set the start time before the event in evoked data if possible."""
times = ft_struct['time']
time_check = all(times[i][0] == times[i - 1][0]
for i, x in enumerate(times))
if time_check:
tmin = times[0][0]
else:
tmin = None
return tmin
def _create_events(ft_struct, trialinfo_column):
"""Create an event matrix from the FieldTrip structure."""
event_type = ft_struct['trialinfo']
event_number = range(len(event_type))
if trialinfo_column < 0:
raise ValueError('trialinfo_column must be positive')
available_ti_cols = 1
if event_type.ndim == 2:
available_ti_cols = event_type.shape[1]
if trialinfo_column > (available_ti_cols - 1):
raise ValueError('trialinfo_column is higher than the amount of'
'columns in trialinfo.')
event_trans_val = np.zeros(len(event_type))
if event_type.ndim == 2:
event_type = event_type[:, trialinfo_column]
events = np.vstack([np.array(event_number), event_trans_val,
event_type]).astype('int').T
return events
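# Example (hypothetical trialinfo): a one-column trialinfo of [1, 2, 1] with
# trialinfo_column=0 yields events [[0, 0, 1], [1, 0, 2], [2, 0, 1]], i.e.
# trial index, a zero placeholder column, and the event id per trial.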
def _create_event_metadata(ft_struct):
"""Create event metadata from trialinfo."""
pandas = _check_pandas_installed(strict=False)
if not pandas:
warn('The Pandas library is not installed. Not returning the original '
'trialinfo matrix as metadata.')
return None
metadata = pandas.DataFrame(ft_struct['trialinfo'])
return metadata
def _process_channel_eeg(cur_ch, elec):
"""Convert EEG channel from FieldTrip to MNE.
Parameters
----------
cur_ch: dict
Channel specific dictionary to populate.
elec: dict
elec dict as loaded from the FieldTrip structure
Returns
-------
cur_ch: dict
The original dict (cur_ch) with the added information
"""
all_labels = np.asanyarray(elec['label'])
chan_idx_in_elec = np.where(all_labels == cur_ch['ch_name'])[0][0]
position = np.squeeze(elec['chanpos'][chan_idx_in_elec, :])
chanunit = elec['chanunit'][chan_idx_in_elec]
position_unit = elec['unit']
position = position * _unit_dict[position_unit]
cur_ch['loc'] = np.hstack((position, np.zeros((9,))))
cur_ch['loc'][-1] = 1
cur_ch['unit'] = FIFF.FIFF_UNIT_V
cur_ch['unit_mul'] = np.log10(_unit_dict[chanunit[0]])
cur_ch['kind'] = FIFF.FIFFV_EEG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return cur_ch
def _process_channel_meg(cur_ch, grad):
"""Convert MEG channel from FieldTrip to MNE.
Parameters
----------
cur_ch: dict
Channel specific dictionary to populate.
grad: dict
grad dict as loaded from the FieldTrip structure
Returns
-------
dict: The original dict (cur_ch) with the added information
"""
all_labels = np.asanyarray(grad['label'])
chan_idx_in_grad = np.where(all_labels == cur_ch['ch_name'])[0][0]
gradtype = grad['type']
chantype = grad['chantype'][chan_idx_in_grad]
position_unit = grad['unit']
position = np.squeeze(grad['chanpos'][chan_idx_in_grad, :])
position = position * _unit_dict[position_unit]
if gradtype == 'neuromag306' and 'tra' in grad and 'coilpos' in grad:
# Try to regenerate original channel pos.
idx_in_coilpos = np.where(grad['tra'][chan_idx_in_grad, :] != 0)[0]
cur_coilpos = grad['coilpos'][idx_in_coilpos, :]
cur_coilpos = cur_coilpos * _unit_dict[position_unit]
cur_coilori = grad['coilori'][idx_in_coilpos, :]
if chantype == 'megmag':
position = cur_coilpos[0] - 0.0003 * cur_coilori[0]
if chantype == 'megplanar':
tmp_pos = cur_coilpos - 0.0003 * cur_coilori
position = np.average(tmp_pos, axis=0)
original_orientation = np.squeeze(grad['chanori'][chan_idx_in_grad, :])
try:
orientation = rotation3d_align_z_axis(original_orientation).T
orientation = orientation.flatten()
except AssertionError:
orientation = np.eye(4, 4).flatten()
chanunit = grad['chanunit'][chan_idx_in_grad]
cur_ch['loc'] = np.hstack((position, orientation))
cur_ch['kind'] = FIFF.FIFFV_MEG_CH
if chantype == 'megmag':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'megplanar':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_VV_PLANAR_T1
cur_ch['unit'] = FIFF.FIFF_UNIT_T_M
elif chantype == 'refmag':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'refgrad':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'meggrad':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM
cur_ch['unit'] = FIFF.FIFF_UNIT_T
else:
raise RuntimeError('Unexpected coil type: %s.' % (
chantype,))
cur_ch['unit_mul'] = np.log10(_unit_dict[chanunit[0]])
cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return cur_ch
| adykstra/mne-python | mne/io/fieldtrip/utils.py | Python | bsd-3-clause | 11,251 |
import murraylab_tools.biotek as mt_biotek
import os
gitexamplepath = "C:\\Users\\Andrey\\Documents\\GitHub\\"+\
"murraylab_tools\\examples\\biotek_examples\\"
data_filename = gitexamplepath+\
"180515_big384wellplate.csv"
supplementary_filename = gitexamplepath+\
"supp_inductiongrid.csv"
#mt_biotek.tidy_biotek_data(data_filename, supplementary_filename, convert_to_uM = False)
import pandas as pd
tidy_filename = gitexamplepath+"180515_big384wellplate_tidy.csv"
df = pd.read_csv(tidy_filename)
#df.head()
#df.head()
#gdf = df.groupby(["Channel", "Gain", "Well"])
#gdf.head()
#df[df.Channel == "GFP"].head()
normdf = mt_biotek.normalize(df,norm_channel= "OD")
#normdf[normdf.Gain==100].head()
end_df = mt_biotek.window_averages(normdf,15,17,"hours")
end_df.Excitation.unique()
slicedf = end_df[(end_df.Gain == 100 )&(end_df.Construct=="pQi41")&(end_df.aTC==250)]
end_df[(end_df.Gain == 100 )&(end_df.Construct=="pQi41")&(end_df.aTC==250)].head()
| sclamons/murraylab_tools | examples/biotektests.py | Python | mit | 999 |
BROKER_URL = 'redis://localhost:6379'
# TODO: Save Celery Result to Backend
CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379'
# In debug mode this value should be set to True.
CELERY_ALWAYS_EAGER = True
# Celery beat schedule settings
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'query_transactions': {
'task': 'django_bitcoin.tasks.query_transactions',
'schedule': timedelta(seconds=10),
},
'sync_alladdress_balance': {
'task': 'django_bitcoin.tasks.sync_alladdress_balance',
'schedule': timedelta(seconds=10),
},
# 'check_integrity': {
# 'task': 'django_bitcoin.tasks.check_integrity',
# 'schedule': timedelta(seconds=10),
# },
}
 | texib/bitcoin-zoo | core/celeryconfig.py | Python | mit | 772
# Get the length of a word or variable
# Variable = a
a = "LeonThiess"
print(len(a))
print(len("Google"))
| TechGenius32400/python3-help | Python3-Class1/basic-functions/len.py | Python | gpl-3.0 | 108 |
def __rope_start_everything():
import os
import sys
import socket
import cPickle as pickle
import marshal
import inspect
import types
import threading
class _MessageSender(object):
def send_data(self, data):
pass
class _SocketSender(_MessageSender):
def __init__(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
self.my_file = s.makefile('w')
def send_data(self, data):
if not self.my_file.closed:
pickle.dump(data, self.my_file)
def close(self):
self.my_file.close()
class _FileSender(_MessageSender):
def __init__(self, file_name):
self.my_file = open(file_name, 'wb')
def send_data(self, data):
if not self.my_file.closed:
marshal.dump(data, self.my_file)
def close(self):
self.my_file.close()
def _cached(func):
cache = {}
def newfunc(self, arg):
if arg in cache:
return cache[arg]
result = func(self, arg)
cache[arg] = result
return result
return newfunc
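    # The helper above memoizes a single-argument method: the first call with a
    # given argument computes and caches the result; later calls with the same
    # argument return the cached value without recomputing it.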
class _FunctionCallDataSender(object):
def __init__(self, send_info, project_root):
self.project_root = project_root
if send_info.isdigit():
self.sender = _SocketSender(int(send_info))
else:
self.sender = _FileSender(send_info)
def global_trace(frame, event, arg):
# HACK: Ignoring out->in calls
# This might lose some information
if self._is_an_interesting_call(frame):
return self.on_function_call
sys.settrace(global_trace)
threading.settrace(global_trace)
def on_function_call(self, frame, event, arg):
if event != 'return':
return
args = []
returned = ('unknown',)
code = frame.f_code
for argname in code.co_varnames[:code.co_argcount]:
try:
args.append(self._object_to_persisted_form(
frame.f_locals[argname]))
except (TypeError, AttributeError):
args.append(('unknown',))
try:
returned = self._object_to_persisted_form(arg)
except (TypeError, AttributeError):
pass
try:
data = (self._object_to_persisted_form(frame.f_code),
tuple(args), returned)
self.sender.send_data(data)
except (TypeError):
pass
return self.on_function_call
def _is_an_interesting_call(self, frame):
#if frame.f_code.co_name in ['?', '<module>']:
# return False
#return not frame.f_back or
# not self._is_code_inside_project(frame.f_back.f_code)
if not self._is_code_inside_project(frame.f_code) and \
(not frame.f_back or
not self._is_code_inside_project(frame.f_back.f_code)):
return False
return True
def _is_code_inside_project(self, code):
source = self._path(code.co_filename)
return source is not None and os.path.exists(source) and \
_realpath(source).startswith(self.project_root)
@_cached
def _get_persisted_code(self, object_):
source = self._path(object_.co_filename)
if not os.path.exists(source):
raise TypeError('no source')
return ('defined', _realpath(source), str(object_.co_firstlineno))
@_cached
def _get_persisted_class(self, object_):
try:
return ('defined', _realpath(inspect.getsourcefile(object_)),
object_.__name__)
except (TypeError, AttributeError):
return ('unknown',)
def _get_persisted_builtin(self, object_):
if isinstance(object_, (str, unicode)):
return ('builtin', 'str')
if isinstance(object_, list):
holding = None
if len(object_) > 0:
holding = object_[0]
return ('builtin', 'list',
self._object_to_persisted_form(holding))
if isinstance(object_, dict):
keys = None
values = None
if len(object_) > 0:
keys = object_.keys()[0]
values = object_[keys]
return ('builtin', 'dict',
self._object_to_persisted_form(keys),
self._object_to_persisted_form(values))
if isinstance(object_, tuple):
objects = []
if len(object_) < 3:
for holding in object_:
objects.append(self._object_to_persisted_form(holding))
else:
objects.append(self._object_to_persisted_form(object_[0]))
return tuple(['builtin', 'tuple'] + objects)
if isinstance(object_, set):
holding = None
if len(object_) > 0:
for o in object_:
holding = o
break
return ('builtin', 'set',
self._object_to_persisted_form(holding))
return ('unknown',)
def _object_to_persisted_form(self, object_):
if object_ is None:
return ('none',)
if isinstance(object_, types.CodeType):
return self._get_persisted_code(object_)
if isinstance(object_, types.FunctionType):
return self._get_persisted_code(object_.func_code)
if isinstance(object_, types.MethodType):
return self._get_persisted_code(object_.im_func.func_code)
if isinstance(object_, types.ModuleType):
return self._get_persisted_module(object_)
if isinstance(object_, (str, unicode, list, dict, tuple, set)):
return self._get_persisted_builtin(object_)
if isinstance(object_, (types.TypeType, types.ClassType)):
return self._get_persisted_class(object_)
return ('instance', self._get_persisted_class(type(object_)))
@_cached
def _get_persisted_module(self, object_):
path = self._path(object_.__file__)
if path and os.path.exists(path):
return ('defined', _realpath(path))
return ('unknown',)
def _path(self, path):
if path.endswith('.pyc'):
path = path[:-1]
if path.endswith('.py'):
return path
def close(self):
self.sender.close()
sys.settrace(None)
def _realpath(path):
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
send_info = sys.argv[1]
project_root = sys.argv[2]
file_to_run = sys.argv[3]
run_globals = globals()
run_globals.update({'__name__': '__main__',
'__builtins__': __builtins__,
'__file__': file_to_run})
if send_info != '-':
data_sender = _FunctionCallDataSender(send_info, project_root)
del sys.argv[1:4]
execfile(file_to_run, run_globals)
if send_info != '-':
data_sender.close()
if __name__ == '__main__':
__rope_start_everything()
| JetChars/vim | vim/bundle/python-mode/pymode/libs2/rope/base/oi/runmod.py | Python | apache-2.0 | 7,738 |
import numpy as np
import math
from nlputils import tokenize_text
from itertools import repeat
from scipy import spatial
import json
from tqdm import tqdm
from pprint import pprint
from operator import itemgetter
import time
num_docs = 0
def build_index(corpus):
terms_list = {}
start_time = time.time()
doc_vectors = []
pbar = tqdm(total=len(corpus)*2)
for key,item in corpus.items():
doc_term_freqs = {}
tokens,doc_len = tokenize_text(item)
for token,freq in tokens.items():
if token in terms_list:
terms_list[token][0] += 1
term_index = terms_list[token][1] #previously defined token
else:
term_index = len(terms_list) #index for new term
terms_list[token] = [1,term_index]
doc_term_freqs[token] = get_norm_tf(freq,doc_len)
doc_vectors.append((doc_term_freqs,key))
pbar.update(1)
    # Make the corpus size visible to get_idf(), which reads the module-level
    # num_docs; without the global declaration this assignment only creates a
    # local variable and idf values are computed with num_docs == 0.
    global num_docs
    num_docs = len(corpus)
num_terms = len(terms_list)
tfidf_array = np.zeros([num_docs,num_terms],dtype=np.float32)
counter = 0
for doc in doc_vectors:
for term,tf in doc[0].items():
idf = get_idf(term,terms_list)
#doc_tfidf = np.zeros([num_terms],dtype=np.float32)
#doc_tfidf[counter,terms_list[term][1]] = tf*idf
tfidf_array[counter,terms_list[term][1]] = tf*idf
counter += 1
pbar.update(1)
pbar.close()
pprint(len(tfidf_array[0]))
print("Time:",time.time()-start_time,"s")
return tfidf_array,terms_list
"""def add_doc_to_index(doc):
tokens,doc_len = tokenize_text(doc)
for term in tokens.keys():
if term in terms_list:
continue
else:
terms_list.append(term)
doc_vector = dict(zip(tokens,list(map(get_tf,tokens,repeat(doc_len)))))"""
def get_idf(term,terms_list):
if not term in terms_list:
return 0
return math.log(float(1 + num_docs) /
(1 + terms_list[term][0]))
def get_norm_tf(term_freq,doc_len):
return term_freq/doc_len
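# Worked example (hypothetical counts): with num_docs = 100, a term appearing in
# 9 documents has idf = log(101 / 10) ~= 2.31; if it occurs 3 times in a
# 100-token document, tf = 0.03 and its tf-idf weight is roughly 0.069.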
def cosine_similarity(a,b):
return 1 - spatial.distance.cosine(a, b)
def get_query_tfidf(query_text,terms_list):
doc_tfidf = {}
tokens,doc_len = tokenize_text(query_text)
for token,freq in tokens.items():
if token in terms_list:
term_index = terms_list[token][1] #previously defined token
tf = get_norm_tf(freq,doc_len)
idf = get_idf(token,terms_list)
doc_tfidf[token] = tf*idf
doc_vector = np.zeros([len(terms_list)],dtype=np.float32)
for term,tfidf in doc_tfidf.items():
        # terms_list maps each term to [document_frequency, column_index].
        term_index = terms_list[term][1]
doc_vector[term_index] = tfidf
return doc_vector
def get_rankings(index,terms_list,query):
rankings = []
query_vector = get_query_tfidf(query,terms_list)
for i in range(index.shape[0]):
vector = index[i]
cos_sim = cosine_similarity(query_vector,vector)
doc_sim = [i,cos_sim]
rankings.append(doc_sim)
rankings.sort(key=itemgetter(1),reverse=True)
return rankings
def search(query,index,terms_list,doc_keys):
rankings = get_rankings(index,terms_list,query)
results = []
for key,val in rankings:
results.append(doc_keys[key])
return results
def load_index():
saved_index = np.load("index.npz")
index = saved_index['arr_0']
terms_list = saved_index['arr_1'].item()
doc_keys = saved_index['arr_2'].item()
return index,terms_list,doc_keys
def main():
with open("sceposts.json","r") as postsfile:
posts = json.loads(postsfile.read())
corpus = {}
counter = 0
doc_keys = {}
for category,cat_items in posts.items():
#if not category=='document':
# continue
for item in cat_items:
corpus[counter] = item['text']
doc_keys[counter] = {'excerpt':item['content'][:150],'url':item['url'],'title':item['title']}
counter += 1
#print(item)
#break
index,terms_list = build_index(corpus)
print (type(index))
np.savez_compressed("index.npz",index,terms_list,doc_keys)
if __name__ == "__main__":
    main()
 | DigitalUSSouth/ExploreSC | exploresc-server/api2/index.py | Python | bsd-3-clause | 3,858
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
import tct
import sys
#
import copy
import cgi
import os
import shutil
import six
params = tct.readjson(sys.argv[1])
facts = tct.readjson(params['factsfile'])
milestones = tct.readjson(params['milestonesfile'])
reason = ''
resultfile = params['resultfile']
result = tct.readjson(resultfile)
toolname = params['toolname']
toolname_pure = params['toolname_pure']
toolchain_name = facts['toolchain_name']
workdir = params['workdir']
loglist = result['loglist'] = result.get('loglist', [])
exitcode = CONTINUE = 0
# ==================================================
# Make a copy of milestones for later inspection?
# --------------------------------------------------
if 0 or milestones.get('debug_always_make_milestones_snapshot'):
tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1])
# ==================================================
# Helper functions
# --------------------------------------------------
deepget = tct.deepget
def lookup(D, *keys, **kwdargs):
result = deepget(D, *keys, **kwdargs)
loglist.append((keys, result))
return result
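# Example (keys are hypothetical): lookup(milestones, 'buildsettings', 'localization')
# returns the nested value via tct.deepget and records (keys, result) in loglist.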
# ==================================================
# define
# --------------------------------------------------
# Set to true to generate an email with all textblocks for the purpose of reviewing
debugkeepAllBlocks = 0
HKV = html_key_values = {}
htmlesc = lambda x: cgi.escape(x, True)
htmlmail_template_file = None
milestone_abc = None
talk = milestones.get('talk', 1)
TheProjectLogHtmlmail = ''
TheProjectLogHtmlmailMessageHtml = ''
TheProjectResultBuildinfoMessage = ''
TheProjectResultHtmlmailMessageHtml = ''
TheProjectLogHtmlmailMessageMdTxt = ''
TheProjectLogHtmlmailMessageRstTxt = ''
TheProjectLogHtmlmailMessageTxt = ''
xeq_name_cnt = 0
# ==================================================
# Check params
# --------------------------------------------------
if exitcode == CONTINUE:
loglist.append('CHECK PARAMS')
create_buildinfo = lookup(milestones, 'create_buildinfo')
TheProjectLog = lookup(milestones, 'TheProjectLog')
TheProjectResultBuildinfo = lookup(milestones, 'TheProjectResultBuildinfo')
if not (create_buildinfo and TheProjectLog and TheProjectResultBuildinfo):
exitcode = 22
reason = 'Bad params or nothing to do'
if exitcode == CONTINUE:
TheProjectResultBuildinfoMessage = lookup(milestones, 'TheProjectResultBuildinfoMessage')
if not TheProjectResultBuildinfoMessage:
TheProjectResultBuildinfoMessage = os.path.join(TheProjectResultBuildinfo, 'DearProjectOwner')
TheProjectResultHtmlmailMessageHtml = TheProjectResultBuildinfoMessage + '.html'
toolfolderabspath = lookup(params, 'toolfolderabspath')
if not toolfolderabspath:
exitcode = 22
reason = 'Bad params or nothing to do'
if exitcode == CONTINUE:
loglist.append('PARAMS are ok')
else:
loglist.append('Bad PARAMS or nothing to do')
# ==================================================
# work
# --------------------------------------------------
# DocumentationGeneratedZipFile
if exitcode == CONTINUE:
TheProjectLogHtmlmail = lookup(milestones, 'TheProjectLogHtmlmail')
if not TheProjectLogHtmlmail:
TheProjectLogHtmlmail = os.path.join(TheProjectLog, 'htmlmail')
if not os.path.exists(TheProjectLogHtmlmail):
os.mkdir(TheProjectLogHtmlmail)
if exitcode == CONTINUE:
htmlmail_template_file = os.path.join(toolfolderabspath, 'templates', 't3docs.html')
if not os.path.isfile(htmlmail_template_file):
loglist.append(('fatal: htmlmail_template_file not found', htmlmail_template_file))
exitcode = 22
reason = 'Fatal: htmlmail_template not found'
if exitcode == CONTINUE:
# use individual variables for nice code completion in PyCharm
absurl_buildinfo_dir = lookup(milestones, 'absurl_buildinfo_dir')
absurl_html_dir = lookup(milestones, 'absurl_html_dir')
absurl_package_dir = lookup(milestones, 'absurl_package_dir')
absurl_package_file = lookup(milestones, 'absurl_package_file')
absurl_parent_dir = lookup(milestones, 'absurl_parent_dir')
absurl_project_parent_dir = lookup(milestones, 'absurl_project_parent_dir')
absurl_pdf_dir = lookup(milestones, 'absurl_pdf_dir')
absurl_pdf_file = lookup(milestones, 'absurl_pdf_file')
absurl_settings_cfg_file = lookup(milestones, 'absurl_settings_cfg_file')
absurl_singlehtml_dir = lookup(milestones, 'absurl_singlehtml_dir')
absurl_warnings_txt_file = lookup(milestones, 'absurl_warnings_txt_file')
documentation_zip_file = lookup(milestones, 'DocumentationGeneratedZipFile', default='')
email_notify_about_new_build = lookup(milestones, 'email_notify_about_new_build', default=[])
email_user_notify_is_turned_off = lookup(milestones, 'email_user_notify_is_turned_off', default=0)
emails_user_from_project = lookup(milestones, 'emails_user_from_project')
if documentation_zip_file:
absurl_documentation_zip_file = '%s/%s' % (absurl_buildinfo_dir.rstrip('/'), documentation_zip_file)
else:
absurl_documentation_zip_file = ''
# ==================================================
# work
# --------------------------------------------------
def do_the_work():
global email_notify_about_new_build
global emails_user_from_project
from bs4 import BeautifulSoup
import codecs
import sys
with codecs.open(htmlmail_template_file, 'r', 'utf-8') as f1:
html_doc = f1.read()
soup = BeautifulSoup(html_doc, 'html.parser')
def first_or_none(resultset):
result = None
if len(resultset):
result = resultset[0]
return result
def decompose_these(*args):
result = []
for i, arg in enumerate(args):
if arg:
arg.decompose()
result.append(None)
else:
result.append(arg)
return result
# gather information
a = soup.a
h1 = soup.h1
h2 = soup.h2
h3 = soup.h3
h4 = soup.h4
p = soup.p
a_attrs = a.attrs.copy() if a else {}
h1_attrs = h1.attrs.copy() if h1 else {}
h2_attrs = h2.attrs.copy() if h2 else {}
h3_attrs = h3.attrs.copy() if h3 else {}
h4_attrs = h4.attrs.copy() if h4 else {}
p_attrs = p.attrs.copy() if p else {}
idDivYourProject = soup.find(id="idDivYourProject")
idCalloutSettingsFile = soup.find(id="idCalloutSettingsFile")
idDocumentationFromOpenOffice = soup.find(id="idDocumentationFromOpenOffice")
idCalloutCongratulations = soup.find(id="idCalloutCongratulations")
idCalloutThereAreWarnings = soup.find(id="idCalloutThereAreWarnings")
idDivAboutThisMail = soup.find(id="idDivAboutThisMail")
idDivGeneralInformation = soup.find(id="idDivGeneralInformation")
idDivMoreOnYourProject = soup.find(id="idDivMoreOnYourProject")
idSpanISendToReceivers = soup.find(id="idSpanISendToReceivers")
idSpanISawANo = soup.find(id="idSpanISawANo")
idFoundSettingAboutEmailing = soup.find(id="idFoundSettingAboutEmailing")
idFoundNoSettingAboutEmailing = soup.find(id="idFoundNoSettingAboutEmailing")
idSendToProjectEmails = soup.find(id="idSendToProjectEmails")
idABUILDINFO = soup.find(id="idABUILDINFO")
idAHTML = soup.find(id="idAHTML")
idASINGLEHTML = soup.find(id="idASINGLEHTML")
idAPDF = soup.find(id="idAPDF")
idAPACKAGE = soup.find(id="idAPACKAGE")
# # there is this 'clone' functionality
# # https://stackoverflow.com/questions/23057631/clone-element-with-beautifulsoup
#
# idCalloutDocumentationFromOpenOffice = copy.copy(idCalloutSettingsFile)
# if idCalloutDocumentationFromOpenOffice:
# idCalloutDocumentationFromOpenOffice.attrs['id'] = 'idCalloutDocumentationFromOpenOffice'
# idCalloutSettingsFile.insert_after(idCalloutDocumentationFromOpenOffice)
#
# for elm in idCalloutDocumentationFromOpenOffice.find_all('p'):
# elm.decompose()
#
# ptag = soup.new_tag('p', **p_attrs)
# ptag.string = 'Important!'
# idCalloutDocumentationFromOpenOffice.h2.insert_after(ptag)
# Add info about localization
localization_has_localization = lookup(milestones, 'localization_has_localization')
localization = tct.deepget(milestones, 'buildsettings', 'localization')
if localization_has_localization:
h3tag = soup.new_tag('h3', **h3_attrs)
h3tag.string = 'Localization'
idDivMoreOnYourProject.append(h3tag)
ptag = soup.new_tag('p', **p_attrs)
ptag.append(u'Yes, I have seen that your project contains one or more localizations.\n')
if localization in ['', 'default']:
ptag.append(u'In this run I have rendered the default language.\n')
else:
ptag.append(u"In this run I have rendered the '%s' version.\n" % localization)
ptag.append(u'Each localization is done in an extra run.\n')
idDivMoreOnYourProject.append(ptag)
# What succeeded? What failed?
successparts = []
failparts = []
build_html = lookup(milestones, 'build_html')
if build_html and absurl_html_dir:
attrs = a.attrs.copy()
attrs['href'] = absurl_html_dir
atag = soup.new_tag('a', **attrs)
atag.string = 'html'
successparts.append(six.text_type(atag))
else:
failparts.append('html')
build_singlehtml = lookup(milestones, 'build_singlehtml')
if build_singlehtml and absurl_singlehtml_dir:
attrs = a.attrs.copy()
attrs['href'] = absurl_singlehtml_dir
atag = soup.new_tag('a', **attrs)
atag.string = 'singlehtml'
successparts.append(six.text_type(atag))
else:
failparts.append('singlehtml')
if absurl_pdf_file:
attrs = a.attrs.copy()
attrs['href'] = absurl_pdf_file
atag = soup.new_tag('a', **attrs)
atag.string = 'pdf'
successparts.append(six.text_type(atag))
else:
failparts.append('pdf')
if absurl_package_file:
attrs = a.attrs.copy()
attrs['href'] = absurl_package_file
atag = soup.new_tag('a', **attrs)
atag.string = 'package'
successparts.append(six.text_type(atag))
else:
failparts.append('package')
if absurl_buildinfo_dir:
attrs = a.attrs.copy()
attrs['href'] = absurl_buildinfo_dir
atag = soup.new_tag('a', **attrs)
atag.string = 'buildinfo'
successparts.append(six.text_type(atag))
else:
failparts.append('buildinfo')
successparts = successparts if successparts else ['nothing']
failparts = failparts if failparts else ['nothing']
# Example of suitable values for the html template:
HKV['project_name'] = 'Projectname'
HKV['project_version'] = '1.2.3'
HKV['build_time'] = '2017-02-02 16:41:13'
HKV['this_was_made'] = 'html, pdf.'
HKV['this_failed'] = 'singlehtml, package.'
HKV['absurl_buildinfo'] = '#absurl_buildinfo'
HKV['absurl_warnings_txt'] = '#absurl_warnings_txt'
HKV['absurl_settings_cfg'] = '#absurl_settings_cfg'
        HKV['receivers_from_settings_cfg'] = '<a href="mailto:one@mail.com">one@mail.com</a>, <a href="mailto:two@mail.com">two@mail.com</a>'
        HKV['receivers_from_project'] = '<a href="mailto:three@mail.com">three@mail.com</a>'
build_time = milestones.get('time_finished_at')
if build_time:
build_time = ' '.join(build_time.split(' ')[:2])
# email_user_receivers_exlude_list
project_name = tct.deepget(milestones, 'settings_cfg', 'general', 'project', default='PROJECT?')
project_version = tct.deepget(milestones, 'settings_cfg', 'general', 'version', default='VERSION?')
build_time = build_time if build_time else 'BUILDTIME?'
# The values are filled into the HTML code directly.
# So we have to escape them.
HKV['project_name'] = htmlesc(project_name)
HKV['project_version'] = htmlesc(project_version)
HKV['build_time'] = htmlesc(build_time)
HKV['this_was_made'] = u', '.join(successparts) + '.'
HKV['this_failed'] = u', '.join(failparts) + '.'
HKV['absurl_buildinfo'] = htmlesc(absurl_buildinfo_dir)
HKV['absurl_publish_dir'] = htmlesc(absurl_html_dir)
HKV['absurl_warnings_txt'] = htmlesc(absurl_warnings_txt_file)
HKV['absurl_settings_cfg'] = htmlesc(absurl_settings_cfg_file)
HKV['absurl_documentation_zip'] = htmlesc(absurl_documentation_zip_file)
        HKV['receivers_from_settings_cfg'] = '<a href="mailto:one@mail.com">one@mail.com</a>, <a href="mailto:two@mail.com">two@mail.com</a>'
        HKV['receivers_from_project'] = '<a href="mailto:three@mail.com">three@mail.com</a>, <a href="mailto:four@mail.com">four@mail.com</a>'
v = 'None'
if email_notify_about_new_build:
temp = []
for email in email_notify_about_new_build:
attrs = a.attrs.copy()
attrs['href'] = 'mailto:' + email
atag = soup.new_tag('a', **attrs)
atag.string = email
temp.append(six.text_type(atag))
v = u', '.join(temp)
HKV['receivers_from_settings_cfg'] = v
v = 'None'
if emails_user_from_project:
temp = []
for email in emails_user_from_project:
attrs = a.attrs.copy()
attrs['href'] = 'mailto:' + email
atag = soup.new_tag('a', **attrs)
atag.string = email
temp.append(six.text_type(atag))
v = u', '.join(temp)
HKV['receivers_from_project'] = v
# text block logic
# we remove textblocks that shall not appear
has_settingscfg_generated = lookup(milestones, 'has_settingscfg_generated')
if has_settingscfg_generated:
# We have created a Settings.cfg from a Yaml file
pass
else:
if not debugkeepAllBlocks:
idCalloutSettingsFile = decompose_these(idCalloutSettingsFile)
# Documentation generated from OpenOffice?
if documentation_zip_file:
# yes
pass
else:
# no
if not debugkeepAllBlocks:
idDocumentationFromOpenOffice = decompose_these(idDocumentationFromOpenOffice)
warnings_file_size = lookup(milestones, 'warnings_file_size')
if warnings_file_size == 0:
# Congratulations!
if not debugkeepAllBlocks:
idCalloutThereAreWarnings = decompose_these(idCalloutThereAreWarnings)
else:
# Sphinx shows errors
if not debugkeepAllBlocks:
idCalloutCongratulations = decompose_these(idCalloutCongratulations)
        # explicitly turned off by config or command line
email_user_do_not_send = lookup(milestones, 'email_user_do_not_send', default=0)
        # explicitly turned off by 'no' given as email
email_user_notify_is_turned_off = lookup(milestones, 'email_user_notify_is_turned_off')
# list of emails. May be empty.
email_notify_about_new_build = lookup(milestones, 'email_notify_about_new_build')
if not email_user_notify_is_turned_off: # and email_notify_about_new_build: # ?
# We really send to receivers we found in settings
if not debugkeepAllBlocks:
idSpanISawANo = decompose_these(idSpanISawANo)
else:
# We really found a 'no' in the settings
if not debugkeepAllBlocks:
idSpanISendToReceivers = decompose_these(idSpanISendToReceivers)
email_user_notify_setting_exists = lookup(milestones, 'email_user_notify_setting_exists')
if email_user_notify_setting_exists:
# We found an entry about emailing in the settings
if not debugkeepAllBlocks:
idFoundNoSettingAboutEmailing, idSendToProjectEmails = decompose_these(idFoundNoSettingAboutEmailing, idSendToProjectEmails)
else:
# We did not find an entry about emailing in the settings
if not debugkeepAllBlocks:
decompose_these(idFoundSettingAboutEmailing)
emails_user_from_project = lookup(milestones, 'emails_user_from_project')
if idSendToProjectEmails and not email_user_do_not_send and not email_notify_about_new_build and emails_user_from_project:
pass
else:
if not debugkeepAllBlocks:
idSendToProjectEmails = decompose_these(idSendToProjectEmails)
# handle links in the General Info section
if absurl_buildinfo_dir:
            # if we have BUILDINFO '#buildinfo'
idABUILDINFO.attrs['href'] = absurl_buildinfo_dir
else:
# if we have no BUILDINFO
new_strong_tag = soup.new_tag("strong")
new_strong_tag.string = idABUILDINFO.string
idABUILDINFO.replace_with(new_strong_tag)
if absurl_html_dir:
# if we have HTML '#html'
idAHTML.attrs['href'] = absurl_html_dir
else:
# if we have no HTML
new_strong_tag = soup.new_tag("strong")
new_strong_tag.string = idAHTML.string
idAHTML.replace_with(new_strong_tag)
if absurl_singlehtml_dir:
# if we have SINGLEHTML '#singlehtml'
idASINGLEHTML.attrs['href'] = absurl_singlehtml_dir
else:
# if we have no SINGLEHTML
new_strong_tag = soup.new_tag("strong")
new_strong_tag.string = idASINGLEHTML.string
idASINGLEHTML.replace_with(new_strong_tag)
if absurl_pdf_file:
# if we have a PDF '#pdf'
idAPDF.attrs['href'] = absurl_pdf_file
else:
# if we have no PDF
new_strong_tag = soup.new_tag("strong")
new_strong_tag.string = idAPDF.string
idAPDF.replace_with(new_strong_tag)
if absurl_package_file:
            # if we have a PACKAGE '#package'
idAPACKAGE.attrs['href'] = absurl_package_file
else:
# if we have no PACKAGE
new_strong_tag = soup.new_tag("strong")
new_strong_tag.string = idAPACKAGE.string
idAPACKAGE.replace_with(new_strong_tag)
# lstrip the <pre> blocks
for pre in soup.find_all('pre'):
pre.string.replace_with(u'\n'.join([part.lstrip() for part in pre.string.split('\n')]))
# create outfile
# replace variables
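        # Escape literal '%' signs, then restore the '%(' of the named placeholders so that
        # old-style %-formatting substitutes only the %(key)s variables from HKV.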
with codecs.open('DearProjectOwner-prettified.html', 'w', 'utf-8') as f2:
prettified = soup.prettify()
prettified = prettified.replace('%', '%%').replace('%%(', '%(') % HKV
f2.write(prettified)
with codecs.open('DearProjectOwner.html', 'w', 'utf-8') as f2:
prettified = six.text_type(soup)
prettified = prettified.replace('%', '%%').replace('%%(', '%(') % HKV
f2.write(prettified)
if exitcode == CONTINUE:
do_the_work()
if 0:
# atm there may be a DearProjectOwner.txt as well. Rename that file
# so it is flagged as disabled
TheProjectResultBuildinfoMessage = lookup(milestones, 'TheProjectResultBuildinfoMessage')
if TheProjectResultBuildinfoMessage:
fpath, fname = os.path.split(TheProjectResultBuildinfoMessage)
obsolete = os.path.join(fpath, 'zzz-OBSOLETE-' + fname)
shutil.move(TheProjectResultBuildinfoMessage, obsolete)
TheProjectResultBuildinfoMessage = ''
if exitcode == CONTINUE:
src = 'DearProjectOwner-prettified.html'
TheProjectLogHtmlmailMessageHtml = TheProjectLogHtmlmail + '/DearProjectOwner.html'
shutil.copy(src, TheProjectLogHtmlmailMessageHtml)
if exitcode == CONTINUE:
src = 'DearProjectOwner-prettified.html'
TheProjectResultBuildinfoMessage = os.path.join(TheProjectResultBuildinfo, 'DearProjectOwner')
TheProjectResultBuildinfoMessageHtml = TheProjectResultBuildinfoMessage + '.html'
shutil.copy(TheProjectLogHtmlmailMessageHtml, TheProjectResultBuildinfoMessageHtml)
# ==================================================
# Set MILESTONE
# --------------------------------------------------
if 'always!':
result['MILESTONES'].append({
'TheProjectResultBuildinfoMessage': TheProjectResultBuildinfoMessage,
'TheProjectResultHtmlmailMessageHtml': TheProjectResultHtmlmailMessageHtml,
})
if html_key_values:
result['MILESTONES'].append({'html_key_values': html_key_values})
if TheProjectLogHtmlmail: result['MILESTONES'].append(
{'TheProjectLogHtmlmail': TheProjectLogHtmlmail})
if TheProjectLogHtmlmailMessageHtml: result['MILESTONES'].append(
{'TheProjectLogHtmlmailMessageHtml': TheProjectLogHtmlmailMessageHtml})
# ==================================================
# save result
# --------------------------------------------------
tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason)
# ==================================================
# Return with proper exitcode
# --------------------------------------------------
sys.exit(exitcode)
| marble/Toolchain_RenderDocumentation | 30-If-success/02-Create-html-report/01-Create-html-file/run_01-Create-html-file.py | Python | mit | 21,159 |
import json
import os.path, sys
from pahera.Utilities import DictFetchAll
from pahera.Utilities import WritingToJson
from django.db import connection, transaction
from pahera.models import Person
from pahera.PythonModules import CheckIfUserExists_mod
# To get the user's existing details..!!!
def getDetails(person):
person_dict = {}
person_dict['FName'] = person.first_name
person_dict['LName'] = person.last_name
person_dict['Email'] = person.email
person_dict['PhoneNo'] = person.phone_no
person_data = {}
person_data['res'] = person_dict
scriptpath = os.path.dirname('./static/JSON/temp/')
fName = os.path.join(scriptpath, 'Userdata.json')
WritingToJson.writeJSON(person_data, fName)
return True
# Updating the details changed by the user..!!!
def updateDetails(data, person):
userForm = {}
if data['login'] == 'email':
userForm['username'] = data['email']
elif data['login'] == 'phone':
userForm['username'] = data['phone']
    # This module is called to check whether the user changed the details to some already registered user's details..!!!
res = CheckIfUserExists_mod.VerifyTheUserUpdate(data, person)
if res:
cursor2 = connection.cursor()
        # Updating login details [IF CHANGED]..!!
cursor2.execute("UPDATE auth_user SET username = %s, email = %s WHERE email = %s", [userForm['username'], data['email'], person.email])
cursor = connection.cursor()
        # Updating all the details [IF CHANGED]..!!!
cursor.execute("UPDATE pahera_person SET first_name = %s, last_name = %s, email = %s, phone_no = %s, login_using = %s WHERE id = %s", [data['fname'], data['lname'], data['email'], data['phone'], data['login'], person.id])
cursor1 = connection.cursor()
# Updating the login table..!!!
cursor1.execute("UPDATE pahera_user_login SET username = %s WHERE person_id = %s", [userForm['username'], person.id])
| thebachchaoproject/bachchao-server | pahera/PythonModules/UpdateUser.py | Python | mit | 2,068 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import os
import subprocess
import time
from ccmlib.dse_cluster import DseCluster
from nose.plugins.attrib import attr
from packaging.version import Version
from cassandra.auth import (DSEGSSAPIAuthProvider, DSEPlainTextAuthProvider,
SaslAuthProvider, TransitionalModePlainTextAuthProvider)
from cassandra.cluster import EXEC_PROFILE_GRAPH_DEFAULT, NoHostAvailable
from cassandra.protocol import Unauthorized
from cassandra.query import SimpleStatement
from tests.integration import (get_cluster, greaterthanorequaldse51,
remove_cluster, requiredse, DSE_VERSION, TestCluster)
from tests.integration.advanced import ADS_HOME, use_single_node_with_graph
from tests.integration.advanced.graph import reset_graph, ClassicGraphFixtures
log = logging.getLogger(__name__)
def setup_module():
if DSE_VERSION:
use_single_node_with_graph()
def teardown_module():
if DSE_VERSION:
remove_cluster() # this test messes with config
def wait_role_manager_setup_then_execute(session, statements):
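    # Retry each statement a few times with a short pause, since the DSE role manager
    # may not be ready to accept these statements immediately after the cluster starts.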
for s in statements:
exc = None
for attempt in range(3):
try:
session.execute(s)
break
except Exception as e:
exc = e
time.sleep(5)
else: # if we didn't reach `break`
if exc is not None:
raise exc
@attr('long')
@requiredse
class BasicDseAuthTest(unittest.TestCase):
@classmethod
def setUpClass(self):
"""
        This will set up the necessary infrastructure to run our authentication tests. It requires the ADS_HOME environment
        variable and our custom embedded Apache Directory Server jar in order to run.
"""
if not DSE_VERSION:
return
clear_kerberos_tickets()
self.cluster = None
# Setup variables for various keytab and other files
self.conf_file_dir = os.path.join(ADS_HOME, "conf/")
self.krb_conf = os.path.join(self.conf_file_dir, "krb5.conf")
self.dse_keytab = os.path.join(self.conf_file_dir, "dse.keytab")
self.dseuser_keytab = os.path.join(self.conf_file_dir, "dseuser.keytab")
self.cassandra_keytab = os.path.join(self.conf_file_dir, "cassandra.keytab")
self.bob_keytab = os.path.join(self.conf_file_dir, "bob.keytab")
self.charlie_keytab = os.path.join(self.conf_file_dir, "charlie.keytab")
actual_jar = os.path.join(ADS_HOME, "embedded-ads.jar")
# Create configuration directories if they don't already exists
if not os.path.exists(self.conf_file_dir):
os.makedirs(self.conf_file_dir)
if not os.path.exists(actual_jar):
raise RuntimeError('could not find {}'.format(actual_jar))
log.warning("Starting adserver")
        # Start the ADS; this will create the keytab and configuration files listed above
self.proc = subprocess.Popen(['java', '-jar', actual_jar, '-k', '--confdir', self.conf_file_dir], shell=False)
time.sleep(10)
# TODO poll for server to come up
log.warning("Starting adserver started")
ccm_cluster = get_cluster()
log.warning("fetching tickets")
# Stop cluster if running and configure it with the correct options
ccm_cluster.stop()
if isinstance(ccm_cluster, DseCluster):
# Setup kerberos options in cassandra.yaml
config_options = {'kerberos_options': {'keytab': self.dse_keytab,
'service_principal': 'dse/_HOST@DATASTAX.COM',
'qop': 'auth'},
'authentication_options': {'enabled': 'true',
'default_scheme': 'kerberos',
'scheme_permissions': 'true',
'allow_digest_with_kerberos': 'true',
'plain_text_without_ssl': 'warn',
'transitional_mode': 'disabled'},
'authorization_options': {'enabled': 'true'}}
krb5java = "-Djava.security.krb5.conf=" + self.krb_conf
# Setup dse authenticator in cassandra.yaml
ccm_cluster.set_configuration_options({
'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator',
'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer'
})
ccm_cluster.set_dse_configuration_options(config_options)
ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True, jvm_args=[krb5java])
else:
log.error("Cluster is not dse cluster test will fail")
@classmethod
def tearDownClass(self):
"""
Terminates running ADS (Apache directory server).
"""
if not DSE_VERSION:
return
self.proc.terminate()
def tearDown(self):
"""
This will clear any existing kerberos tickets by using kdestroy
"""
clear_kerberos_tickets()
if self.cluster:
self.cluster.shutdown()
def refresh_kerberos_tickets(self, keytab_file, user_name, krb_conf):
"""
        Fetches a new ticket using the keytab file and username provided.
"""
self.ads_pid = subprocess.call(['kinit', '-t', keytab_file, user_name], env={'KRB5_CONFIG': krb_conf}, shell=False)
def connect_and_query(self, auth_provider, query=None):
"""
        Runs a simple system query with the auth_provider specified.
"""
os.environ['KRB5_CONFIG'] = self.krb_conf
self.cluster = TestCluster(auth_provider=auth_provider)
self.session = self.cluster.connect()
query = query if query else "SELECT * FROM system.local"
statement = SimpleStatement(query)
rs = self.session.execute(statement)
return rs
def test_should_not_authenticate_with_bad_user_ticket(self):
"""
        This test will attempt to authenticate with a user that has a valid ticket, but is not a valid dse user.
@since 3.20
@jira_ticket PYTHON-457
@test_category dse auth
@expected_result NoHostAvailable exception should be thrown
"""
self.refresh_kerberos_tickets(self.dseuser_keytab, "dseuser@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)
    def test_should_not_authenticate_without_ticket(self):
"""
        This test will attempt to authenticate with a user that is valid but has no ticket
@since 3.20
@jira_ticket PYTHON-457
@test_category dse auth
@expected_result NoHostAvailable exception should be thrown
"""
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)
def test_connect_with_kerberos(self):
"""
        This test will attempt to authenticate with a user that is valid and has a ticket
@since 3.20
@jira_ticket PYTHON-457
@test_category dse auth
@expected_result Client should be able to connect and run a basic query
"""
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider()
rs = self.connect_and_query(auth_provider)
self.assertIsNotNone(rs)
connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()]
        # Check to make sure our server_authenticator class is being set appropriately
for connection in connections:
self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class)
def test_connect_with_kerberos_and_graph(self):
"""
        This test will attempt to authenticate with a user and execute a graph query
@since 3.20
@jira_ticket PYTHON-457
@test_category dse auth
@expected_result Client should be able to connect and run a basic graph query with authentication
"""
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"])
rs = self.connect_and_query(auth_provider)
self.assertIsNotNone(rs)
reset_graph(self.session, self._testMethodName.lower())
profiles = self.cluster.profile_manager.profiles
profiles[EXEC_PROFILE_GRAPH_DEFAULT].graph_options.graph_name = self._testMethodName.lower()
self.session.execute_graph(ClassicGraphFixtures.classic())
rs = self.session.execute_graph('g.V()')
self.assertIsNotNone(rs)
def test_connect_with_kerberos_host_not_resolved(self):
"""
        This test will attempt to authenticate with an IP address; this will fail on OS X.
        The success or failure of this test depends on a reverse DNS lookup which can be impacted by your environment,
        so if it fails, don't panic.
@since 3.20
@jira_ticket PYTHON-566
@test_category dse auth
@expected_result Client should error when ip is used
"""
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
DSEGSSAPIAuthProvider(service='dse', qops=["auth"], resolve_host_name=False)
def test_connect_with_explicit_principal(self):
"""
        This test will attempt to authenticate using valid and invalid user principals
@since 3.20
@jira_ticket PYTHON-574
@test_category dse auth
@expected_result Client principals should be used by the underlying mechanism
"""
# Connect with valid principal
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="cassandra@DATASTAX.COM")
self.connect_and_query(auth_provider)
connections = [c for holders in self.cluster.get_connection_holders() for c in holders.get_connections()]
        # Check to make sure our server_authenticator class is being set appropriately
for connection in connections:
self.assertTrue('DseAuthenticator' in connection.authenticator.server_authenticator_class)
# Use invalid principal
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="notauser@DATASTAX.COM")
self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider)
@greaterthanorequaldse51
def test_proxy_login_with_kerberos(self):
"""
Test that the proxy login works with kerberos.
"""
# Set up users for proxy login test
self._setup_for_proxy()
query = "select * from testkrbproxy.testproxy"
# Try normal login with Charlie
self.refresh_kerberos_tickets(self.charlie_keytab, "charlie@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="charlie@DATASTAX.COM")
self.connect_and_query(auth_provider, query=query)
# Try proxy login with bob
self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM",
authorization_id='charlie@DATASTAX.COM')
self.connect_and_query(auth_provider, query=query)
# Try logging with bob without mentioning charlie
self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM")
self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query)
self._remove_proxy_setup()
@greaterthanorequaldse51
def test_proxy_login_with_kerberos_forbidden(self):
"""
        Test that the proxy login fails when the proxy role is not granted
"""
# Set up users for proxy login test
self._setup_for_proxy(False)
query = "select * from testkrbproxy.testproxy"
        # Try proxy login with bob as charlie (proxy role was not granted)
self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM",
authorization_id='charlie@DATASTAX.COM')
self.assertRaises(NoHostAvailable, self.connect_and_query, auth_provider, query=query)
self.refresh_kerberos_tickets(self.bob_keytab, "bob@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal="bob@DATASTAX.COM")
self.assertRaises(Unauthorized, self.connect_and_query, auth_provider, query=query)
self._remove_proxy_setup()
def _remove_proxy_setup(self):
os.environ['KRB5_CONFIG'] = self.krb_conf
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM')
cluster = TestCluster(auth_provider=auth_provider)
session = cluster.connect()
session.execute("REVOKE PROXY.LOGIN ON ROLE '{0}' FROM '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM'))
session.execute("DROP ROLE IF EXISTS '{0}';".format('bob@DATASTAX.COM'))
session.execute("DROP ROLE IF EXISTS '{0}';".format('charlie@DATASTAX.COM'))
        # Drop the keyspace that was created for the proxy tests.
session.execute("DROP KEYSPACE testkrbproxy")
cluster.shutdown()
def _setup_for_proxy(self, grant=True):
os.environ['KRB5_CONFIG'] = self.krb_conf
self.refresh_kerberos_tickets(self.cassandra_keytab, "cassandra@DATASTAX.COM", self.krb_conf)
auth_provider = DSEGSSAPIAuthProvider(service='dse', qops=["auth"], principal='cassandra@DATASTAX.COM')
cluster = TestCluster(auth_provider=auth_provider)
session = cluster.connect()
stmts = [
"CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'),
"CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('bob@DATASTAX.COM'),
"GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'bob@DATASTAX.COM'",
"CREATE ROLE IF NOT EXISTS '{0}' WITH LOGIN = TRUE;".format('charlie@DATASTAX.COM'),
"GRANT EXECUTE ON ALL AUTHENTICATION SCHEMES to 'charlie@DATASTAX.COM'",
# Create a keyspace and allow only charlie to query it.
"CREATE KEYSPACE testkrbproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}",
"CREATE TABLE testkrbproxy.testproxy (id int PRIMARY KEY, value text)",
"GRANT ALL PERMISSIONS ON KEYSPACE testkrbproxy to '{0}'".format('charlie@DATASTAX.COM'),
]
if grant:
stmts.append("GRANT PROXY.LOGIN ON ROLE '{0}' to '{1}'".format('charlie@DATASTAX.COM', 'bob@DATASTAX.COM'))
wait_role_manager_setup_then_execute(session, stmts)
cluster.shutdown()
def clear_kerberos_tickets():
subprocess.call(['kdestroy'], shell=False)
@attr('long')
@requiredse
class BaseDseProxyAuthTest(unittest.TestCase):
@classmethod
def setUpClass(self):
"""
        This will set up the necessary infrastructure to run unified authentication tests.
"""
if not DSE_VERSION or DSE_VERSION < Version('5.1'):
return
self.cluster = None
ccm_cluster = get_cluster()
# Stop cluster if running and configure it with the correct options
ccm_cluster.stop()
if isinstance(ccm_cluster, DseCluster):
# Setup dse options in dse.yaml
config_options = {'authentication_options': {'enabled': 'true',
'default_scheme': 'internal',
'scheme_permissions': 'true',
'transitional_mode': 'normal'},
'authorization_options': {'enabled': 'true'}
}
# Setup dse authenticator in cassandra.yaml
ccm_cluster.set_configuration_options({
'authenticator': 'com.datastax.bdp.cassandra.auth.DseAuthenticator',
'authorizer': 'com.datastax.bdp.cassandra.auth.DseAuthorizer'
})
ccm_cluster.set_dse_configuration_options(config_options)
ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
else:
log.error("Cluster is not dse cluster test will fail")
# Create users and test keyspace
self.user_role = 'user1'
self.server_role = 'server'
self.root_cluster = TestCluster(auth_provider=DSEPlainTextAuthProvider('cassandra', 'cassandra'))
self.root_session = self.root_cluster.connect()
stmts = [
"CREATE USER {0} WITH PASSWORD '{1}'".format(self.server_role, self.server_role),
"CREATE USER {0} WITH PASSWORD '{1}'".format(self.user_role, self.user_role),
"CREATE KEYSPACE testproxy WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}",
"CREATE TABLE testproxy.testproxy (id int PRIMARY KEY, value text)",
"GRANT ALL PERMISSIONS ON KEYSPACE testproxy to {0}".format(self.user_role)
]
wait_role_manager_setup_then_execute(self.root_session, stmts)
@classmethod
def tearDownClass(self):
"""
Shutdown the root session.
"""
if not DSE_VERSION or DSE_VERSION < Version('5.1'):
return
self.root_session.execute('DROP KEYSPACE testproxy;')
self.root_session.execute('DROP USER {0}'.format(self.user_role))
self.root_session.execute('DROP USER {0}'.format(self.server_role))
self.root_cluster.shutdown()
def tearDown(self):
"""
Shutdown the cluster and reset proxy permissions
"""
self.cluster.shutdown()
self.root_session.execute("REVOKE PROXY.LOGIN ON ROLE {0} from {1}".format(self.user_role, self.server_role))
self.root_session.execute("REVOKE PROXY.EXECUTE ON ROLE {0} from {1}".format(self.user_role, self.server_role))
def grant_proxy_login(self):
"""
Grant PROXY.LOGIN permission on a role to a specific user.
"""
self.root_session.execute("GRANT PROXY.LOGIN on role {0} to {1}".format(self.user_role, self.server_role))
def grant_proxy_execute(self):
"""
Grant PROXY.EXECUTE permission on a role to a specific user.
"""
self.root_session.execute("GRANT PROXY.EXECUTE on role {0} to {1}".format(self.user_role, self.server_role))
@attr('long')
@greaterthanorequaldse51
class DseProxyAuthTest(BaseDseProxyAuthTest):
"""
Tests Unified Auth. Proxy Login using SASL and Proxy Execute.
"""
@classmethod
def get_sasl_options(self, mechanism='PLAIN'):
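        # 'username'/'password' authenticate the server role; 'authorization_id' names the
        # user role that the connection should be proxied as.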
sasl_options = {
"service": 'dse',
"username": 'server',
"mechanism": mechanism,
'password': self.server_role,
'authorization_id': self.user_role
}
return sasl_options
def connect_and_query(self, auth_provider, execute_as=None, query="SELECT * FROM testproxy.testproxy"):
self.cluster = TestCluster(auth_provider=auth_provider)
self.session = self.cluster.connect()
rs = self.session.execute(query, execute_as=execute_as)
return rs
def test_proxy_login_forbidden(self):
"""
Test that a proxy login is forbidden by default for a user.
@since 3.20
@jira_ticket PYTHON-662
@test_category dse auth
@expected_result connect and query should not be allowed
"""
auth_provider = SaslAuthProvider(**self.get_sasl_options())
with self.assertRaises(Unauthorized):
self.connect_and_query(auth_provider)
def test_proxy_login_allowed(self):
"""
Test that a proxy login is allowed with proper permissions.
@since 3.20
@jira_ticket PYTHON-662
@test_category dse auth
@expected_result connect and query should be allowed
"""
auth_provider = SaslAuthProvider(**self.get_sasl_options())
self.grant_proxy_login()
self.connect_and_query(auth_provider)
def test_proxy_execute_forbidden(self):
"""
Test that a proxy execute is forbidden by default for a user.
@since 3.20
@jira_ticket PYTHON-662
@test_category dse auth
@expected_result connect and query should not be allowed
"""
auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role)
with self.assertRaises(Unauthorized):
self.connect_and_query(auth_provider, execute_as=self.user_role)
def test_proxy_execute_allowed(self):
"""
Test that a proxy execute is allowed with proper permissions.
@since 3.20
@jira_ticket PYTHON-662
@test_category dse auth
@expected_result connect and query should be allowed
"""
auth_provider = DSEPlainTextAuthProvider(self.server_role, self.server_role)
self.grant_proxy_execute()
self.connect_and_query(auth_provider, execute_as=self.user_role)
def test_connection_with_transitional_mode(self):
"""
Test that the driver can connect using TransitionalModePlainTextAuthProvider
@since 3.20
@jira_ticket PYTHON-831
@test_category dse auth
@expected_result connect and query should be allowed
"""
auth_provider = TransitionalModePlainTextAuthProvider()
self.assertIsNotNone(self.connect_and_query(auth_provider, query="SELECT * from system.local"))
| datastax/python-driver | tests/integration/advanced/test_auth.py | Python | apache-2.0 | 23,152 |
#!/usr/bin/env python2.5
# -*- coding:utf-8 -*-
"""
AppStatsMiddleware adapted to Kay framework.
:Copyright: (c) 2010 Ian Lewis <ianmlewis@gmail.com>,
:license: BSD, see LICENSE for more details.
"""
from kay.conf import settings
class AppStatsMiddleware(object):
"""
Middleware to enable appstats recording.
  Based on the AppstatsDjangoMiddleware in the
  App Engine SDK.
"""
def _record_ok(self, request):
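    # Only record when the live setting "kay.ext.appstats.middleware" is "on";
    # if live settings are not installed, always record.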
if 'kay.ext.live_settings' in settings.INSTALLED_APPS:
from kay.ext.live_settings import live_settings
record_ok = live_settings.get("kay.ext.appstats.middleware", "on")
request._appstats_record = (record_ok.lower() == "on")
return request._appstats_record
else:
return True
def process_request(self, request):
"""
Called by Kay before deciding which view to execute.
"""
if self._record_ok(request):
from google.appengine.ext.appstats.recording import start_recording
start_recording()
def process_response(self, request, response):
"""
Stops recording. Optionally sets some extension data for
FirePython.
"""
if getattr(request, '_appstats_record', True):
from google.appengine.ext.appstats.recording import end_recording
firepython_set_extension_data = getattr(
request,
'firepython_set_extension_data',
None)
end_recording(response.status_code, firepython_set_extension_data)
return response
| yosukesuzuki/calendar-app | project/kay/ext/appstats/middleware.py | Python | mit | 1,467 |
from ckan.lib.cli import CkanCommand
import ckanapi
from ckanext.canada.metadata_schema import schema_description
import csv
from paste.script import command
import org_commands
__author__ = 'Statistics Canada'
__copyright__ = "Copyright 2013, Government of Canada"
__maintainer__ = "Ross Thompson"
__license__ = "MIT"
__status__ = "Development"
class UtilCommand(CkanCommand):
"""CKAN Utilities Extension
Usage::
    paster utility org-datasets -i <organization> [-r <remote server>] [-c <configuration file>]
                 move-org-datasets -1 <organization> -2 <organization> [-r <remote server>]
                       [-c <configuration file>] [-v]
delete-org -i <organization>
del-datasets -f <source_file> [-a <apikey>] [-r <remote server>] [-c <configuration file>]
                 report-raw-datasets -f <source_file> [-r <remote server>] [-c <configuration file>]
Options::
-1/--org_from <organization> From Organization ID
-2/--org_to <organization> To Organization ID
-a/--apikey <apikey> push to <remote server> using apikey
-c/--config <configuration file> Paster configuration file
-f/--file <src_file> Text file. For del_datasets this is list of package ID's.
For report_raw_datasets this is the CSV file that is generated.
-i/--org <organization> Organization ID e.g. ec
-r/--remote_server <remote server> Remote CKAN server to connect to. Default: "localhost"
Be sure to use the prefix in the server name e.g. http://data.gc.ca
-v/--verbose Display status messages while processing command
Examples::
        paster utility report-raw-datasets -f myreport.csv -r http://open.data.org/data/
"""
summary = __doc__.split('\n')[0]
usage = __doc__
parser = command.Command.standard_parser(verbose=True)
parser.add_option('-a', '--apikey', dest='apikey', default=None)
parser.add_option('-r', '--remote_server', dest='remote', default='localhost')
parser.add_option('-i', '--org', dest='organization', default='*')
parser.add_option('-1', '--org_from', dest='org_from', default=None)
parser.add_option('-2', '--org_to', dest='org_to', default=None)
parser.add_option('-f', '--file', dest='src_file', default='')
parser.add_option('-c', '--config', dest='config',
default='development.ini', help='Configuration file to use.')
parser.add_option('-G', '--geo', dest='geo_only', action='store_true')
res_types = schema_description.resource_field_by_id['resource_type']['choices_by_key']
langs = schema_description.resource_field_by_id['language']['choices_by_key']
fmt = schema_description.resource_field_by_id['format']['choices_by_key']
def command(self):
'''
Parse command line arguments and call appropriate method.
'''
if not self.args or self.args[0] in ['--help', '-h', 'help']:
print self.__doc__
return
cmd = self.args[0]
self._load_config()
ckan_server = None
if self.options.remote <> 'localhost':
if self.options.apikey:
ckan_server = ckanapi.RemoteCKAN(self.options.remote, apikey=self.options.apikey)
else:
ckan_server = ckanapi.RemoteCKAN(self.options.remote)
else:
ckan_server = ckanapi.LocalCKAN()
# Organization Commands
org_commands.cmd_configure(ckan_server)
if cmd == 'org-datasets':
if self.options.geo_only:
org_commands.get_datasets(self.options.organization, u"Geo Data | G\u00e9o")
else:
org_commands.get_datasets(self.options.organization)
elif cmd == 'move-org-datasets':
if self.options.org_from and self.options.org_to:
org_commands.move_datasets(self.options.org_from, self.options.org_to, self.options.verbose)
else:
print self.usage
elif cmd == 'delete-org':
if self.options.organization == '*':
print "Please provide a valid organization ID"
else:
org_commands.delete_organization(self.options.organization)
elif cmd == 'del-datasets':
id_list = []
f = open(self.options.src_file)
id_list = f.readlines()
f.close()
for id in id_list:
print "Deleting Package %s" % id.strip()
ckan_server.action.package_delete(id=id.strip())
elif cmd == 'report-raw-datasets':
# Write out a CSV file with some basic information about raw data-sets
ds_query = "catalog_type:\"Data | Donn\u00e9es\""
result = ckan_server.action.package_search(fq=ds_query, rows='100')
count = result['count']
print "%s records found" % count
csvfile = open(self.options.src_file, 'wb')
csvwriter = csv.writer(csvfile, dialect='excel')
# Create the header
header_fields = ['ID', 'Title English', 'Title French', 'Publisher', 'Data Type', 'Openness Score']
i = 0
while i < 12:
i += 1
header_fields.extend(['Format', 'Type', 'Title English', 'Title French', 'URL', 'Language'])
csvwriter.writerow(header_fields)
self._extract_lines(result['results'], csvwriter)
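            # Page through any remaining results, 100 rows at a time.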
if count > 100:
start_row = 100
while count > 0:
result = ckan_server.action.package_search(fq=ds_query, rows='100', start=start_row)
self._extract_lines(result['results'], csvwriter)
start_row += 100
count -= 100
csvfile.close()
def _get_extra_field(self, package_dict, field_name):
rc = ""
for field in package_dict['extras']:
if field['key'] == field_name:
rc = field['value']
return rc
def _encode_fields(self, fields):
ufields = []
for field in fields:
if field:
field = field.split('|')[0]
ufields.append(field.encode('cp1252'))
else:
ufields.append(field)
return ufields
def _openness_score(self, res_dict):
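        # Return the highest openness score found among resources of type 'file', as a string.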
score = 0
for r in res_dict:
if r['resource_type'] != 'file':
continue
score = max(score, self.fmt[r['format']]['openness_score'])
return score.__str__()
def _extract_lines(self, datasets, csvwriter):
for ds in datasets:
fields = [ds['id'], ds['title'], ds['title_fra'], ds['organization']['title']]
fields.append(self._get_extra_field(ds, 'catalog_type'))
fields.append(self._openness_score(ds['resources']))
for rs in ds['resources']:
fields.extend([rs['format'],
self.res_types[rs['resource_type']]['eng'],
rs['name'], rs['name_fra'],
rs['url'],
self.langs[rs['language']]['eng']])
csvwriter.writerow(self._encode_fields(fields))
print ds['name'] | thriuin/ckanext-utilities | ckanext/utilities/commands.py | Python | mit | 7,608 |
from tkinter import *
from tkinter import filedialog as fd
from tkinter import messagebox
import random
f2p = [301,308,316,326,335,382,383,384,393,394]
p2p = [302,303,304,305,306,307,309,310,311,312,
313,314,315,317,318,319,320,321,322,323,324,
327,328,329,330,331,332,333,334,336,338,339,
340,341,342,343,344,346,347,348,350,351,352,
354,355,356,357,358,359,360,362,367,368,369
,370,374,375,376,377,378,386]
def initGUI():
master = Tk()
master.title("CreateBat v0.4.1 by Tylersobored")
master.resizable(width=False, height=False)
master.geometry('{}x{}'.format(550, 700))
bots = ListBox(master,16,0,2,8,4,40,EXTENDED)
proxies = ListBox(master,10,3,1,10,4,22,BROWSE)
lowcpu = IntVar()
lowresource = IntVar()
username = StringVar()
password = StringVar()
filename = FileName()
dirname = FileName()
botfilename = FileName()
proxyfilename= FileName()
batname = StringVar()
botusername = StringVar()
botpassword = StringVar()
botpin = StringVar()
botworld = StringVar()
botscript = StringVar()
botparam = StringVar()
ip = StringVar()
port = StringVar()
proxyname = StringVar()
proxypass = StringVar()
################### View ############################
Checkbutton(master, text="lowcpu", variable=lowcpu, height=2)\
.grid(row=2, column=3, sticky=W)
Checkbutton(master, text="lowresource", variable=lowresource, height=2)\
.grid(row=3, column=3, sticky=W)
#################### Client Details ####################
Label(master, text="Client Username")\
.grid(row=2, column=0, sticky=W, padx=5, pady=10)
Entry(master, textvariable=username, width=20)\
.grid(row=2, column=1, sticky=W, padx=5, pady=10)
Label(master, text="Client Password")\
.grid(row=3, column=0, sticky=W, padx=5, pady=10)
Entry(master, textvariable=password, width=20)\
.grid(row=3, column=1, sticky=W, padx=5, pady=10)
Label(master, text="OSBot Jar")\
.grid(row=4, column=0, sticky=W, padx=5, pady=10)
Label(master, text="Path:").grid(row=5, column=0, sticky=W,padx=5, pady=10)
pathlabel = Label(master, text="",width = 20)
pathlabel.grid(row=5, column=1, sticky=W,padx=5, pady=10)
################### Bot Details #######################
Label(master, text="Bot Details")\
.grid(row=6, column=0, columnspan=2, padx=5, pady=10)
Label(master, text="Username")\
.grid(row=7, column=0, sticky=W, padx=5, pady=10)
wbname = Entry(master, textvariable=botusername, width=20)
wbname.grid(row=7, column=1, sticky=W, padx=5, pady=10)
Label(master, text="Password")\
.grid(row=8, column=0, sticky=W, padx=5, pady=10)
wbpass = Entry(master, textvariable=botpassword, width=20)
wbpass.grid(row=8, column=1, sticky=W, padx=5, pady=10)
Label(master, text="Pin")\
.grid(row=9, column=0, sticky=W, padx=5, pady=10)
wbpin = Entry(master, textvariable=botpin, width=20)
wbpin.grid(row=9, column=1, sticky=W, padx=5, pady=10)
Label(master, text="World")\
.grid(row=10, column=0, sticky=W, padx=5, pady=10)
wbworld = Entry(master, textvariable=botworld, width=20)
wbworld.grid(row=10, column=1, sticky=W, padx=5, pady=10)
createToolTip(wbworld,"Enter \"p2p\" or \"f2p\" for random f2p or p2p world")
Label(master, text="Script")\
.grid(row=11, column=0, sticky=W, padx=5, pady=10)
wbscript = Entry(master, textvariable=botscript, width=20)
wbscript.grid(row=11, column=1, sticky=W, padx=5, pady=10)
Label(master, text="Param")\
.grid(row=12, column=0, sticky=W, padx=5, pady=10)
wbparam = Entry(master, textvariable=botparam, width=20)
wbparam.grid(row=12, column=1, sticky=W, padx=5, pady=10)
#Create Proxies box
Label(master, text="Proxies")\
.grid(row=9, column=3,sticky=S)
proxies.createListBox()
############## Proxy details #####################
Label(master, text="Proxy Details")\
.grid(row=4, column=2, columnspan=2, padx=5, pady=10)
Label(master, text="IP")\
.grid(row=5, column=2, sticky=W, padx=5, pady=10)
wip = Entry(master, textvariable=ip, width=20)
wip.grid(row=5, column=3, sticky=W, padx=(5,20), pady=10)
Label(master, text="Port")\
.grid(row=6, column=2, sticky=W, padx=5, pady=10)
wport = Entry(master, textvariable=port, width=20)
wport.grid(row=6, column=3, sticky=W, padx=5, pady=10)
Label(master, text="Name")\
.grid(row=7, column=2, sticky=W, padx=5, pady=10)
wname = Entry(master, textvariable=proxyname, width=20)
wname.grid(row=7, column=3, sticky=W, padx=5, pady=10)
Label(master, text="Password")\
.grid(row=8, column=2, sticky=W, padx=5, pady=10)
wpass = Entry(master, textvariable=proxypass, width=20)
wpass.grid(row=8, column=3, sticky=W, padx=5, pady=10)
####################### Buttons ############################
waddbot = Button(master, text="Add Bot", command=lambda: addBot(bots,proxies,botusername,botpassword,\
botpin,wbname,wbpass,wbpin,botworld,botscript,botparam))
waddbot.grid(row=13, column=1,pady=20,sticky=E,padx=(0,10))
createToolTip(waddbot,"Click on proxy to attatch to bot")
Button(master, text="Add Proxy", command=lambda: addProxy(proxies,ip,port\
,proxyname,proxypass,wip,wport,wname,wpass)).grid(row=10, column=2, sticky=E)
Button(master, text="Clear Proxies", command=lambda: proxies.deleteElements()).grid(row=12, column=2, sticky=E)
Button(master, text="Delete Proxy", command=lambda: proxies.deleteSelected()).grid(row=11, column=2, sticky=E)
Button(master, text="Clear Bots", command=lambda: bots.deleteElements()).grid(row=17,column=2,sticky=W)
wimportproxies = Button(master,text="Import Proxies",command=lambda:getProxies(proxies,proxyfilename))
wimportproxies.grid(row=13,column=2,sticky=E,padx=(40,0),pady=(0,20))
createToolTip(wimportproxies,"Import .txt file with format \"ip:root:name:pass\" per proxy per line")
wmakebat = Button(master, text="Make Bat",command=lambda: makeBat(pathlabel,lowcpu,lowresource,username,\
password,bots,batname,dirname,filename))
wmakebat.grid(row=18,column=2,sticky=W)
createToolTip(wmakebat,"Creates .bat file at specified location")
Button(master, text="Delete Bot", command=lambda: bots.deleteSelected()).grid(row=16,column=2,sticky=W)
Button(master, text="Browse",command=lambda: getFile(filename,pathlabel)).grid(row=4,column=1,sticky=W)
wimportbots = Button(master, text="Import Bots",command=lambda: getBots(bots,botfilename))
wimportbots.grid(row=19,column=2,sticky=W)
createToolTip(wimportbots,"Import .txt file with format \"username:password\" per bot per line")
wupdate = Button(master, text="Update Bot",command=lambda: updateBot(bots,proxies,botpin,botworld,botscript,botparam))
wupdate.grid(row=13,column=0,sticky=W,padx=(10,0))
createToolTip(wupdate,"Updates selected bot (does not update username or password)")
wupdateall = Button(master,text="Update All",command=lambda:updateAll(bots,proxies,botpin,botworld,botscript,botparam))
wupdateall.grid(row=13,column=1,sticky=W)
createToolTip(wupdateall,"Updates all bots")
#Create Bot Box
Label(master, text="Bots").grid(row=15,column=0,columnspan=2)
bots.createListBox()
#Bat path
Label(master, text=".bat Location").grid(row=16,column=3,sticky=W)
wbatlocation = Button(master, text="Browse",command=lambda: getDir(dirname,dirlabel))
wbatlocation.grid(row=16,column=3,sticky=E,padx=(0,10))
createToolTip(wbatlocation,"Choose where the .bat file will be created")
dirlabel = Label(master, text="",width = 20)
dirlabel.grid(row=17, column=3, pady=10,sticky=W)
Label(master, text=".bat Name").grid(row=18,column=3,sticky=W,padx=(0,20))
wbatname = Entry(master, textvariable=batname, width=13)
wbatname.grid(row=18,column=3,sticky=E)
mainloop()
class FileName:
def __init__(self):
self.name = None
class ListBox:
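    # Thin wrapper around a Tkinter Listbox that keeps a parallel list of entries
    # ('elements') so items can be edited, re-rendered and deleted by value.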
def __init__(self,master,row,column,columnspan,height,rowspan,width,selection):
self.master = master
self.elements = []
self.row = row
self.column = column
self.columnspan = columnspan
self.rowspan = rowspan
self.height = height
self.lb = None
self.width = width
self.selection = selection
def createListBox(self):
self.lb = Listbox(self.master, width=self.width, height=self.height,exportselection=0,selectmode=self.selection)
self.lb.grid(row = self.row, column = self.column,\
columnspan=self.columnspan,rowspan=self.rowspan, padx=5,sticky=N)
def deleteElements(self):
self.lb.delete(0,END)
self.elements = []
def updateElements(self):
self.lb.delete(0,END)
for element in self.elements:
self.lb.insert(END,element)
def insertElement(self,element):
self.elements.append(element)
self.updateElements()
def updateElement(self,element,index):
self.elements[index] = element
self.updateElements()
def getSelectedElement(self):
return self.lb.curselection()
def getIndex(self,index):
return self.lb.get(index)
def selected(self):
if self.lb.curselection():
return True
return False
def getIndexOfSelected(self):
return self.lb.curselection()[0]
def deleteSelected(self):
if self.selected():
indices = self.getSelectedElement()
toDelete = []
for i in indices:
toDelete.append(self.getIndex(i))
for element in toDelete:
counter = 0
for entry in self.elements:
if entry == element:
self.lb.delete(counter)
self.elements.pop(counter)
break
counter += 1
def addProxy(proxies,ip,port,proxyname,proxypass,wip,wport,wname,wpass):
check = checkProxy(ip,port,proxyname,proxypass)
if check:
error = "Missing: "
for field in check:
if check[-1] != field:
error += field + ","
else:
error += field
messagebox.showinfo("Missing Fields",error)
return
proxies.insertElement(ip.get()+":"+port.get()+":"+proxyname.get()+":"+proxypass.get())
wip.delete(0,END)
def addBot(bots,proxies,botusername,botpassword,botpin,wbname,wbpass,wbpin,botworld,botscript,botparam):
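    # Build one bot entry of the form "username:password:pin", optionally followed by
    # "-proxy ip:port:name:pass", "-script name:param" and "-world <n>" flags.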
check = checkBot(botusername,botpassword)
if check:
error = "Missing: "
for field in check:
if check[-1] != field:
error += field + ","
else:
error += field
messagebox.showinfo("Missing Fields",error)
return
result = botusername.get()+":"+botpassword.get()
if botpin.get():
result += ":" + botpin.get()
else:
result += ":0000"
if proxies.selected():
result += " -proxy " + proxies.getIndex(proxies.getSelectedElement()[0])
if botscript.get():
result += " -script " + botscript.get()
if botparam.get():
result += ":" + botparam.get()
else:
result += ":0"
if botworld.get():
if botworld.get().lower() == "f2p":
result += " -world " + str(f2p[random.randint(-1,len(f2p)-1)])
elif botworld.get().lower() == "p2p":
result += " -world " + str(p2p[random.randint(-1,len(f2p)-1)])
else:
result += " -world " + botworld.get()
bots.insertElement(result)
wbname.delete(0,END)
wbpass.delete(0,END)
wbpin.delete(0,END)
def getFile(filename,pathlabel):
filename.name = fd.askopenfilename()
if(filename.name):
pathlabel.config(text="Path: " + filename.name)
def getDir(dirname,dirlabel):
dirname.name = fd.askdirectory()
if(dirname.name):
dirlabel.config(text=dirname.name)
def makeBat(pathlabel,lowcpu,lowresource,username,password,bots,batname,dirname,filename):
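    # Write one "java -jar <OSBot jar> [-allow ...] -login user:pass -bot <entry>" line
    # per bot into <name>.bat at the chosen location.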
check = checkFields(username,password,filename,dirname,batname,bots)
if check:
error = "Missing: "
for field in check:
if check[-1] != field:
error += field + ","
else:
error += field
messagebox.showinfo("Missing Fields",error)
return
outfile = open(dirname.name+"/"+batname.get()+".bat","w").close()
outfile = open(dirname.name+"/"+batname.get()+".bat","w")
result = "java -jar \""
result += pathlabel.cget("text")[6:] + "\""
if lowcpu.get() or lowresource.get():
result += " -allow "
if lowcpu.get():
result +="lowcpu,"
if lowresource.get():
result += "lowresource"
result += " -login " + username.get() + ":" + password.get() + " -bot "
for bot in bots.elements:
outfile.write(result + bot + "\n")
outfile.close()
messagebox.showinfo("File created",batname.get()+".bat created")
def checkFields(username,password,filename,dirname,batname,bots):
check = []
if not filename.name:
check.append("OSBot Path")
if not batname.get():
check.append(".bat Name")
if not dirname.name:
check.append(".bat Location")
if not bots.elements:
check.append("Bots")
    return check
def checkProxy(ip,port,proxyname,proxypass):
check=[]
if not ip.get():
check.append("IP")
if not port.get():
check.append("Port")
if not proxyname.get():
check.append("Name")
if not proxypass.get():
check.append("Password")
return check
def checkBot(botusername,botpassword):
check=[]
if not botusername.get():
check.append("Username")
if not botpassword.get():
check.append("Password")
return check
def getBots(bots,botfilename):
botfilename.name = fd.askopenfilename()
infile = open(botfilename.name,"r")
data = infile.readlines()
for element in data:
result = element.strip() + ":0000"
bots.insertElement(result)
infile.close()
messagebox.showinfo("File import",str(len(data)) + " bots imported")
def getProxies(proxies,proxyfilename):
proxyfilename.name = fd.askopenfilename()
infile = open(proxyfilename.name,"r")
data = infile.readlines()
for element in data:
proxies.insertElement(element)
infile.close()
messagebox.showinfo("Proxy import",str(len(data)) + " proxies imported")
def updateBot(bots,proxies,botpin,botworld,botscript,botparam):
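    # Rebuild each selected bot entry: keep its "username:password" prefix and re-apply
    # pin, proxy, script and world from the current form values.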
if not bots.selected():
return
for bot in bots.getSelectedElement():
result = bots.getIndex(bot)
paramIndex = result.find("-")
if paramIndex != -1:
endIndex = paramIndex -6
else:
endIndex = result.rfind(":")
result = result[0:endIndex]
if botpin.get():
result += ":" + botpin.get()
else:
result += ":0000"
if proxies.selected():
result += " -proxy " + proxies.getIndex(proxies.getSelectedElement()[0])
if botscript.get():
result += " -script " + botscript.get()
if botparam.get():
result += ":" + botparam.get()
else:
result += ":0"
if botworld.get():
if botworld.get().lower() == "f2p":
result += " -world " + str(f2p[random.randint(-1,len(f2p)-1)])
elif botworld.get().lower() == "p2p":
result += " -world " + str(p2p[random.randint(-1,len(f2p)-1)])
else:
result += " -world " + botworld.get()
bots.updateElement(result,bot)
def updateAll(bots,proxies,botpin,botworld,botscript,botparam):
counter = 0
for bot in bots.elements:
result = bot
paramIndex = result.find("-")
if paramIndex != -1:
endIndex = paramIndex -6
else:
endIndex = result.rfind(":")
result = result[0:endIndex]
if botpin.get():
result += ":" + botpin.get()
else:
result += ":0000"
if proxies.selected():
result += " -proxy " + proxies.getIndex(proxies.getSelectedElement()[0])
if botscript.get():
result += " -script " + botscript.get()
if botparam.get():
result += ":" + botparam.get()
else:
result += ":0"
if botworld.get():
if botworld.get().lower() == "f2p":
result += " -world " + str(f2p[random.randint(-1,len(f2p)-1)])
elif botworld.get().lower() == "p2p":
result += " -world " + str(p2p[random.randint(-1,len(f2p)-1)])
else:
result += " -world " + botworld.get()
bots.updateElement(result,counter)
counter += 1
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except TclError:
pass
label = Label(tw, text=self.text, justify=LEFT,
background="#ffffe0", relief=SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
if __name__ == '__main__':
initGUI()
| Tylersobored/MakeBat | CreateBat.py | Python | mit | 19,415 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-24 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='subscription',
options={'ordering': ('-created_at',), 'verbose_name': 'inscrição', 'verbose_name_plural': 'inscrições'},
),
migrations.AddField(
model_name='subscription',
name='paid',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='subscription',
name='cpf',
field=models.CharField(max_length=11, verbose_name='CPF'),
),
migrations.AlterField(
model_name='subscription',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='criado em'),
),
migrations.AlterField(
model_name='subscription',
name='email',
field=models.EmailField(max_length=254, verbose_name='e-mail'),
),
migrations.AlterField(
model_name='subscription',
name='name',
field=models.CharField(max_length=100, verbose_name='nome'),
),
migrations.AlterField(
model_name='subscription',
name='phone',
field=models.CharField(max_length=20, verbose_name='telefone'),
),
]
| rtancman/eventex | eventex/subscriptions/migrations/0002_auto_20160124_1935.py | Python | gpl-2.0 | 1,572 |
#!/usr/bin/env python
# encoding=utf-8
from django.contrib.gis.utils import LayerMapping
from django.contrib.gis.gdal import DataSource
from django.utils.encoding import smart_text
from ..models import Location, Gadm, SiteLocation, LocationType
# ./manage.py ogrinspect apps/locations/data/CMR_adm/CMR_adm3.shp
# LocationGeoPoly --srid=4326 --mapping --multi
# ./manage.py dumpdata locations.location locations.locationgeopoly
# > nga_adm.json
def load_gadm(country, adm_shp, level):
# Set Up Mappings and Links to SHP files
adm0_mapping = {
'id_0': 'GADMID',
'srcid': 'GADMID',
'iso': 'ISO',
'name': 'NAME_ISO',
'hasc': 'ISO2',
'validfr': 'VALIDFR',
'validto': 'VALIDTO',
'shape_leng': 'Shape_Leng',
'shape_area': 'Shape_Area',
'geom': 'MULTIPOLYGON',
}
adm1_mapping = {
'id_0': 'ID_0',
'iso': 'ISO',
'name_0': 'NAME_0',
'id_1': 'ID_1',
'srcid': 'ID_1',
'name_1': 'NAME_1',
'name': 'NAME_1',
'varname': 'VARNAME_1',
'nl_name': 'NL_NAME_1',
'hasc': 'HASC_1',
'cc': 'CC_1',
'loctype': 'TYPE_1',
'engtype': 'ENGTYPE_1',
'validfr': 'VALIDFR_1',
'validto': 'VALIDTO_1',
'remark': 'REMARKS_1',
'shape_leng': 'Shape_Leng',
'shape_area': 'Shape_Area',
'geom': 'MULTIPOLYGON',
}
adm2_mapping = {
'id_0': 'ID_0',
'iso': 'ISO',
'name_0': 'NAME_0',
'id_1': 'ID_1',
'name_1': 'NAME_1',
'id_2': 'ID_2',
'srcid': 'ID_2',
'name_2': 'NAME_2',
'name': 'NAME_2',
'varname': 'VARNAME_2',
'nl_name': 'NL_NAME_2',
'hasc': 'HASC_2',
'cc': 'CC_2',
'loctype': 'TYPE_2',
'engtype': 'ENGTYPE_2',
'validfr': 'VALIDFR_2',
'validto': 'VALIDTO_2',
'remark': 'REMARKS_2',
'shape_leng': 'Shape_Leng',
'shape_area': 'Shape_Area',
'geom': 'MULTIPOLYGON',
}
adm3_mapping = {
'id_0': 'ID_0',
'iso': 'ISO',
'name_0': 'NAME_0',
'id_1': 'ID_1',
'name_1': 'NAME_1',
'id_2': 'ID_2',
'name_2': 'NAME_2',
'id_3': 'ID_3',
'srcid': 'ID_3',
'name_3': 'NAME_3',
'name': 'NAME_3',
'varname': 'VARNAME_3',
'nl_name': 'NL_NAME_3',
'hasc': 'HASC_3',
'loctype': 'TYPE_3',
'engtype': 'ENGTYPE_3',
'validfr': 'VALIDFR_3',
'validto': 'VALIDTO_3',
'remark': 'REMARKS_3',
'shape_leng': 'Shape_Leng',
'shape_area': 'Shape_Area',
'geom': 'MULTIPOLYGON',
}
if level == 0:
import_gadm(DataSource(adm_shp), adm0_mapping)
elif level == 1:
import_gadm(DataSource(adm_shp), adm1_mapping)
elif level == 2:
import_gadm(DataSource(adm_shp), adm2_mapping)
elif level == 3:
import_gadm(DataSource(adm_shp), adm3_mapping)
return True
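# Hedged usage sketch (reusing the shapefile path from the ogrinspect comment
# near the top of this module; the country label is illustrative and is not
# actually used inside load_gadm):
#   load_gadm('Cameroon', 'apps/locations/data/CMR_adm/CMR_adm3.shp', level=3)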
def import_gadm(adm_shp, adm_map):
lm_adm = LayerMapping(Gadm, adm_shp, adm_map, transform=False,
encoding='utf-8')
lm_adm.save(strict=True, verbose=False)
return True
def load_sites(sites_shp):
sites_map = {
'code': 'CODE',
'factype': 'TYPE',
'name': 'NAMEOFFICI',
'altname': 'NAMEPOPULA',
'adm1_name': 'PROVINCE',
'adm1_code': 'PROVCODE',
'adm2_name': 'MUNICIPALI',
'adm2_code': 'MUNICODE',
'longitude': 'LONGITUDE',
'latitude': 'LATITUDE',
'zonetype': 'ZONETYPE',
'nutrition': 'NUTRITION',
'geom': 'POINT',
}
lm_import = LayerMapping(SiteLocation, sites_shp, sites_map,
transform=False, encoding='utf-8')
lm_import.save(strict=True, verbose=False)
return True
def gadm_to_loc(country_name):
# create root country location (adm0)
country_gadm = Gadm.objects.get(name=country_name)
country_type = LocationType.objects.get(code='adm0')
country_loc = Location.objects.create(
name=smart_text(country_gadm.name.strip().title()),
loc_type=country_type,
hcid=country_gadm.hasc, srcid=country_gadm.srcid)
country_gadm.location = country_loc
country_gadm.save()
# create provinces (adm1)
provinces_gadm = Gadm.objects.filter(engtype="Province")
province_type = LocationType.objects.get(code='adm1')
for prov_gadm in provinces_gadm:
prov_loc = Location.objects.create(
name=smart_text(prov_gadm.name.strip().title()),
loc_type=province_type,
hcid=prov_gadm.hasc[3:], srcid=prov_gadm.srcid, parent=country_loc,
alt_names=smart_text(prov_gadm.varname.strip()))
prov_gadm.location = prov_loc
prov_gadm.save()
print u"1: %s" % prov_gadm.location
# create municipalities (adm2)
munis_gadm = Gadm.objects.filter(engtype="Municpality|City Council")
muni_type = LocationType.objects.get(code='adm2')
for muni_gadm in munis_gadm:
hcid = muni_gadm.hasc[3:].replace(".", "")
parent_loc = provinces_gadm.get(srcid=muni_gadm.id_1).location
muni_loc = Location.objects.create(
name=smart_text(muni_gadm.name.strip().title()), loc_type=muni_type,
hcid=hcid, srcid=muni_gadm.srcid, parent=parent_loc,
alt_names=smart_text(muni_gadm.varname.strip()))
muni_gadm.location = muni_loc
muni_gadm.save()
print u"2: %s - %s" % (muni_gadm.location, muni_loc.hcid)
# create communes (adm3)
communes_gadm = Gadm.objects.filter(engtype="Commune")
commune_type = LocationType.objects.get(code='adm3')
for commune_gadm in communes_gadm:
parent_loc = munis_gadm.get(srcid=commune_gadm.id_2).location
hcid = u"%s%s" % (parent_loc.hcid, commune_gadm.name[:2].upper())
if Location.objects.filter(hcid=hcid):
hcid = u"%s%s-%s" % (
parent_loc.hcid, commune_gadm.name[:2].upper(),
commune_gadm.srcid
)
commune_loc = Location.objects.create(
name=smart_text(commune_gadm.name.strip().title()),
loc_type=commune_type,
hcid=hcid, srcid=commune_gadm.srcid, parent=parent_loc,
alt_names=smart_text(commune_gadm.varname.strip()))
commune_gadm.location = commune_loc
commune_gadm.save()
print u"3: %s - %s" % (commune_gadm.location, commune_loc.hcid)
return True
def sites_to_loc():
sites = SiteLocation.objects.all()
site_type = LocationType.objects.get(code='adm6')
for site in sites:
try:
parent_gadm = Gadm.objects.get(geom__contains=site.geom,
loctype="Commune")
parent_loc = parent_gadm.location
hcid = u"%s%s" % (
parent_loc.hcid, parent_loc.get_descendant_count() + 1
)
except Gadm.DoesNotExist:
parent_loc = Location.objects.get(pk=1)
hcid = u"%s%s" % (
parent_loc.hcid, parent_loc.get_descendant_count() + 1
)
loc = Location.objects.create(parent=parent_loc,
name=smart_text(site.name.strip()),
alt_names=smart_text(
site.altname.strip()), hcid=hcid,
srcid=site.code, loc_type=site_type,
fac_type=site.factype)
site.location = loc
site.save()
print u"%s - %s" % (loc.parent, loc.name)
return
| system7-open-source/imamd | imam/locations/utils/load.py | Python | agpl-3.0 | 7,981 |
from haystack import indexes
from models import Banca
class BancaIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Banca
def index_queryset(self):
"""Used when the entire index for model is updated."""
return self.get_model().objects.all() | agendaTCC/AgendaTCC | tccweb/apps/bancas/search_indexes.py | Python | gpl-2.0 | 378 |
import metacomm.combinatorics.all_pairs2
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
"""
Provided to make it easier to compare efficiency with other tools
as per http://pairwise.org/tools.asp
Current output is:
3^4: produces 9 rows
3^13: produces 17 rows
4^15 * 3^17 * 2^29: produces 37 rows
4^1 * 3^39 * 2^35: produces 27 rows
3^100: produces 29 rows
10^20: produces 219 rows
10^10: produces 172 rows
"""
def get_arrays( dimensions ):
opts = []
for d in dimensions:
r = []
for i in range(d[1]):
r.append( range(d[0]) )
opts += r
return opts
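# Illustrative example (assumed call, not part of the original benchmark):
#   get_arrays(((3, 2), (2, 1))) -> [[0, 1, 2], [0, 1, 2], [0, 1]]
# i.e. two parameters with 3 levels each followed by one parameter with 2 levels.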
def print_result( dimensions ):
header_list = []
for d in dimensions:
header_list.append( "%i^%i" % d )
header = " * ".join(header_list)
pairwise = all_pairs( get_arrays( dimensions ) )
n = len(list(pairwise))
print "%s: produces %i rows" % (header, n)
print_result(( (3, 4), ))
print_result(( (3, 13), ))
print_result(( (4, 15), (3, 17), (2, 29) ))
print_result(( (4, 1), (3, 39), (2, 35) ))
print_result(( (3, 100), ))
print_result(( (10, 20), ))
print_result(( (10, 10), ))
| bayandin/allpairs | examples/compare_to_others.py | Python | mit | 1,209 |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import warnings
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, chain
from six.moves import filter
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from operator import attrgetter
from zipline.errors import (
AddTermPostInit,
OrderDuringInitialize,
OverrideCommissionPostInit,
OverrideSlippagePostInit,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
UnsupportedCommissionModel,
UnsupportedOrderParameters,
UnsupportedSlippageModel,
UnsupportedDatetimeFormat,
)
from zipline.finance.trading import TradingEnvironment
from zipline.finance.blotter import Blotter
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.performance import PerformanceTracker
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.assets import Asset, Future
from zipline.assets.futures import FutureChain
from zipline.gens.composites import date_sorted_sources
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.modelling.engine import (
NoOpFFCEngine,
SimpleFFCEngine,
)
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.api_support import (
api_method,
require_not_initialized,
ZiplineAPI,
)
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
DateRuleFactory,
TimeRuleFactory,
)
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.math_utils import tolerant_equals
import zipline.protocol
from zipline.protocol import Event
from zipline.history import HistorySpec
from zipline.history.history_container import HistoryContainer
DEFAULT_CAPITAL_BASE = float("1.0e5")
class TradingAlgorithm(object):
"""
Base class for trading algorithms. Inherit and overload
initialize() and handle_data(data).
A new algorithm could look like this:
```
from zipline.api import order, symbol
def initialize(context):
context.sid = symbol('AAPL')
context.amount = 100
def handle_data(context, data):
sid = context.sid
amount = context.amount
order(sid, amount)
```
To then to run this algorithm pass these functions to
TradingAlgorithm:
my_algo = TradingAlgorithm(initialize, handle_data)
stats = my_algo.run(data)
"""
def __init__(self, *args, **kwargs):
"""Initialize sids and other state variables.
:Arguments:
:Optional:
initialize : function
Function that is called with a single
                argument at the beginning of the simulation.
handle_data : function
Function that is called with 2 arguments
(context and data) on every bar.
script : str
Algoscript that contains initialize and
handle_data function definition.
data_frequency : {'daily', 'minute'}
The duration of the bars.
capital_base : float <default: 1.0e5>
How much capital to start with.
instant_fill : bool <default: False>
Whether to fill orders immediately or on next bar.
asset_finder : An AssetFinder object
A new AssetFinder object to be used in this TradingEnvironment
equities_metadata : can be either:
- dict
- pandas.DataFrame
- object with 'read' property
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a 'read' property is provided, 'read' must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
identifiers : List
Any asset identifiers that are not provided in the
equities_metadata, but will be traded by this TradingAlgorithm
"""
self.sources = []
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = kwargs.pop('namespace', {})
self._platform = kwargs.pop('platform', 'zipline')
self.logger = None
self.benchmark_return_source = None
# default components for transact
self.slippage = VolumeShareSlippage()
self.commission = PerShare()
self.instant_fill = kwargs.pop('instant_fill', False)
# If an env has been provided, pop it
self.trading_environment = kwargs.pop('env', None)
if self.trading_environment is None:
self.trading_environment = TradingEnvironment()
# Update the TradingEnvironment with the provided asset metadata
self.trading_environment.write_data(
equities_data=kwargs.pop('equities_metadata', {}),
equities_identifiers=kwargs.pop('identifiers', []),
futures_data=kwargs.pop('futures_metadata', {}),
)
# set the capital base
self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)
self.sim_params = kwargs.pop('sim_params', None)
if self.sim_params is None:
self.sim_params = create_simulation_parameters(
capital_base=self.capital_base,
start=kwargs.pop('start', None),
end=kwargs.pop('end', None),
env=self.trading_environment,
)
else:
self.sim_params.update_internal_from_env(self.trading_environment)
# Build a perf_tracker
self.perf_tracker = PerformanceTracker(sim_params=self.sim_params,
env=self.trading_environment)
# Pull in the environment's new AssetFinder for quick reference
self.asset_finder = self.trading_environment.asset_finder
self.init_engine(kwargs.pop('ffc_loader', None))
# Maps from name to Term
self._filters = {}
self._factors = {}
self._classifiers = {}
self.blotter = kwargs.pop('blotter', None)
if not self.blotter:
self.blotter = Blotter()
        # Set the dt initially to the period start by forcing it to change
self.on_dt_changed(self.sim_params.period_start)
# The symbol lookup date specifies the date to use when resolving
# symbols to sids, and can be set using set_symbol_lookup_date()
self._symbol_lookup_date = None
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self._portfolio = None
self._account = None
self.history_container_class = kwargs.pop(
'history_container_class', HistoryContainer,
)
self.history_container = None
self.history_specs = {}
# If string is passed in, execute and get reference to
# functions.
self.algoscript = kwargs.pop('script', None)
self._initialize = None
self._before_trading_start = None
self._analyze = None
self.event_manager = EventManager()
if self.algoscript is not None:
filename = kwargs.pop('algo_filename', None)
if filename is None:
filename = '<string>'
code = compile(self.algoscript, filename, 'exec')
exec_(code, self.namespace)
self._initialize = self.namespace.get('initialize')
if 'handle_data' not in self.namespace:
raise ValueError('You must define a handle_data function.')
else:
self._handle_data = self.namespace['handle_data']
self._before_trading_start = \
self.namespace.get('before_trading_start')
# Optional analyze function, gets called after run
self._analyze = self.namespace.get('analyze')
elif kwargs.get('initialize') and kwargs.get('handle_data'):
if self.algoscript is not None:
raise ValueError('You can not set script and \
initialize/handle_data.')
self._initialize = kwargs.pop('initialize')
self._handle_data = kwargs.pop('handle_data')
self._before_trading_start = kwargs.pop('before_trading_start',
None)
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
# If method not defined, NOOP
if self._initialize is None:
self._initialize = lambda x: None
# Alternative way of setting data_frequency for backwards
# compatibility.
if 'data_frequency' in kwargs:
self.data_frequency = kwargs.pop('data_frequency')
self._most_recent_data = None
# Prepare the algo for initialization
self.initialized = False
self.initialize_args = args
self.initialize_kwargs = kwargs
def init_engine(self, loader):
"""
Construct and save an FFCEngine from loader.
If loader is None, constructs a NoOpFFCEngine.
"""
if loader is not None:
self.engine = SimpleFFCEngine(
loader,
self.trading_environment.trading_days,
self.asset_finder,
)
else:
self.engine = NoOpFFCEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs)
def before_trading_start(self, data):
if self._before_trading_start is None:
return
self._before_trading_start(self, data)
def handle_data(self, data):
self._most_recent_data = data
if self.history_container:
self.history_container.update(data, self.datetime)
self._handle_data(self, data)
# Unlike trading controls which remain constant unless placing an
# order, account controls can change each bar. Thus, must check
# every bar no matter if the algorithm places an order or not.
self.validate_account_controls()
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage={slippage},
commission={commission},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage=repr(self.slippage),
commission=repr(self.commission),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params=None):
"""
Create a merged data generator using the sources attached to this
algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if sim_params is None:
sim_params = self.sim_params
if self.benchmark_return_source is None:
if sim_params.data_frequency == 'minute' or \
sim_params.emission_rate == 'minute':
def update_time(date):
return self.trading_environment.get_open_and_close(date)[1]
else:
def update_time(date):
return date
benchmark_return_source = [
Event({'dt': update_time(dt),
'returns': ret,
'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in
self.trading_environment.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
else:
benchmark_return_source = self.benchmark_return_source
date_sorted = date_sorted_sources(*self.sources)
if source_filter:
date_sorted = filter(source_filter, date_sorted)
with_benchmarks = date_sorted_sources(benchmark_return_source,
date_sorted)
# Group together events with the same dt field. This depends on the
# events already being sorted.
return groupby(with_benchmarks, attrgetter('dt'))
def _create_generator(self, sim_params, source_filter=None):
"""
Create a basic generator setup using the sources to this algorithm.
::source_filter:: is a method that receives events in date
sorted order, and returns True for those events that should be
processed by the zipline, and False for those that should be
skipped.
"""
if not self.initialized:
self.initialize(*self.initialize_args, **self.initialize_kwargs)
self.initialized = True
if self.perf_tracker is None:
# HACK: When running with the `run` method, we set perf_tracker to
# None so that it will be overwritten here.
self.perf_tracker = PerformanceTracker(
sim_params=sim_params, env=self.trading_environment
)
self.portfolio_needs_update = True
self.account_needs_update = True
self.performance_needs_update = True
self.data_gen = self._create_data_generator(source_filter, sim_params)
self.trading_client = AlgorithmSimulator(self, sim_params)
transact_method = transact_partial(self.slippage, self.commission)
self.set_transact(transact_method)
return self.trading_client.transform(self.data_gen)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
# TODO: make a new subclass, e.g. BatchAlgorithm, and move
# the run method to the subclass, and refactor to put the
# generator creation logic into get_generator.
def run(self, source, overwrite_sim_params=True,
benchmark_return_source=None):
"""Run the algorithm.
:Arguments:
source : can be either:
- pandas.DataFrame
- zipline source
- list of sources
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the different asset identifiers
* index must be DatetimeIndex
* array contents should be price info.
:Returns:
daily_stats : pandas.DataFrame
Daily performance metrics such as returns, alpha etc.
"""
# Ensure that source is a DataSource object
if isinstance(source, list):
if overwrite_sim_params:
warnings.warn("""List of sources passed, will not attempt to extract start and end
dates. Make sure to set the correct fields in sim_params passed to
__init__().""", UserWarning)
overwrite_sim_params = False
elif isinstance(source, pd.DataFrame):
# if DataFrame provided, map columns to sids and wrap
# in DataFrameSource
copy_frame = source.copy()
copy_frame.columns = self._write_and_map_id_index_to_sids(
source.columns, source.index[0],
)
source = DataFrameSource(copy_frame)
elif isinstance(source, pd.Panel):
# If Panel provided, map items to sids and wrap
# in DataPanelSource
copy_panel = source.copy()
copy_panel.items = self._write_and_map_id_index_to_sids(
source.items, source.major_axis[0],
)
source = DataPanelSource(copy_panel)
if isinstance(source, list):
self.set_sources(source)
else:
self.set_sources([source])
# Override sim_params if params are provided by the source.
if overwrite_sim_params:
if hasattr(source, 'start'):
self.sim_params.period_start = source.start
if hasattr(source, 'end'):
self.sim_params.period_end = source.end
# Changing period_start and period_close might require updating
# of first_open and last_close.
self.sim_params.update_internal_from_env(
env=self.trading_environment
)
# The sids field of the source is the reference for the universe at
# the start of the run
self._current_universe = set()
for source in self.sources:
for sid in source.sids:
self._current_universe.add(sid)
# Check that all sids from the source are accounted for in
# the AssetFinder. This retrieve call will raise an exception if the
# sid is not found.
for sid in self._current_universe:
self.asset_finder.retrieve_asset(sid)
# force a reset of the performance tracker, in case
# this is a repeat run of the algorithm.
self.perf_tracker = None
# create zipline
self.gen = self._create_generator(self.sim_params)
# Create history containers
if self.history_specs:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
self.trading_environment,
)
# loop through simulated_trading, each iteration returns a
# perf dictionary
perfs = []
for perf in self.gen:
perfs.append(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
return daily_stats
def _write_and_map_id_index_to_sids(self, identifiers, as_of_date):
# Build new Assets for identifiers that can't be resolved as
# sids/Assets
identifiers_to_build = []
for identifier in identifiers:
asset = None
if isinstance(identifier, Asset):
asset = self.asset_finder.retrieve_asset(sid=identifier.sid,
default_none=True)
elif hasattr(identifier, '__int__'):
asset = self.asset_finder.retrieve_asset(sid=identifier,
default_none=True)
if asset is None:
identifiers_to_build.append(identifier)
self.trading_environment.write_data(
equities_identifiers=identifiers_to_build)
return self.asset_finder.map_identifier_index_to_sids(
identifiers, as_of_date,
)
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
perf['daily_perf'].update(perf['cumulative_risk_metrics'])
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = [np.datetime64(perf['period_close'], utc=True)
for perf in daily_perfs]
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
@api_method
def add_transform(self, transform, days=None):
"""
Ensures that the history container will have enough size to service
a simple transform.
:Arguments:
transform : string
                The transform to add. Must be an element of:
{'mavg', 'stddev', 'vwap', 'returns'}.
days : int <default=None>
The maximum amount of days you will want for this transform.
This is not needed for 'returns'.
"""
if transform not in {'mavg', 'stddev', 'vwap', 'returns'}:
raise ValueError('Invalid transform')
if transform == 'returns':
if days is not None:
                raise ValueError("'returns' does not use days")
self.add_history(2, '1d', 'price')
return
elif days is None:
raise ValueError('no number of days specified')
if self.sim_params.data_frequency == 'daily':
mult = 1
freq = '1d'
else:
mult = 390
freq = '1m'
bars = mult * days
self.add_history(bars, freq, 'price')
if transform == 'vwap':
self.add_history(bars, freq, 'volume')
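    # Illustrative call (window length is a hypothetical choice):
    #   add_transform('vwap', days=5)
    # registers history for 5 days of 'price' and 'volume' bars at the
    # simulation's data frequency.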
@api_method
def get_environment(self, field='platform'):
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
return env[field]
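    # Illustrative calls from user algorithm code (values are hypothetical):
    #   get_environment('data_frequency')  # e.g. 'daily' or 'minute'
    #   get_environment('*')               # the full dict built above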
def add_event(self, rule=None, callback=None):
"""
Adds an event to the algorithm's EventManager.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True):
"""
Schedules a function to be called with some timed rules.
"""
date_rule = date_rule or DateRuleFactory.every_day()
time_rule = ((time_rule or TimeRuleFactory.market_open())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
zipline.utils.events.Always())
self.add_event(
make_eventrule(date_rule, time_rule, half_days),
func,
)
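    # Hedged usage sketch (assumes a user-defined `rebalance(context, data)`
    # function; the rules shown are simply the defaults used above):
    #   schedule_function(rebalance,
    #                     date_rule=DateRuleFactory.every_day(),
    #                     time_rule=TimeRuleFactory.market_open())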
@api_method
def record(self, *args, **kwargs):
"""
        Track and record local variables (i.e. attributes) each day.
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, iteritems(kwargs)):
self._recorded_vars[name] = value
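    # Illustrative call (hypothetical variable names):
    #   record('leverage', 1.1, exposure=0.5)
    # stores {'leverage': 1.1, 'exposure': 0.5} in recorded_vars for the
    # current day.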
@api_method
def symbol(self, symbol_str):
"""
Default symbol lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
# If the user has not set the symbol lookup date,
        # use the period_end as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date if self._symbol_lookup_date is not None \
else self.sim_params.period_end
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
)
@api_method
def symbols(self, *args):
"""
Default symbols lookup for any source that directly maps the
symbol to the Asset (e.g. yahoo finance).
"""
return [self.symbol(identifier) for identifier in args]
@api_method
def sid(self, a_sid):
"""
Default sid lookup for any source that directly maps the integer sid
to the Asset.
"""
return self.asset_finder.retrieve_asset(a_sid)
@api_method
def future_chain(self, root_symbol, as_of_date=None):
""" Look up a future chain with the specified parameters.
Parameters
----------
root_symbol : str
The root symbol of a future chain.
as_of_date : datetime.datetime or pandas.Timestamp or str, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc.
Returns
-------
FutureChain
The future chain matching the specified parameters.
Raises
------
RootSymbolNotFound
If a future chain could not be found for the given root symbol.
"""
if as_of_date:
try:
as_of_date = pd.Timestamp(as_of_date, tz='UTC')
except ValueError:
raise UnsupportedDatetimeFormat(input=as_of_date,
method='future_chain')
return FutureChain(
asset_finder=self.asset_finder,
get_datetime=self.get_datetime,
root_symbol=root_symbol.upper(),
as_of_date=as_of_date
)
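    # Hedged usage sketch (root symbol and date are illustrative assumptions):
    #   chain = future_chain('CL', as_of_date='2015-01-05')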
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
last_price = self.trading_client.current_data[asset].price
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
if isinstance(asset, Future):
value_multiplier = asset.contract_multiplier
else:
value_multiplier = 1
return value / (last_price * value_multiplier)
@api_method
def order(self, sid, amount,
limit_price=None,
stop_price=None,
style=None):
"""
Place an order using the specified parameters.
"""
def round_if_near_integer(a, epsilon=1e-4):
"""
Round a to the nearest integer if that integer is within an epsilon
of a.
"""
if abs(a - round(a)) <= epsilon:
return round(a)
else:
return a
# Truncate to the integer share count that's either within .0001 of
# amount or closer to zero.
# E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
amount = int(round_if_near_integer(amount))
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(sid,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(limit_price,
stop_price,
style)
return self.blotter.order(sid, amount, style)
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.updated_portfolio(),
self.get_datetime(),
self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
# TODO_SS: DeprecationWarning for usage of limit_price and stop_price.
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price)
if limit_price:
return LimitOrder(limit_price)
if stop_price:
return StopOrder(stop_price)
else:
return MarketOrder()
@api_method
def order_value(self, sid, value,
limit_price=None, stop_price=None, style=None):
"""
Place an order by desired value rather than desired number of shares.
If the requested sid is found in the universe, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
Market order: order(sid, value)
Limit order: order(sid, value, limit_price)
Stop order: order(sid, value, None, stop_price)
StopLimit order: order(sid, value, limit_price, stop_price)
"""
amount = self._calculate_order_value_amount(sid, value)
return self.order(sid, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
@property
def portfolio(self):
return self.updated_portfolio()
def updated_portfolio(self):
if self.portfolio_needs_update:
self._portfolio = \
self.perf_tracker.get_portfolio(self.performance_needs_update)
self.portfolio_needs_update = False
self.performance_needs_update = False
return self._portfolio
@property
def account(self):
return self.updated_account()
def updated_account(self):
if self.account_needs_update:
self._account = \
self.perf_tracker.get_account(self.performance_needs_update)
self.account_needs_update = False
self.performance_needs_update = False
return self._account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
assert isinstance(dt, datetime), \
"Attempt to set algorithm's current time with non-datetime"
assert dt.tzinfo == pytz.utc, \
"Algorithm expects a utc datetime"
self.datetime = dt
self.perf_tracker.set_date(dt)
self.blotter.set_date(dt)
@api_method
def get_datetime(self, tz=None):
"""
Returns the simulation datetime.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
# Convert to the given timezone passed as a string or tzinfo.
if isinstance(tz, string_types):
tz = pytz.timezone(tz)
dt = dt.astimezone(tz)
return dt # datetime.datetime objects are immutable.
def set_transact(self, transact):
"""
Set the method that will be called to create a
transaction from open orders and trade events.
"""
self.blotter.transact = transact
def update_dividends(self, dividend_frame):
"""
Set DataFrame used to process dividends. DataFrame columns should
contain at least the entries in zp.DIVIDEND_FIELDS.
"""
self.perf_tracker.update_dividends(dividend_frame)
@api_method
def set_slippage(self, slippage):
if not isinstance(slippage, SlippageModel):
raise UnsupportedSlippageModel()
if self.initialized:
raise OverrideSlippagePostInit()
self.slippage = slippage
@api_method
def set_commission(self, commission):
if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
raise UnsupportedCommissionModel()
if self.initialized:
raise OverrideCommissionPostInit()
self.commission = commission
@api_method
def set_symbol_lookup_date(self, dt):
"""
Set the date for which symbols will be resolved to their sids
(symbols may map to different firms or underlying assets at
different times)
"""
try:
self._symbol_lookup_date = pd.Timestamp(dt, tz='UTC')
except ValueError:
raise UnsupportedDatetimeFormat(input=dt,
method='set_symbol_lookup_date')
def set_sources(self, sources):
assert isinstance(sources, list)
self.sources = sources
    # Kept for backwards compatibility.
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ('daily', 'minute')
self.sim_params.data_frequency = value
@api_method
def order_percent(self, sid, percent,
limit_price=None, stop_price=None, style=None):
"""
Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
        Note that percent must be expressed as a decimal (0.50 means 50%).
"""
value = self.portfolio.portfolio_value * percent
return self.order_value(sid, value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares,
limit_price=limit_price,
stop_price=stop_price,
style=style)
else:
return self.order(sid, target,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target_value(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
"""
target_amount = self._calculate_order_value_amount(sid, target)
return self.order_target(sid, target_amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def order_target_percent(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
        Note that target must be expressed as a decimal (0.50 means 50%).
"""
target_value = self.portfolio.portfolio_value * target
return self.order_target_value(sid, target_value,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
def get_open_orders(self, sid=None):
if sid is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if sid in self.blotter.open_orders:
orders = self.blotter.open_orders[sid]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
@api_method
def add_history(self, bar_count, frequency, field, ffill=True):
data_frequency = self.sim_params.data_frequency
history_spec = HistorySpec(bar_count, frequency, field, ffill,
data_frequency=data_frequency,
env=self.trading_environment)
self.history_specs[history_spec.key_str] = history_spec
if self.initialized:
if self.history_container:
self.history_container.ensure_spec(
history_spec, self.datetime, self._most_recent_data,
)
else:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.sim_params.first_open,
self.sim_params.data_frequency,
env=self.trading_environment,
)
def get_history_spec(self, bar_count, frequency, field, ffill):
spec_key = HistorySpec.spec_key(bar_count, frequency, field, ffill)
if spec_key not in self.history_specs:
data_freq = self.sim_params.data_frequency
spec = HistorySpec(
bar_count,
frequency,
field,
ffill,
data_frequency=data_freq,
env=self.trading_environment,
)
self.history_specs[spec_key] = spec
if not self.history_container:
self.history_container = self.history_container_class(
self.history_specs,
self.current_universe(),
self.datetime,
self.sim_params.data_frequency,
bar_data=self._most_recent_data,
env=self.trading_environment,
)
self.history_container.ensure_spec(
spec, self.datetime, self._most_recent_data,
)
return self.history_specs[spec_key]
@api_method
def history(self, bar_count, frequency, field, ffill=True):
history_spec = self.get_history_spec(
bar_count,
frequency,
field,
ffill,
)
return self.history_container.get_history(history_spec, self.datetime)
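    # Illustrative call from algorithm code:
    #   history(20, '1d', 'price')
    # returns the trailing 20 daily price bars for the current universe
    # (forward-filled by default).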
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(self.updated_portfolio(),
self.updated_account(),
self.get_datetime(),
self.trading_client.current_data)
@api_method
def set_max_leverage(self, max_leverage=None):
"""
Set a limit on the maximum leverage of the algorithm.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(self,
sid=None,
max_shares=None,
max_notional=None):
"""
Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
"""
control = MaxPositionSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
@api_method
def set_max_order_size(self, sid=None, max_shares=None, max_notional=None):
"""
Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
"""
control = MaxOrderSize(asset=sid,
max_shares=max_shares,
max_notional=max_notional)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count):
"""
Set a limit on the number of orders that can be placed within the given
time interval.
"""
control = MaxOrderCount(max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list):
"""
Set a restriction on which sids can be ordered.
"""
control = RestrictedListOrder(restricted_list)
self.register_trading_control(control)
@api_method
def set_long_only(self):
"""
Set a rule specifying that this algorithm cannot take short positions.
"""
self.register_trading_control(LongOnly())
###########
# FFC API #
###########
@api_method
@require_not_initialized(AddTermPostInit())
def add_factor(self, factor, name):
if name in self._factors:
raise ValueError("Name %r is already a factor!" % name)
self._factors[name] = factor
@api_method
@require_not_initialized(AddTermPostInit())
def add_filter(self, filter):
name = "anon_filter_%d" % len(self._filters)
self._filters[name] = filter
# Note: add_classifier is not yet implemented since you can't do anything
# useful with classifiers yet.
def _all_terms(self):
# Merge all three dicts.
return dict(
chain.from_iterable(
iteritems(terms)
for terms in (self._filters, self._factors, self._classifiers)
)
)
def compute_factor_matrix(self, start_date):
"""
Compute a factor matrix containing at least the data necessary to
provide values for `start_date`.
Loads a factor matrix with data extending from `start_date` until a
year from `start_date`, or until the end of the simulation.
"""
days = self.trading_environment.trading_days
# Load data starting from the previous trading day...
start_date_loc = days.get_loc(start_date)
# ...continuing until either the day before the simulation end, or
# until 252 days of data have been loaded. 252 is a totally arbitrary
# choice that seemed reasonable based on napkin math.
sim_end = self.sim_params.last_close.normalize()
end_loc = min(start_date_loc + 252, days.get_loc(sim_end))
end_date = days[end_loc]
return self.engine.factor_matrix(
self._all_terms(),
start_date,
end_date,
), end_date
def current_universe(self):
return self._current_universe
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
]
| YuepengGuo/zipline | zipline/algorithm.py | Python | apache-2.0 | 50,333 |
#!/usr/bin/python
import numpy as np
from sklearn.metrics import mutual_info_score
import MDAnalysis
import os
import re
import math
import sys
from itertools import combinations_with_replacement,permutations
from concurrent.futures import ProcessPoolExecutor, Future, wait
usecpus = 10  # how many cores to use
frms_num = 10000
u = MDAnalysis.Universe('ini.pdb','allpdb.trr')
f = open('CA-out.txt', 'w')
b = np.zeros((352,352))
#for i in range(0,352):
# for j in range(0,352):
# b[i][j] = 100
def new_dihedral(p):
p0 = p[0]
p1 = p[1]
p2 = p[2]
p3 = p[3]
b0 = -1.0*(p1 - p0)
b1 = p2 - p1
b2 = p3 - p2
b1 /= np.linalg.norm(b1)
v = b0 - np.dot(b0, b1)*b1
w = b2 - np.dot(b2, b1)*b1
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.degrees(np.arctan2(y, x))
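# Illustrative check (not part of the original analysis): four points where the
# last one is lifted out of the plane of the first three at a right angle give
# a dihedral of about +90 degrees, e.g.
#   new_dihedral(np.array([[0., 1., 0.], [0., 0., 0.], [1., 0., 0.], [1., 0., 1.]]))
# returns approximately 90.0.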
"""get a list of transition time points in sequential order"""
def trans_time(X):
t_t = []
"""increase 1D neighbor search radius nbr to filter out thermal fluctuations,
assuming symmetric peaks, for unsymmetric case need left nbr and right nbr"""
nbr = 10
for i in range(0 + nbr, len(X) - nbr):
peak = 1
for j in range(1, nbr+1):
if X[i] < X[i - j] or X[i] < X[i + j]:
peak = 0
break
if peak == 1:
t_t.append(i+1)
find_basin = t_time(X, t_t)
return find_basin
def t_time(X, t_t):
rg = 1
k_k = []
for i in range(0, len(X)):
peak1 = min(t_t, key=lambda x:abs(x-i))
peak2 = min(t_t, key=lambda x:abs(x-(i+rg)))
if peak1 != peak2:
k_k.append(i)
return k_k
"""
transition time function T(X, n): return the time of the n-th transition
of a time series X; the time unit is 100 picoseconds
"""
def trans_time_n(X, n, tx):
T = tx
return T[n-1]
"""
waiting time function W(X, t) to get the time interval
from t until the next transition/peak of X
"""
def wait_time(X, t, tx):
#if t < 0 :
# sys.exit("Error: time needs to be a positive number")
wait = 0
T = tx
for i in range(0,len(T)-1):
if t > T[i] and t < T[i+1]:
wait = T[i+1] - t
break
elif t < T[i]:
wait = T[i] - t
break
elif t == T[i]:
wait = T[i+1] - t
break
#elif t > T[-1]:
# wait = -100
return wait
"""
get mean persistence time of X
"""
def tau_p(X, tx):
T = tx
observ_t = T[-1]
sum = 0
for i in range(0,len(T)):
sum += wait_time(X, trans_time_n(X,i+1,T), T)
taup = math.pow(float(sum), 2)/float(2*observ_t)
return taup
"""
get mean exchange time of X following the (i+1)th transition in Y
in cases where no transition occurs in X after the transition time of Y,
the wait time is assigned 0
"""
def tau_ex(X, Y, tx, ty):
TX = tx
TY = ty
observ_t_Y = TY[-1]
sum = 0
for i in range(0,len(TY)-1):
w1 = wait_time(X, trans_time_n(Y,i+1,TY), TX)
w2 = wait_time(Y, trans_time_n(Y,i,TY), TY)
sum += w1 * w2
tauex = float(sum)/float(observ_t_Y)
return tauex
def get_ij_ca(res_i,res_j,fs):
protein = u.select_atoms('backbone')
phi_sel = protein.residues[res_i].phi_selection()
phi_sel2 = protein.residues[res_j].phi_selection()
resi_phi = []
resj_phi = []
for ts in u.trajectory:
frame = ts.frame
if frame >= fs:
break
k = new_dihedral(phi_sel.positions)
resi_phi.append(k)
p = new_dihedral(phi_sel2.positions)
resj_phi.append(p)
#get TX, TY, lists of transition time and pass them
X = np.array(resi_phi)
Y = np.array(resj_phi)
TX = trans_time(X)
TY = trans_time(Y)
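    # Coupling measure (interpretation inferred from the formulas above, not
    # stated in the original): CA = -ln(tau_ex / tau_p) is large and positive
    # when transitions in residue i tend to follow transitions in residue j
    # sooner than residue i's own mean persistence time would suggest.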
CA = (-1) * math.log(tau_ex(X, Y, TX, TY)/tau_p(X, TX))
#CA = get_ca(np.array(resi_phi),np.array(resj_phi),TX,TY)
pair = str(res_i) + '-' + str(res_j) + '.ax'
all = str(res_i) + '\t' + str(res_j) + '\t' + str(CA) + '\n'
f1 = open(pair, 'w')
f1.write(all)
f1.close()
def main():
with ProcessPoolExecutor(max_workers=usecpus) as executer:
a = []
for i, j in permutations(range(2,8), 2):
future = executer.submit(get_ij_ca, i, j, frms_num)
a.append(future)
wait(a)
#join small files together
os.system('cat *.ax > temp-all')
f2 = open("temp-all")
for line in f2.readlines():
a = re.split('\t|\n',line)
s0 = int(a[0])
s1 = int(a[1])
s2 = float(a[2])
b[s0][s1] = s2
#b[s1][s0] = s2
f2.close()
for i in range(0,352):
for j in range(0,352):
p = str(i) + '\t' + str(j) + '\t' + str(b[i][j]) + '\n'
f.write(p)
f.close()
os.system('mv *.ax crap/')
os.system('rm temp-all')
if __name__ == '__main__':
main()
| id4zs2008/blob-dyn | ca-multicore.py | Python | gpl-3.0 | 4,857 |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#
# This is a thin wrapper for native LD. This is not meant to be
# used by the user, but is called from pnacl-translate.
# This implements the native linking part of translation.
#
# All inputs must be native objects or linker scripts.
#
# --pnacl-sb will cause the sandboxed LD to be used.
# The bulk of this file is logic to invoke the sandboxed translator.
import os
import subprocess
from driver_tools import CheckTranslatorPrerequisites, GetArch, ParseArgs, \
Run, UnrecognizedOption
from driver_env import env
from driver_log import Log
import driver_tools
import elftools
import ldtools
import pathtools
EXTRA_ENV = {
'INPUTS' : '',
'OUTPUT' : '',
# The INPUTS file coming from the llc translation step
'LLC_TRANSLATED_FILE' : '',
# Number of separate modules used for multi-threaded translation. This should
# have been set by pnacl-translate, but default to 0 for checking.
'SPLIT_MODULE' : '0',
'USE_STDLIB': '1',
# Upstream gold has the segment gap built in, but the gap can be modified
# when not using the IRT. The gap does need to be at least one bundle so the
# halt sled can be added for the TCB in case the segment ends up being a
# multiple of 64k.
# --eh-frame-hdr asks the linker to generate an .eh_frame_hdr section,
# which is a presorted list of registered frames. This section is
# used by libgcc_eh/libgcc_s to avoid doing the sort during runtime.
# http://www.airs.com/blog/archives/462
#
# BE CAREFUL: anything added to LD_FLAGS should be synchronized with
# flags used by the in-browser translator.
# See: binutils/gold/nacl_file.cc
'LD_FLAGS' : '-nostdlib ' +
# Only relevant for ARM where it suppresses a warning.
# Ignored for other archs.
'--no-fix-cortex-a8 ' +
'--eh-frame-hdr ' +
# Give an error if any TEXTRELs occur.
'-z text ' +
'--build-id ',
'SEARCH_DIRS' : '${SEARCH_DIRS_USER} ${SEARCH_DIRS_BUILTIN}',
'SEARCH_DIRS_USER' : '',
'SEARCH_DIRS_BUILTIN': '${USE_STDLIB ? ${LIBS_NATIVE_ARCH}/}',
# Note: this is only used in the unsandboxed case
'RUN_LD' : '${LD} ${LD_FLAGS} ${inputs} -o ${output}'
}
def PassThrough(*args):
env.append('LD_FLAGS', *args)
LDPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-L(.+)',
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))"),
( ('-L', '(.*)'),
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))"),
# Note: we do not yet support all the combinations of flags which affect
# layout of the various sections and segments because the corner cases in gold
# may not all be worked out yet. They can be added (and tested!) as needed.
( '(-static)', PassThrough),
( '(-pie)', PassThrough),
( ('(-Ttext=.*)'), PassThrough),
( ('(-Trodata=.*)'), PassThrough),
( ('(-Ttext-segment=.*)'), PassThrough),
( ('(-Trodata-segment=.*)'), PassThrough),
( ('(--rosegment-gap=.*)'), PassThrough),
( ('(--section-start)', '(.+)'),PassThrough),
( ('(--section-start=.*)'), PassThrough),
( ('(-e)','(.*)'), PassThrough),
( '(--entry=.*)', PassThrough),
( '(-M)', PassThrough),
( '(-t)', PassThrough),
( ('-y','(.*)'), PassThrough),
( ('(-defsym)','(.*)'), PassThrough),
( '(-defsym=.*)', PassThrough),
( '-export-dynamic', PassThrough),
( '(--print-gc-sections)', PassThrough),
( '(--gc-sections)', PassThrough),
( '(--unresolved-symbols=.*)', PassThrough),
( '(--dynamic-linker=.*)', PassThrough),
( '(-g)', PassThrough),
( '(--build-id)', PassThrough),
( '-melf_nacl', "env.set('ARCH', 'X8632')"),
( ('-m','elf_nacl'), "env.set('ARCH', 'X8632')"),
( '-melf64_nacl', "env.set('ARCH', 'X8664')"),
( ('-m','elf64_nacl'), "env.set('ARCH', 'X8664')"),
( '-marmelf_nacl', "env.set('ARCH', 'ARM')"),
( ('-m','armelf_nacl'), "env.set('ARCH', 'ARM')"),
( '-mmipselelf_nacl', "env.set('ARCH', 'MIPS32')"),
( ('-m','mipselelf_nacl'), "env.set('ARCH', 'MIPS32')"),
# Inputs and options that need to be kept in order
( '(--no-as-needed)', "env.append('INPUTS', $0)"),
( '(--as-needed)', "env.append('INPUTS', $0)"),
( '(--start-group)', "env.append('INPUTS', $0)"),
( '(--end-group)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
# This is the file passed from llc during translation (used to be via shmem)
( ('--llc-translated-file=(.*)'), "env.append('INPUTS', $0)\n"
"env.set('LLC_TRANSLATED_FILE', $0)"),
( '-split-module=([0-9]+)', "env.set('SPLIT_MODULE', $0)"),
( '(--(no-)?whole-archive)', "env.append('INPUTS', $0)"),
( '(-l.*)', "env.append('INPUTS', $0)"),
( '(--undefined=.*)', "env.append('INPUTS', $0)"),
( '(-.*)', UnrecognizedOption),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def RemoveInterpProgramHeader(filename):
headers = elftools.GetELFAndProgramHeaders(filename)
assert headers
ehdr, phdrs = headers
for i, phdr in enumerate(phdrs):
if phdr.type == elftools.ProgramHeader.PT_INTERP:
fp = open(filename, 'rb+')
fp.seek(ehdr.phoff + ehdr.phentsize * i)
# Zero this program header. Note PT_NULL is 0.
fp.write('\0' * ehdr.phentsize)
fp.close()
def main(argv):
env.update(EXTRA_ENV)
ParseArgs(argv, LDPatterns)
GetArch(required=True)
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if output == '':
output = pathtools.normalize('a.out')
# As we will modify the output file in-place for non-SFI, we output
# the file to a temporary file first and then rename it. Otherwise,
# build systems such as make assume the output file is ready even
# if the last build failed during the in-place update.
tmp_output = output + '.tmp'
# Expand all parameters
# This resolves -lfoo into actual filenames,
# and expands linker scripts into command-line arguments.
inputs = ldtools.ExpandInputs(inputs,
env.get('SEARCH_DIRS'),
True,
ldtools.LibraryTypes.NATIVE)
env.push()
env.set('inputs', *inputs)
env.set('output', tmp_output)
if env.getbool('SANDBOXED'):
RunLDSandboxed()
else:
Run('${RUN_LD}')
if env.getbool('NONSFI_NACL'):
# Remove PT_INTERP in non-SFI binaries as we never use host's
# dynamic linker/loader.
#
# This is necessary otherwise we get a statically linked
# executable that is not directly runnable by Linux, because Linux
# tries to load the non-existent file that PT_INTERP points to.
#
# This is fairly hacky. It would be better if the linker provided
# an option for omitting PT_INTERP (e.g. "--dynamic-linker ''").
RemoveInterpProgramHeader(tmp_output)
if driver_tools.IsWindowsPython() and os.path.exists(output):
# On Windows (but not on Unix), the os.rename() call would fail if the
# output file already exists.
os.remove(output)
os.rename(tmp_output, output)
env.pop()
# only reached in case of no errors
return 0
def IsFlag(arg):
return arg.startswith('-')
def RunLDSandboxed():
if not env.getbool('USE_STDLIB'):
Log.Fatal('-nostdlib is not supported by the sandboxed translator')
CheckTranslatorPrerequisites()
# The "main" input file is the application's combined object file.
all_inputs = env.get('inputs')
main_input = env.getone('LLC_TRANSLATED_FILE')
if not main_input:
Log.Fatal("Sandboxed LD requires one shm input file")
outfile = env.getone('output')
modules = int(env.getone('SPLIT_MODULE'))
assert modules >= 1
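    # The llc split-module outputs are assumed to sit consecutively in the
    # input list, starting at the main translated file.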
first_mainfile = all_inputs.index(main_input)
first_extra = all_inputs.index(main_input) + modules
# Have a list of just the split module files.
llc_outputs = all_inputs[first_mainfile:first_extra]
# Have a list of everything else.
other_inputs = all_inputs[:first_mainfile] + all_inputs[first_extra:]
ld_flags = env.get('LD_FLAGS')
script = MakeSelUniversalScriptForLD(ld_flags,
llc_outputs,
outfile)
native_libs_dirname = pathtools.tosys(GetNativeLibsDirname(other_inputs))
Run('${SEL_UNIVERSAL_PREFIX} ${SEL_UNIVERSAL} ' +
'${SEL_UNIVERSAL_FLAGS} -a -B ${IRT_BLOB} ' +
'-E NACL_IRT_OPEN_RESOURCE_BASE=' + native_libs_dirname + ' ' +
'-E NACL_IRT_OPEN_RESOURCE_REMAP=' +
'libpnacl_irt_shim.a:libpnacl_irt_shim_dummy.a' +
' -- ${LD_SB}',
stdin_contents=script,
# stdout/stderr will be automatically dumped
# upon failure
redirect_stderr=subprocess.PIPE,
redirect_stdout=subprocess.PIPE)
def MakeSelUniversalScriptForLD(ld_flags,
llc_outputs,
outfile):
""" Return sel_universal script text for invoking LD.nexe with the
given ld_flags, and llc_outputs (which are treated specially).
The output will be written to outfile. """
script = []
# Open the output file.
script.append('readwrite_file nexefile %s' % outfile)
modules = len(llc_outputs)
script.extend(['readonly_file objfile%d %s' % (i, f)
for i, f in zip(range(modules), llc_outputs)])
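    # Pad the handle list out to 16 entries with invalid handles so the
    # RunWithSplit call always passes the same number of handle arguments.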
script.append('rpc RunWithSplit i(%d) ' % modules +
' '.join(['h(objfile%s)' % m for m in range(modules)] +
['h(invalid)' for x in range(modules, 16)]) +
' h(nexefile) *')
script.append('echo "ld complete"')
script.append('')
return '\n'.join(script)
def GetNativeLibsDirname(other_inputs):
"""Check that native libs have a common directory and return the directory."""
dirname = None
for f in other_inputs:
if IsFlag(f):
continue
else:
if not pathtools.exists(f):
Log.Fatal("Unable to open '%s'", pathtools.touser(f))
if dirname is None:
dirname = pathtools.dirname(f)
else:
if dirname != pathtools.dirname(f):
Log.Fatal('Need a common directory for native libs: %s != %s',
dirname, pathtools.dirname(f))
if not dirname:
Log.Fatal('No native libraries found')
return dirname + '/'
| cohortfsllc/cohort-cocl2-sandbox | pnacl/driver/nativeld.py | Python | bsd-3-clause | 10,971 |
"""
Helper functions for managing processes.
"""
from __future__ import print_function
import sys
import os
import subprocess
import signal
import psutil
def kill_process(proc):
"""
Kill the process `proc` created with `subprocess`.
"""
p1_group = psutil.Process(proc.pid)
child_pids = p1_group.get_children(recursive=True)
for child_pid in child_pids:
os.kill(child_pid.pid, signal.SIGKILL)
def run_multi_processes(cmd_list, out_log=None, err_log=None):
"""
Run each shell command in `cmd_list` in a separate process,
piping stdout to `out_log` (a path) and stderr to `err_log` (also a path).
Terminates the processes on CTRL-C and ensures the processes are killed
if an error occurs.
"""
kwargs = {'shell': True, 'cwd': None}
pids = []
if out_log:
out_log_file = open(out_log, 'w')
kwargs['stdout'] = out_log_file
if err_log:
err_log_file = open(err_log, 'w')
kwargs['stderr'] = err_log_file
try:
for cmd in cmd_list:
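            # Despite the name, `pids` collects Popen objects; kill_process()
            # below reads the .pid attribute from each of them.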
pids.extend([subprocess.Popen(cmd, **kwargs)])
def _signal_handler(*args):
print("\nEnding...")
signal.signal(signal.SIGINT, _signal_handler)
print("Enter CTL-C to end")
signal.pause()
print("Processes ending")
except Exception as err:
print("Error running process {}".format(err), file=sys.stderr)
finally:
for pid in pids:
kill_process(pid)
def run_process(cmd, out_log=None, err_log=None):
"""
Run the shell command `cmd` in a separate process,
piping stdout to `out_log` (a path) and stderr to `err_log` (also a path).
Terminates the process on CTRL-C or if an error occurs.
"""
return run_multi_processes([cmd], out_log=out_log, err_log=err_log)
| nanolearning/edx-platform | pavelib/utils/process.py | Python | agpl-3.0 | 1,828 |
import logging
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, transaction, router
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
def load(self):
try:
s = Session.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (Session.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
return {}
def exists(self, session_key):
return Session.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
if self.session_key is None:
return self.create()
obj = Session(
session_key=self._get_or_create_session_key(),
session_data=self.encode(self._get_session(no_load=must_create)),
expire_date=self.get_expiry_date()
)
using = router.db_for_write(Session, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
Session.objects.get(session_key=session_key).delete()
except Session.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
Session.objects.filter(expire_date__lt=timezone.now()).delete()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
| redhat-openstack/django | django/contrib/sessions/backends/db.py | Python | bsd-3-clause | 2,973 |
import sys, os, re
classes_ignore_list = (
'OpenCV(Test)?Case',
'OpenCV(Test)?Runner',
'CvException',
)
funcs_ignore_list = (
'\w+--HashCode',
'Mat--MatLong',
'\w+--Equals',
'Core--MinMaxLocResult',
)
class JavaParser:
def __init__(self):
self.clear()
def clear(self):
self.mdict = {}
self.tdict = {}
self.mwhere = {}
self.twhere = {}
self.empty_stubs_cnt = 0
self.r1 = re.compile("\s*public\s+(?:static\s+)?(\w+)\(([^)]*)\)") # c-tor
self.r2 = re.compile("\s*(?:(?:public|static|final)\s+){1,3}\S+\s+(\w+)\(([^)]*)\)")
self.r3 = re.compile('\s*fail\("Not yet implemented"\);') # empty test stub
def dict2set(self, d):
s = set()
for f in d.keys():
if len(d[f]) == 1:
s.add(f)
else:
s |= set(d[f])
return s
def get_tests_count(self):
return len(self.tdict)
def get_empty_stubs_count(self):
return self.empty_stubs_cnt
def get_funcs_count(self):
return len(self.dict2set(self.mdict)), len(self.mdict)
def get_not_tested(self):
mset = self.dict2set(self.mdict)
tset = self.dict2set(self.tdict)
nottested = mset - tset
out = set()
for name in nottested:
out.add(name + " " + self.mwhere[name])
return out
def parse(self, path):
if ".svn" in path:
return
if os.path.isfile(path):
if path.endswith("FeatureDetector.java"):
for prefix1 in ("", "Grid", "Pyramid", "Dynamic"):
for prefix2 in ("FAST", "STAR", "MSER", "ORB", "SIFT", "SURF", "GFTT", "HARRIS", "SIMPLEBLOB", "DENSE"):
parser.parse_file(path,prefix1+prefix2)
elif path.endswith("DescriptorExtractor.java"):
for prefix1 in ("", "Opponent"):
for prefix2 in ("BRIEF", "ORB", "SIFT", "SURF"):
parser.parse_file(path,prefix1+prefix2)
elif path.endswith("GenericDescriptorMatcher.java"):
for prefix in ("OneWay", "Fern"):
parser.parse_file(path,prefix)
elif path.endswith("DescriptorMatcher.java"):
for prefix in ("BruteForce", "BruteForceHamming", "BruteForceHammingLUT", "BruteForceL1", "FlannBased", "BruteForceSL2"):
parser.parse_file(path,prefix)
else:
parser.parse_file(path)
elif os.path.isdir(path):
for x in os.listdir(path):
self.parse(path + "/" + x)
return
def parse_file(self, fname, prefix = ""):
istest = fname.endswith("Test.java")
clsname = os.path.basename(fname).replace("Test", "").replace(".java", "")
clsname = prefix + clsname[0].upper() + clsname[1:]
for cls in classes_ignore_list:
if re.match(cls, clsname):
return
f = open(fname, "rt")
linenum = 0
for line in f:
linenum += 1
m1 = self.r1.match(line)
m2 = self.r2.match(line)
m3 = self.r3.match(line)
func = ''
args_str = ''
if m1:
func = m1.group(1)
args_str = m1.group(2)
elif m2:
if "public" not in line:
continue
func = m2.group(1)
args_str = m2.group(2)
elif m3:
self.empty_stubs_cnt += 1
continue
else:
#if "public" in line:
#print "UNRECOGNIZED: " + line
continue
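            # Record into the test dictionaries when parsing a *Test.java
            # file, otherwise into the implementation-method dictionaries.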
d = (self.mdict, self.tdict)[istest]
w = (self.mwhere, self.twhere)[istest]
func = re.sub(r"^test", "", func)
func = clsname + "--" + func[0].upper() + func[1:]
args_str = args_str.replace("[]", "Array").replace("...", "Array ")
args_str = re.sub(r"List<(\w+)>", "ListOf\g<1>", args_str)
args_str = re.sub(r"List<(\w+)>", "ListOf\g<1>", args_str)
args = [a.split()[0] for a in args_str.split(",") if a]
func_ex = func + "".join([a[0].upper() + a[1:] for a in args])
func_loc = fname + " (line: " + str(linenum) + ")"
skip = False
for fi in funcs_ignore_list:
if re.match(fi, func_ex):
skip = True
break
if skip:
continue
if func in d:
d[func].append(func_ex)
else:
d[func] = [func_ex]
w[func_ex] = func_loc
w[func] = func_loc
f.close()
return
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage:\n", \
os.path.basename(sys.argv[0]), \
"<Classes/Tests dir1/file1> [<Classes/Tests dir2/file2> ...]\n", "Not tested methods are loggedto stdout."
exit(0)
parser = JavaParser()
for x in sys.argv[1:]:
parser.parse(x)
funcs = parser.get_not_tested()
if funcs:
print "NOT TESTED methods:\n\t", "\n\t".join(sorted(funcs))
print "Total methods found: %i (%i)" % parser.get_funcs_count()
print "Not tested methods found:", len(funcs)
print "Total tests found:", parser.get_tests_count()
print "Empty test stubs found:", parser.get_empty_stubs_count()
| petterreinholdtsen/cinelerra-hv | thirdparty/OpenCV-2.3.1/modules/java/check-tests.py | Python | gpl-2.0 | 5,677 |
n = int(raw_input())
ans = 0
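# Each rectangle [x1, x2] x [y1, y2] covers (x2 - x1 + 1) * (y2 - y1 + 1) unit
# cells; cells covered by several rectangles are counted once per rectangle.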
for i in range(n):
x1, y1, x2, y2 = map(int, raw_input().split())
ans += (x2 - x1 + 1) * (y2 - y1 + 1)
print ans
| Sarthak30/Codeforces | vanya_and_tables.py | Python | gpl-2.0 | 150 |
# Copyright 2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Creates a spanning tree.
This component uses the discovery component to build a view of the network
topology, constructs a spanning tree, and then disables flooding on switch
ports that aren't on the tree by setting their NO_FLOOD bit. The result
is that topologies with loops no longer turn your network into useless
hot packet soup.
This component is inspired by and roughly based on the description of
Glenn Gibb's spanning tree module for NOX:
http://www.openflow.org/wk/index.php/Basic_Spanning_Tree
Note that this does not have much of a relationship to Spanning Tree
Protocol. They have similar purposes, but this is a rather different way
of going about it.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from collections import defaultdict
from pox.openflow.discovery import Discovery
from pox.lib.util import dpidToStr
from pox.lib.recoco import Timer
log = core.getLogger()
# Might be nice if we made this accessible on core...
#_adj = defaultdict(lambda:defaultdict(lambda:[]))
def _calc_spanning_tree ():
"""
Calculates the actual spanning tree
Returns it as dictionary where the keys are DPID1, and the
values are tuples of (DPID2, port-num), where port-num
is the port on DPID1 connecting to DPID2.
"""
def flip (link):
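    # Swap the endpoints of a link ((dpid1, port1) <-> (dpid2, port2)); used
    # below to check that the reverse direction was also discovered.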
return Discovery.Link(link[2],link[3], link[0],link[1])
adj = defaultdict(lambda:defaultdict(lambda:[]))
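  # adj[dpid1][dpid2] starts as a list of discovered links and is later
  # collapsed to the single port number on dpid1 that reaches dpid2.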
switches = set()
# Add all links and switches
for l in core.openflow_discovery.adjacency:
adj[l.dpid1][l.dpid2].append(l)
switches.add(l.dpid1)
switches.add(l.dpid2)
# Cull links -- we want a single symmetric link connecting nodes
for s1 in switches:
for s2 in switches:
if s2 not in adj[s1]:
continue
if not isinstance(adj[s1][s2], list):
continue
assert s1 is not s2
good = False
for l in adj[s1][s2]:
if flip(l) in core.openflow_discovery.adjacency:
# This is a good one
adj[s1][s2] = l.port1
adj[s2][s1] = l.port2
good = True
break
if not good:
del adj[s1][s2]
if s1 in adj[s2]:
# Delete the other way too
del adj[s2][s1]
q = []
more = set(switches)
done = set()
tree = defaultdict(set)
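  # Grow the spanning tree: pull switches off the work queue (newly discovered
  # switches first, in sorted DPID order), attach any neighbor not yet in the
  # tree, and record each edge with its connecting port in both directions.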
while True:
q = sorted(list(more)) + q
more.clear()
if len(q) == 0: break
v = q.pop(False)
if v in done: continue
done.add(v)
for w,p in adj[v].iteritems():
if w in tree: continue
more.add(w)
tree[v].add((w,p))
tree[w].add((v,adj[w][v]))
if False:
log.debug("*** SPANNING TREE ***")
for sw,ports in tree.iteritems():
#print " ", dpidToStr(sw), ":", sorted(list(ports))
#print " ", sw, ":", [l[0] for l in sorted(list(ports))]
log.debug((" %i : " % sw) + " ".join([str(l[0]) for l in
sorted(list(ports))]))
log.debug("*********************")
return tree
# Keep a list of previous port states so that we can skip some port mods
_prev = defaultdict(lambda : defaultdict(lambda : None))
def _reset (event):
# When a switch connects, forget about previous port states
_prev[event.dpid].clear()
def _handle (event):
# When links change, update spanning tree
# Get a spanning tree
tree = _calc_spanning_tree()
log.debug("Spanning tree updated")
# Now modify ports as needed
try:
change_count = 0
for sw, ports in tree.iteritems():
con = core.openflow.getConnection(sw)
if con is None: continue # Must have disconnected
tree_ports = [p[1] for p in ports]
for p in con.ports.itervalues():
if p.port_no < of.OFPP_MAX:
flood = p.port_no in tree_ports
if not flood:
if core.openflow_discovery.is_edge_port(sw, p.port_no):
flood = True
if _prev[sw][p.port_no] is flood:
continue # Skip
change_count += 1
_prev[sw][p.port_no] = flood
#print sw,p.port_no,flood
#TODO: Check results
pm = of.ofp_port_mod(port_no=p.port_no,
hw_addr=p.hw_addr,
config = 0 if flood else of.OFPPC_NO_FLOOD,
mask = of.OFPPC_NO_FLOOD)
con.send(pm)
_invalidate_ports(con.dpid)
if change_count:
log.info("%i ports changed", change_count)
except:
_prev.clear()
log.exception("Couldn't push spanning tree")
_dirty_switches = {} # A map dpid_with_dirty_ports->Timer
_coalesce_period = 2 # Seconds to wait between features requests
def _invalidate_ports (dpid):
"""
Registers the fact that port info for dpid may be out of date
When the spanning tree adjusts the port flags, the port config bits
we keep in the Connection become out of date. We don't want to just
set them locally because an in-flight port status message could
overwrite them. We also might not want to assume they get set the
way we want them. SO, we do send a features request, but we wait a
moment before sending it so that we can potentially coalesce several.
TLDR: Port information for this switch may be out of date for around
_coalesce_period seconds.
"""
if dpid in _dirty_switches:
# We're already planning to check
return
t = Timer(_coalesce_period, _check_ports, args=(dpid,))
_dirty_switches[dpid] = t
def _check_ports (dpid):
"""
Sends a features request to the given dpid
"""
_dirty_switches.pop(dpid,None)
con = core.openflow.getConnection(dpid)
if con is None: return
con.send(of.ofp_barrier_request())
con.send(of.ofp_features_request())
log.debug("Requested switch features for %s", str(con))
def launch ():
def start_spanning_tree ():
core.openflow.addListenerByName("ConnectionUp", _reset)
core.openflow_discovery.addListenerByName("LinkEvent", _handle)
log.debug("Spanning tree component ready")
core.call_when_ready(start_spanning_tree, "openflow_discovery")
| 09zwcbupt/undergrad_thesis | pox/openflow/spanning_tree.py | Python | gpl-3.0 | 6,719 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
from dateutil.relativedelta import relativedelta
import pytz
import unittest
from odoo.tools import misc, date_utils
from odoo.tests.common import TransactionCase, tagged
@tagged('standard', 'at_install')
class TestCountingStream(unittest.TestCase):
def test_empty_stream(self):
s = misc.CountingStream(iter([]))
self.assertEqual(s.index, -1)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
def test_single(self):
s = misc.CountingStream(range(1))
self.assertEqual(s.index, -1)
self.assertEqual(next(s, None), 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 1)
def test_full(self):
s = misc.CountingStream(range(42))
for _ in s:
pass
self.assertEqual(s.index, 42)
def test_repeated(self):
""" Once the CountingStream has stopped iterating, the index should not
increase anymore (the internal state should not be allowed to change)
"""
s = misc.CountingStream(iter([]))
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
@tagged('standard', 'at_install')
class TestDateRangeFunction(unittest.TestCase):
""" Test on date_range generator. """
def test_date_range_with_naive_datetimes(self):
""" Check date_range with naive datetimes. """
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
expected = [
datetime.datetime(1985, 1, 1, 0, 0),
datetime.datetime(1985, 2, 1, 0, 0),
datetime.datetime(1985, 3, 1, 0, 0),
datetime.datetime(1985, 4, 1, 0, 0),
datetime.datetime(1985, 5, 1, 0, 0),
datetime.datetime(1985, 6, 1, 0, 0),
datetime.datetime(1985, 7, 1, 0, 0),
datetime.datetime(1985, 8, 1, 0, 0),
datetime.datetime(1985, 9, 1, 0, 0),
datetime.datetime(1985, 10, 1, 0, 0),
datetime.datetime(1985, 11, 1, 0, 0),
datetime.datetime(1985, 12, 1, 0, 0),
datetime.datetime(1986, 1, 1, 0, 0)
]
dates = [date for date in date_utils.date_range(start, end)]
self.assertEqual(dates, expected)
def test_date_range_with_timezone_aware_datetimes_other_than_utc(self):
""" Check date_range with timezone-aware datetimes other than UTC."""
timezone = pytz.timezone('Europe/Brussels')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
start = timezone.localize(start)
end = timezone.localize(end)
expected = [datetime.datetime(1985, 1, 1, 0, 0),
datetime.datetime(1985, 2, 1, 0, 0),
datetime.datetime(1985, 3, 1, 0, 0),
datetime.datetime(1985, 4, 1, 0, 0),
datetime.datetime(1985, 5, 1, 0, 0),
datetime.datetime(1985, 6, 1, 0, 0),
datetime.datetime(1985, 7, 1, 0, 0),
datetime.datetime(1985, 8, 1, 0, 0),
datetime.datetime(1985, 9, 1, 0, 0),
datetime.datetime(1985, 10, 1, 0, 0),
datetime.datetime(1985, 11, 1, 0, 0),
datetime.datetime(1985, 12, 1, 0, 0),
datetime.datetime(1986, 1, 1, 0, 0)]
expected = [timezone.localize(e) for e in expected]
dates = [date for date in date_utils.date_range(start, end)]
self.assertEqual(expected, dates)
def test_date_range_with_mismatching_zones(self):
""" Check date_range with mismatching zone should raise an exception."""
start_timezone = pytz.timezone('Europe/Brussels')
end_timezone = pytz.timezone('America/Recife')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
start = start_timezone.localize(start)
end = end_timezone.localize(end)
with self.assertRaises(ValueError):
dates = [date for date in date_utils.date_range(start, end)]
def test_date_range_with_inconsistent_datetimes(self):
""" Check date_range with a timezone-aware datetime and a naive one."""
context_timezone = pytz.timezone('Europe/Brussels')
start = datetime.datetime(1985, 1, 1)
end = datetime.datetime(1986, 1, 1)
end = context_timezone.localize(end)
with self.assertRaises(ValueError):
dates = [date for date in date_utils.date_range(start, end)]
def test_date_range_with_hour(self):
""" Test date range with hour and naive datetime."""
start = datetime.datetime(2018, 3, 25)
end = datetime.datetime(2018, 3, 26)
step = relativedelta(hours=1)
expected = [
datetime.datetime(2018, 3, 25, 0, 0),
datetime.datetime(2018, 3, 25, 1, 0),
datetime.datetime(2018, 3, 25, 2, 0),
datetime.datetime(2018, 3, 25, 3, 0),
datetime.datetime(2018, 3, 25, 4, 0),
datetime.datetime(2018, 3, 25, 5, 0),
datetime.datetime(2018, 3, 25, 6, 0),
datetime.datetime(2018, 3, 25, 7, 0),
datetime.datetime(2018, 3, 25, 8, 0),
datetime.datetime(2018, 3, 25, 9, 0),
datetime.datetime(2018, 3, 25, 10, 0),
datetime.datetime(2018, 3, 25, 11, 0),
datetime.datetime(2018, 3, 25, 12, 0),
datetime.datetime(2018, 3, 25, 13, 0),
datetime.datetime(2018, 3, 25, 14, 0),
datetime.datetime(2018, 3, 25, 15, 0),
datetime.datetime(2018, 3, 25, 16, 0),
datetime.datetime(2018, 3, 25, 17, 0),
datetime.datetime(2018, 3, 25, 18, 0),
datetime.datetime(2018, 3, 25, 19, 0),
datetime.datetime(2018, 3, 25, 20, 0),
datetime.datetime(2018, 3, 25, 21, 0),
datetime.datetime(2018, 3, 25, 22, 0),
datetime.datetime(2018, 3, 25, 23, 0),
datetime.datetime(2018, 3, 26, 0, 0)
]
dates = [date for date in date_utils.date_range(start, end, step)]
self.assertEqual(dates, expected)
class TestFormatLangDate(TransactionCase):
def test_00_accepted_types(self):
date_datetime = datetime.datetime.strptime('2017-01-31 12:00:00', "%Y-%m-%d %H:%M:%S")
date_date = date_datetime.date()
date_str = '2017-01-31'
self.assertEqual(misc.format_date(self.env, date_datetime), '01/31/2017')
self.assertEqual(misc.format_date(self.env, date_date), '01/31/2017')
self.assertEqual(misc.format_date(self.env, date_str), '01/31/2017')
self.assertEqual(misc.format_date(self.env, ''), '')
self.assertEqual(misc.format_date(self.env, False), '')
self.assertEqual(misc.format_date(self.env, None), '')
def test_01_code_and_format(self):
date_str = '2017-01-31'
lang = self.env['res.lang']
# Activate French and Simplified Chinese (test with non-ASCII characters)
lang.search([('active', '=', False), ('code', 'in', ['fr_FR', 'zh_CN'])]).write({'active': True})
# Change a single parameter
self.assertEqual(misc.format_date(lang.with_context(lang='fr_FR').env, date_str), '31/01/2017')
self.assertEqual(misc.format_date(lang.env, date_str, lang_code='fr_FR'), '31/01/2017')
self.assertEqual(misc.format_date(lang.env, date_str, date_format='MMM d, y'), 'Jan 31, 2017')
# Change 2 parameters
self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, lang_code='fr_FR'), '31/01/2017')
self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, date_format='MMM d, y'), u'1\u6708 31, 2017')
self.assertEqual(misc.format_date(lang.env, date_str, lang_code='fr_FR', date_format='MMM d, y'), 'janv. 31, 2017')
# Change 3 parameters
self.assertEqual(misc.format_date(lang.with_context(lang='zh_CN').env, date_str, lang_code='en_US', date_format='MMM d, y'), 'Jan 31, 2017')
| t3dev/odoo | odoo/addons/base/tests/test_misc.py | Python | gpl-3.0 | 8,288 |
from layers.core import Layer
from utils.theano_utils import shared_zeros
import initializations
class BatchNormalization(Layer):
'''
Reference:
Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
http://arxiv.org/pdf/1502.03167v3.pdf
'''
def __init__(self, input_shape, epsilon=1e-6, weights=None):
self.init = initializations.get("uniform")
self.input_shape = input_shape
self.epsilon = epsilon
self.gamma = self.init((self.input_shape))
self.beta = shared_zeros(self.input_shape)
self.params = [self.gamma, self.beta]
if weights is not None:
self.set_weights(weights)
def output(self, train):
X = self.get_input(train)
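        # Normalize with the mean/std of the whole batch tensor, then apply
        # the learned scale (gamma) and shift (beta).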
X_normed = (X - X.mean(keepdims=True)) / (X.std(keepdims=True) + self.epsilon)
out = self.gamma * X_normed + self.beta
return out | nagadomi/keras | layers/normalization.py | Python | mit | 949 |
#!/usr/bin/python
import json
f = file('treasures.json', 'r')
try:
foo = json.load(f)
json_contents = foo
except ValueError:
json_contents = dict()
f.close()
print 'Type \'q\' to [q]uit'
while True:
name = raw_input('Treasure Name: ')
if name == 'q':
break
print 'Type \'n\' to stop entering heroes and go to [n]ext treasure'
set_contents = dict()
hero = ''
while True:
hero = raw_input('Hero name: ')
if hero == 'n' or hero == 'q':
break
else:
bundle_rating = raw_input('Item set rating [1-3]: ')
set_contents[hero] = bundle_rating
json_contents[name] = set_contents
if hero == 'q':
break
f = open('treasures.json', 'w')
json.dump(json_contents, f, indent=4)
f.close() | mosbasik/dotatreasures | create_treasures_json.py | Python | mit | 794 |
from ray.rllib.utils.deprecation import deprecation_warning
deprecation_warning(
old="ray/rllib/examples/recsim_with_slateq.py",
new="ray/rllib/examples/recommender_system_with_recsim_and_slateq.py",
error=True,
)
| ray-project/ray | rllib/examples/recsim_with_slateq.py | Python | apache-2.0 | 227 |
# coding=utf-8
import logging
module_logger = logging.getLogger("pyrst.client")
module_logger.setLevel(logging.DEBUG)
class PyrstException(Exception):
"""
    Generic, abstract exception class.
"""
def __init__(self):
self.logger = logging.getLogger("pyrst.client")
self.logger.warning(self.__repr__())
class TokenException(PyrstException):
"""
Raised when there is no token saved in the instance and a function that
    requires a token (i.e. any function other than login()) is called.
"""
def __repr__(self):
return "Cannot perform this operation without authentication token. Use" \
" the login() method to obtain one."
class AuthException(PyrstException):
"""
Raised when the user is not authorised.
"""
def __repr__(self):
return "Not authorised."
class ConnectionException(PyrstException):
"""
    Raised when the client could not connect due to a network error.
"""
def __repr__(self):
return "Connection error."
class SpaceIDException(PyrstException):
"""
Raised where a space ID is provided that does not meet the formal criteria
for a space ID (36 characters separated by hyphens).
"""
def __repr__(self):
return "You have provided an incorrect space ID. A valid Birst space ID" \
"is 36 characters long and consists of five groups of hexadecimal" \
"characters separated by hyphens."
class MissingCredentialsException(PyrstException):
"""
Raised where an operation that requires credentials (e.g. login()) is
called without providing the appropriate credentials, either directly
or via a configuration file.
"""
def __repr__(self):
return "You need to provide a password and a username, either via your" \
" configuration file or at the time of creating the Birst client " \
"object." | chrisvoncsefalvay/pyrst | pyrst/exceptions.py | Python | apache-2.0 | 1,947 |
#!/usr/bin/env python
import pylibftdi
def runner_serial(func):
"""
Decorator for functions that take a serial number as the first argument,
possibly with other arguments to follow
"""
def inner():
import sys
args = sys.argv
if len(args)>1:
serial = args[1]
else:
serial = None
if serial:
func(serial)
return 0
else:
print('Looking for APT controllers')
drv = pylibftdi.Driver()
controllers = drv.list_devices()
if controllers:
for con in controllers:
print('Found %s %s S/N: %s'%con)
func(con[2].decode('latin-1'))
print('')
return 0
else:
print('\tNo APT controllers found. Maybe you need to specify a PID')
return 1
return inner
| weinshec/pyAPT | scripts/runner.py | Python | mit | 790 |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3".
##Copyright:
##- Tomasz Makarewicz (makson96@gmail.com)
import os, tarfile, urllib.request, time, shutil
from subprocess import Popen, PIPE
recultis_dir = os.getenv("HOME") + "/.recultis/"
steam_dir = recultis_dir + "shops/steam/"
def start(login, password, recultis_dir, s_appid, game_dir):
print("Starting SteamCMD procedure")
shop_install_dir = recultis_dir + "shops/steam/"
if os.path.isdir(shop_install_dir) == False:
os.makedirs(shop_install_dir)
#start of legacy code for Recultis 1.2
if os.path.isfile(recultis_dir+"steam.sh") == True:
shutil.move(recultis_dir+"steam.sh", shop_install_dir)
if os.path.isfile(recultis_dir+"steamcmd.sh") == True:
shutil.move(recultis_dir+"steamcmd.sh", shop_install_dir)
if os.path.isfile(recultis_dir+"steamcmd_linux.tar.gz") == True:
shutil.move(recultis_dir+"steamcmd_linux.tar.gz", shop_install_dir)
if os.path.isfile(recultis_dir+"steam_log.txt") == True:
shutil.move(recultis_dir+"steam_log.txt", shop_install_dir)
if os.path.isdir(recultis_dir+"linux32") == True:
shutil.move(recultis_dir+"linux32", shop_install_dir)
if os.path.isdir(recultis_dir+"linux64") == True:
shutil.move(recultis_dir+"linux64", shop_install_dir)
if os.path.isdir(recultis_dir+"package") == True:
shutil.move(recultis_dir+"package", shop_install_dir)
if os.path.isdir(recultis_dir+"public") == True:
shutil.move(recultis_dir+"public", shop_install_dir)
#end of legacy code for Recultis 1.2
os.chdir(shop_install_dir)
if login == "" or password == "":
steam_log_file = open("steam_log.txt", "w")
steam_log_file.write("Steamcmd Error. Login or password not provided.\n")
steam_log_file.close()
print("Steamcmd Error. Login or password not provided. try again with correct one.")
steam_error = 0
else:
steamcmd_install(shop_install_dir)
steam_error = 2
retry_nr = 0
while steam_error == 2:
steam_error = run(login, password, shop_install_dir, s_appid, game_dir)
if steam_error == 2:
print("Steamcmd error. Retry.")
retry_nr = retry_nr + 1
if retry_nr == 5:
print("Steamcmd error. Reinstall steamcmd.")
steamcmd_reinstall(shop_install_dir)
elif retry_nr == 8:
steam_error = 0
if steam_error == 0:
steam_log_file = open("steam_log.txt", "a")
steam_log_file.write("\nSteamcmd Error. Terminate.")
steam_log_file.close()
print("Steamcmd Error. Terminate.")
return steam_error
def steamcmd_install(shop_install_dir):
print("Installing SteamCMD")
if os.path.isfile(shop_install_dir+"steamcmd.sh") == False:
urllib.request.urlretrieve("http://media.steampowered.com/client/steamcmd_linux.tar.gz", shop_install_dir + "steamcmd_linux.tar.gz")
tar = tarfile.open(shop_install_dir + "steamcmd_linux.tar.gz")
tar.extractall()
tar.close()
def get_last_log_line():
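    # Read steam_log.txt and walk backwards from its last line, skipping known
    # noise lines (e.g. CWorkThreadPool spam), to find the last meaningful one.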
wrong_lines = ["CWorkThreadPool"]
last_line_nr = -1
try:
steam_log_file = open("steam_log.txt", "r")
steam_log_lines = steam_log_file.readlines()
if len(steam_log_lines) > 0:
steam_last_line = steam_log_lines[last_line_nr]
for w_line in wrong_lines:
while w_line in steam_last_line:
last_line_nr -= 1
steam_last_line = steam_log_lines[last_line_nr]
else:
steam_last_line = ""
steam_log_file.close()
except FileNotFoundError:
steam_last_line = ""
return steam_last_line
def steam_guard():
while os.path.isfile(recultis_dir + "guard_key.txt") == False:
time.sleep(2)
print('Steam Guard Key detected. Verifying...')
steam_guard_file = open(recultis_dir + "guard_key.txt", "r")
steam_guard_code = steam_guard_file.readline()
steam_guard_file.close()
os.remove(recultis_dir + "guard_key.txt")
print(str(steam_guard_code).upper())
return str(steam_guard_code.upper())
def run(login, password, shop_install_dir, s_appid, game_dir):
if os.path.isfile(shop_install_dir+"steam_log.txt") == True:
os.remove(shop_install_dir+"steam_log.txt")
print("Running following steamcmd command:")
print("./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '******' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit")
print("Check " + shop_install_dir + "steam_log.txt for more details.")
steam_download = Popen("script -q -c \"./steamcmd.sh +@sSteamCmdForcePlatformType windows +login '" + login + "' '" + password + "' +force_install_dir " + game_dir + " +app_update " + s_appid + " validate +quit\" /dev/null", shell=True, stdout=open("steam_log.txt", "wb"), stdin=PIPE)
while steam_download.poll() is None:
time.sleep(2)
steam_last_line = get_last_log_line()
#Terminate the process if bad login or password
if "FAILED with result code" in steam_last_line:
steam_download.terminate()
return 0
#Terminate the process if not owning the game
elif "Failed to install app" in steam_last_line:
steam_download.terminate()
return 0
#Retry 5 times if steamcmd has memory access error
elif '$DEBUGGER "$STEAMEXE" "$@"' in steam_last_line:
return 2
#If computer is not registered on Steam, handle Steam Guard
elif 'Steam Guard' in steam_last_line:
steam_guard_code = steam_guard()
steam_download.stdin.write(bytes(steam_guard_code + '\n', 'ascii'))
steam_download.stdin.flush()
    # If there is only one line after steamcmd finished working, it means it crashed.
if sum(1 for line in open('steam_log.txt')) == 1:
rc = 0
else:
rc = 1
return rc
def steamcmd_reinstall(shop_install_dir):
print("Reinstalling SteamCMD")
print("Removing SteamCMD")
if os.path.isfile(shop_install_dir+"steam.sh") == True:
os.remove(shop_install_dir+"steam.sh")
if os.path.isfile(shop_install_dir+"steamcmd.sh") == True:
os.remove(shop_install_dir+"steamcmd.sh")
if os.path.isfile(shop_install_dir+"steamcmd_linux.tar.gz") == True:
os.remove(shop_install_dir+"steamcmd_linux.tar.gz")
if os.path.isdir(shop_install_dir+"linux32") == True:
shutil.rmtree(shop_install_dir+"linux32")
if os.path.isdir(shop_install_dir+"linux64") == True:
shutil.rmtree(shop_install_dir+"linux64")
if os.path.isdir(shop_install_dir+"package") == True:
shutil.rmtree(shop_install_dir+"package")
if os.path.isdir(shop_install_dir+"public") == True:
shutil.rmtree(shop_install_dir+"public")
steamcmd_install(shop_install_dir)
def status():
if os.path.isdir(steam_dir) == True:
os.chdir(steam_dir)
else:
status = "Preparing SteamCMD"
percent = 0
return status, percent
status = "Downloading and installing game data"
percent = 0
steam_last_line = get_last_log_line()
if steam_last_line == "":
steam_last_line = "downloading, progress: 0,0 ("
    # This code handles the steamcmd status when everything is OK
if ("downloading, progress: " in steam_last_line) or ("validating, progress: " in steam_last_line):
steam_value = steam_last_line.split("progress: ")[1]
steam_value = steam_value.split(" (")[0]
steam_value = steam_value.split(",")[0]
steam_value = steam_value.split(".")[0]
steam_value = int(steam_value)
status = "Downloading and installing game data"
percent = steam_value
elif "Success!" in steam_last_line:
status = "Download of game data completed"
percent = 100
    # This code handles the steamcmd status when a warning is present.
elif "Steam Guard" in steam_last_line:
status = "Warning: Waiting for Steam Guard authentication."
percent = 0
    # This code handles the steamcmd status when the steam tool marked steam_log.txt with an error.
if "Steamcmd Error." in steam_last_line:
try:
steam_log_file = open("steam_log.txt", "r")
steam_log_lines = steam_log_file.readlines()
steam_error_line = steam_log_lines[-3]
steam_log_file.close()
except:
steam_error_line = "Steamcmd Error. Terminate."
if "FAILED with result code 5" in steam_error_line:
status = "Error: Steam - bad login or password. Please correct and start again."
percent = 0
elif "Login or password not provided." in steam_error_line:
status = "Error: Steam - Login or password not provided. Try again with correct one."
percent = 0
elif "Failed to install app" in steam_error_line:
status = "Error: Steam - you are not game owner. Please correct and start again."
percent = 0
elif "FAILED with result code 65" in steam_error_line:
status = "Error: Could not perform Steam Guard authentication. Please try again."
percent = 0
else:
status = "Error: Steamcmd internal error. Please contact Recultis project for support."
percent = 0
return status, percent
| makson96/Recultis | tools/steam.py | Python | gpl-3.0 | 8,550 |
import unittest
from fem import QuadFE
class TestFiniteElement(unittest.TestCase):
"""
Test FiniteElement class
"""
def test_cell_type(self):
for etype in ['Q1','Q2','Q3']:
element = QuadFE(2,etype)
t = element.cell_type()
self.assertEqual(t,'quadrilateral','Type should be quadrilateral.')
| hvanwyk/quadmesh | tests/test_fem/test_finite_element.py | Python | mit | 353 |
# -*- coding: utf-8 -*-
# polkit_agent.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Daemonizes polkit authentication agent.
"""
import os
import subprocess
POLKIT_PATHS = (
'/usr/bin/lxpolkit',
'/usr/bin/lxqt-policykit-agent',
'/usr/lib/policykit-1-gnome/polkit-gnome-authentication-agent-1',
'/usr/lib/x86_64-linux-gnu/polkit-mate/polkit-mate-authentication-agent-1',
'/usr/lib/mate-polkit/polkit-mate-authentication-agent-1',
'/usr/lib/x86_64-linux-gnu/libexec/polkit-kde-authentication-agent-1',
'/usr/lib/kde4/libexec/polkit-kde-authentication-agent-1',
# now we get weird
'/usr/libexec/policykit-1-pantheon/pantheon-agent-polkit',
'/usr/lib/polkit-1-dde/dde-polkit-agent',
# do you know some we're still missing? :)
)
POLKIT_PROC_NAMES = (
'polkit-gnome-authentication-agent-1',
'polkit-kde-authentication-agent-1',
'polkit-mate-authentication-agent-1',
'lxpolkit',
'lxsession',
'gnome-shell',
'gnome-flashback',
'fingerprint-polkit-agent',
'xfce-polkit',
)
# TODO write tests for this piece.
def _get_polkit_agent():
"""
Return a valid polkit agent to use.
:rtype: str or None
"""
for polkit in POLKIT_PATHS:
if os.path.isfile(polkit):
return polkit
return None
def launch():
"""
Launch a polkit authentication agent as a daemon.
"""
agent = _get_polkit_agent()
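    # setsid detaches the agent into its own session; the backgrounded
    # subshell keeps this call from blocking.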
subprocess.call("(setsid {polkit} &)".format(polkit=agent), shell=True)
if __name__ == "__main__":
launch()
| leapcode/bitmask-dev | src/leap/bitmask/vpn/polkit.py | Python | gpl-3.0 | 2,160 |
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def headers():
headers = h2o.import_file(pyunit_utils.locate("smalldata/airlines/allyears2k_headers_only.csv"))
headers_and = h2o.import_file(pyunit_utils.locate("smalldata/airlines/allyears2k.zip"))
headers_and.set_names(headers.names)
print headers.names
print headers_and.names
assert headers.names == headers_and.names, "Expected the same column names but got {0} and {1}". \
format(headers.names, headers_and.names)
if __name__ == "__main__":
pyunit_utils.standalone_test(headers)
else:
headers()
| madmax983/h2o-3 | h2o-py/tests/testdir_misc/pyunit_headers.py | Python | apache-2.0 | 641 |
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import atexit
import re
from os import getenv, listdir, remove, sep, walk
from os.path import basename, dirname, isdir, isfile, join, normpath
from SCons.Script import Exit, SConscript, SConscriptChdir
from SCons.Util import case_sensitive_suffixes
from platformio.util import pioversion_to_intstr
def BuildFirmware(env):
# fix ASM handling under non-casitive OS
if not case_sensitive_suffixes('.s', '.S'):
env.Replace(
AS="$CC",
ASCOM="$ASPPCOM"
)
if "extra_flags" in env.get("BOARD_OPTIONS", {}).get("build", {}):
env.MergeFlags(env.subst("${BOARD_OPTIONS['build']['extra_flags']}"))
if "BUILD_FLAGS" in env:
env.MergeFlags(env['BUILD_FLAGS'])
env.BuildFramework()
firmenv = env.Clone()
vdirs = firmenv.VariantDirRecursive(
join("$BUILD_DIR", "src"), "$PROJECTSRC_DIR")
# build dependent libs
deplibs = firmenv.BuildDependentLibraries("$PROJECTSRC_DIR")
# append specified LD_SCRIPT
if "LDSCRIPT_PATH" in firmenv:
firmenv.Append(
LINKFLAGS=["-T", "$LDSCRIPT_PATH"]
)
# enable "cyclic reference" for linker
firmenv.Prepend(
_LIBFLAGS="-Wl,--start-group "
)
firmenv.Append(
_LIBFLAGS=" -Wl,--end-group"
)
_srcbuild_flags = getenv("PLATFORMIO_SRCBUILD_FLAGS",
env.subst("$SRCBUILD_FLAGS"))
if _srcbuild_flags:
firmenv.MergeFlags(_srcbuild_flags)
firmenv.Append(
CPPDEFINES=["PLATFORMIO={0:02d}{1:02d}{2:02d}".format(
*pioversion_to_intstr())]
)
return firmenv.Program(
join("$BUILD_DIR", "firmware"),
[firmenv.GlobCXXFiles(vdir) for vdir in vdirs],
LIBS=env.get("LIBS", []) + deplibs,
LIBPATH=env.get("LIBPATH", []) + ["$BUILD_DIR"],
PROGSUFFIX=".elf"
)
def GlobCXXFiles(env, path):
files = []
for suff in ["*.c", "*.cpp", "*.S"]:
_list = env.Glob(join(path, suff))
if _list:
files += _list
return files
def VariantDirRecursive(env, variant_dir, src_dir, duplicate=True,
ignore_pattern=None):
if not ignore_pattern:
ignore_pattern = (".git", ".svn")
variants = []
src_dir = env.subst(src_dir)
for root, _, _ in walk(src_dir):
_src_dir = root
_var_dir = variant_dir + root.replace(src_dir, "")
if any([s in _var_dir.lower() for s in ignore_pattern]):
continue
env.VariantDir(_var_dir, _src_dir, duplicate)
variants.append(_var_dir)
return variants
def BuildFramework(env):
if "FRAMEWORK" not in env:
return
if env['FRAMEWORK'].lower() in ("arduino", "energia"):
env.ConvertInoToCpp()
for f in env['FRAMEWORK'].split(","):
framework = f.strip().lower()
if framework in env.get("BOARD_OPTIONS", {}).get("frameworks"):
SConscriptChdir(0)
SConscript(
env.subst(join("$PIOBUILDER_DIR", "scripts", "frameworks",
"%s.py" % framework))
)
else:
Exit("Error: This board doesn't support %s framework!" %
framework)
def BuildLibrary(env, variant_dir, library_dir, ignore_files=None):
lib = env.Clone()
vdirs = lib.VariantDirRecursive(
variant_dir, library_dir, ignore_pattern=(".git", ".svn", "examples"))
srcfiles = []
for vdir in vdirs:
for item in lib.GlobCXXFiles(vdir):
if not ignore_files or item.name not in ignore_files:
srcfiles.append(item)
return lib.Library(
lib.subst(variant_dir),
srcfiles
)
def BuildDependentLibraries(env, src_dir): # pylint: disable=R0914
INCLUDES_RE = re.compile(r"^\s*#include\s+(\<|\")([^\>\"\']+)(?:\>|\")",
re.M)
LIBSOURCE_DIRS = [env.subst(d) for d in env.get("LIBSOURCE_DIRS", [])]
# start internal prototypes
class IncludeFinder(object):
def __init__(self, base_dir, name, is_system=False):
self.base_dir = base_dir
self.name = name
self.is_system = is_system
self._inc_path = None
self._lib_dir = None
self._lib_name = None
def getIncPath(self):
return self._inc_path
def getLibDir(self):
return self._lib_dir
def getLibName(self):
return self._lib_name
def run(self):
if not self.is_system and self._find_in_local():
return True
return self._find_in_system()
def _find_in_local(self):
if isfile(join(self.base_dir, self.name)):
self._inc_path = join(self.base_dir, self.name)
return True
else:
return False
def _find_in_system(self):
for lsd_dir in LIBSOURCE_DIRS:
if not isdir(lsd_dir):
continue
for ld in listdir(lsd_dir):
inc_path = normpath(join(lsd_dir, ld, self.name))
try:
lib_dir = inc_path[:inc_path.index(
sep, len(lsd_dir) + 1)]
except ValueError:
continue
lib_name = basename(lib_dir)
# ignore user's specified libs
if "IGNORE_LIBS" in env and lib_name in env['IGNORE_LIBS']:
continue
if not isfile(inc_path):
# if source code is in "src" dir
lib_dir = join(lsd_dir, lib_name, "src")
inc_path = join(lib_dir, self.name)
if isfile(inc_path):
self._lib_dir = lib_dir
self._lib_name = lib_name
self._inc_path = inc_path
return True
return False
def _get_dep_libs(src_dir):
state = {
"paths": set(),
"libs": set(),
"ordered": set()
}
state = _process_src_dir(state, env.subst(src_dir))
result = []
for item in sorted(state['ordered'], key=lambda s: s[0]):
result.append((item[1], item[2]))
return result
def _process_src_dir(state, src_dir):
for root, _, _ in walk(src_dir):
for node in (env.GlobCXXFiles(root) +
env.Glob(join(root, "*.h"))):
state = _parse_includes(state, node)
return state
def _parse_includes(state, node):
if node.path in state['paths']:
return state
else:
state['paths'].add(node.path)
skip_includes = ("arduino.h", "energia.h")
matches = INCLUDES_RE.findall(node.get_text_contents())
for (inc_type, inc_name) in matches:
base_dir = dirname(node.path)
if inc_name.lower() in skip_includes:
continue
if join(base_dir, inc_name) in state['paths']:
continue
else:
state['paths'].add(join(base_dir, inc_name))
finder = IncludeFinder(base_dir, inc_name, inc_type == "<")
if finder.run():
_parse_includes(state, env.File(finder.getIncPath()))
_lib_dir = finder.getLibDir()
if _lib_dir and _lib_dir not in state['libs']:
state['ordered'].add((
len(state['ordered']) + 1, finder.getLibName(),
_lib_dir))
state['libs'].add(_lib_dir)
state = _process_src_dir(state, _lib_dir)
return state
# end internal prototypes
deplibs = _get_dep_libs(src_dir)
env.Prepend(
CPPPATH=[join("$BUILD_DIR", l) for (l, _) in deplibs]
)
    # automatically add the "utility" dir from the lib (Arduino issue)
env.Prepend(
CPPPATH=[
join("$BUILD_DIR", l, "utility") for (l, ld) in deplibs
if isdir(join(ld, "utility"))
]
)
libs = []
for (libname, inc_dir) in deplibs:
lib = env.BuildLibrary(
join("$BUILD_DIR", libname), inc_dir)
env.Clean(libname, lib)
libs.append(lib)
return libs
def ConvertInoToCpp(env):
PROTOTYPE_RE = re.compile(
r"""^(
(?:\s*[a-z_\d]+){1,2} # return type
\s+[a-z_\d]+\s* # name of prototype
\([a-z_,\.\*\&\[\]\s\d]*\) # arguments
)\s*\{ # must end with {
""",
re.X | re.M | re.I
)
DETECTMAIN_RE = re.compile(r"void\s+(setup|loop)\s*\(", re.M | re.I)
def delete_tmpcpp_file(file_):
remove(file_)
def is_main_ino(contents):
return DETECTMAIN_RE.search(contents)
ino_nodes = (env.Glob(join("$PROJECTSRC_DIR", "*.ino")) +
env.Glob(join("$PROJECTSRC_DIR", "*.pde")))
prototypes = []
data = []
for node in ino_nodes:
ino_contents = node.get_text_contents()
prototypes += PROTOTYPE_RE.findall(ino_contents)
item = (basename(node.get_path()), ino_contents)
if is_main_ino(ino_contents):
data = [item] + data
else:
data.append(item)
if not data:
return
# create new temporary C++ valid file
tmpcpp_file = join(env.subst("$PROJECTSRC_DIR"), "piomain.cpp")
with open(tmpcpp_file, "w") as f:
f.write("#include <Arduino.h>\n")
if prototypes:
f.write("%s;" % ";\n".join(prototypes))
for name, contents in data:
f.write('\n#line 1 "%s"\n' % name)
f.write(contents)
atexit.register(delete_tmpcpp_file, tmpcpp_file)
def exists(_):
return True
def generate(env):
env.AddMethod(BuildFirmware)
env.AddMethod(GlobCXXFiles)
env.AddMethod(VariantDirRecursive)
env.AddMethod(BuildFramework)
env.AddMethod(BuildLibrary)
env.AddMethod(BuildDependentLibraries)
env.AddMethod(ConvertInoToCpp)
return env
| awong1900/platformio | platformio/builder/tools/platformio.py | Python | mit | 10,278 |
# test_repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.test.lib import (
patch,
TestBase,
with_rw_repo,
fixture,
assert_false,
assert_equal,
assert_true,
raises
)
from git import (
InvalidGitRepositoryError,
Repo,
NoSuchPathError,
Head,
Commit,
Tree,
IndexFile,
Git,
Reference,
GitDB,
Submodule,
GitCmdObjectDB,
Remote,
BadName,
GitCommandError
)
from git.repo.fun import touch
from git.util import join_path_native
from git.exc import BadObject
from gitdb.util import bin_to_hex
from git.compat import string_types
from gitdb.test.lib import with_rw_directory
import os
import sys
import tempfile
import shutil
from io import BytesIO
class TestRepo(TestBase):
@raises(InvalidGitRepositoryError)
def test_new_should_raise_on_invalid_repo_location(self):
Repo(tempfile.gettempdir())
@raises(NoSuchPathError)
def test_new_should_raise_on_non_existant_path(self):
Repo("repos/foobar")
@with_rw_repo('0.3.2.1')
def test_repo_creation_from_different_paths(self, rw_repo):
r_from_gitdir = Repo(rw_repo.git_dir)
assert r_from_gitdir.git_dir == rw_repo.git_dir
assert r_from_gitdir.git_dir.endswith('.git')
assert not rw_repo.git.working_dir.endswith('.git')
assert r_from_gitdir.git.working_dir == rw_repo.git.working_dir
def test_description(self):
txt = "Test repository"
self.rorepo.description = txt
assert_equal(self.rorepo.description, txt)
def test_heads_should_return_array_of_head_objects(self):
for head in self.rorepo.heads:
assert_equal(Head, head.__class__)
def test_heads_should_populate_head_data(self):
for head in self.rorepo.heads:
assert head.name
assert isinstance(head.commit, Commit)
# END for each head
assert isinstance(self.rorepo.heads.master, Head)
assert isinstance(self.rorepo.heads['master'], Head)
def test_tree_from_revision(self):
tree = self.rorepo.tree('0.1.6')
assert len(tree.hexsha) == 40
assert tree.type == "tree"
assert self.rorepo.tree(tree) == tree
# try from invalid revision that does not exist
self.failUnlessRaises(BadName, self.rorepo.tree, 'hello world')
def test_commit_from_revision(self):
commit = self.rorepo.commit('0.1.4')
assert commit.type == 'commit'
assert self.rorepo.commit(commit) == commit
def test_commits(self):
mc = 10
commits = list(self.rorepo.iter_commits('0.1.6', max_count=mc))
assert len(commits) == mc
c = commits[0]
assert_equal('9a4b1d4d11eee3c5362a4152216376e634bd14cf', c.hexsha)
assert_equal(["c76852d0bff115720af3f27acdb084c59361e5f6"], [p.hexsha for p in c.parents])
assert_equal("ce41fc29549042f1aa09cc03174896cf23f112e3", c.tree.hexsha)
assert_equal("Michael Trier", c.author.name)
assert_equal("mtrier@gmail.com", c.author.email)
assert_equal(1232829715, c.authored_date)
assert_equal(5 * 3600, c.author_tz_offset)
assert_equal("Michael Trier", c.committer.name)
assert_equal("mtrier@gmail.com", c.committer.email)
assert_equal(1232829715, c.committed_date)
assert_equal(5 * 3600, c.committer_tz_offset)
assert_equal("Bumped version 0.1.6\n", c.message)
c = commits[1]
assert isinstance(c.parents, tuple)
def test_trees(self):
mc = 30
num_trees = 0
for tree in self.rorepo.iter_trees('0.1.5', max_count=mc):
num_trees += 1
assert isinstance(tree, Tree)
# END for each tree
assert num_trees == mc
def _assert_empty_repo(self, repo):
# test all kinds of things with an empty, freshly initialized repo.
# It should throw good errors
# entries should be empty
assert len(repo.index.entries) == 0
# head is accessible
assert repo.head
assert repo.head.ref
assert not repo.head.is_valid()
# we can change the head to some other ref
head_ref = Head.from_path(repo, Head.to_full_path('some_head'))
assert not head_ref.is_valid()
repo.head.ref = head_ref
# is_dirty can handle all kwargs
for args in ((1, 0, 0), (0, 1, 0), (0, 0, 1)):
assert not repo.is_dirty(*args)
# END for each arg
# we can add a file to the index ( if we are not bare )
if not repo.bare:
pass
# END test repos with working tree
def test_init(self):
prev_cwd = os.getcwd()
os.chdir(tempfile.gettempdir())
git_dir_rela = "repos/foo/bar.git"
del_dir_abs = os.path.abspath("repos")
git_dir_abs = os.path.abspath(git_dir_rela)
try:
# with specific path
for path in (git_dir_rela, git_dir_abs):
r = Repo.init(path=path, bare=True)
assert isinstance(r, Repo)
assert r.bare is True
assert not r.has_separate_working_tree()
assert os.path.isdir(r.git_dir)
self._assert_empty_repo(r)
# test clone
clone_path = path + "_clone"
rc = r.clone(clone_path)
self._assert_empty_repo(rc)
try:
shutil.rmtree(clone_path)
except OSError:
# when relative paths are used, the clone may actually be inside
# of the parent directory
pass
# END exception handling
# try again, this time with the absolute version
rc = Repo.clone_from(r.git_dir, clone_path)
self._assert_empty_repo(rc)
shutil.rmtree(git_dir_abs)
try:
shutil.rmtree(clone_path)
except OSError:
# when relative paths are used, the clone may actually be inside
# of the parent directory
pass
# END exception handling
# END for each path
os.makedirs(git_dir_rela)
os.chdir(git_dir_rela)
r = Repo.init(bare=False)
assert r.bare is False
assert not r.has_separate_working_tree()
self._assert_empty_repo(r)
finally:
try:
shutil.rmtree(del_dir_abs)
except OSError:
pass
os.chdir(prev_cwd)
# END restore previous state
def test_bare_property(self):
self.rorepo.bare
def test_daemon_export(self):
orig_val = self.rorepo.daemon_export
self.rorepo.daemon_export = not orig_val
assert self.rorepo.daemon_export == (not orig_val)
self.rorepo.daemon_export = orig_val
assert self.rorepo.daemon_export == orig_val
def test_alternates(self):
cur_alternates = self.rorepo.alternates
# empty alternates
self.rorepo.alternates = []
assert self.rorepo.alternates == []
alts = ["other/location", "this/location"]
self.rorepo.alternates = alts
assert alts == self.rorepo.alternates
self.rorepo.alternates = cur_alternates
def test_repr(self):
assert repr(self.rorepo).startswith('<git.Repo ')
def test_is_dirty_with_bare_repository(self):
orig_value = self.rorepo._bare
self.rorepo._bare = True
assert_false(self.rorepo.is_dirty())
self.rorepo._bare = orig_value
def test_is_dirty(self):
self.rorepo._bare = False
for index in (0, 1):
for working_tree in (0, 1):
for untracked_files in (0, 1):
assert self.rorepo.is_dirty(index, working_tree, untracked_files) in (True, False)
# END untracked files
# END working tree
# END index
orig_val = self.rorepo._bare
self.rorepo._bare = True
assert self.rorepo.is_dirty() is False
self.rorepo._bare = orig_val
def test_head(self):
assert self.rorepo.head.reference.object == self.rorepo.active_branch.object
def test_index(self):
index = self.rorepo.index
assert isinstance(index, IndexFile)
def test_tag(self):
assert self.rorepo.tag('refs/tags/0.1.5').commit
def test_archive(self):
tmpfile = tempfile.mktemp(suffix='archive-test')
stream = open(tmpfile, 'wb')
self.rorepo.archive(stream, '0.1.6', path='doc')
assert stream.tell()
stream.close()
os.remove(tmpfile)
@patch.object(Git, '_call_process')
def test_should_display_blame_information(self, git):
git.return_value = fixture('blame')
b = self.rorepo.blame('master', 'lib/git.py')
assert_equal(13, len(b))
assert_equal(2, len(b[0]))
# assert_equal(25, reduce(lambda acc, x: acc + len(x[-1]), b))
assert_equal(hash(b[0][0]), hash(b[9][0]))
c = b[0][0]
assert_true(git.called)
assert_equal('634396b2f541a9f2d58b00be1a07f0c358b999b3', c.hexsha)
assert_equal('Tom Preston-Werner', c.author.name)
assert_equal('tom@mojombo.com', c.author.email)
assert_equal(1191997100, c.authored_date)
assert_equal('Tom Preston-Werner', c.committer.name)
assert_equal('tom@mojombo.com', c.committer.email)
assert_equal(1191997100, c.committed_date)
assert_equal('initial grit setup', c.message)
# test the 'lines per commit' entries
tlist = b[0][1]
assert_true(tlist)
assert_true(isinstance(tlist[0], string_types))
assert_true(len(tlist) < sum(len(t) for t in tlist)) # test for single-char bug
# BINARY BLAME
git.return_value = fixture('blame_binary')
blames = self.rorepo.blame('master', 'rps')
assert len(blames) == 2
def test_blame_real(self):
c = 0
nml = 0 # amount of multi-lines per blame
for item in self.rorepo.head.commit.tree.traverse(
predicate=lambda i, d: i.type == 'blob' and i.path.endswith('.py')):
c += 1
for b in self.rorepo.blame(self.rorepo.head, item.path):
nml += int(len(b[1]) > 1)
# END for each item to traverse
assert c, "Should have executed at least one blame command"
assert nml, "There should at least be one blame commit that contains multiple lines"
@patch.object(Git, '_call_process')
def test_blame_complex_revision(self, git):
git.return_value = fixture('blame_complex_revision')
res = self.rorepo.blame("HEAD~10..HEAD", "README.md")
assert len(res) == 1
assert len(res[0][1]) == 83, "Unexpected amount of parsed blame lines"
def test_untracked_files(self):
base = self.rorepo.working_tree_dir
files = (join_path_native(base, "__test_myfile"),
join_path_native(base, "__test_other_file"))
num_recently_untracked = 0
try:
for fpath in files:
fd = open(fpath, "wb")
fd.close()
# END for each filename
untracked_files = self.rorepo.untracked_files
num_recently_untracked = len(untracked_files)
# assure we have all names - they are relative to the git-dir
num_test_untracked = 0
for utfile in untracked_files:
num_test_untracked += join_path_native(base, utfile) in files
assert len(files) == num_test_untracked
finally:
for fpath in files:
if os.path.isfile(fpath):
os.remove(fpath)
# END handle files
assert len(self.rorepo.untracked_files) == (num_recently_untracked - len(files))
def test_config_reader(self):
reader = self.rorepo.config_reader() # all config files
assert reader.read_only
reader = self.rorepo.config_reader("repository") # single config file
assert reader.read_only
def test_config_writer(self):
for config_level in self.rorepo.config_level:
try:
writer = self.rorepo.config_writer(config_level)
assert not writer.read_only
writer.release()
except IOError:
                # it's okay not to get a writer for some configuration files if we
                # have no permissions
pass
# END for each config level
def test_config_level_paths(self):
for config_level in self.rorepo.config_level:
assert self.rorepo._get_config_path(config_level)
# end for each config level
def test_creation_deletion(self):
# just a very quick test to assure it generally works. There are
# specialized cases in the test_refs module
head = self.rorepo.create_head("new_head", "HEAD~1")
self.rorepo.delete_head(head)
tag = self.rorepo.create_tag("new_tag", "HEAD~2")
self.rorepo.delete_tag(tag)
writer = self.rorepo.config_writer()
writer.release()
remote = self.rorepo.create_remote("new_remote", "git@server:repo.git")
self.rorepo.delete_remote(remote)
def test_comparison_and_hash(self):
# this is only a preliminary test, more testing done in test_index
assert self.rorepo == self.rorepo and not (self.rorepo != self.rorepo)
assert len(set((self.rorepo, self.rorepo))) == 1
@with_rw_directory
def test_tilde_and_env_vars_in_repo_path(self, rw_dir):
ph = os.environ['HOME']
try:
os.environ['HOME'] = rw_dir
Repo.init(os.path.join('~', 'test.git'), bare=True)
os.environ['FOO'] = rw_dir
Repo.init(os.path.join('$FOO', 'test.git'), bare=True)
finally:
os.environ['HOME'] = ph
del os.environ['FOO']
# end assure HOME gets reset to what it was
def test_git_cmd(self):
# test CatFileContentStream, just to be very sure we have no fencepost errors
# last \n is the terminating newline that it expects
l1 = b"0123456789\n"
l2 = b"abcdefghijklmnopqrstxy\n"
l3 = b"z\n"
d = l1 + l2 + l3 + b"\n"
l1p = l1[:5]
# full size
# size is without terminating newline
def mkfull():
return Git.CatFileContentStream(len(d) - 1, BytesIO(d))
ts = 5
def mktiny():
return Git.CatFileContentStream(ts, BytesIO(d))
# readlines no limit
s = mkfull()
lines = s.readlines()
assert len(lines) == 3 and lines[-1].endswith(b'\n')
assert s._stream.tell() == len(d) # must have scrubbed to the end
        # readlines with line limit
s = mkfull()
lines = s.readlines(5)
assert len(lines) == 1
# readlines on tiny sections
s = mktiny()
lines = s.readlines()
assert len(lines) == 1 and lines[0] == l1p
assert s._stream.tell() == ts + 1
# readline no limit
s = mkfull()
assert s.readline() == l1
assert s.readline() == l2
assert s.readline() == l3
assert s.readline() == ''
assert s._stream.tell() == len(d)
# readline limit
s = mkfull()
assert s.readline(5) == l1p
assert s.readline() == l1[5:]
# readline on tiny section
s = mktiny()
assert s.readline() == l1p
assert s.readline() == ''
assert s._stream.tell() == ts + 1
# read no limit
s = mkfull()
assert s.read() == d[:-1]
assert s.read() == ''
assert s._stream.tell() == len(d)
# read limit
s = mkfull()
assert s.read(5) == l1p
assert s.read(6) == l1[5:]
        assert s._stream.tell() == 5 + 6  # it's not yet done
# read tiny
s = mktiny()
assert s.read(2) == l1[:2]
assert s._stream.tell() == 2
assert s.read() == l1[2:ts]
assert s._stream.tell() == ts + 1
def _assert_rev_parse_types(self, name, rev_obj):
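        # Helper: given a rev string and the object it resolves to, check that the '^{tree}'
        # and ':CHANGES' rev-parse suffixes yield that object's tree and its CHANGES blob.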
rev_parse = self.rorepo.rev_parse
if rev_obj.type == 'tag':
rev_obj = rev_obj.object
# tree and blob type
obj = rev_parse(name + '^{tree}')
assert obj == rev_obj.tree
obj = rev_parse(name + ':CHANGES')
assert obj.type == 'blob' and obj.path == 'CHANGES'
assert rev_obj.tree['CHANGES'] == obj
def _assert_rev_parse(self, name):
"""tries multiple different rev-parse syntaxes with the given name
:return: parsed object"""
rev_parse = self.rorepo.rev_parse
orig_obj = rev_parse(name)
if orig_obj.type == 'tag':
obj = orig_obj.object
else:
obj = orig_obj
# END deref tags by default
# try history
rev = name + "~"
obj2 = rev_parse(rev)
assert obj2 == obj.parents[0]
self._assert_rev_parse_types(rev, obj2)
# history with number
ni = 11
history = [obj.parents[0]]
for pn in range(ni):
history.append(history[-1].parents[0])
# END get given amount of commits
for pn in range(11):
rev = name + "~%i" % (pn + 1)
obj2 = rev_parse(rev)
assert obj2 == history[pn]
self._assert_rev_parse_types(rev, obj2)
# END history check
# parent ( default )
rev = name + "^"
obj2 = rev_parse(rev)
assert obj2 == obj.parents[0]
self._assert_rev_parse_types(rev, obj2)
# parent with number
for pn, parent in enumerate(obj.parents):
rev = name + "^%i" % (pn + 1)
assert rev_parse(rev) == parent
self._assert_rev_parse_types(rev, parent)
# END for each parent
return orig_obj
@with_rw_repo('HEAD', bare=False)
def test_rw_rev_parse(self, rwrepo):
# verify it does not confuse branches with hexsha ids
ahead = rwrepo.create_head('aaaaaaaa')
assert(rwrepo.rev_parse(str(ahead)) == ahead.commit)
def test_rev_parse(self):
rev_parse = self.rorepo.rev_parse
        # try special case: this one failed at some point, make sure it's fixed
assert rev_parse("33ebe").hexsha == "33ebe7acec14b25c5f84f35a664803fcab2f7781"
# start from reference
num_resolved = 0
for ref_no, ref in enumerate(Reference.iter_items(self.rorepo)):
path_tokens = ref.path.split("/")
for pt in range(len(path_tokens)):
path_section = '/'.join(path_tokens[-(pt + 1):])
try:
obj = self._assert_rev_parse(path_section)
assert obj.type == ref.object.type
num_resolved += 1
except (BadName, BadObject):
print("failed on %s" % path_section)
                    # this is fine, e.g. for something like 112, which belongs to remotes/rname/merge-requests/112
pass
# END exception handling
# END for each token
if ref_no == 3 - 1:
break
# END for each reference
assert num_resolved
# it works with tags !
tag = self._assert_rev_parse('0.1.4')
assert tag.type == 'tag'
# try full sha directly ( including type conversion )
assert tag.object == rev_parse(tag.object.hexsha)
self._assert_rev_parse_types(tag.object.hexsha, tag.object)
# multiple tree types result in the same tree: HEAD^{tree}^{tree}:CHANGES
rev = '0.1.4^{tree}^{tree}'
assert rev_parse(rev) == tag.object.tree
assert rev_parse(rev + ':CHANGES') == tag.object.tree['CHANGES']
# try to get parents from first revision - it should fail as no such revision
# exists
first_rev = "33ebe7acec14b25c5f84f35a664803fcab2f7781"
commit = rev_parse(first_rev)
assert len(commit.parents) == 0
assert commit.hexsha == first_rev
self.failUnlessRaises(BadName, rev_parse, first_rev + "~")
self.failUnlessRaises(BadName, rev_parse, first_rev + "^")
# short SHA1
commit2 = rev_parse(first_rev[:20])
assert commit2 == commit
commit2 = rev_parse(first_rev[:5])
assert commit2 == commit
# todo: dereference tag into a blob 0.1.7^{blob} - quite a special one
# needs a tag which points to a blob
# ref^0 returns commit being pointed to, same with ref~0, and ^{}
tag = rev_parse('0.1.4')
for token in (('~0', '^0', '^{}')):
assert tag.object == rev_parse('0.1.4%s' % token)
# END handle multiple tokens
# try partial parsing
max_items = 40
for i, binsha in enumerate(self.rorepo.odb.sha_iter()):
assert rev_parse(bin_to_hex(binsha)[:8 - (i % 2)].decode('ascii')).binsha == binsha
if i > max_items:
# this is rather slow currently, as rev_parse returns an object
# which requires accessing packs, it has some additional overhead
break
# END for each binsha in repo
# missing closing brace commit^{tree
self.failUnlessRaises(ValueError, rev_parse, '0.1.4^{tree')
# missing starting brace
self.failUnlessRaises(ValueError, rev_parse, '0.1.4^tree}')
# REVLOG
#######
head = self.rorepo.head
# need to specify a ref when using the @ syntax
self.failUnlessRaises(BadObject, rev_parse, "%s@{0}" % head.commit.hexsha)
# uses HEAD.ref by default
assert rev_parse('@{0}') == head.commit
if not head.is_detached:
refspec = '%s@{0}' % head.ref.name
assert rev_parse(refspec) == head.ref.commit
# all additional specs work as well
assert rev_parse(refspec + "^{tree}") == head.commit.tree
assert rev_parse(refspec + ":CHANGES").type == 'blob'
# END operate on non-detached head
# position doesn't exist
self.failUnlessRaises(IndexError, rev_parse, '@{10000}')
# currently, nothing more is supported
self.failUnlessRaises(NotImplementedError, rev_parse, "@{1 week ago}")
# the last position
assert rev_parse('@{1}') != head.commit
def test_repo_odbtype(self):
target_type = GitDB
if sys.version_info[:2] < (2, 5):
target_type = GitCmdObjectDB
assert isinstance(self.rorepo.odb, target_type)
def test_submodules(self):
assert len(self.rorepo.submodules) == 1 # non-recursive
assert len(list(self.rorepo.iter_submodules())) >= 2
assert isinstance(self.rorepo.submodule("gitdb"), Submodule)
self.failUnlessRaises(ValueError, self.rorepo.submodule, "doesn't exist")
@with_rw_repo('HEAD', bare=False)
def test_submodule_update(self, rwrepo):
# fails in bare mode
rwrepo._bare = True
self.failUnlessRaises(InvalidGitRepositoryError, rwrepo.submodule_update)
rwrepo._bare = False
# test create submodule
sm = rwrepo.submodules[0]
sm = rwrepo.create_submodule("my_new_sub", "some_path", join_path_native(self.rorepo.working_tree_dir, sm.path))
assert isinstance(sm, Submodule)
# note: the rest of this functionality is tested in test_submodule
@with_rw_repo('HEAD')
def test_git_file(self, rwrepo):
# Move the .git directory to another location and create the .git file.
real_path_abs = os.path.abspath(join_path_native(rwrepo.working_tree_dir, '.real'))
os.rename(rwrepo.git_dir, real_path_abs)
git_file_path = join_path_native(rwrepo.working_tree_dir, '.git')
open(git_file_path, 'wb').write(fixture('git_file'))
# Create a repo and make sure it's pointing to the relocated .git directory.
git_file_repo = Repo(rwrepo.working_tree_dir)
assert os.path.abspath(git_file_repo.git_dir) == real_path_abs
# Test using an absolute gitdir path in the .git file.
open(git_file_path, 'wb').write(('gitdir: %s\n' % real_path_abs).encode('ascii'))
git_file_repo = Repo(rwrepo.working_tree_dir)
assert os.path.abspath(git_file_repo.git_dir) == real_path_abs
def test_file_handle_leaks(self):
def last_commit(repo, rev, path):
commit = next(repo.iter_commits(rev, path, max_count=1))
commit.tree[path]
# This is based on this comment
# https://github.com/gitpython-developers/GitPython/issues/60#issuecomment-23558741
# And we expect to set max handles to a low value, like 64
# You should set ulimit -n X, see .travis.yml
        # The loops below would easily create 500 handles if these leaked (4 pipes + multiple mapped files)
for i in range(64):
for repo_type in (GitCmdObjectDB, GitDB):
repo = Repo(self.rorepo.working_tree_dir, odbt=repo_type)
last_commit(repo, 'master', 'git/test/test_base.py')
# end for each repository type
# end for each iteration
def test_remote_method(self):
self.failUnlessRaises(ValueError, self.rorepo.remote, 'foo-blue')
assert isinstance(self.rorepo.remote(name='origin'), Remote)
@with_rw_directory
def test_empty_repo(self, rw_dir):
"""Assure we can handle empty repositories"""
r = Repo.init(rw_dir, mkdir=False)
# It's ok not to be able to iterate a commit, as there is none
self.failUnlessRaises(ValueError, r.iter_commits)
assert r.active_branch.name == 'master'
assert not r.active_branch.is_valid(), "Branch is yet to be born"
# actually, when trying to create a new branch without a commit, git itself fails
# We should, however, not fail ungracefully
self.failUnlessRaises(BadName, r.create_head, 'foo')
self.failUnlessRaises(BadName, r.create_head, 'master')
# It's expected to not be able to access a tree
self.failUnlessRaises(ValueError, r.tree)
new_file_path = os.path.join(rw_dir, "new_file.ext")
touch(new_file_path)
r.index.add([new_file_path])
r.index.commit("initial commit")
# Now a branch should be creatable
nb = r.create_head('foo')
assert nb.is_valid()
def test_merge_base(self):
repo = self.rorepo
c1 = 'f6aa8d1'
c2 = repo.commit('d46e3fe')
c3 = '763ef75'
self.failUnlessRaises(ValueError, repo.merge_base)
self.failUnlessRaises(ValueError, repo.merge_base, 'foo')
# two commit merge-base
res = repo.merge_base(c1, c2)
assert isinstance(res, list) and len(res) == 1 and isinstance(res[0], Commit)
assert res[0].hexsha.startswith('3936084')
for kw in ('a', 'all'):
res = repo.merge_base(c1, c2, c3, **{kw: True})
assert isinstance(res, list) and len(res) == 1
        # end for each keyword signalling that all merge-bases should be returned
# Test for no merge base - can't do as we have
self.failUnlessRaises(GitCommandError, repo.merge_base, c1, 'ffffff')
| avinassh/GitPython | git/test/test_repo.py | Python | bsd-3-clause | 27,790 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a scraper for the Austrian Lobbying-Register. It fetches the HTML, saves it locally
and converts the relevant data into a json file.
"""
import re
from datetime import datetime, date
import time
import json
import os
import urllib.request
from bs4 import BeautifulSoup
#import dataset
__author__ = "Stefan Kasberger"
__copyright__ = "Copyright 2015"
__license__ = "MIT"
__version__ = "0.3"
__maintainer__ = "Stefan Kasberger"
__email__ = "mail@stefankasberger.at"
__status__ = "Production" # 'Development', 'Production' or 'Prototype'
### GLOBAL ###
ROOT_FOLDER = os.path.dirname(os.getcwd())
FOLDER_RAW_HTML = ROOT_FOLDER + '/data/raw/html/'
FOLDER_RAW_PDF = ROOT_FOLDER + '/data/raw/pdf/'
FOLDER_JSON = ROOT_FOLDER + '/data/json/'
FOLDER_CSV = ROOT_FOLDER + '/data/csv/'
FILENAME_BASE = 'lobbyingregister'
BASE_URL = 'http://www.lobbyreg.justiz.gv.at/edikte/ir/iredi18.nsf'
BASE_URL_ATTACHMENTS = 'http://www.lobbyreg.justiz.gv.at'
QUERY_URL = BASE_URL+'/liste!OpenForm&subf=a'
DELAY_TIME = 2 # in seconds
# TS = datetime.now().strftime('%Y-%m-%d-%H-%M')
TS = '2017-07-13-21-47'
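# TS is pinned to a fixed timestamp, presumably so that parsing/exporting can be re-run against
# an already downloaded snapshot; uncomment the datetime.now() line above to scrape a fresh run.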
### FUNCTIONS ###
def SetupEnvironment():
"""Sets up the folder structure and working environment.
"""
if not os.path.exists(FOLDER_RAW_HTML):
os.makedirs(FOLDER_RAW_HTML)
if not os.path.exists(FOLDER_RAW_PDF):
os.makedirs(FOLDER_RAW_PDF)
if not os.path.exists(FOLDER_JSON):
os.makedirs(FOLDER_JSON)
if not os.path.exists(FOLDER_CSV):
os.makedirs(FOLDER_CSV)
def FetchHtml(url):
"""Fetches html from the url
Args:
url: url to fetch (string).
Returns:
html: html string
"""
time.sleep(DELAY_TIME)
response = urllib.request.urlopen(url)
html = response.read()
return html
def FetchHtmlOverview(url, folder):
"""Fetches html from the overview list of the lobbyingregister entries and saves it locally.
Args:
url: url to fetch (string).
folder: directory to save the html files in.
"""
rawHtml = FetchHtml(url)
if not os.path.exists(folder):
os.makedirs(folder)
Save2File(rawHtml.decode(), folder+'/overview-page.html')
def FetchHtmlEntries(entries, folder):
"""Fetches html from an entry in the table and saves it locally with the unique id as postfix.
Args:
entries: list with sequencial dict() of organisations.
folder: to save the html in.
"""
for entry in entries:
html = FetchHtml(entry['url'])
Save2File(html.decode(), folder+'/entry-'+str(entry['ID'])+'.html')
def Save2File(data, filename):
"""Saves data on specified place on harddrive.
Args:
data: string to save.
filename: string of the filepath.
"""
try:
text_file = open(filename, "w")
text_file.write(data)
text_file.close()
except:
print('Error writing', filename)
return False
def ReadFile(filename):
"""Reads file and returns the text.
Args:
filename: name of the file
Returns:
string: content of file as string
"""
    with open(filename, 'r') as f:
        string = f.read()
    return string
def ReadEntryFilesInFolder(folder):
"""Reads-in all entry html-files from specified folder.
Args:
folder: folder where the html-files are stored in.
Returns:
sortedList: list[] of html texts sorted by file-postfix, which is the unique id of the entry from the table.
"""
htmlList = []
sortedList = []
for filename in os.listdir(folder):
if filename.find('entry-') >= 0:
rawHtml = ReadFile(folder+'/'+filename)
fileIndex = filename.split('.')[0].split('-')[1]
htmlList.append((int(fileIndex), rawHtml))
    # sort the list of tuples by their first element (the filename postfix) and store the html strings in a list[]
htmlList = sorted(htmlList, key=lambda htmlList: htmlList[0])
for idx, html in htmlList:
sortedList.append(html)
return sortedList
def ParseTable(html):
"""Parses the needed facts out of the overview table.
Args:
html: html (string) to parse.
Returns:
list[] of dict() of entries.
'ID': serial number created from scraper when parsing through the table rows.
'entryDescription': description of the entry.
'lobbyingOrgaType': type of lobbying organisation (A1, A2, B, C, D).
'url': url of the detail page.
'lastUpdate': last update of detail page.
'registryNumber': number of company in lobbying-register.
"""
lobbyList = []
counter = 1
soup = BeautifulSoup(html, 'html.parser')
# loop over table rows
for tr in soup.tbody.find_all('tr'):
tds = tr.find_all('td')
# assign variables from html table to dict
entry = {}
entry['ID'] = str(counter)
entry['entryDescription'] = tds[1].string
entry['lobbyingOrgaType'] = tds[3].string
entry['url'] = BASE_URL+'/'+tds[2].a['href']
entry['lastUpdate'] = str(datetime.strptime(tds[5].string, '%d.%m.%Y'))
entry['registryNumber'] = tds[2].string
lobbyList.append(entry)
counter += 1
return lobbyList
def ParseEntries(htmlList, entries):
"""Parses the needed facts out of the organisation html.
Args:
htmlList: list() of html strings.
entries: list() of dict() of entries.
Returns:
list[] of dict() of entries.
'ID': serial number created from scraper when parsing through the table rows.
'entryDescription': description of the entry.
'lobbyingOrgaType': type of lobbying organisation (A1, A2, B, C, D).
'url': url of the detail page.
'lastUpdate': last update of detail page.
'registryNumber': number of company in lobbying-register.
'orgaName': name of organisation.
'companyRegisterNumber': number of the national company register.
'businessActivities': area in which the organisation is active in business.
'postalAddress': postal address.
'registeredOffice': Place, where the company is officially registered.
'businessYearStart': Day, when the business year starts.
        'legalFoundation': legal basis (Gesetzliche Grundlage) of the entry.
'codeOfConduct': Code of conduct.
'website': url of the website
'lobbyists': list of lobbyists.
'lobbyingRevenue': Revenue from lobbying.
'lobbyingRequests': Number of lobbying requests.
'numLobbyists': Number of lobbyists.
        'lobbyingCostsGreater100000': whether lobbying costs exceeded EUR 100,000 ('Ja'/'Nein').
'lobbyingCosts': Costs of lobbying.
'suborganisations': List of suborganisations.
'attachmentUrls': url to an attachment.
'comment': comment to precise other fields.
"""
entriesList = []
for entry in entries:
soup = BeautifulSoup(htmlList[int(entry['ID'])-1], "html.parser")
html = str(soup)
# regex testing type of registry department: B, C
regDepartment = re.findall(r'Registerabteilung:</strong></dt>\n<dd><strong>(.*)</strong></dd></dl>', html)
if regDepartment:
if entry['lobbyingOrgaType'] != regDepartment[0]:
print('ERROR: lobbying organisation type differs!')
# regex testing register number: B, C
regNum = re.findall(r'Registerzahl:</strong></dt>\n<dd><strong>(.*)</strong></dd></dl>', html)
if regNum:
if entry['registryNumber'] != regNum[0]:
print('ERROR: company register number differs!')
# regex name: A1, B, C
name = re.findall(r'Name.*:</strong></dt>\n<dd><strong>(.*)</strong></dd></dl>', html)
if name:
entry['orgaName'] = name[0]
# regex date announced: A1, C
announceDate = re.findall(r'Bekannt gemacht am:</dt>\n<dd>(.*)</dd>', html)
if announceDate:
entry['dateAnnounced'] = str(datetime.strptime(announceDate[0], '%d.%m.%Y'))
# regex testing last update: A1, B, C
lastUpdate = re.findall(r'Letzte .*nderung:</dt>\n<dd>(.*)</dd>', html)
if lastUpdate:
if entry['lastUpdate'] != str(datetime.strptime(lastUpdate[0], '%d.%m.%Y')):
print("ERROR: register last update differs!")
# regex corporate-number: A1, B
corporateNumber = re.findall(r'Firmenbuchnummer:</dt>\n<dd>(.*)</dd>', html)
if corporateNumber:
entry['companyRegisterNumber'] = corporateNumber[0]
# regex registered office address: A1, C, D
regOfficeAddress = re.findall(r'itz:</dt>\n<dd>(.*)</dd></dl>', html)
if regOfficeAddress:
entry['registeredOffice'] = regOfficeAddress[0]
# regex mail address: A1, C, D
postalAddress = re.findall(r'nschrift:</dt>\n<dd>(.*)</dd></dl>', html)
if postalAddress:
entry['postalAddress'] = postalAddress[0]
# regex start business year: A1, B
startBusinessYear = re.findall(r'ftsjahres:</dt>\n<dd>(.*)</dd></dl>', html)
if startBusinessYear:
entry['businessYearStart'] = startBusinessYear[0]
# regex legal foundation: C
legalFoundation = re.findall(r'Gesetzliche Grundlage:</dt>\n<dd>(.*)</dd></dl>', html)
if legalFoundation:
entry['legalFoundation'] = legalFoundation[0]
# regex area of activities: A1, B, D
areaActivities = re.findall(r'bereich:</dt>\n<dd>(.*)</dd></dl>', html)
if areaActivities:
entry['businessActivities'] = areaActivities[0]
# regex codex: A1, B
codex = re.findall(r'Verhaltenskodex:</dt>\n<dd>(.*)</dd></dl>', html)
if codex:
entry['codeOfConduct'] = codex[0]
# regex website: A1, B, C, D
website = re.findall(r'Homepage:</dt>\n<dd><a href="(.*)" target="_blank">.*</a></dd></dl>', html)
if website:
entry['website'] = website[0]
# regex lobbyists: A1
lobbyists = re.findall(r'obbyist.*:</dt>\n<dd>(.*)</dd></dl>', html)
if lobbyists:
entry['lobbyists'] = lobbyists[0].split('<br/>')
# regex lobbying revenue: A1, B
lobbyingRevenue = re.findall(r'Lobbying-Umsatz:</dt>\n<dd>(.*)</dd></dl>', html)
if lobbyingRevenue:
entry['lobbyingRevenue'] = lobbyingRevenue[0]
# regex lobbying request: A1
lobbyingRequest = re.findall(r'<dt title="Anzahl der bearbeiteten Lobbying-Auftr.*ge:</dt>\n<dd>(.*)</dd></dl>', html)
if lobbyingRequest:
entry['lobbyingRequests'] = lobbyingRequest[0]
# regex number of lobbyists: C, D
numLobbyists = re.findall(r'Anzahl Interessenvertreter:</dt>\n<dd>(.*)</dd>', html)
if numLobbyists:
entry['numLobbyists'] = numLobbyists[0]
# regex costs lobbying: B
costsB = soup.find_all(title="Aufwand für Lobbying-Tätigkeiten im abgelaufenen Wirtschaftsjahr übersteigt EUR 100.000,-")
if costsB:
nextElem = str(costsB[0].find_next_siblings('dd'))
answer = re.findall(r'<dd>(.*)</dd>', nextElem)[0]
if answer == 'Ja':
entry['lobbyingCostsGreater100000'] = 'Ja'
if answer == 'Nein':
entry['lobbyingCostsGreater100000'] = 'Nein'
# regex costs lobbying: C, D
costsC = re.findall(r'Kosten der Interessenvertretung:</dt>\n<dd>(.*)</dd>', html)
if costsC:
entry['lobbyingCosts'] = costsC[0]
        # regex attachments: C
attDiv = soup.find(id='UplDL')
if attDiv:
            entry['attachmentUrls'] = []
            for link in attDiv.find_all('a'):
                entry['attachmentUrls'].append(BASE_URL_ATTACHMENTS + link.get('href'))
# regex subentries: C, D
subOrganisations = re.findall(r'<dt>Unterorganisation.*</dt>\n<dd>(.*)</dd></dl>', html)
if subOrganisations:
entry['suborganisations'] = subOrganisations[0]
# regex comment: C, D
comment = re.findall(r'<dt>Kommentar:</dt>\n<dd>(.*)</dd></dl>', html)
if comment:
entry['comment'] = comment[0]
entriesList.append(entry)
return entriesList
def Save2CSV(entries, filename):
"""Exports the dict into a csv file.
Args:
entries: list of dict() with all the lobbying register entries.
filename: name of the file with folder.
"""
csvString = '"ID","entryDescription","orgaName","businessActivities","lobbyingOrgaType","lobbyists","lobbyingRevenue","lobbyingRequests","numLobbyists","lobbyingCostsGreater100000","lobbyingCosts","registryNumber","companyRegisterNumber","suborganisations","legalFoundation","codeOfConduct","registeredOffice","website","postalAddress","lastUpdate","dateAnnounced","businessYearStart","url","attachmentUrls","comment"\n'
# iterate over each entry
for entry in entries:
uniqueId = '""'
entryDescription = '""'
orgaName = '""'
busActivities = '""'
lobOrgaType = '""'
lobbyists = '""'
lobbyingRevenue = '""'
lobbyingRequest = '""'
numLobbyists = '""'
costsB = '""'
costsC = '""'
regNum = '""'
compRegNumber = '""'
subOrganisations = '""'
legalFoundation = '""'
codex = '""'
regOfficeAddress = '""'
lastUpdate = '""'
website = '""'
postalAddress = '""'
lastUpdate = '""'
announceDate = '""'
startBusinessYear = '""'
url = '""'
attachments = '""'
comment = '""'
# read out each attribute
for elem in list(entry.keys()):
val = entry[elem]
if elem == 'ID':
uniqueId = '"'+val+'"'
if elem == 'entryDescription':
entryDescription = '"'+val+'"'
if elem == 'orgaName':
orgaName = '"'+val+'"'
if elem == 'businessActivities':
busActivities = '"'+val.replace('"', '').replace('\n', '').replace('\r', '')+'"'
if elem == 'lobbyingOrgaType':
lobOrgaType = '"'+val+'"'
            if elem == 'lobbyists':
                lobbyists = '"' + ', '.join(val) + '"'
if elem == 'lobbyingRevenue':
lobbyingRevenue = '"'+val+'"'
if elem == 'lobbyingRequests':
lobbyingRequest = '"'+val+'"'
if elem == 'numLobbyists':
numLobbyists = '"'+val+'"'
if elem == 'lobbyingCostsGreater100000':
costsB = '"'+val+'"'
if elem == 'lobbyingCosts':
costsC = '"'+val+'"'
if elem == 'registryNumber':
regNum = '"'+val+'"'
if elem == 'companyRegisterNumber':
compRegNumber = '"'+val+'"'
if elem == 'suborganisations':
subOrganisations = '"'+val+'"'
if elem == 'legalFoundation':
legalFoundation = '"'+val+'"'
if elem == 'codeOfConduct':
codex = '"'+val.replace('"', '').replace('\n', '').replace('\r', '')+'"'
if elem == 'registeredOffice':
regOfficeAddress = '"'+val+'"'
if elem == 'website':
website = '"'+val+'"'
if elem == 'postalAddress':
postalAddress = '"'+val+'"'
if elem == 'lastUpdate':
lastUpdate = '"'+val+'"'
if elem == 'dateAnnounced':
announceDate = '"'+val+'"'
if elem == 'businessYearStart':
startBusinessYear = '"'+val+'"'
if elem == 'url':
url = '"'+val+'"'
            if elem == 'attachmentUrls':
                attachments = '"' + ', '.join(val) + '"'
if elem == 'comment':
comment = '"'+val+'"'
csvString += uniqueId+','+entryDescription+','+orgaName+','+busActivities+','+lobOrgaType+','+lobbyists+','+lobbyingRevenue+','+lobbyingRequest+','+numLobbyists+','+costsB+','+costsC+','+regNum+','+compRegNumber+','+subOrganisations+','+legalFoundation+','+codex+','+regOfficeAddress+','+website+','+postalAddress+','+lastUpdate+','+announceDate+','+startBusinessYear+','+url+','+attachments+','+comment+'\n'
Save2File(csvString, filename)
print('Lobbying data exported as CSV:',filename)
def FetchAttachments(entries, folder):
"""Fetches all attachments from the lobbying-register entries.
Args:
entries: list[] of dict() with all the lobbying-register data.
folder: directory, where the files are stored in.
"""
for entry in entries:
if 'attachmentUrls' in list(entry.keys()):
for url in entry['attachmentUrls']:
DownloadFile(url, folder+'/attachment-'+entry['ID']+'_'+url.split('/')[-1])
def DownloadFile(url, filename):
"""Downloads and stores an attachment.
Args:
url: url of the attachment (string).
filename: full filepath for the saving.
"""
if not os.path.exists(os.path.dirname(os.path.abspath(filename))):
os.makedirs(os.path.dirname(os.path.abspath(filename)))
response = urllib.request.urlopen(url)
    file = open(filename, 'wb')
file.write(response.read())
file.close()
time.sleep(DELAY_TIME)
#def Save2SQLite(lobbyEntries):
"""Saves the lobbing register entries in a SQLite database. This is not working, because of key-value issues of the dicts().
Args:
lobbyEntries: list[] of dicts() with lobbying register entries.
"""
#db = dataset.connect('sqlite:///:memory:')
#table = db['lobbyRegisterAT']
#for entry in lobbyEntries:
# print entry
# table.insert(entry)
# Wien = table.find_one(registeredOffice='Wien')
# print Wien
### MAIN ###
if __name__ == '__main__':
# setup
startTime = datetime.now()
print('start:', startTime)
SetupEnvironment()
DOWNLOAD_FILES = False
PARSE_FILES = True
DOWNLOAD_ATTACHMENTS = False
EXPORT_DATA = True
if DOWNLOAD_FILES:
FetchHtmlOverview(QUERY_URL, FOLDER_RAW_HTML+TS)
htmlOverview = ReadFile(FOLDER_RAW_HTML+TS+'/overview-page.html')
lobbyList = ParseTable(htmlOverview)
Save2File(json.dumps(lobbyList, indent=2, sort_keys=True), FOLDER_JSON+TS+'_'+FILENAME_BASE+'.json')
FetchHtmlEntries(lobbyList, FOLDER_RAW_HTML+TS)
if PARSE_FILES:
htmlOverview = ReadFile(FOLDER_RAW_HTML+TS+'/overview-page.html')
lobbyList = ParseTable(htmlOverview)
#lobbyList = lobbyList[:4]
htmlEntries = ReadEntryFilesInFolder(FOLDER_RAW_HTML+TS)
#htmlEntries = htmlEntries[:4]
lobbyEntries = ParseEntries(htmlEntries, lobbyList)
Save2File(json.dumps(lobbyEntries, indent=2, sort_keys=True), FOLDER_JSON+TS+'_'+FILENAME_BASE+'.json')
if DOWNLOAD_ATTACHMENTS:
        lobbyEntries = json.loads(ReadFile(FOLDER_JSON+TS+'_'+FILENAME_BASE+'.json'))
FetchAttachments(lobbyEntries, FOLDER_RAW_PDF+TS)
if EXPORT_DATA:
lobbyEntries = json.loads(ReadFile(FOLDER_JSON+TS+'_'+FILENAME_BASE+'.json'))
Save2CSV(lobbyEntries, FOLDER_CSV+TS+'_'+FILENAME_BASE+'.csv')
# Save2SQLite(lobbyEntries) # does not run!
print('runtime:', (datetime.now() - startTime))
| OKFNat/lobbyscraper | code/lobbyscraper.py | Python | mit | 19,855 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
########### DESCRIPTION ############
## This script (which is not used by the UI) computes the income tax owed under the different combinations
## of tax households that are possible when young adults may choose to be attached to their parents' tax household.
## It takes as input a scenario containing a single tax household, to which the children are attached.
## It handles neither separated parents nor alimony payments. Still, for separated parents it is enough
## to run the program twice, attaching the children first to parent1 and then to parent2;
## and for alimony it is enough to record a payment made and received within the household itself (but the script does not
## help compute the optimal amount - which is most of the time the maximum amount (5698€ if the child does not live with
## the parents)
#TODO: handle housing benefits (APL) and other allowances (for now the irpp is returned; I do not know whether returning
# disposable income would also handle benefit entitlements)
import copy
import logging
import numpy as np
import os
import openfisca_france
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def split(scenario):
    # We assume the scenario contains only one tax household (foyer fiscal)
tax_benefit_system = scenario.tax_benefit_system
test_case = scenario.test_case
foyer_fiscal = test_case['foyers_fiscaux'][0]
individus = test_case['individus']
year = scenario.year
    rattachements_possibles = []  # actually holds the possible detachments, since at the start every member is attached to the same household
detachements_impossibles = []
scenarios = []
impots = []
for pac_index, pac_id in enumerate(foyer_fiscal.pop('personnes_a_charge')):
pac = individus[pac_id].copy()
age = year - pac.pop('birth').year - 1
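        # Attachment rule for adult children: possible until age 21, or until 25 when a student
        # (activite == 2); the boolean comparison is cast to 0/1 by the multiplication below.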
        if 18 <= age < (21 + 4 * (pac['activite'] == 2)):  # attachment condition for adult children
rattachements_possibles.append(pac_id)
else:
detachements_impossibles.append(pac_id)
foyers_possibles = partiesDe(list(rattachements_possibles))
n = len(foyers_possibles)
j = 1
min_ = [-1, 0]
for i in range(0, n):
scenarios.append(scenario.__class__())
scenarios[i].__dict__ = copy.copy(scenario.__dict__)
scenarios[i].test_case = copy.deepcopy(scenario.test_case)
scenarios[i].test_case['foyers_fiscaux'][0]['personnes_a_charge'] = foyers_possibles[i]+detachements_impossibles
for jeune in rattachements_possibles:
if jeune not in foyers_possibles[i]:
scenarios[i].test_case['foyers_fiscaux'][j] = { 'declarants': [jeune], 'personnes_a_charge': [] }
j += 1
scenarios[i].suggest()
simulation = scenarios[i].new_simulation()
irpp = - round(np.sum(simulation.calculate('irpp')))
if irpp < min_[1] or min_[0] == -1:
min_ = [i, irpp]
impots.append(irpp)
print "Le plus avantageux pour votre famille est que les jeunes rattachés à votre foyer fiscal soient : {}. Vous paierez alors {}€ d'impôts. (Seuls les jeunes éligibles au rattachement sont indiqués (18 <= age < 21 si pas étudiant / 25 sinon. Le calculateur a émis l'hypothèse qu'il n'y avait qu'un seul foyer fiscal au départ, auquel tous les jeunes éligibles étaient rattachés.)".format(foyers_possibles[min_[0]],min_[1])
return impots
def partiesDe(tab):  # computes the set of all subsets (power set) of the elements of a list, as a list of lists
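    # Example: partiesDe(['a', 'b']) -> [['a', 'b'], ['b'], ['a'], []]
    # Note: the input list is emptied in place by the pop() calls, hence the list() copy at the call site.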
n = len(tab)
if n == 0:
return [[]]
else:
a = tab.pop()
tab2 = partiesDe(tab)
return add(a, tab2)
def add(a, tab):  # concatenates a list of lists (tab) with the same list where the element (a) has been appended to each sub-list
n = len(tab)
for i in range (0, n):
b = list(tab[i])
tab.append(b)
tab[i].append(a)
return tab
def define_scenario(year):
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(
parent1 = dict(
activite = u'Actif occupé',
birth = 1973,
# cadre = True,
salaire_imposable = 90000,
statmarit = u'Célibataire',
),
enfants = [
dict(
activite = u'Étudiant, élève',
birth = '1992-02-01',
),
dict(
activite = u'Étudiant, élève',
birth = '2000-04-17',
),
],
        foyer_fiscal = dict(  # TODO: issue with f2ck
# f7cn = 1500,
f7rd = 100000
),
period = year,
)
scenario.suggest()
return scenario
def main():
split(define_scenario(2014))
return 0
if __name__ == "__main__":
# sys.exit(main())
main()
| SophieIPP/openfisca-france | openfisca_france/scripts/rattachement.py | Python | agpl-3.0 | 5,128 |
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from .models import (Building, Child, Device, Port, Item, Country, Connection,
ClientStatus, State, Client, SpecialClient, TUser, Person, Student,
Organizer, Class, Enrollment, Hen, Chick, A, B, C)
class SelectRelatedRegressTests(TestCase):
def test_regression_7110(self):
"""
Regression test for bug #7110.
When using select_related(), we must query the
Device and Building tables using two different aliases (each) in order to
differentiate the start and end Connection fields. The net result is that
both the "connections = ..." queries here should give the same results
without pulling in more than the absolute minimum number of tables
(history has shown that it's easy to make a mistake in the implementation
and include some unnecessary bonus joins).
"""
b = Building.objects.create(name='101')
dev1 = Device.objects.create(name="router", building=b)
dev2 = Device.objects.create(name="switch", building=b)
dev3 = Device.objects.create(name="server", building=b)
port1 = Port.objects.create(port_number='4', device=dev1)
port2 = Port.objects.create(port_number='7', device=dev2)
port3 = Port.objects.create(port_number='1', device=dev3)
c1 = Connection.objects.create(start=port1, end=port2)
c2 = Connection.objects.create(start=port2, end=port3)
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
connections = Connection.objects.filter(start__device__building=b, end__device__building=b).select_related().order_by('id')
self.assertEqual([(c.id, six.text_type(c.start), six.text_type(c.end)) for c in connections],
[(c1.id, 'router/4', 'switch/7'), (c2.id, 'switch/7', 'server/1')])
# This final query should only have seven tables (port, device and building
# twice each, plus connection once). Thus, 6 joins plus the FROM table.
self.assertEqual(str(connections.query).count(" JOIN "), 6)
def test_regression_8106(self):
"""
Regression test for bug #8106.
Same sort of problem as the previous test, but this time there are
more extra tables to pull in as part of the select_related() and some
of them could potentially clash (so need to be kept separate).
"""
us = TUser.objects.create(name="std")
usp = Person.objects.create(user=us)
uo = TUser.objects.create(name="org")
uop = Person.objects.create(user=uo)
s = Student.objects.create(person=usp)
o = Organizer.objects.create(person=uop)
c = Class.objects.create(org=o)
Enrollment.objects.create(std=s, cls=c)
e_related = Enrollment.objects.all().select_related()[0]
self.assertEqual(e_related.std.person.user.name, "std")
self.assertEqual(e_related.cls.org.person.user.name, "org")
def test_regression_8036(self):
"""
Regression test for bug #8036
the first related model in the tests below
("state") is empty and we try to select the more remotely related
state__country. The regression here was not skipping the empty column results
for country before getting status.
"""
Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
client = Client.objects.create(name='client', status=active)
self.assertEqual(client.status, active)
self.assertEqual(Client.objects.select_related()[0].status, active)
self.assertEqual(Client.objects.select_related('state')[0].status, active)
self.assertEqual(Client.objects.select_related('state', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country')[0].status, active)
self.assertEqual(Client.objects.select_related('state__country', 'status')[0].status, active)
self.assertEqual(Client.objects.select_related('status')[0].status, active)
def test_multi_table_inheritance(self):
""" Exercising select_related() with multi-table model inheritance. """
c1 = Child.objects.create(name="child1", value=42)
Item.objects.create(name="item1", child=c1)
Item.objects.create(name="item2")
self.assertQuerysetEqual(
Item.objects.select_related("child").order_by("name"),
["<Item: item1>", "<Item: item2>"]
)
def test_regression_12851(self):
"""
Regression for #12851
Deferred fields are used correctly if you select_related a subset
of fields.
"""
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
Client.objects.create(name='Brian Burke', state=wa, status=active)
burke = Client.objects.select_related('state').defer('state__name').get(name='Brian Burke')
self.assertEqual(burke.name, 'Brian Burke')
self.assertEqual(burke.state.name, 'Western Australia')
# Still works if we're dealing with an inherited class
SpecialClient.objects.create(name='Troy Buswell', state=wa, status=active, value=42)
troy = SpecialClient.objects.select_related('state').defer('state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Still works if we defer an attribute on the inherited class
troy = SpecialClient.objects.select_related('state').defer('value', 'state__name').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
# Also works if you use only, rather than defer
troy = SpecialClient.objects.select_related('state').only('name', 'state').get(name='Troy Buswell')
self.assertEqual(troy.name, 'Troy Buswell')
self.assertEqual(troy.value, 42)
self.assertEqual(troy.state.name, 'Western Australia')
def test_null_join_promotion(self):
australia = Country.objects.create(name='Australia')
active = ClientStatus.objects.create(name='active')
wa = State.objects.create(name="Western Australia", country=australia)
bob = Client.objects.create(name='Bob', status=active)
jack = Client.objects.create(name='Jack', status=active, state=wa)
qs = Client.objects.filter(state=wa).select_related('state')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [jack])
self.assertEqual(qs[0].state, wa)
# The select_related join wasn't promoted as there was already an
# existing (even if trimmed) inner join to state.
self.assertNotIn('LEFT OUTER', str(qs.query))
qs = Client.objects.select_related('state').order_by('name')
with self.assertNumQueries(1):
self.assertEqual(list(qs), [bob, jack])
self.assertIs(qs[0].state, None)
self.assertEqual(qs[1].state, wa)
# The select_related join was promoted as there is already an
# existing join.
self.assertIn('LEFT OUTER', str(qs.query))
def test_regression_19870(self):
hen = Hen.objects.create(name='Hen')
Chick.objects.create(name='Chick', mother=hen)
self.assertEqual(Chick.objects.all()[0].mother.name, 'Hen')
self.assertEqual(Chick.objects.select_related()[0].mother.name, 'Hen')
def test_regression_10733(self):
a = A.objects.create(name='a', lots_of_text='lots_of_text_a', a_field='a_field')
b = B.objects.create(name='b', lots_of_text='lots_of_text_b', b_field='b_field')
c = C.objects.create(name='c', lots_of_text='lots_of_text_c', is_published=True,
c_a=a, c_b=b)
results = C.objects.all().only('name', 'lots_of_text', 'c_a', 'c_b', 'c_b__lots_of_text',
'c_a__name', 'c_b__name').select_related()
self.assertQuerysetEqual(results, [c], lambda x: x)
with self.assertNumQueries(0):
qs_c = results[0]
self.assertEqual(qs_c.name, 'c')
self.assertEqual(qs_c.lots_of_text, 'lots_of_text_c')
self.assertEqual(qs_c.c_b.lots_of_text, 'lots_of_text_b')
self.assertEqual(qs_c.c_a.name, 'a')
self.assertEqual(qs_c.c_b.name, 'b')
def test_regression_22508(self):
building = Building.objects.create(name='101')
device = Device.objects.create(name="router", building=building)
Port.objects.create(port_number='1', device=device)
device = Device.objects.get()
port = device.port_set.select_related('device__building').get()
with self.assertNumQueries(0):
port.device.building
| webostin/django-btc | tests/select_related_regress/tests.py | Python | bsd-3-clause | 9,397 |
from __future__ import division
import twitter_auth  # import the Twitter auth module (exposes the authenticated API client as twitter_auth.api)
def get_details(twitter_handle):
# Get the User object for twitter...
user = twitter_auth.api.get_user(twitter_handle)
#Calculating the followers vs following ratio
ff_ratio = round(int(user.followers_count) / int(user.friends_count),2)
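    # Note: this raises ZeroDivisionError when the account follows nobody (friends_count == 0).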
#Returning Twitter details as a Tuple
    return (user.name, user.friends_count, user.followers_count, ff_ratio, user.created_at)
| ajotwani/alfred-show-me-the-klout | development/get_twitter_details.py | Python | mit | 452 |
# -*- coding: utf-8 -*-
# Scrapy settings for MYZXcrawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'MYZXcrawler'
SPIDER_MODULES = ['MYZXcrawler.spiders']
NEWSPIDER_MODULE = 'MYZXcrawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 1
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 5
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'MYZXcrawler.middlewares.MyzxcrawlerSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'MYZXcrawler.middlewares.MyCustomDownloaderMiddleware': None,
'MYZXcrawler.middlewares.RandomUserAgentMiddlewate': 543
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'MYZXcrawler.pipelines.MyzxcrawlerPipeline': None,
'MYZXcrawler.pipelines.WangyinewMysqlPipline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# LOG_LEVEL = 'ERROR'
# LOG_FILE = 'wangyi.log'
# my spider's config
REDIS_DELAY_REQUEST_TIME = 60 * 60
MYSQL_CONFIG = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'password': '123456',
'charset': 'utf8mb4',
'db': 'mydata'
}
# scrapy_redis settings
# SCHEDULER = 'MYZXcrawler.MYZXcrawler.rewrite.MyScheduler'
DUPEFILTER_CLASS = 'MYZXcrawler.rewrite.MyDupefilter'
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
# DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'
SCHEDULER_PERSIST = True
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_PARAMS = {'db': 2}
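# SCHEDULER_PERSIST = True keeps the scrapy_redis request queue and dupefilter keys in Redis
# (here localhost:6379, db 2) between runs instead of flushing them when the spider closes.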
USER_AGENTS = ['Mozilla/4.0 (compatible; MSIE 5.0; Windows 3.1; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/4.0 (compatible; MSIE 6.0; Macintosh; U; Intel Mac OS X 10_6_8; en-us; SV1; LBBROWSER)',
'Mozilla/4.0 (compatible; MSIE 6.0; Mac_PowerPC Mac OS X; ja) Opera 8.01',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows Me; Trident/7.0; rv:11.0; JuziBrowser) like Gecko',
'Mozilla/4.0 (compatible; MSIE 7.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/4.0; Gecko/20100101 Firefox/4.0.1)',
'Mozilla/4.0 (compatible; MSIE 8.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/4.0; Gecko/20100101 Firefox/4.0.1; JuziBrowser)',
'Mozilla/4.0 (compatible; MSIE 8.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/4.0; Gecko/20100101 Firefox/4.0.1)',
'Mozilla/4.0 (compatible; MSIE 8.0; Macintosh; Intel Mac OS X 10_7_0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows 98; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; compatible; MSIE 7.0; Trident/6.0; JuziBrowser)',
'Mozilla/5.0 (compatible; MSIE 10.0; compatible; MSIE 7.0; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; compatible; MSIE 8.0; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_0; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_0; Trident/7.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/6.0; JuziBrowser)',
'Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/6.0)',
'Mozilla/5.0 (compatible; MSIE 7.0; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 8.0; Trident/7.0; LCJB; rv:11.0; JuziBrowser) like Gecko',
'Mozilla/5.0 (compatible; MSIE 8.0; Trident/7.0; rv:11.0; JuziBrowser) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 7.0; Trident/5.0; 360SE)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 7.0; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 7.0; Trident/7.0; LCJB)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 7.0; Trident/7.0; MALNJS)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 7.0; Trident/7.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/5.0; JuziBrowser)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/5.0; KB974487)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/5.0; LSIE 1.2.2.42)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/7.0; LCTE)',
'Mozilla/5.0 (compatible; MSIE 9.0; compatible; MSIE 8.0; Trident/7.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/7.0; LCJB)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/7.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10_7_0; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10_7_0; Trident/7.0; LCJB)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; Intel Mac OS X 10_7_0; Trident/7.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 9.0; Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/7.0)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1; rv:37.0) Gecko/20100101 Firefox/37.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:39.0) Gecko/20100101 Firefox/39.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.5; rv:10.0.2) Gecko/20100101 Firefox/10.0.2',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:16.0) Gecko/20100101 Firefox/16.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/7.0; rv:11.0; 2345Explorer 5.0.0.14136) like Gecko',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0; Trident/7.0; rv:11.0; JuziBrowser) like Gecko',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4; rv:20.0) Gecko/20130326150557 Firefox/20.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:29.0) Gecko/20100101 Firefox/29.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.4; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; zh-CN; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/7.0; rv:11.0; JuziBrowser) like Gecko',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13; ) Gecko/20101203',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.16) Gecko/20110319 Firefox/3.6.16',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ko; rv:1.9.2.14) Gecko/20110218 Firefox/12.6.14',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.9.2; rv:13.0) Gecko/20100101 Firefox/13.0.8',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X Mach-O; en-US; rv:1.8.1.7pre) Gecko/20070815 Firefox/2.0.0.6 Navigator/9.0b3',
'Mozilla/5.0 (Windows 10; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (Windows; U; Win 9x 4.90; SG; rv:1.9.2.4) Gecko/20101104 Netscape/9.1.0285',
'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.4) Gecko/20100101 Firefox/10.0.4',
'Mozilla/5.0 (X11; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0',
'Mozilla/5.0 (X11; Linux i686; rv:20.0) Gecko/20100101 Firefox/20.0',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b12pre) Gecko/20100101 Firefox/4.0b12pre',
'Mozilla/5.0 (X11; Linux i686; rv:21.0) Gecko/20100101 Firefox/21.0',
'Mozilla/5.0 (X11; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.12) Gecko/20130104 Firefox/10.0.12',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/47.0 (Chrome)',
'Mozilla/5.0 (X11; Linux x86_64; rv:18.0) Gecko/20100101 Firefox/18.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (X11; Mageia; Linux x86_64; rv:10.0.9) Gecko/20100101 Firefox/10.0.9',
'Mozilla/5.0 (X11; OpenBSD amd64; rv:28.0) Gecko/20100101 Firefox/28.0',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:15.0) Gecko/20100101 Firefox/15.0.1',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0/Nutch-1.4',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:41.0) Gecko/20100101 Firefox/41.0',
'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:44.0) Gecko/20100101 Firefox/44.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0.1',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20100101 Firefox/21.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:21.0) Gecko/20130331 Firefox/21.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:48.0) Gecko/20100101 Firefox/48.0',
'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.9.0.14) Gecko/2009082505 Red Hat/3.0.14-1.el5_4 Firefox/3.0.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.2.1) Gecko/20021204',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.3) Gecko/2008092416 Firefox/3.0.3',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.9) Gecko/2009040820 Firefox/3.0.9',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.11) Gecko/20101013 Ubuntu/10.04 (lucid) Firefox/3.6.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; fi-FI; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux i686)Gecko/20071127 Firefox/2.0.0.11',
'Mozilla/5.0 (X11; U; Linux i686; ja; rv:1.8.0.2) Gecko/20060308 Firefox/1.5.0.2',
'Mozilla/5.0 (X11; U; Linux i686; ru; rv:1.9b5) Gecko/2008032600 SUSE/2.9.95-25.1 Firefox/3.0b5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.0.10) Gecko/2009042523 Ubuntu/9.04 (jaunty) Firefox/3.0.10',
'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.2.13) Gecko/20101206 Red Hat/3.6-2.el5 Firefox/3.6.13',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.7) Gecko/2009031915 Gentoo Firefox/3.0.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101206 Ubuntu/10.04 (lucid) Firefox/3.6.13',
'Mozilla/5.0 (X11; U; Linux x86_64; es-ES; rv:1.9.2.12) Gecko/20101027 Fedora/3.6.12-1.fc13 Firefox/3.6.12',
'Mozilla/5.0 (X11; U; Linux x86_64; fr; rv:1.9.2.3) Gecko/20100403 Fedora/3.6.3-4.fc13 Firefox/3.6.3',
'Mozilla/5.0 (X11; U; Linux x86_64; it; rv:1.9.0.8) Gecko/2009032712 Ubuntu/8.10 (intrepid) Firefox/3.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
'Mozilla/8.0 (compatible; MSIE 8.0; Windows 7)'
]
| jeffreyzzh/MYZXcrawler | MYZXcrawler/MYZXcrawler/settings.py | Python | mit | 18,797 |