Dataset schema (one row per source file; each row lists code, repo_name, path, language, license and size, in that order):

  column      type    range / distinct values
  ----------  ------  -----------------------------
  code        string  3 to 1.05M characters
  repo_name   string  5 to 104 characters
  path        string  4 to 251 characters
  language    string  1 distinct value (Python)
  license     string  15 distinct values
  size        int64   3 to 1.05M
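For orientation, here is a minimal sketch of how a corpus with this schema could be loaded and inspected. It assumes a Hugging Face datasets-style layout; the dataset identifier below is hypothetical, and only the column names come from the schema above.

from datasets import load_dataset

# Hypothetical dataset id, used for illustration only; the column names
# (code, repo_name, path, language, license, size) come from the schema above.
ds = load_dataset("example/python-code-corpus", split="train")

# Each row pairs one source file with its provenance metadata.
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["code"][:80])  # first 80 characters of the file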
# -*- coding: utf-8 -*-
from django import template

register = template.Library()


@register.filter
def product_url(product, category=None):
    return product.get_absolute_url(category=category)
fusionbox/satchless
satchless/category/templatetags/category.py
Python
bsd-3-clause
197
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from ._operations import Operations
from ._availability_sets_operations import AvailabilitySetsOperations
from ._proximity_placement_groups_operations import ProximityPlacementGroupsOperations
from ._dedicated_host_groups_operations import DedicatedHostGroupsOperations
from ._dedicated_hosts_operations import DedicatedHostsOperations
from ._virtual_machine_extension_images_operations import VirtualMachineExtensionImagesOperations
from ._virtual_machine_extensions_operations import VirtualMachineExtensionsOperations
from ._virtual_machine_images_operations import VirtualMachineImagesOperations
from ._usage_operations import UsageOperations
from ._virtual_machines_operations import VirtualMachinesOperations
from ._virtual_machine_sizes_operations import VirtualMachineSizesOperations
from ._images_operations import ImagesOperations
from ._virtual_machine_scale_sets_operations import VirtualMachineScaleSetsOperations
from ._virtual_machine_scale_set_extensions_operations import VirtualMachineScaleSetExtensionsOperations
from ._virtual_machine_scale_set_rolling_upgrades_operations import VirtualMachineScaleSetRollingUpgradesOperations
from ._virtual_machine_scale_set_vm_extensions_operations import VirtualMachineScaleSetVMExtensionsOperations
from ._virtual_machine_scale_set_vms_operations import VirtualMachineScaleSetVMsOperations
from ._log_analytics_operations import LogAnalyticsOperations
from ._disks_operations import DisksOperations
from ._snapshots_operations import SnapshotsOperations
from ._disk_encryption_sets_operations import DiskEncryptionSetsOperations
from ._galleries_operations import GalleriesOperations
from ._gallery_images_operations import GalleryImagesOperations
from ._gallery_image_versions_operations import GalleryImageVersionsOperations
from ._gallery_applications_operations import GalleryApplicationsOperations
from ._gallery_application_versions_operations import GalleryApplicationVersionsOperations
from ._virtual_machine_run_commands_operations import VirtualMachineRunCommandsOperations

__all__ = [
    'Operations',
    'AvailabilitySetsOperations',
    'ProximityPlacementGroupsOperations',
    'DedicatedHostGroupsOperations',
    'DedicatedHostsOperations',
    'VirtualMachineExtensionImagesOperations',
    'VirtualMachineExtensionsOperations',
    'VirtualMachineImagesOperations',
    'UsageOperations',
    'VirtualMachinesOperations',
    'VirtualMachineSizesOperations',
    'ImagesOperations',
    'VirtualMachineScaleSetsOperations',
    'VirtualMachineScaleSetExtensionsOperations',
    'VirtualMachineScaleSetRollingUpgradesOperations',
    'VirtualMachineScaleSetVMExtensionsOperations',
    'VirtualMachineScaleSetVMsOperations',
    'LogAnalyticsOperations',
    'DisksOperations',
    'SnapshotsOperations',
    'DiskEncryptionSetsOperations',
    'GalleriesOperations',
    'GalleryImagesOperations',
    'GalleryImageVersionsOperations',
    'GalleryApplicationsOperations',
    'GalleryApplicationVersionsOperations',
    'VirtualMachineRunCommandsOperations',
]
Azure/azure-sdk-for-python
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/operations/__init__.py
Python
mit
3,510
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from setuptools import setup, find_packages
import codecs


def _read_file(name, encoding='utf-8'):
    """
    Read the contents of a file.

    :param name: The name of the file in the current directory.
    :param encoding: The encoding of the file; defaults to utf-8.
    :return: The contents of the file.
    """
    with codecs.open(name, encoding=encoding) as f:
        return f.read()


setup(
    name='nibble',
    version='0.1.0',
    description='Speed, distance and time calculations around quantities of '
                'digital information.',
    long_description=_read_file('README.rst'),
    license='MIT',
    url='https://github.com/gebn/nibble',
    author='George Brighton',
    author_email='oss@gebn.co.uk',
    packages=find_packages(),
    zip_safe=True,
    install_requires=[
        'six>=1.9.0',
        'PLY>=3.6'
    ],
    test_suite='nose.collector',
    tests_require=[
        'nose',
        'mock'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    entry_points={
        'console_scripts': [
            'nibble = nibble.__main__:main_cli',
        ]
    }
)
gebn/nibble
setup.py
Python
mit
1,923
# -*- coding: utf-8 -*- import time import EafIO import warnings class Eaf: """Read and write Elan's Eaf files. .. note:: All times are in milliseconds and can't have decimals. :var dict annotation_document: Annotation document TAG entries. :var dict licences: Licences included in the file. :var dict header: XML header. :var list media_descriptors: Linked files, where every file is of the form: ``{attrib}``. :var list properties: Properties, where every property is of the form: ``(value, {attrib})``. :var list linked_file_descriptors: Secondary linked files, where every linked file is of the form: ``{attrib}``. :var dict timeslots: Timeslot data of the form: ``{TimslotID -> time(ms)}``. :var dict tiers: Tier data of the form: ``{tier_name -> (aligned_annotations, reference_annotations, attributes, ordinal)}``, aligned_annotations of the form: ``[{annotation_id -> (begin_ts, end_ts, value, svg_ref)}]``, reference annotations of the form: ``[{annotation_id -> (reference, value, previous, svg_ref)}]``. :var list linguistic_types: Linguistic types, where every type is of the form: ``{id -> attrib}``. :var list locales: Locales, where every locale is of the form: ``{attrib}``. :var dict constraints: Constraint data of the form: ``{stereotype -> description}``. :var dict controlled_vocabularies: Controlled vocabulary data of the form: ``{id -> (descriptions, entries, ext_ref)}``, descriptions of the form: ``[(lang_ref, text)]``, entries of the form: ``{id -> (values, ext_ref)}``, values of the form: ``[(lang_ref, description, text)]``. :var list external_refs: External references, where every reference is of the form ``[id, type, value]``. :var list lexicon_refs: Lexicon references, where every reference is of the form: ``[{attribs}]``. """ def __init__(self, file_path=None, author='pympi'): """Construct either a new Eaf file or read on from a file/stream. :param str file_path: Path to read from, - for stdin. If ``None`` an empty Eaf file will be created. :param str author: Author of the file. """ self.naive_gen_ann, self.naive_gen_ts = False, False self.annotation_document = { 'AUTHOR': author, 'DATE': time.strftime("%Y-%m-%dT%H:%M:%S%z"), 'VERSION': '2.8', 'FORMAT': '2.8', 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance', 'xsi:noNamespaceSchemaLocation': 'http://www.mpi.nl/tools/elan/EAFv2.8.xsd'} self.constraints = {} self.controlled_vocabularies = {} self.header = {} self.licences = {} self.linguistic_types = {} self.tiers = {} self.timeslots = {} self.external_refs = [] self.lexicon_refs = [] self.linked_file_descriptors = [] self.locales = [] self.media_descriptors = [] self.properties = [] self.new_time, self.new_ann = 0, 0 if file_path is None: self.add_linguistic_type('default-lt', None) self.constraints = {'Time_Subdivision': 'Time subdivision of paren' 't annotation\'s time interval, no time gaps a' 'llowed within this interval', 'Symbolic_Subdivision': 'Symbolic subdivision ' 'of a parent annotation. Annotations refering ' 'to the same parent are ordered', 'Symbolic_Association': '1-1 association with ' 'a parent annotation', 'Included_In': 'Time alignable annotations wit' 'hin the parent annotation\'s time interval, g' 'aps are allowed'} self.properties.append(('0', {'NAME': 'lastUsedAnnotation'})) self.add_tier('default') else: EafIO.parse_eaf(file_path, self) def to_file(self, file_path, pretty=True): """Write the object to a file, if the file already exists a backup will be created with the ``.bak`` suffix. :param str file_path: Path to write to, - for stdout. 
:param bool pretty: Flag for pretty XML printing. """ EafIO.to_eaf(file_path, self, pretty) def to_textgrid(self, excluded_tiers=[], included_tiers=[]): """Convert the object to a :class:`pympi.Praat.TextGrid` object. :param list excluded_tiers: Specifically exclude these tiers. :param list included_tiers: Only include this tiers, when empty all are included. :returns: :class:`pympi.Praat.TextGrid` object :raises ImportError: If the pympi.Praat module can't be loaded. """ from Praat import TextGrid tgout = TextGrid() tiers = [a for a in self.tiers if a not in excluded_tiers] if included_tiers: tiers = [a for a in tiers if a in included_tiers] for tier in tiers: currentTier = tgout.add_tier(tier) for interval in self.get_annotation_data_for_tier(tier): if interval[0] == interval[1]: continue currentTier.add_interval(interval[0]/1000.0, interval[1]/1000.0, interval[2]) return tgout def extract(self, start, end): """Extracts the selected time frame as a new object. :param int start: Start time. :param int end: End time. :returns: The extracted frame in a new object. """ from copy import deepcopy eaf_out = deepcopy(self) for tier in eaf_out.tiers.itervalues(): rems = [] for ann in tier[0]: if eaf_out.timeslots[tier[0][ann][1]] > end or\ eaf_out.timeslots[tier[0][ann][0]] < start: rems.append(ann) for r in rems: del tier[0][r] return eaf_out def get_linked_files(self): """Give all linked files.""" return self.media_descriptors def add_linked_file(self, file_path, relpath=None, mimetype=None, time_origin=None, ex_from=None): """Add a linked file. :param str file_path: Path of the file. :param str relpath: Relative path of the file. :param str mimetype: Mimetype of the file, if ``None`` it tries to guess it according to the file extension which currently only works for wav, mpg, mpeg and xml. :param int time_origin: Time origin for the media file. :param str ex_from: Extracted from field. :raises KeyError: If mimetype had to be guessed and a non standard extension or an unknown mimetype. """ if mimetype is None: mimes = {'wav': 'audio/x-wav', 'mpg': 'video/mpeg', 'mpeg': 'video/mpg', 'xml': 'text/xml'} mimetype = mimes[file_path.split('.')[-1]] self.media_descriptors.append({ 'MEDIA_URL': file_path, 'RELATIVE_MEDIA_URL': relpath, 'MIME_TYPE': mimetype, 'TIME_ORIGIN': time_origin, 'EXTRACTED_FROM': ex_from}) def copy_tier(self, eaf_obj, tier_name): """Copies a tier to another :class:`pympi.Elan.Eaf` object. :param pympi.Elan.Eaf eaf_obj: Target Eaf object. :param str tier_name: Name of the tier. :raises KeyError: If the tier doesn't exist. """ eaf_obj.remove_tier(tier_name) eaf_obj.add_tier(tier_name, tier_dict=self.tiers[tier_name][3]) for ann in self.get_annotation_data_for_tier(tier_name): eaf_obj.insert_annotation(tier_name, ann[0], ann[1], ann[2]) def add_tier(self, tier_id, ling='default-lt', parent=None, locale=None, part=None, ann=None, tier_dict=None): """Add a tier. :param str tier_id: Name of the tier. :param str ling: Linguistic type, if the type is not available it will warn and pick the first available type. :param str parent: Parent tier name. :param str locale: Locale. :param str part: Participant. :param str ann: Annotator. :param dict tier_dict: TAG attributes, when this is not ``None`` it will ignore all other options. 
""" if ling not in self.linguistic_types: warnings.warn( 'add_tier: Linguistic type non existent, choosing the first') ling = self.linguistic_types.keys()[0] if tier_dict is None: self.tiers[tier_id] = ({}, {}, { 'TIER_ID': tier_id, 'LINGUISTIC_TYPE_REF': ling, 'PARENT_REF': parent, 'PARTICIPANT': part, 'DEFAULT_LOCALE': locale, 'ANNOTATOR': ann}, len(self.tiers)) else: self.tiers[tier_id] = ({}, {}, tier_dict, len(self.tiers)) def remove_tiers(self, tiers): """Remove multiple tiers, note that this is a lot faster then removing them individually because of the delayed cleaning of timeslots. :param list tiers: Names of the tier to remove. :raises KeyError: If a tier is non existent. """ for a in tiers: self.remove_tier(a, check=False, clean=False) self.clean_time_slots() def remove_tier(self, id_tier, clean=True): """Remove tier. :param str id_tier: Name of the tier. :param bool clean: Flag to also clean the timeslots. :raises KeyError: If tier is non existent. """ del(self.tiers[id_tier]) if clean: self.clean_time_slots() def get_tier_names(self): """List all the tier names. :returns: List of all tier names """ return self.tiers.keys() def get_parameters_for_tier(self, id_tier): """Give the parameter dictionary, this is usaable in :func:`add_tier`. :param str id_tier: Name of the tier. :returns: Dictionary of parameters. :raises KeyError: If the tier is non existent. """ return self.tiers[id_tier][2] def child_tiers_for(self, id_tier): """Give all child tiers for a tier. :param str id_tier: Name of the tier. :returns: List of all children :raises KeyError: If the tier is non existent. """ return [m for m in self.tiers if 'PARENT_REF' in self.tiers[m][2] and self.tiers[m][2]['PARENT_REF'] == id_tier] def get_annotation_data_for_tier(self, id_tier): """Gives a list of annotations of the form: ``(begin, end, value)`` :param str id_tier: Name of the tier. :raises KeyError: If the tier is non existent. """ a = self.tiers[id_tier][0] return [(self.timeslots[a[b][0]], self.timeslots[a[b][1]], a[b][2]) for b in a] def get_annotation_data_at_time(self, id_tier, time): """Give the annotations at the given time. :param str id_tier: Name of the tier. :param int time: Time of the annotation. :returns: List of annotations at that time. :raises KeyError: If the tier is non existent. """ anns = self.tiers[id_tier][0] return sorted( [(self.timeslots[m[0]], self.timeslots[m[1]], m[2]) for m in anns.itervalues() if self.timeslots[m[0]] <= time and self.timeslots[m[1]] >= time]) def get_annotation_datas_between_times(self, id_tier, start, end): """Gives the annotations within the times. :param str id_tier: Name of the tier. :param int start: Start time of the annotation. :param int end: End time of the annotation. :returns: List of annotations within that time. :raises KeyError: If the tier is non existent. """ anns = self.tiers[id_tier][0] return sorted([ (self.timeslots[m[0]], self.timeslots[m[1]], m[2]) for m in anns.itervalues() if self.timeslots[m[1]] >= start and self.timeslots[m[0]] <= end]) def remove_all_annotations_from_tier(self, id_tier): """remove all annotations from a tier :param str id_tier: Name of the tier. :raises KeyError: If the tier is non existent. """ self.tiers[id_tier][0], self.tiers[id_tier][1] = {}, {} self.clean_time_slots() def insert_annotation(self, id_tier, start, end, value='', svg_ref=None): """Insert an annotation. :param str id_tier: Name of the tier. :param int start: Start time of the annotation. :param int end: End time of the annotation. 
:param str value: Value of the annotation. :param str svg_ref: Svg reference. :raises KeyError: If the tier is non existent. """ start_ts = self.generate_ts_id(start) end_ts = self.generate_ts_id(end) self.tiers[id_tier][0][self.generate_annotation_id()] =\ (start_ts, end_ts, value, svg_ref) def remove_annotation(self, id_tier, time, clean=True): """Remove an annotation in a tier, if you need speed the best thing is to clean the timeslots after the last removal. :param str id_tier: Name of the tier. :param int time: Timepoint within the annotation. :param bool clean: Flag to clean the timeslots afterwards. :raises KeyError: If the tier is non existent. """ for b in [a for a in self.tiers[id_tier][0].iteritems() if a[1][0] >= time and a[1][1] <= time]: del(self.tiers[id_tier][0][b[0]]) if clean: self.clean_time_slots() def insert_ref_annotation(self, id_tier, ref, value, prev, svg_ref=None): """Insert a reference annotation. :param str id_tier: Name of the tier. :param str ref: Id of the referenced annotation. :param str value: Value of the annotation. :param str prev: Id of the previous annotation. :param str svg_ref: Svg reference. :raises KeyError: If the tier is non existent. """ self.tiers[id_tier][1][self.generate_annotation_id()] =\ (ref, value, prev, svg_ref) def get_ref_annotation_data_for_tier(self, id_tier): """"Give a list of all reference annotations of the form: ``[{id -> (ref, value, previous, svg_ref}]`` :param str id_tier: Name of the tier. :raises KeyError: If the tier is non existent. """ return self.tiers[id_tier][1] def remove_controlled_vocabulary(self, cv): """Remove a controlled vocabulary. :param str cv: Controlled vocabulary id. :raises KeyError: If the controlled vocabulary is non existent. """ del(self.controlled_vocabularies[cv]) def generate_annotation_id(self): """Generate the next annotation id, this function is mainly used internally. """ if self.naive_gen_ann: new = self.last_ann+1 self.last_ann = new else: new = 1 anns = {int(ann[1:]) for tier in self.tiers.itervalues() for ann in tier[0]} if len(anns) > 0: newann = set(xrange(1, max(anns))).difference(anns) if len(newann) == 0: new = max(anns)+1 self.naive_gen_ann = True self.last_ann = new else: new = sorted(newann)[0] return 'a%d' % new def generate_ts_id(self, time=None): """Generate the next timeslot id, this function is mainly used internally :param int time: Initial time to assign to the timeslot """ if self.naive_gen_ts: new = self.last_ts+1 self.last_ts = new else: new = 1 tss = {int(x[2:]) for x in self.timeslots} if len(tss) > 0: newts = set(xrange(1, max(tss))).difference(tss) if len(newts) == 0: new = max(tss)+1 self.naive_gen_ts = True self.last_ts = new else: new = sorted(newts)[0] ts = 'ts%d' % new self.timeslots[ts] = time return ts def clean_time_slots(self): """Clean up all unused timeslots. .. warning:: This can and will take time for larger tiers. When you want to do a lot of operations on a lot of tiers please unset the flags for cleaning in the functions so that the cleaning is only performed afterwards. """ ts_in_tier = set(sum([a[0:2] for tier in self.tiers.itervalues() for a in tier[0].itervalues()], ())) ts_avail = set(self.timeslots) for a in ts_in_tier.symmetric_difference(ts_avail): del(self.timeslots[a]) self.naive_gen_ts = False self.naive_gen_ann = False def generate_annotation_concat(self, tiers, start, end, sep='-'): """Give a string of concatenated annotation values for annotations within a timeframe. :param list tiers: List of tier names. :param int start: Start time. 
:param int end: End time. :param str sep: Separator string to use. :returns: String containing a concatenation of annotation values. :raises KeyError: If a tier is non existent. """ return sep.join( set(d[2] for t in tiers if t in self.tiers for d in self.get_annotation_datas_between_times(t, start, end))) def merge_tiers(self, tiers, tiernew=None, gaptresh=1): """Merge tiers into a new tier and when the gap is lower then the threshhold glue the annotations together. :param list tiers: List of tier names. :param str tiernew: Name for the new tier, if ``None`` the name will be generated. :param int gapthresh: Threshhold for the gaps. :raises KeyError: If a tier is non existent. :raises TypeError: If there are no annotations within the tiers. """ if tiernew is None: tiernew = '%s_Merged' % '_'.join(tiers) self.remove_tier(tiernew) self.add_tier(tiernew) timepts = sorted(set.union( *[set(j for j in xrange(d[0], d[1])) for d in [ann for tier in tiers for ann in self.get_annotation_data_for_tier(tier)]])) if len(timepts) > 1: start = timepts[0] for i in xrange(1, len(timepts)): if timepts[i]-timepts[i-1] > gaptresh: self.insert_annotation( tiernew, start, timepts[i-1], self.generate_annotation_concat(tiers, start, timepts[i-1])) start = timepts[i] self.insert_annotation( tiernew, start, timepts[i-1], self.generate_annotation_concat(tiers, start, timepts[i-1])) def shift_annotations(self, time): """Shift all annotations in time, this creates a new object. :param int time: Time shift width, negative numbers make a right shift. :returns: Shifted :class:`pympi.Elan.Eaf' object. """ e = self.extract( -1*time, self.get_full_time_interval()[1]) if time < 0 else\ self.extract(0, self.get_full_time_interval()[1]-time) for tier in e.tiers.itervalues(): for ann in tier[0].itervalues(): e.timeslots[ann[0]] = e.timeslots[ann[0]]+time e.timeslots[ann[1]] = e.timeslots[ann[1]]+time e.clean_time_slots() return e def filterAnnotations(self, tier, tier_name=None, filtin=None, filtex=None): """Filter annotations in a tier :param str tier: Name of the tier: :param str tier_name: Name of the new tier, when ``None`` the name will be generated. :param list filtin: List of strings to be included, if None all annotations all is included. :param list filtex: List of strings to be excluded, if None no strings are excluded. :raises KeyError: If the tier is non existent. """ if tier_name is None: tier_name = '%s_filter' % tier self.remove_tier(tier_name) self.add_tier(tier_name) for a in [b for b in self.get_annotation_data_for_tier(tier) if (filtex is None or b[2] not in filtex) and (filtin is None or b[2] in filtin)]: self.insert_annotation(tier_name, a[0], a[1], a[2]) def glue_annotations_in_tier(self, tier, tier_name=None, treshhold=85, filtin=None, filtex=None): """Glue annotatotions together in a tier. :param str tier: Name of the tier. :param str tier_name: Name of the new tier, if ``None`` the name will be generated. :param int threshhold: Threshhold for the maximum gap to still glue. :param list filtin: List of strings to be included, if None all annotations all is included. :param list filtex: List of strings to be excluded, if None no strings are excluded. :raises KeyError: If the tier is non existent. 
""" if tier_name is None: tier_name = '%s_glued' % tier self.remove_tier(tier_name) self.add_tier(tier_name) tier_data = sorted(self.get_annotation_data_for_tier(tier)) tier_data = [t for t in tier_data if (filtin is None or t[2] in filtin) and (filtex is None or t[2] not in filtex)] currentAnn = None for i in xrange(0, len(tier_data)): if currentAnn is None: currentAnn = (tier_data[i][0], tier_data[i][1], tier_data[i][2]) elif tier_data[i][0] - currentAnn[1] < treshhold: currentAnn = (currentAnn[0], tier_data[i][1], '%s_%s' % (currentAnn[2], tier_data[i][2])) else: self.insert_annotation(tier_name, currentAnn[0], currentAnn[1], currentAnn[2]) currentAnn = tier_data[i] if currentAnn is not None: self.insert_annotation(tier_name, currentAnn[0], tier_data[len(tier_data)-1][1], currentAnn[2]) def get_full_time_interval(self): """Give the full time interval of the file. :returns: Tuple of the form: ``(min_time, max_time``. """ return (min(self.timeslots.itervalues()), max(self.timeslots.itervalues())) def create_gaps_and_overlaps_tier(self, tier1, tier2, tier_name=None, maxlen=-1): """Create a tier with the gaps and overlaps of the annotations. For types see :func:`get_gaps_and_overlaps_duration` :param str tier1: Name of the first tier. :param str tier2: Name of the second tier. :param str tier_name: Name of the new tier, if ``None`` the name will be generated. :param int maxlen: Maximum length of gaps (skip longer ones), if ``-1`` no maximum will be used. :returns: List of gaps and overlaps of the form: ``[(type, start, end)]``. :raises KeyError: If a tier is non existent. :raises IndexError: If no annotations are available in the tiers. """ if tier_name is None: tier_name = '%s_%s_ftos' % (tier1, tier2) self.remove_tier(tier_name) self.add_tier(tier_name) ftos = self.get_gaps_and_overlaps_duration(tier1, tier2, maxlen) for fto in ftos: self.insert_annotation(tier_name, fto[1], fto[2], fto[0]) return ftos def get_gaps_and_overlaps_duration(self, tier1, tier2, maxlen=-1, progressbar=False): """Give gaps and overlaps. The return types are shown in the table below. The string will be of the format: ``id_tiername_tiername``. For example when a gap occurs between tier1 and tier2 and they are called ``speakerA`` and ``speakerB`` the annotation value of that gap will be ``G12_speakerA_speakerB``. | The gaps and overlaps are calculated using Heldner and Edlunds method found in: | *Heldner, M., & Edlund, J. (2010). Pauses, gaps and overlaps in conversations. Journal of Phonetics, 38(4), 555–568. doi:10.1016/j.wocn.2010.08.002* +-----+--------------------------------------------+ | id | Description | +=====+============================================+ | O12 | Overlap from tier1 to tier2 | +-----+--------------------------------------------+ | O21 | Overlap from tier2 to tier1 | +-----+--------------------------------------------+ | G12 | Gap from tier1 to tier2 | +-----+--------------------------------------------+ | G21 | Gap from tier2 to tier1 | +-----+--------------------------------------------+ | P1 | Pause for tier1 | +-----+--------------------------------------------+ | P2 | Pause for tier2 | +-----+--------------------------------------------+ | B12 | Within speaker overlap from tier1 to tier2 | +-----+--------------------------------------------+ | B21 | Within speaker overlap from tier2 to tier1 | +-----+--------------------------------------------+ :param str tier1: Name of the first tier. :param str tier2: Name of the second tier. 
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1`` no maximum will be used. :param bool progressbar: Flag for debugging purposes that shows the progress during the process. :returns: List of gaps and overlaps of the form: ``[(type, start, end)]``. :raises KeyError: If a tier is non existent. :raises IndexError: If no annotations are available in the tiers. """ spkr1anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]]) for a in self.tiers[tier1][0].values()) spkr2anns = sorted((self.timeslots[a[0]], self.timeslots[a[1]]) for a in self.tiers[tier2][0].values()) line1 = [] isin = lambda x, lst: False if\ len([i for i in lst if i[0] <= x and i[1] >= x]) == 0 else True minmax = (min(spkr1anns[0][0], spkr2anns[0][0]), max(spkr1anns[-1][1], spkr2anns[-1][1])) last = (1, minmax[0]) lastP = 0 for ts in xrange(*minmax): in1, in2 = isin(ts, spkr1anns), isin(ts, spkr2anns) if in1 and in2: # Both speaking if last[0] == 'B': continue ty = 'B' elif in1: # Only 1 speaking if last[0] == '1': continue ty = '1' elif in2: # Only 2 speaking if last[0] == '2': continue ty = '2' else: # None speaking if last[0] == 'N': continue ty = 'N' line1.append((last[0], last[1], ts)) last = (ty, ts) if progressbar and int((ts*1.0/minmax[1])*100) > lastP: lastP = int((ts*1.0/minmax[1])*100) print '%d%%' % lastP line1.append((last[0], last[1], minmax[1])) ftos = [] for i in xrange(len(line1)): if line1[i][0] == 'N': if i != 0 and i < len(line1) - 1 and\ line1[i-1][0] != line1[i+1][0]: ftos.append(('G12_%s_%s' % (tier1, tier2) if line1[i-1][0] == '1' else 'G21_%s_%s' % (tier2, tier1), line1[i][1], line1[i][2])) else: ftos.append(('P_%s' % (tier1 if line1[i-1][0] == '1' else tier2), line1[i][1], line1[i][2])) elif line1[i][0] == 'B': if i != 0 and i < len(line1) - 1 and\ line1[i-1][0] != line1[i+1][0]: ftos.append(('O12_%s_%s' % ((tier1, tier2) if line1[i-1][0] else 'O21_%s_%s' % (tier2, tier1)), line1[i][1], line1[i][2])) else: ftos.append(('B_%s_%s' % ((tier1, tier2) if line1[i-1][0] == '1' else (tier2, tier1)), line1[i][1], line1[i][2])) return [f for f in ftos if maxlen == -1 or abs(f[2] - f[1]) < maxlen] def create_controlled_vocabulary(self, cv_id, descriptions, entries, ext_ref=None): """Create a controlled vocabulary. .. warning:: This is a very raw implementation and you should check the Eaf file format specification for the entries. :param str cv_id: Name of the controlled vocabulary. :param list descriptions: List of descriptions. :param dict entries: Entries dictionary. :param str ext_ref: External reference. """ self.controlledvocabularies[cv_id] = (descriptions, entries, ext_ref) def get_tier_ids_for_linguistic_type(self, ling_type, parent=None): """Give a list of all tiers matching a linguistic type. :param str ling_type: Name of the linguistic type. :param str parent: Only match tiers from this parent, when ``None`` this option will be ignored. :returns: List of tiernames. :raises KeyError: If a tier or linguistic type is non existent. """ return [t for t in self.tiers if self.tiers[t][2]['LINGUISTIC_TYPE_REF'] == ling_type and (parent is None or self.tiers[t][2]['PARENT_REF'] == parent)] def remove_linguistic_type(self, ling_type): """Remove a linguistic type. :param str ling_type: Name of the linguistic type. """ del(self.linguistic_types[ling_type]) def add_linguistic_type(self, lingtype, constraints=None, timealignable=True, graphicreferences=False, extref=None): """Add a linguistic type. :param str lingtype: Name of the linguistic type. :param list constraints: Constraint names. 
:param bool timealignable: Flag for time alignable. :param bool graphicreferences: Flag for graphic references. :param str extref: External reference. """ self.linguistic_types[lingtype] = { 'LINGUISTIC_TYPE_ID': lingtype, 'TIME_ALIGNABLE': str(timealignable).lower(), 'GRAPHIC_REFERENCES': str(graphicreferences).lower(), 'CONSTRAINTS': constraints} if extref is not None: self.linguistic_types[lingtype]['EXT_REF'] = extref def get_linguistic_types(self): """Give a list of available linguistic types. :returns: List of linguistic type names. """ return self.linguistic_types.keys()
acuriel/Nixtla
nixtla/core/tools/pympi/Elan.py
Python
gpl-2.0
33,330
# coding=utf-8
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.

import urllib2
from logging import getLogger

import psycopg2
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, connections, connection
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection

from footprint.main.managers.database.managers import InformationSchemaManager, PGNamespaceManager
from footprint.main.utils.utils import parse_schema_and_table, increment_key
from footprint.utils.postgres_utils import pg_connection_parameters

logger = getLogger(__name__)

__author__ = 'calthorpe_analytics'


class InformationSchema(models.Model):

    table_catalog = models.CharField(max_length=100)
    table_schema = models.CharField(max_length=100)
    table_name = models.CharField(max_length=100)
    # Pretend this is the primary key since the table doesn't have a single column primary key
    column_name = models.CharField(max_length=100, null=False, primary_key=True)
    data_type = models.CharField(max_length=100)
    udt_name = models.CharField(max_length=100, null=False, primary_key=True)

    objects = InformationSchemaManager()

    def __unicode__(self):
        return "Catalog: {0}, Schema: {1}, Table: {2}, Column: {3}, Type: {4}".format(
            self.table_catalog, self.table_schema, self.table_name, self.column_name, self.data_type)

    def full_table_name(self):
        return "{0}.{1}".format(self.table_schema, self.table_name)

    @classmethod
    def create_primary_key_column_from_another_column(cls, schema, table, primary_key_column, from_column=None):
        """
        Adds the column of the given type to the given table if absent

        :param schema: The database schema name
        :param table: The table name
        :param primary_key_column: Name of primary key column to create. If a primary key already
            exists it will be renamed from this, unless from_column is specified, in which case the
            existing primary_key will lose its constraint
        """
        full_tablename = '"{schema}"."{table}"'.format(schema=schema, table=table)
        conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = conn.cursor()

        existing_primary_key = InformationSchema.get_primary_key_name(schema, table)
        if existing_primary_key:
            logger.info('Found existing primary key %s' % existing_primary_key)
        if not InformationSchema.objects.has_column(schema, table, primary_key_column):
            # If not create a primary key or alter the existing one's name
            # Copy values from the from_column to the new primary_key_column
            if existing_primary_key and not from_column:
                # Rename the primary key to primary_key_column and end
                alter_source_id_sql = 'alter table {full_tablename} rename column {existing_primary_key} to {primary_key_column}'.format(
                    full_tablename=full_tablename,
                    existing_primary_key=existing_primary_key,
                    primary_key_column=primary_key_column)
                logger.info('Existing primary key exists and no from column is specified. Executing: %s' % alter_source_id_sql)
                cursor.execute(alter_source_id_sql)
                return
            if from_column:
                # Create a new primary key column without values
                create_column_sql = 'alter table {full_tablename} add column {primary_key_column} integer'.format(
                    full_tablename=full_tablename,
                    primary_key_column=primary_key_column)
                logger.info('From column is specified to be primary key source. Executing: %s' % create_column_sql)
                cursor.execute(create_column_sql)
                # Copy values from the from_column, always casting to integer
                update_sql = 'update {full_tablename} set {primary_key_column} = cast({from_column} AS integer)'.format(
                    full_tablename=full_tablename,
                    primary_key_column=primary_key_column,
                    from_column=from_column)
                logger.info('Copying values from column to primary key. Executing: %s' % update_sql)
                cursor.execute(update_sql)
            else:
                # Populate with a serial primary key
                alter_source_id_sql = 'alter table {full_tablename} add column {primary_key_column} serial primary key'.format(
                    full_tablename=full_tablename,
                    primary_key_column=primary_key_column)
                logger.info('Copying values from column to primary key. Executing: %s' % alter_source_id_sql)
                cursor.execute(alter_source_id_sql)
            # Drop the original_primary_key column if it exists
            if existing_primary_key:
                alter_source_id_sql = 'alter table {full_tablename} drop column {existing_primary_key}'.format(
                    full_tablename=full_tablename,
                    existing_primary_key=existing_primary_key)
                logger.info('Existing primary key being removed. Executing: %s' % alter_source_id_sql)
                cursor.execute(alter_source_id_sql)
            if from_column:
                # Create the primary key constraint if we haven't yet
                alter_source_id_sql = 'alter table {full_tablename} add constraint {table}_{schema}_{primary_key_column}_pk primary key ({primary_key_column})'.format(
                    full_tablename=full_tablename,
                    table=table,
                    schema=schema,
                    primary_key_column=primary_key_column)
                logger.info('Adding constraint to primary key. Executing: %s' % alter_source_id_sql)
                cursor.execute(alter_source_id_sql)
        elif existing_primary_key != primary_key_column:
            # If there a column matching primary_key_column that isn't the primary key, we need to rename
            # the primary_key_column to something unique and then rename existing_primary_key to primary_key_column
            # Find a unique column to rename primary_key_column (e.g. renamed id to id_1, or id_2, etc)
            unique_column = increment_key(primary_key_column)
            while InformationSchema.objects.has_column(schema, table, unique_column):
                unique_column = increment_key(unique_column)
            # Rename the primary_key_column
            rename_primary_key_column_name_sql = 'alter table {full_tablename} rename column {primary_key_column} to {unique_column}'.format(
                full_tablename=full_tablename,
                primary_key_column=primary_key_column,
                unique_column=unique_column)
            logger.info('Existing column with primary key name exists that needs to be renamed: %s' % rename_primary_key_column_name_sql)
            cursor.execute(rename_primary_key_column_name_sql)
            # Rename the existing_primary_key to primary_key_column (e.g. rename to ogc_fid to id for uploaded tables)
            rename_existing_primary_key_sql = 'alter table {full_tablename} rename column {existing_primary_key} to {primary_key_column}'.format(
                full_tablename=full_tablename,
                existing_primary_key=existing_primary_key,
                primary_key_column=primary_key_column)
            logger.info('Existing primary key exists that needs to be renamed to desired primary key column name. Executing: %s' % rename_existing_primary_key_sql)
            cursor.execute(rename_existing_primary_key_sql)

    @classmethod
    def get_primary_key_name(cls, schema, table):
        """
        Uses the inspection code to find the primary key column name, if one exists

        :param schema:
        :param table:
        :return: The primary key name or None
        """
        connection = connections['default']
        cursor = connection.cursor()
        table_name = '"{schema}"."{table}"'.format(schema=schema, table=table)
        # Use our own class to make up for lack of schema support in table queries
        smart_database_introspection = SmartDatabaseIntrospection(connection)
        try:
            indexes = smart_database_introspection.get_indexes(cursor, table_name)
        except NotImplementedError:
            indexes = {}
        # Fill this dict with field definitions
        for i, row in enumerate(smart_database_introspection.get_table_description(cursor, table_name)):
            column_name = row[0]
            # Add primary_key and unique, if necessary.
            if column_name in indexes:
                if indexes[column_name]['primary_key']:
                    return column_name

    class Meta(object):
        db_table = '"information_schema"."columns"'


class PGNamespace(models.Model):
    """
    This class is just needed to list schemas and see if they exist if they have no tables
    """
    # Pretend this is the primary key since the table doesn't have a single column primary key
    nspname = models.CharField(max_length=100, null=False, primary_key=True)
    objects = PGNamespaceManager()

    class Meta(object):
        db_table = 'pg_namespace'


class SouthMigrationHistory(models.Model):
    """
    This class is just needed to list schemas and see if they exist if they have no tables
    """
    # Pretend this is the primary key since the table doesn't have a single column primary key
    id = models.IntegerField(null=False, primary_key=True)
    app_name = models.CharField(max_length=100)
    migration = models.CharField(max_length=100)
    applied = models.DateTimeField()

    class Meta(object):
        db_table = 'south_migrationhistory'


class SpatialRefSys(models.Model):
    proj4text = models.CharField(max_length=2048)
    srtext = models.CharField(max_length=2048)
    auth_srid = models.IntegerField()
    auth_name = models.CharField(max_length=2048)
    srid = models.IntegerField(primary_key=True)

    class Meta(object):
        db_table = 'spatial_ref_sys'


class GeometryColumns(models.Model):
    f_table_catalog = models.CharField(max_length=256, null=False)
    f_table_schema = models.CharField(max_length=256, null=False, primary_key=True)
    f_table_name = models.CharField(max_length=256, null=False, primary_key=True)
    f_geometry_column = models.CharField(max_length=256, null=False, primary_key=True)
    coord_dimension = models.IntegerField(null=False)
    srid = models.IntegerField(null=False)
    type = models.CharField(max_length=30, null=False)

    class Meta(object):
        db_table = 'geometry_columns'


def sync_geometry_columns(schema=None, table=None):
    """
    Adds one or more entries to the PostGIS geometry_columns

    :param schema: Optional database schema to which to limit search
    :param table: Optional table name to which to limit search
    :return:
    """
    tables_with_geometry = InformationSchema.objects.tables_with_geometry(schema=schema, table=table)
    for information_scheme in tables_with_geometry:
        conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = conn.cursor()
        sql = "select ST_CoordDim({2}), ST_SRID({2}), ST_GeometryType({2}) from {1}.{0}".format(
            information_scheme.table_name, information_scheme.table_schema, information_scheme.column_name)
        ret = cursor.execute(sql)
        if ret and len(ret) > 0:
            coord, srid, geom_type = ret[0]
        else:
            coord, srid, geom_type = (2, 4326, 'GEOMETRY')
        geometry_record, new_record = GeometryColumns.objects.get_or_create(
            f_table_name=information_scheme.table_name,
            f_geometry_column=information_scheme.column_name,
            f_table_schema=information_scheme.table_schema,
            defaults=dict(
                coord_dimension=coord,
                srid=srid,
                type=geom_type,
            ))
        if not new_record:
            geometry_record.coord_dimension = coord
            geometry_record.srid = srid
            geometry_record.type = geom_type
            geometry_record.save()


def scrape_insert_from_spatialreference(authority, srid):
    address = "http://www.spatialreference.org/ref/{1}/{0}/postgis/".format(srid, authority)
    logger.info('Looking up {authority}:{srid}'.format(srid=srid, authority=authority))
    try:
        return urllib2.urlopen(address).read()
    except:
        logger.warn('Could not find SRID {srid}!'.format(srid=srid))
        return None


def verify_srid(srid):
    try:
        srs = SpatialRefSys.objects.get(auth_srid=int(srid))
        logger.info("Using SRID: " + srid)
        return srs
    except ObjectDoesNotExist:
        insert = scrape_insert_from_spatialreference('esri', srid)
        if insert:
            logger.info("Inserting {srid} into spatial_ref_sys table".format(srid=srid))
            logger.info(insert)
            connection.cursor().execute(insert)
            srs = SpatialRefSys.objects.filter(auth_srid=int(srid))
            if srs.count():
                return srs[0]
    return False


class SmartDatabaseIntrospection(DatabaseIntrospection):

    def describe_table_columns(self, cursor, full_table_name):
        schema, table = parse_schema_and_table(full_table_name)
        # conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        # conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # cursor = conn.cursor()
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s and table_schema = %s""", [table, schema])
        null_map = dict(cursor.fetchall())
        cursor.execute('SELECT * FROM "{schema}"."{table}" LIMIT 1'.format(schema=schema, table=table))
        return cursor.description

    def get_table_description(self, cursor, full_table_name):
        """
        Override the parent method to take schemas into account, sigh

        :param cursor:
        :param full_table_name:
        :return:
        """
        schema, table = parse_schema_and_table(full_table_name)
        # conn = psycopg2.connect(**pg_connection_parameters(settings.DATABASES['default']))
        # conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # cursor = conn.cursor()
        cursor.execute("""
            SELECT column_name, is_nullable
            FROM information_schema.columns
            WHERE table_name = %s and table_schema = %s""", [table, schema])
        null_map = dict(cursor.fetchall())
        cursor.execute('SELECT * FROM "{schema}"."{table}" LIMIT 1'.format(schema=schema, table=table))
        return [tuple([item for item in line[:6]] + [null_map[line[0]] == u'YES'])
                for line in cursor.description]

    def get_indexes(self, cursor, table_name):
        """
        OVERRIDDEN to work with schemas, sigh

        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
            {'primary_key': boolean representing whether it's the primary key,
             'unique': boolean representing whether it's a unique index}
        """
        schema, table = parse_schema_and_table(table_name)
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute("""
            SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx, pg_catalog.pg_attribute attr,
                information_schema.columns isc
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND attr.attrelid = c.oid
                AND attr.attnum = idx.indkey[0]
                AND c.relname = %s
                AND c.relname = isc.table_name
                AND isc.table_schema = %s
                AND isc.column_name = attr.attname
        """, [table, schema])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
        return indexes
CalthorpeAnalytics/urbanfootprint
footprint/main/models/database/information_schema.py
Python
gpl-3.0
17,077
from heapq import heappush, heappop
from random import shuffle


def heapsort(v):
    h = []
    for x in v:
        heappush(h, x)
    return [heappop(h) for i in range(len(h))]


v = list(range(8))
shuffle(v)
print(v)
v = heapsort(v)
print(v)
Gigers/data-struct
Aulas/aula11/teacher_code/heapSort.py
Python
bsd-2-clause
233
from random import randint

board = []

for x in range(5):
    board.append(["O"] * 5)


def print_board(board):
    for row in board:
        print " ".join(row)


print "Let's play Battleship!"
print_board(board)


def random_row(board):
    return randint(0, len(board) - 1)


def random_col(board):
    return randint(0, len(board[0]) - 1)


ship_row = random_row(board)
ship_col = random_col(board)

# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
for turn in range(4):
    guess_row = int(raw_input("Guess Row:"))
    guess_col = int(raw_input("Guess Col:"))
    if guess_row == ship_row and guess_col == ship_col:
        print "Congratulations! You sunk my battleship!"
        break
    else:
        if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
            print "Oops, that's not even in the ocean."
        elif board[guess_row][guess_col] == "X":
            print "You guessed that one already."
        else:
            print "You missed my battleship!"
            board[guess_row][guess_col] = "X"
        print_board(board)
        if turn == 3:
            print "Game Over !!"
        else:
            print "turn number ", turn + 1
mandeepjadon/python-game
battleship.py
Python
mit
1,203
"""Run all test cases. """ import sys import os import unittest try: # For Pythons w/distutils pybsddb import bsddb3 as bsddb except ImportError: # For Python 2.3 import bsddb if sys.version_info[0] >= 3 : charset = "iso8859-1" # Full 8 bit class logcursor_py3k(object) : def __init__(self, env) : self._logcursor = env.log_cursor() def __getattr__(self, v) : return getattr(self._logcursor, v) def __next__(self) : v = getattr(self._logcursor, "next")() if v is not None : v = (v[0], v[1].decode(charset)) return v next = __next__ def first(self) : v = self._logcursor.first() if v is not None : v = (v[0], v[1].decode(charset)) return v def last(self) : v = self._logcursor.last() if v is not None : v = (v[0], v[1].decode(charset)) return v def prev(self) : v = self._logcursor.prev() if v is not None : v = (v[0], v[1].decode(charset)) return v def current(self) : v = self._logcursor.current() if v is not None : v = (v[0], v[1].decode(charset)) return v def set(self, lsn) : v = self._logcursor.set(lsn) if v is not None : v = (v[0], v[1].decode(charset)) return v class cursor_py3k(object) : def __init__(self, db, *args, **kwargs) : self._dbcursor = db.cursor(*args, **kwargs) def __getattr__(self, v) : return getattr(self._dbcursor, v) def _fix(self, v) : if v is None : return None key, value = v if isinstance(key, bytes) : key = key.decode(charset) return (key, value.decode(charset)) def __next__(self) : v = getattr(self._dbcursor, "next")() return self._fix(v) next = __next__ def previous(self) : v = self._dbcursor.previous() return self._fix(v) def last(self) : v = self._dbcursor.last() return self._fix(v) def set(self, k) : if isinstance(k, str) : k = bytes(k, charset) v = self._dbcursor.set(k) return self._fix(v) def set_recno(self, num) : v = self._dbcursor.set_recno(num) return self._fix(v) def set_range(self, k, dlen=-1, doff=-1) : if isinstance(k, str) : k = bytes(k, charset) v = self._dbcursor.set_range(k, dlen=dlen, doff=doff) return self._fix(v) def dup(self, flags=0) : cursor = self._dbcursor.dup(flags) return dup_cursor_py3k(cursor) def next_dup(self) : v = self._dbcursor.next_dup() return self._fix(v) def next_nodup(self) : v = self._dbcursor.next_nodup() return self._fix(v) def put(self, key, data, flags=0, dlen=-1, doff=-1) : if isinstance(key, str) : key = bytes(key, charset) if isinstance(data, str) : value = bytes(data, charset) return self._dbcursor.put(key, data, flags=flags, dlen=dlen, doff=doff) def current(self, flags=0, dlen=-1, doff=-1) : v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff) return self._fix(v) def first(self) : v = self._dbcursor.first() return self._fix(v) def pget(self, key=None, data=None, flags=0) : # Incorrect because key can be a bare number, # but enough to pass testsuite if isinstance(key, int) and (data is None) and (flags == 0) : flags = key key = None if isinstance(key, str) : key = bytes(key, charset) if isinstance(data, int) and (flags==0) : flags = data data = None if isinstance(data, str) : data = bytes(data, charset) v=self._dbcursor.pget(key=key, data=data, flags=flags) if v is not None : v1, v2, v3 = v if isinstance(v1, bytes) : v1 = v1.decode(charset) if isinstance(v2, bytes) : v2 = v2.decode(charset) v = (v1, v2, v3.decode(charset)) return v def join_item(self) : v = self._dbcursor.join_item() if v is not None : v = v.decode(charset) return v def get(self, *args, **kwargs) : l = len(args) if l == 2 : k, f = args if isinstance(k, str) : k = bytes(k, "iso8859-1") args = (k, f) elif l == 3 : k, d, f = args if isinstance(k, str) : 
k = bytes(k, charset) if isinstance(d, str) : d = bytes(d, charset) args =(k, d, f) v = self._dbcursor.get(*args, **kwargs) if v is not None : k, v = v if isinstance(k, bytes) : k = k.decode(charset) v = (k, v.decode(charset)) return v def get_both(self, key, value) : if isinstance(key, str) : key = bytes(key, charset) if isinstance(value, str) : value = bytes(value, charset) v=self._dbcursor.get_both(key, value) return self._fix(v) class dup_cursor_py3k(cursor_py3k) : def __init__(self, dbcursor) : self._dbcursor = dbcursor class DB_py3k(object) : def __init__(self, *args, **kwargs) : args2=[] for i in args : if isinstance(i, DBEnv_py3k) : i = i._dbenv args2.append(i) args = tuple(args2) for k, v in kwargs.items() : if isinstance(v, DBEnv_py3k) : kwargs[k] = v._dbenv self._db = bsddb._db.DB_orig(*args, **kwargs) def __contains__(self, k) : if isinstance(k, str) : k = bytes(k, charset) return getattr(self._db, "has_key")(k) def __getitem__(self, k) : if isinstance(k, str) : k = bytes(k, charset) v = self._db[k] if v is not None : v = v.decode(charset) return v def __setitem__(self, k, v) : if isinstance(k, str) : k = bytes(k, charset) if isinstance(v, str) : v = bytes(v, charset) self._db[k] = v def __delitem__(self, k) : if isinstance(k, str) : k = bytes(k, charset) del self._db[k] def __getattr__(self, v) : return getattr(self._db, v) def __len__(self) : return len(self._db) def has_key(self, k, txn=None) : if isinstance(k, str) : k = bytes(k, charset) return self._db.has_key(k, txn=txn) def set_re_delim(self, c) : if isinstance(c, str) : # We can use a numeric value byte too c = bytes(c, charset) return self._db.set_re_delim(c) def set_re_pad(self, c) : if isinstance(c, str) : # We can use a numeric value byte too c = bytes(c, charset) return self._db.set_re_pad(c) def get_re_source(self) : source = self._db.get_re_source() return source.decode(charset) def put(self, key, data, txn=None, flags=0, dlen=-1, doff=-1) : if isinstance(key, str) : key = bytes(key, charset) if isinstance(data, str) : value = bytes(data, charset) return self._db.put(key, data, flags=flags, txn=txn, dlen=dlen, doff=doff) def append(self, value, txn=None) : if isinstance(value, str) : value = bytes(value, charset) return self._db.append(value, txn=txn) def get_size(self, key) : if isinstance(key, str) : key = bytes(key, charset) return self._db.get_size(key) def exists(self, key, *args, **kwargs) : if isinstance(key, str) : key = bytes(key, charset) return self._db.exists(key, *args, **kwargs) def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) : if isinstance(key, str) : key = bytes(key, charset) if default != "MagicCookie" : # Magic for 'test_get_none.py' v=self._db.get(key, default=default, txn=txn, flags=flags, dlen=dlen, doff=doff) else : v=self._db.get(key, txn=txn, flags=flags, dlen=dlen, doff=doff) if (v is not None) and isinstance(v, bytes) : v = v.decode(charset) return v def pget(self, key, txn=None) : if isinstance(key, str) : key = bytes(key, charset) v=self._db.pget(key, txn=txn) if v is not None : v1, v2 = v if isinstance(v1, bytes) : v1 = v1.decode(charset) v = (v1, v2.decode(charset)) return v def get_both(self, key, value, txn=None, flags=0) : if isinstance(key, str) : key = bytes(key, charset) if isinstance(value, str) : value = bytes(value, charset) v=self._db.get_both(key, value, txn=txn, flags=flags) if v is not None : v = v.decode(charset) return v def delete(self, key, txn=None) : if isinstance(key, str) : key = bytes(key, charset) return self._db.delete(key, 
txn=txn) def keys(self) : k = self._db.keys() if len(k) and isinstance(k[0], bytes) : return [i.decode(charset) for i in self._db.keys()] else : return k def items(self) : data = self._db.items() if not len(data) : return data data2 = [] for k, v in data : if isinstance(k, bytes) : k = k.decode(charset) data2.append((k, v.decode(charset))) return data2 def associate(self, secondarydb, callback, flags=0, txn=None) : class associate_callback(object) : def __init__(self, callback) : self._callback = callback def callback(self, key, data) : if isinstance(key, str) : key = key.decode(charset) data = data.decode(charset) key = self._callback(key, data) if (key != bsddb._db.DB_DONOTINDEX) : if isinstance(key, str) : key = bytes(key, charset) elif isinstance(key, list) : key2 = [] for i in key : if isinstance(i, str) : i = bytes(i, charset) key2.append(i) key = key2 return key return self._db.associate(secondarydb._db, associate_callback(callback).callback, flags=flags, txn=txn) def cursor(self, txn=None, flags=0) : return cursor_py3k(self._db, txn=txn, flags=flags) def join(self, cursor_list) : cursor_list = [i._dbcursor for i in cursor_list] return dup_cursor_py3k(self._db.join(cursor_list)) class DBEnv_py3k(object) : def __init__(self, *args, **kwargs) : self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs) def __getattr__(self, v) : return getattr(self._dbenv, v) def log_cursor(self, flags=0) : return logcursor_py3k(self._dbenv) def get_lg_dir(self) : return self._dbenv.get_lg_dir().decode(charset) def get_tmp_dir(self) : return self._dbenv.get_tmp_dir().decode(charset) def get_data_dirs(self) : return tuple( (i.decode(charset) for i in self._dbenv.get_data_dirs())) class DBSequence_py3k(object) : def __init__(self, db, *args, **kwargs) : self._db=db self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs) def __getattr__(self, v) : return getattr(self._dbsequence, v) def open(self, key, *args, **kwargs) : return self._dbsequence.open(bytes(key, charset), *args, **kwargs) def get_key(self) : return self._dbsequence.get_key().decode(charset) def get_dbp(self) : return self._db bsddb._db.DBEnv_orig = bsddb._db.DBEnv bsddb._db.DB_orig = bsddb._db.DB if bsddb.db.version() <= (4, 3) : bsddb._db.DBSequence_orig = None else : bsddb._db.DBSequence_orig = bsddb._db.DBSequence def do_proxy_db_py3k(flag) : flag2 = do_proxy_db_py3k.flag do_proxy_db_py3k.flag = flag if flag : bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k bsddb._db.DBSequence = DBSequence_py3k else : bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig bsddb._db.DBSequence = bsddb._db.DBSequence_orig return flag2 do_proxy_db_py3k.flag = False do_proxy_db_py3k(True) try: # For Pythons w/distutils pybsddb from bsddb3 import db, dbtables, dbutils, dbshelve, \ hashopen, btopen, rnopen, dbobj except ImportError: # For Python 2.3 from bsddb import db, dbtables, dbutils, dbshelve, \ hashopen, btopen, rnopen, dbobj try: from bsddb3 import test_support except ImportError: if sys.version_info[0] < 3 : from test import test_support else : from test import support as test_support try: if sys.version_info[0] < 3 : from threading import Thread, currentThread del Thread, currentThread else : from threading import Thread, current_thread del Thread, current_thread have_threads = True except ImportError: have_threads = False verbose = 0 if 'verbose' in sys.argv: verbose = 1 sys.argv.remove('verbose') if 'silent' 
in sys.argv: # take care of old flag, just in case verbose = 0 sys.argv.remove('silent') def print_versions(): print print '-=' * 38 print db.DB_VERSION_STRING print 'bsddb.db.version(): %s' % (db.version(), ) if db.version() >= (5, 0) : print 'bsddb.db.full_version(): %s' %repr(db.full_version()) print 'bsddb.db.__version__: %s' % db.__version__ print 'bsddb.db.cvsid: %s' % db.cvsid # Workaround for allowing generating an EGGs as a ZIP files. suffix="__" print 'py module: %s' % getattr(bsddb, "__file"+suffix) print 'extension module: %s' % getattr(bsddb, "__file"+suffix) print 'python version: %s' % sys.version print 'My pid: %s' % os.getpid() print '-=' * 38 def get_new_path(name) : get_new_path.mutex.acquire() try : import os path=os.path.join(get_new_path.prefix, name+"_"+str(os.getpid())+"_"+str(get_new_path.num)) get_new_path.num+=1 finally : get_new_path.mutex.release() return path def get_new_environment_path() : path=get_new_path("environment") import os try: os.makedirs(path,mode=0700) except os.error: test_support.rmtree(path) os.makedirs(path) return path def get_new_database_path() : path=get_new_path("database") import os if os.path.exists(path) : os.remove(path) return path # This path can be overriden via "set_test_path_prefix()". import os, os.path get_new_path.prefix=os.path.join(os.environ.get("TMPDIR", os.path.join(os.sep,"tmp")), "z-Berkeley_DB") get_new_path.num=0 def get_test_path_prefix() : return get_new_path.prefix def set_test_path_prefix(path) : get_new_path.prefix=path def remove_test_path_directory() : test_support.rmtree(get_new_path.prefix) if have_threads : import threading get_new_path.mutex=threading.Lock() del threading else : class Lock(object) : def acquire(self) : pass def release(self) : pass get_new_path.mutex=Lock() del Lock class PrintInfoFakeTest(unittest.TestCase): def testPrintVersions(self): print_versions() # This little hack is for when this module is run as main and all the # other modules import it so they will still be able to get the right # verbose setting. It's confusing but it works. if sys.version_info[0] < 3 : import test_all test_all.verbose = verbose else : import sys print >>sys.stderr, "Work to do!" def suite(module_prefix='', timing_check=None): test_modules = [ 'test_associate', 'test_basics', 'test_dbenv', 'test_db', 'test_compare', 'test_compat', 'test_cursor_pget_bug', 'test_dbobj', 'test_dbshelve', 'test_dbtables', 'test_distributed_transactions', 'test_early_close', 'test_fileid', 'test_get_none', 'test_join', 'test_lock', 'test_misc', 'test_pickle', 'test_queue', 'test_recno', 'test_replication', 'test_sequence', 'test_thread', ] alltests = unittest.TestSuite() for name in test_modules: #module = __import__(name) # Do it this way so that suite may be called externally via # python's Lib/test/test_bsddb3. module = __import__(module_prefix+name, globals(), locals(), name) alltests.addTest(module.test_suite()) if timing_check: alltests.addTest(unittest.makeSuite(timing_check)) return alltests def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(PrintInfoFakeTest)) return suite if __name__ == '__main__': print_versions() unittest.main(defaultTest='suite')
Jeff-Tian/mybnb
Python27/Lib/bsddb/test/test_all.py
Python
apache-2.0
19,765
from ptypes import *

v = 0

# FIXME: this file format is busted
class seq_parameter_set_rbsp(pbinary.struct):
    class __pic_order_type_1(pbinary.struct):
        _fields_ = [
            (1, 'delta_pic_order_always_zero_flag'),
            (v, 'offset_for_non_ref_pic'),
            (v, 'offset_for_top_to_bottom_field'),
            (v, 'num_ref_frames_in_pic_order_cnt_cycle'),
            (lambda s: dyn.array(
                dyn.clone(pbinary.struct, _fields_=[(v, 'offset_for_ref_frame')]),
                s['num_ref_frames_in_pic_order_cnt_cycle']), 'ref_frames')
        ]

    def __pic_order(self):
        type = self['pic_order_cnt_type']
        if type == 0:
            return dyn.clone(pbinary.struct, _fields_=[(v, 'log2_max_pic_order_cnt_lsb')])
        elif type == 1:
            return __pic_order_type_1
        raise NotImplementedError(type)

    class __frame_crop_offset(pbinary.struct):
        _fields_ = [
            (v, 'frame_crop_left_offset'),
            (v, 'frame_crop_right_offset'),
            (v, 'frame_crop_top_offset'),
            (v, 'frame_crop_bottom_offset'),
        ]

    def __frame_crop(self):
        if self['frame_cropping_flag']:
            return __frame_crop_offset
        return dyn.clone(pbinary.struct, _fields_=[])

    def __rbsp_trailing_bits(self):
        return 0

    _fields_ = [
        (8, 'profile_idc'),
        (1, 'constraint_set0_flag'),
        (1, 'constraint_set1_flag'),
        (1, 'constraint_set2_flag'),
        (5, 'reserved_zero_5bits'),
        (8, 'level_idc'),
        (v, 'seq_parameter_set_id'),
        (v, 'pic_order_cnt_type'),
        (__pic_order, 'pic_order'),
        (v, 'num_ref_frames'),
        (1, 'gaps_in_frame_num_value_allowed_flag'),
        (v, 'pic_width_in_mbs_minus1'),
        (v, 'pic_height_in_map_units_minus1'),
        (1, 'frame_mbs_only_flag'),
        (lambda s: [0, 1][s['frame_mbs_only_flag']], 'mb_adaptive_frame_field_flag'),
        (1, 'direct_8x8_inference_flag'),
        (1, 'frame_cropping_flag'),
        (__frame_crop, 'frame_crop'),
        (1, 'vul_parameters_present_flag'),
        (lambda s: [dyn.clone(pbinary.struct, _fields_=[]), __vul_parameters][s['vul_parameters_present_flag']], 'vul_parameters'),
        (__rbsp_trailing_bits, 'rbsp_trailing_bits'),
    ]
arizvisa/syringe
template/video/h264.py
Python
bsd-2-clause
2,279
# textGridworldDisplay.py
# -----------------------
# Licensing Information:  You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).


import util

class TextGridworldDisplay:

    def __init__(self, gridworld):
        self.gridworld = gridworld

    def start(self):
        pass

    def pause(self):
        pass

    def displayValues(self, agent, currentState = None, message = None):
        if message != None:
            print(message)
        values = util.Counter()
        policy = {}
        states = self.gridworld.getStates()
        for state in states:
            values[state] = agent.getValue(state)
            policy[state] = agent.getPolicy(state)
        prettyPrintValues(self.gridworld, values, policy, currentState)

    def displayNullValues(self, agent, currentState = None, message = None):
        if message != None:
            print(message)
        prettyPrintNullValues(self.gridworld, currentState)

    def displayQValues(self, agent, currentState = None, message = None):
        if message != None:
            print(message)
        qValues = util.Counter()
        states = self.gridworld.getStates()
        for state in states:
            for action in self.gridworld.getPossibleActions(state):
                qValues[(state, action)] = agent.getQValue(state, action)
        prettyPrintQValues(self.gridworld, qValues, currentState)


def prettyPrintValues(gridWorld, values, policy=None, currentState = None):
    grid = gridWorld.grid
    maxLen = 11
    newRows = []
    for y in range(grid.height):
        newRow = []
        for x in range(grid.width):
            state = (x, y)
            value = values[state]
            action = None
            if policy != None and state in policy:
                action = policy[state]
            actions = gridWorld.getPossibleActions(state)
            if action not in actions and 'exit' in actions:
                action = 'exit'
            valString = None
            if action == 'exit':
                valString = border('%.2f' % value)
            else:
                valString = '\n\n%.2f\n\n' % value
                valString += ' '*maxLen
            if grid[x][y] == 'S':
                valString = '\n\nS: %.2f\n\n' % value
                valString += ' '*maxLen
            if grid[x][y] == '#':
                valString = '\n#####\n#####\n#####\n'
                valString += ' '*maxLen
            pieces = [valString]
            text = ("\n".join(pieces)).split('\n')
            if currentState == state:
                l = len(text[1])
                if l == 0:
                    text[1] = '*'
                else:
                    text[1] = "|" + ' ' * int((l-1)/2-1) + '*' + ' ' * int((l)/2-1) + "|"
            if action == 'east':
                text[2] = '  ' + text[2] + ' >'
            elif action == 'west':
                text[2] = '< ' + text[2] + '  '
            elif action == 'north':
                text[0] = ' ' * int(maxLen/2) + '^' + ' ' * int(maxLen/2)
            elif action == 'south':
                text[4] = ' ' * int(maxLen/2) + 'v' + ' ' * int(maxLen/2)
            newCell = "\n".join(text)
            newRow.append(newCell)
        newRows.append(newRow)
    numCols = grid.width
    for rowNum, row in enumerate(newRows):
        row.insert(0, "\n\n"+str(rowNum))
    newRows.reverse()
    colLabels = [str(colNum) for colNum in range(numCols)]
    colLabels.insert(0, ' ')
    finalRows = [colLabels] + newRows
    print(indent(finalRows, separateRows=True, delim='|',
                 prefix='|', postfix='|', justify='center', hasHeader=True))


def prettyPrintNullValues(gridWorld, currentState = None):
    grid = gridWorld.grid
    maxLen = 11
    newRows = []
    for y in range(grid.height):
        newRow = []
        for x in range(grid.width):
            state = (x, y)

            # value = values[state]
            action = None
            # if policy != None and state in policy:
            #   action = policy[state]
            #
            actions = gridWorld.getPossibleActions(state)

            if action not in actions and 'exit' in actions:
                action = 'exit'

            valString = None
            # if action == 'exit':
            #   valString = border('%.2f' % value)
            # else:
            #   valString = '\n\n%.2f\n\n' % value
            #   valString += ' '*maxLen

            if grid[x][y] == 'S':
                valString = '\n\nS\n\n'
                valString += ' '*maxLen
            elif grid[x][y] == '#':
                valString = '\n#####\n#####\n#####\n'
                valString += ' '*maxLen
            elif type(grid[x][y]) == float or type(grid[x][y]) == int:
                valString = border('%.2f' % float(grid[x][y]))
            else:
                valString = border('  ')

            pieces = [valString]
            text = ("\n".join(pieces)).split('\n')
            if currentState == state:
                l = len(text[1])
                if l == 0:
                    text[1] = '*'
                else:
                    text[1] = "|" + ' ' * int((l-1)/2-1) + '*' + ' ' * int((l)/2-1) + "|"
            if action == 'east':
                text[2] = '  ' + text[2] + ' >'
            elif action == 'west':
                text[2] = '< ' + text[2] + '  '
            elif action == 'north':
                text[0] = ' ' * int(maxLen/2) + '^' + ' ' * int(maxLen/2)
            elif action == 'south':
                text[4] = ' ' * int(maxLen/2) + 'v' + ' ' * int(maxLen/2)
            newCell = "\n".join(text)
            newRow.append(newCell)
        newRows.append(newRow)
    numCols = grid.width
    for rowNum, row in enumerate(newRows):
        row.insert(0, "\n\n"+str(rowNum))
    newRows.reverse()
    colLabels = [str(colNum) for colNum in range(numCols)]
    colLabels.insert(0, ' ')
    finalRows = [colLabels] + newRows
    print(indent(finalRows, separateRows=True, delim='|',
                 prefix='|', postfix='|', justify='center', hasHeader=True))


def prettyPrintQValues(gridWorld, qValues, currentState=None):
    grid = gridWorld.grid
    maxLen = 11
    newRows = []
    for y in range(grid.height):
        newRow = []
        for x in range(grid.width):
            state = (x, y)
            actions = gridWorld.getPossibleActions(state)
            if actions == None or len(actions) == 0:
                actions = [None]
            bestQ = max([qValues[(state, action)] for action in actions])
            bestActions = [action for action in actions if qValues[(state, action)] == bestQ]

            # display cell
            qStrings = dict([(action, "%.2f" % qValues[(state, action)]) for action in actions])
            northString = ('north' in qStrings and qStrings['north']) or ' '
            southString = ('south' in qStrings and qStrings['south']) or ' '
            eastString = ('east' in qStrings and qStrings['east']) or ' '
            westString = ('west' in qStrings and qStrings['west']) or ' '
            exitString = ('exit' in qStrings and qStrings['exit']) or ' '

            eastLen = len(eastString)
            westLen = len(westString)
            if eastLen < westLen:
                eastString = ' '*(westLen-eastLen)+eastString
            if westLen < eastLen:
                westString = westString+' '*(eastLen-westLen)

            if 'north' in bestActions:
                northString = '/'+northString+'\\'
            if 'south' in bestActions:
                southString = '\\'+southString+'/'
            if 'east' in bestActions:
                eastString = ''+eastString+'>'
            else:
                eastString = ''+eastString+' '
            if 'west' in bestActions:
                westString = '<'+westString+''
            else:
                westString = ' '+westString+''
            if 'exit' in bestActions:
                exitString = '[ '+exitString+' ]'

            ewString = westString + "     " + eastString
            if state == currentState:
                ewString = westString + '   *   ' + eastString
            if state == gridWorld.getStartState():
                ewString = westString + '   S   ' + eastString
            if state == currentState and state == gridWorld.getStartState():
                ewString = westString + '  S:*  ' + eastString

            text = [northString, "\n"+exitString, ewString, ' '*maxLen+"\n", southString]

            if grid[x][y] == '#':
                text = ['', '\n#####\n#####\n#####', '']

            newCell = "\n".join(text)
            newRow.append(newCell)
        newRows.append(newRow)
    numCols = grid.width
    for rowNum, row in enumerate(newRows):
        row.insert(0, "\n\n\n"+str(rowNum))
    newRows.reverse()
    colLabels = [str(colNum) for colNum in range(numCols)]
    colLabels.insert(0, ' ')
    finalRows = [colLabels] + newRows
    print(indent(finalRows, separateRows=True, delim='|',
                 prefix='|', postfix='|', justify='center', hasHeader=True))


def border(text):
    length = len(text)
    pieces = ['-' * (length+2), '|'+' ' * (length+2)+'|', ' | '+text+' | ',
              '|'+' ' * (length+2)+'|', '-' * (length+2)]
    return '\n'.join(pieces)

# INDENTING CODE

# Indenting code based on a post from George Sakkis
# (http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662)

import operator
from functools import reduce          # reduce is not a builtin in Python 3
from itertools import zip_longest     # Python 3 equivalent of map(None, ...)
from io import StringIO

def indent(rows, hasHeader=False, headerChar='-', delim=' | ', justify='left',
           separateRows=False, prefix='', postfix='', wrapfunc=lambda x: x):
    """Indents a table by column.
       - rows: A sequence of sequences of items, one sequence per row.
       - hasHeader: True if the first row consists of the columns' names.
       - headerChar: Character to be used for the row separator line
         (if hasHeader==True or separateRows==True).
       - delim: The column delimiter.
       - justify: Determines how are data justified in their column.
         Valid values are 'left','right' and 'center'.
       - separateRows: True if rows are to be separated by a line
         of 'headerChar's.
       - prefix: A string prepended to each printed row.
       - postfix: A string appended to each printed row.
       - wrapfunc: A function f(text) for wrapping text; each element in
         the table is first wrapped by this function."""
    # closure for breaking logical rows to physical, using wrapfunc
    def rowWrapper(row):
        newRows = [wrapfunc(item).split('\n') for item in row]
        # zip_longest pads shorter columns with None, which the inner
        # comprehension turns into empty strings
        return [[substr or '' for substr in item] for item in zip_longest(*newRows)]
    # break each logical row into one or more physical ones
    logicalRows = [rowWrapper(row) for row in rows]
    # columns of physical rows
    columns = zip_longest(*reduce(operator.add, logicalRows), fillvalue='')
    # get the maximum of each column by the string length of its items
    maxWidths = [max([len(str(item)) for item in column]) for column in columns]
    rowSeparator = headerChar * (len(prefix) + len(postfix) + sum(maxWidths) + \
                                 len(delim)*(len(maxWidths)-1))
    # select the appropriate justify method
    justify = {'center': str.center, 'right': str.rjust, 'left': str.ljust}[justify.lower()]
    output = StringIO()
    if separateRows:
        print(rowSeparator, file=output)
    for physicalRows in logicalRows:
        for row in physicalRows:
            print(prefix
                  + delim.join([justify(str(item), width) for (item, width) in zip(row, maxWidths)])
                  + postfix, file=output)
        if separateRows or hasHeader:
            print(rowSeparator, file=output); hasHeader = False
    return output.getvalue()

import math
def wrap_always(text, width):
    """A simple word-wrap function that wraps text on exactly width characters.
       It doesn't split the text in words."""
    return '\n'.join([text[width*i:width*(i+1)]
                      for i in range(int(math.ceil(1.*len(text)/width)))])


# TEST OF DISPLAY CODE

if __name__ == '__main__':
    import gridworld, util

    grid = gridworld.getCliffGrid3()
    print(grid.getStates())

    policy = dict([(state, 'east') for state in grid.getStates()])
    values = util.Counter(dict([(state, 1000.23) for state in grid.getStates()]))
    prettyPrintValues(grid, values, policy, currentState = (0, 0))

    stateCrossActions = [[(state, action) for action in grid.getPossibleActions(state)] for state in grid.getStates()]
    qStates = reduce(lambda x, y: x+y, stateCrossActions, [])
    qValues = util.Counter(dict([((state, action), 10.5) for state, action in qStates]))
    qValues = util.Counter(dict([((state, action), 10.5) for state, action in reduce(lambda x, y: x+y, stateCrossActions, [])]))
    prettyPrintQValues(grid, qValues, currentState = (0, 0))
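# Hedged usage sketch (editor's addition, not part of the original module):
# the `indent` helper above is a generic plain-text table formatter, so it
# can be exercised directly with made-up rows and labels.
def _indent_demo():
    labels = ['state', 'value']
    rows = [['(0, 0)', '1000.23'], ['(1, 0)', '998.10']]
    return indent([labels] + rows, hasHeader=True, separateRows=True,
                  delim=' | ', prefix='|', postfix='|', justify='center')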
hsgui/interest-only
deeplearning/reinforcementlearning/textGridworldDisplay.py
Python
gpl-2.0
13,229
from django.conf.urls import url

from .conf import settings
from .views import (
    BlogIndexView,
    DateBasedPostDetailView,
    ManageCreatePost,
    ManageDeletePost,
    ManagePostList,
    ManageUpdatePost,
    SecretKeyPostDetailView,
    SectionIndexView,
    SlugUniquePostDetailView,
    StaffPostDetailView,
    ajax_preview,
    blog_feed,
)

app_name = "pinax_blog"

urlpatterns = [
    url(r"^$", BlogIndexView.as_view(), name="blog"),
    url(r"^section/(?P<section>[-\w]+)/$", SectionIndexView.as_view(), name="blog_section"),
    url(r"^post/(?P<post_pk>\d+)/$", StaffPostDetailView.as_view(), name="blog_post_pk"),
    url(r"^post/(?P<post_secret_key>\w+)/$", SecretKeyPostDetailView.as_view(), name="blog_post_secret"),
    url(r"^feed/(?P<section>[-\w]+)/(?P<feed_type>[-\w]+)/$", blog_feed, name="blog_feed"),

    # authoring
    url(r"^manage/posts/$", ManagePostList.as_view(), name="manage_post_list"),
    url(r"^manage/posts/create/$", ManageCreatePost.as_view(), name="manage_post_create"),
    url(r"^manage/posts/(?P<post_pk>\d+)/update/$", ManageUpdatePost.as_view(), name="manage_post_update"),
    url(r"^manage/posts/(?P<post_pk>\d+)/delete/$", ManageDeletePost.as_view(), name="manage_post_delete"),
    url(r"^ajax/markdown/preview/$", ajax_preview, name="ajax_preview")
]

if settings.PINAX_BLOG_SLUG_UNIQUE:
    urlpatterns += [
        url(r"^(?P<post_slug>[-\w]+)/$", SlugUniquePostDetailView.as_view(), name="blog_post_slug")
    ]
else:
    urlpatterns += [
        url(r"^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$",
            DateBasedPostDetailView.as_view(), name="blog_post"),
    ]
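# Hedged usage sketch (editor's addition): because the module sets
# app_name = "pinax_blog", the named patterns above reverse under that
# namespace when this urlconf is included in a project.  The pk value and
# the assumption that the app is mounted at the site root are illustrative.
# `django.urls.reverse` is the modern import; older Django versions used
# `django.core.urlresolvers.reverse`.
#
#     from django.urls import reverse
#     reverse("pinax_blog:blog_post_pk", kwargs={"post_pk": 1})  # -> "/post/1/"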
pinax/pinax-blog
pinax/blog/urls.py
Python
mit
1,697
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Water Supply model
- WaterSupplySectorModel implements SectorModel
- wraps ExampleWaterSupplySimulationModel
- instantiate a model instance
"""

import logging

import numpy as np
from smif.model.sector_model import SectorModel


class WaterSupplySectorModel(SectorModel):
    """Example of a class implementing the SectorModel interface,
    using one of the toy water models below to simulate the water supply
    system.
    """

    def simulate(self, data):
        """Simulate water supply

        Arguments
        =========
        data
            - inputs/parameters, implicitly includes:
                - scenario data, e.g. expected level of rainfall
                - data output from other models in workflow
                - parameters set or adjusted for this model run
                - system state data, e.g. reservoir level at year start
            - system state, implicitly includes:
                - initial existing system/network
                - decisions, e.g. asset build instructions, demand-side
                  interventions to apply
        """
        # State
        current_interventions = data.get_current_interventions()
        self.logger.debug("Current interventions: {}".format(current_interventions))
        number_of_treatment_plants = 2

        # Inputs
        per_capita_water_demand = data.get_parameter(
            'per_capita_water_demand').as_ndarray()  # liter/person

        population = data.get_data('population').as_ndarray()  # people
        water_demand = data.get_data('water_demand').as_ndarray()  # liter
        final_water_demand = (population * per_capita_water_demand) + water_demand

        raininess = sum(data.get_data('precipitation').as_ndarray())  # milliliters to mega

        if data.current_timestep == data.base_timestep:
            reservoir_level = data.get_data('reservoir_level', 2009)
        else:
            reservoir_level = data.get_previous_timestep_data('reservoir_level')
        reservoir_level = sum(reservoir_level.as_ndarray())  # megaliters

        self.logger.info('Total reservoir level before timestep: %s', reservoir_level)
        self.logger.debug(
            "Parameters:\n "
            "Population: %s\n"
            "Raininess: %s\n "
            "Reservoir level: %s\n "
            "Final demand: %s\n",
            population.sum(), raininess.sum(), reservoir_level, final_water_demand
        )

        # Parameters
        self.logger.debug(data.get_parameters())

        # simulate (wrapping toy model)
        instance = ExampleWaterSupplySimulationModel()
        instance.raininess = raininess
        instance.number_of_treatment_plants = number_of_treatment_plants
        instance.reservoir_level = reservoir_level

        # run
        water, cost = instance.run()
        self.logger.info(
            "Water: %s, Cost: %s, Reservoir: %s", water, cost, instance.reservoir_level)

        # set results
        data.set_results('water', np.ones((3, )) * water / 3)
        data.set_results("cost", np.ones((3, )) * cost / 3)
        data.set_results("energy_demand", np.ones((3, )) * 3)
        data.set_results("reservoir_level", np.ones((3, )) * instance.reservoir_level / 3)

    def extract_obj(self, results):
        return results['cost'].sum()


class ExampleWaterSupplySimulationModel(object):
    """An example simulation model used for testing purposes

    Parameters
    ==========
    raininess : int
        The amount of rain produced in each simulation
    number_of_treatment_plants : int
        The amount of water is a function of the number of treatment plants
        and the amount of raininess
    """
    def __init__(self, raininess=None, number_of_treatment_plants=None,
                 reservoir_level=None):
        self.raininess = raininess
        self.number_of_treatment_plants = number_of_treatment_plants
        self.reservoir_level = reservoir_level

    def run(self):
        """Runs the water supply model

        Only 1 unit of water is produced per treatment plant,
        no matter how rainy.

        Each treatment plant costs 1.264 units.
        """
        logger = logging.getLogger(__name__)

        logger.debug("There are %s plants", self.number_of_treatment_plants)
        logger.debug("It is %s rainy", self.raininess)

        logger.debug("Reservoir level was %s", self.reservoir_level)
        self.reservoir_level += self.raininess

        water = min(self.number_of_treatment_plants, self.reservoir_level)
        logger.debug("The system produces %s water", water)

        self.reservoir_level -= water
        logger.debug("Reservoir level now %s", self.reservoir_level)

        cost = 1.264 * self.number_of_treatment_plants
        logger.debug("The system costs £%s", cost)

        return water, cost


if __name__ == '__main__':
    """Run core model if this script is run from the command line
    """
    CORE_MODEL = ExampleWaterSupplySimulationModel(1, 1, 2)
    CORE_MODEL.run()
nismod/smif
src/smif/sample_project/models/water_supply.py
Python
mit
5,099
import _plotly_utils.basevalidators


class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(self, plotly_name="hoverinfosrc", parent_name="treemap", **kwargs):
        super(HoverinfosrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
plotly/python-api
packages/python/plotly/plotly/validators/treemap/_hoverinfosrc.py
Python
mit
453
from datetime import datetime, timedelta from django.contrib.auth import get_user_model from django.contrib.auth.models import Group from django.utils import timezone import factory from . import models UserModel = get_user_model() class UserFactory(factory.django.DjangoModelFactory): email = factory.Sequence(lambda n: 'email_{0}@gmail.com'.format(n)) first_name = factory.Sequence(lambda n: 'first_name_{0}'.format(n)) last_name = factory.Sequence(lambda n: 'last_name_{0}'.format(n)) password = first_name cell_phone = first_name cell_phone_is_valid = True class Meta: model = UserModel @classmethod def _create(cls, model_class, *args, **kwargs): """Override the default ``_create`` with our custom call.""" manager = cls._get_manager(model_class) # The default would use ``manager.create(*args, **kwargs)`` return manager.create_user(*args, **kwargs) class SiteFactory(factory.django.DjangoModelFactory): class Meta: model = models.Site name = factory.Sequence(lambda n: 'name_{0}'.format(n)) description = factory.Sequence(lambda n: 'description_{0}'.format(n)) address = factory.Sequence(lambda n: 'address_{0}'.format(n)) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group @factory.post_generation def groups(self, create, extracted, **kwargs): if not create: # Simple build, do nothing. return self.user_set.add(UserFactory()) class StaffRoleFactory(factory.django.DjangoModelFactory): class Meta: model = models.StaffRole class ActivityTypeFactory(factory.django.DjangoModelFactory): default_role = factory.SubFactory(StaffRoleFactory) class Meta: model = models.ActivityType class CourtFactory(factory.django.DjangoModelFactory): class Meta: model = models.Court site = factory.SubFactory(SiteFactory) description = factory.Sequence(lambda n: 'description_{0}'.format(n)) admin_group = factory.SubFactory(GroupFactory) activity_type = factory.SubFactory(ActivityTypeFactory) class EventFactory(factory.django.DjangoModelFactory): class Meta: model = models.Event preliminary_price = factory.Sequence(lambda n: n * 10) start_at = factory.Sequence(lambda n: timezone.now() + timedelta(days=n)) description = factory.Sequence(lambda n: 'description_{0}'.format(n)) court = factory.SubFactory(CourtFactory) class VisitFactory(factory.django.DjangoModelFactory): class Meta: model = models.Visit user = factory.SubFactory(UserFactory) event = factory.SubFactory(EventFactory) class ApplicationFactory(factory.django.DjangoModelFactory): comment = factory.Sequence(lambda n: 'comment_{0}'.format(n)) user = factory.SubFactory(UserFactory) event = factory.SubFactory(EventFactory) status = models.ApplicationStatuses.ACTIVE class Meta: model = models.Application class ProposalFactory(factory.django.DjangoModelFactory): comment = factory.Sequence(lambda n: 'comment_{0}'.format(n)) user = factory.SubFactory(UserFactory) event = factory.SubFactory(EventFactory) status = models.ProposalStatuses.ACTIVE class Meta: model = models.Proposal
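# Hedged usage sketch (editor's addition, assumes Django settings and a test
# database are configured, e.g. inside this project's TestCase classes):
# each factory call persists a row, Sequence fields number themselves, and
# SubFactory fields cascade related objects automatically.
def _factories_demo():
    user = UserFactory()                         # persisted via create_user()
    event = EventFactory(preliminary_price=0)    # cascades Court -> Site/Group
    visits = VisitFactory.create_batch(3, event=event)
    return user, event, visits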
oleg-chubin/let_me_play
let_me_app/factories.py
Python
apache-2.0
3,328
import numpy as np import cudarray as ca from ..feedforward.layers import Activation, FullyConnected from ..loss import Loss from ..base import Model, PickleMixin from ..input import Input from ..parameter import Parameter class Autoencoder(Model, PickleMixin): def __init__(self, n_out, weights, bias=0.0, bias_prime=0.0, activation='sigmoid', loss='bce'): self.name = 'autoenc' self.n_out = n_out self.activation = Activation(activation) self.activation_decode = Activation(activation) self.loss = Loss.from_any(loss) self.weights = Parameter.from_any(weights) self.bias = Parameter.from_any(bias) self.bias_prime = Parameter.from_any(bias_prime) self._initialized = False self._tmp_x = None self._tmp_y = None def _setup(self, input): if self._initialized: return next_shape = input.x_shape n_in = next_shape[1] self.weights._setup((n_in, self.n_out)) if not self.weights.name: self.weights.name = self.name + '_w' self.bias._setup(self.n_out) if not self.bias.name: self.bias.name = self.name + '_b' self.bias_prime._setup(n_in) if not self.bias_prime.name: self.bias_prime.name = self.name + '_b_prime' self.loss._setup((next_shape[0], self.n_out)) self._initialized = True @property def _params(self): return self.weights, self.bias, self.bias_prime @_params.setter def _params(self, params): self.weights, self.bias, self.bias_prime = params def output_shape(self, input_shape): return (input_shape[0], self.n_out) def encode(self, x): self._tmp_x = x y = ca.dot(x, self.weights.array) + self.bias.array return self.activation.fprop(y, '') def decode(self, y): self._tmp_y = y x = ca.dot(y, self.weights.array.T) + self.bias_prime.array return self.activation_decode.fprop(x, '') def decode_bprop(self, x_grad): x_grad = self.activation_decode.bprop(x_grad) ca.dot(x_grad.T, self._tmp_y, out=self.weights.grad_array) ca.sum(x_grad, axis=0, out=self.bias_prime.grad_array) return ca.dot(x_grad, self.weights.array) def encode_bprop(self, y_grad): y_grad = self.activation.bprop(y_grad) # Because the weight gradient has already been updated by # decode_bprop() we must add the contribution. w_grad = self.weights.grad_array w_grad += ca.dot(self._tmp_x.T, y_grad) ca.sum(y_grad, axis=0, out=self.bias.grad_array) return ca.dot(y_grad, self.weights.array.T) def _update(self, x): y_prime = self.encode(x) x_prime = self.decode(y_prime) x_prime_grad = self.loss.grad(x, x_prime) y_grad = self.decode_bprop(x_prime_grad) self.encode_bprop(y_grad) return self.loss.loss(x, x_prime) def _reconstruct_batch(self, x): y = self.encode(x) return self.decode(y) def reconstruct(self, input): """ Returns the reconstructed input. """ input = Input.from_any(input) x_prime = np.empty(input.x.shape) offset = 0 for x_batch in input.batches('test'): x_prime_batch = np.array(self._reconstruct_batch(x_batch)) batch_size = x_prime_batch.shape[0] x_prime[offset:offset+batch_size, ...] = x_prime_batch offset += batch_size return x_prime def _embed_batch(self, x): return self.encode(x) def embed(self, input): """ Returns the embedding of the input. """ input = Input.from_any(input) y = np.empty(self.output_shape(input.x.shape)) offset = 0 for x_batch in input.batches('test'): y_batch = np.array(self._embed_batch(x_batch)) batch_size = y_batch.shape[0] y[offset:offset+batch_size, ...] 
= y_batch offset += batch_size return y def feedforward_layers(self): return [FullyConnected(self.n_out, self.weights.array, self.bias.array), self.activation] class DenoisingAutoencoder(Autoencoder): def __init__(self, n_out, weights, bias=0.0, bias_prime=0.0, corruption=0.25, activation='sigmoid', loss='bce'): super(DenoisingAutoencoder, self).__init__( n_out=n_out, weights=weights, bias=bias, bias_prime=bias_prime, activation=activation, loss=loss ) self.corruption = corruption def corrupt(self, x): mask = ca.random.uniform(size=x.shape) < (1-self.corruption) return x * mask def _update(self, x): x_tilde = self.corrupt(x) y_prime = self.encode(x_tilde) x_prime = self.decode(y_prime) x_prime_grad = self.loss.grad(x, x_prime) y_grad = self.decode_bprop(x_prime_grad) self.encode_bprop(y_grad) return self.loss.loss(x, x_prime)
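# Hedged NumPy-only sketch (editor's addition, not deeppy's API) of the
# tied-weight update implemented by Autoencoder._update above, assuming the
# default sigmoid activations and binary cross-entropy loss, for which the
# gradient at the decoder output reduces to (x_prime - x) once the decoder's
# sigmoid derivative is folded in.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def tied_autoencoder_step(x, W, b, b_prime, lr=0.1):
    # forward: W has shape (n_in, n_out) and is shared between both passes
    y = _sigmoid(x @ W + b)                  # encode
    x_prime = _sigmoid(y @ W.T + b_prime)    # decode through W transposed
    # backward
    x_grad = x_prime - x                     # BCE + sigmoid shortcut
    W_grad = x_grad.T @ y                    # decoder contribution (decode_bprop)
    b_prime_grad = x_grad.sum(axis=0)
    y_grad = (x_grad @ W) * y * (1.0 - y)    # through the encoder activation
    W_grad += x.T @ y_grad                   # encoder contribution is *added*
    b_grad = y_grad.sum(axis=0)
    # plain gradient-descent update
    W -= lr * W_grad
    b -= lr * b_grad
    b_prime -= lr * b_prime_grad
    return W, b, b_prime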
lre/deeppy
deeppy/autoencoder/autoencoder.py
Python
mit
5,076
import unittest import urllib from nose.tools import nottest from mock import patch from lsapi import lsapi from tests.mocks.ls_socket import SocketMocks class LsapiHostsTestCase(unittest.TestCase): version = 'v1' testhost = 'host-aut-1af.example.com' host_filter_correct = '{"eq":["display_name","%s"]}' % testhost host_filter_incorrect = '{"bad":[' host_filter_wrong_operator = '{"nex":["display_name","%s"]}' % testhost host_columns_correct = '["display_name", "address"]' host_columns_incorrect = '["learn_to_write_json' host_columns_not_a_list = '{"cols":["display_name", "address"]}' downtime_data = '''{ "downtime": { "start_time": 1757940301, "end_time": 1789476309, "author": "coelmueller", "comment": "a test downtime" } }''' def setUp(self): lsapi.app.config['TESTING'] = True self.app = lsapi.app.test_client() self.verify_downtimes_timeout = 0.1 self.orig_verify_downtimes = lsapi.ls_query.verify_downtimes lsapi.ls_query.verify_downtimes = self.mock_verify_downtimes def tearDown(self): lsapi.ls_query.verify_downtimes = self.orig_verify_downtimes @nottest def mock_verify_downtimes(self, count): return self.orig_verify_downtimes(count, timeout=self.verify_downtimes_timeout) # /hosts GET endpoint without parameter @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get(self): response = self.app.get('%s/hosts' % self.version) assert response.status_code == 200 assert self.testhost in response.data # /hosts GET endpoint with a correct filter parameter @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_correct_filter_parameter(self): get_parameter = urllib.quote_plus('%s' % self.host_filter_correct) response = self.app.get('%s/hosts?filter=%s' % (self.version, get_parameter)) assert response.status_code == 200 assert self.testhost in response.data # /hosts GET endpoint with an incorrect filter parameter (faulty json) @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_faulty_filter_parameter(self): get_parameter = urllib.quote_plus('%s' % self.host_filter_incorrect) response = self.app.get('%s/hosts?filter=%s' % (self.version, get_parameter)) assert response.status_code == 400 assert "filter parameter can't be parsed as json" in response.data # /hosts GET endpoint with an incorrect filter parameter (unknown operator) @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_unknown_filter_parameter(self): get_parameter = urllib.quote_plus('%s' % self.host_filter_wrong_operator) response = self.app.get('%s/hosts?filter=%s' % (self.version, get_parameter)) assert response.status_code == 400 assert "wrong bool filter" in response.data # /hosts GET endpoint with a correct columns parameter # livestatus decides, which columns are returned, so in a mocked world, we don't see any difference @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_correct_columns_parameter(self): get_parameter = urllib.quote_plus('%s' % self.host_columns_correct) response = self.app.get('%s/hosts?columns=%s' % (self.version, get_parameter)) assert response.status_code == 200 assert self.testhost in response.data # /hosts GET endpoint with a incorrect columns parameter (faulty json) # should return 400/BAD_REQUEST @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_incorrect_columns_parameter(self): get_parameter = urllib.quote_plus('%s' % 
self.host_columns_incorrect) response = self.app.get('%s/hosts?columns=%s' % (self.version, get_parameter)) assert response.status_code == 400 assert "cannot parse columns parameter" in response.data # /hosts GET endpoint with a incorrect columns parameter (not resulting in list) # should return 400/BAD_REQUEST @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_get_with_non_list_columns_parameter(self): get_parameter = urllib.quote_plus('%s' % self.host_columns_not_a_list) response = self.app.get('%s/hosts?columns=%s' % (self.version, get_parameter)) assert response.status_code == 400 assert "can't convert parameter columns to a list" in response.data # /hosts POST endpoint without parameter. Should return 400/BAD_REQUEST, since a filter parameter is required @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_post(self): response = self.app.post('%s/hosts' % self.version, data=self.downtime_data) assert response.status_code == 400 assert "no filter given, not setting downtime on all hosts" in response.data # /hosts POST endpoint with parameter. Should return 500/INTERNAL_SERVER_ERROR, # with message: "downtimes not found within 5 seconds" # thats ok, because we actually don't set any downtime @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_post(self): get_parameter = urllib.quote_plus('%s' % self.host_filter_correct) response = self.app.post('%s/hosts?filter=%s' % (self.version, get_parameter), data=self.downtime_data) assert response.status_code == 500 assert "downtimes not found within {timeout} seconds".format( timeout=self.verify_downtimes_timeout) in response.data # /hosts/{hostname} GET endpoint @patch('lsapi.lsapi.ls_query.ls_accessor', new=SocketMocks('lsquery mock')) def test_hosts_hostname_get(self): # not really verifying the output, since it depends on a filtered livestatus output, which isn't # handled by this app itself but by livestatus. We're using a mocked livestatus response response = self.app.get('%s/hosts/%s' % (self.version, self.testhost)) assert response.status_code == 200 assert self.testhost in response.data
zwopiR/lsapi
tests/lsapi_hosts_tests.py
Python
mit
6,431
# $Id: body.py 7267 2011-12-20 14:14:21Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ Directives for additional body elements. See `docutils.parsers.rst.directives` for API details. """ __docformat__ = 'reStructuredText' import sys from docutils import nodes from docutils.parsers.rst import Directive from docutils.parsers.rst import directives from docutils.parsers.rst.roles import set_classes from docutils.utils.code_analyzer import Lexer, LexerError, NumberLines class BasePseudoSection(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True option_spec = {'class': directives.class_option, 'name': directives.unchanged} has_content = True node_class = None """Node class to be used (must be set in subclasses).""" def run(self): if not (self.state_machine.match_titles or isinstance(self.state_machine.node, nodes.sidebar)): raise self.error('The "%s" directive may not be used within ' 'topics or body elements.' % self.name) self.assert_has_content() title_text = self.arguments[0] textnodes, messages = self.state.inline_text(title_text, self.lineno) titles = [nodes.title(title_text, '', *textnodes)] # Sidebar uses this code. if 'subtitle' in self.options: textnodes, more_messages = self.state.inline_text( self.options['subtitle'], self.lineno) titles.append(nodes.subtitle(self.options['subtitle'], '', *textnodes)) messages.extend(more_messages) text = '\n'.join(self.content) node = self.node_class(text, *(titles + messages)) node['classes'] += self.options.get('class', []) self.add_name(node) if text: self.state.nested_parse(self.content, self.content_offset, node) return [node] class Topic(BasePseudoSection): node_class = nodes.topic class Sidebar(BasePseudoSection): node_class = nodes.sidebar option_spec = BasePseudoSection.option_spec.copy() option_spec['subtitle'] = directives.unchanged_required def run(self): if isinstance(self.state_machine.node, nodes.sidebar): raise self.error('The "%s" directive may not be used within a ' 'sidebar element.' % self.name) return BasePseudoSection.run(self) class LineBlock(Directive): option_spec = {'class': directives.class_option, 'name': directives.unchanged} has_content = True def run(self): self.assert_has_content() block = nodes.line_block(classes=self.options.get('class', [])) self.add_name(block) node_list = [block] for line_text in self.content: text_nodes, messages = self.state.inline_text( line_text.strip(), self.lineno + self.content_offset) line = nodes.line(line_text, '', *text_nodes) if line_text.strip(): line.indent = len(line_text) - len(line_text.lstrip()) block += line node_list.extend(messages) self.content_offset += 1 self.state.nest_line_block_lines(block) return node_list class ParsedLiteral(Directive): option_spec = {'class': directives.class_option, 'name': directives.unchanged} has_content = True def run(self): set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) text_nodes, messages = self.state.inline_text(text, self.lineno) node = nodes.literal_block(text, '', *text_nodes, **self.options) node.line = self.content_offset + 1 self.add_name(node) return [node] + messages class CodeBlock(Directive): """Parse and mark up content of a code block. Configuration setting: syntax_highlight Highlight Code content with Pygments? 
Possible values: ('long', 'short', 'none') """ optional_arguments = 1 option_spec = {'class': directives.class_option, 'name': directives.unchanged, 'number-lines': directives.unchanged # integer or None } has_content = True def run(self): self.assert_has_content() if self.arguments: language = self.arguments[0] else: language = '' set_classes(self.options) classes = ['code'] if language: classes.append(language) if 'classes' in self.options: classes.extend(self.options['classes']) # set up lexical analyzer try: tokens = Lexer(u'\n'.join(self.content), language, self.state.document.settings.syntax_highlight) except LexerError, error: raise self.warning(error) if 'number-lines' in self.options: # optional argument `startline`, defaults to 1 try: startline = int(self.options['number-lines'] or 1) except ValueError: raise self.error(':number-lines: with non-integer start value') endline = startline + len(self.content) # add linenumber filter: tokens = NumberLines(tokens, startline, endline) node = nodes.literal_block('\n'.join(self.content), classes=classes) self.add_name(node) # if called from "include", set the source if 'source' in self.options: node.attributes['source'] = self.options['source'] # analyze content and add nodes for every token for classes, value in tokens: # print (classes, value) if classes: node += nodes.inline(value, value, classes=classes) else: # insert as Text to decrease the verbosity of the output node += nodes.Text(value, value) return [node] class MathBlock(Directive): option_spec = {'class': directives.class_option, 'name': directives.unchanged} ## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'? # 'nowrap': directives.flag, has_content = True def run(self): set_classes(self.options) self.assert_has_content() # join lines, separate blocks content = '\n'.join(self.content).split('\n\n') _nodes = [] for block in content: if not block: continue node = nodes.math_block(self.block_text, block, **self.options) node.line = self.content_offset + 1 self.add_name(node) _nodes.append(node) return _nodes class Rubric(Directive): required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True option_spec = {'class': directives.class_option, 'name': directives.unchanged} def run(self): set_classes(self.options) rubric_text = self.arguments[0] textnodes, messages = self.state.inline_text(rubric_text, self.lineno) rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options) self.add_name(rubric) return [rubric] + messages class BlockQuote(Directive): has_content = True classes = [] def run(self): self.assert_has_content() elements = self.state.block_quote(self.content, self.content_offset) for element in elements: if isinstance(element, nodes.block_quote): element['classes'] += self.classes return elements class Epigraph(BlockQuote): classes = ['epigraph'] class Highlights(BlockQuote): classes = ['highlights'] class PullQuote(BlockQuote): classes = ['pull-quote'] class Compound(Directive): option_spec = {'class': directives.class_option, 'name': directives.unchanged} has_content = True def run(self): self.assert_has_content() text = '\n'.join(self.content) node = nodes.compound(text) node['classes'] += self.options.get('class', []) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node] class Container(Directive): optional_arguments = 1 final_argument_whitespace = True option_spec = {'name': directives.unchanged} has_content = True def run(self): self.assert_has_content() text = '\n'.join(self.content) try: if 
self.arguments: classes = directives.class_option(self.arguments[0]) else: classes = [] except ValueError: raise self.error( 'Invalid class attribute value for "%s" directive: "%s".' % (self.name, self.arguments[0])) node = nodes.container(text) node['classes'].extend(classes) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node]
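# Hedged usage sketch (editor's addition): docutils registers the standard
# directives above itself; a third-party subclass would be registered with
# directives.register_directive(). `Aside` and the 'aside' name are
# hypothetical, for illustration only.
class Aside(BlockQuote):
    classes = ['aside']

directives.register_directive('aside', Aside)
# In reST source:
#   .. aside::
#
#      This paragraph is parsed into a block_quote node with class "aside".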
JulienMcJay/eclock
windows/Python27/Lib/site-packages/docutils/parsers/rst/directives/body.py
Python
gpl-2.0
9,243
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br) # # This file is part of libSigNetSim. # # libSigNetSim is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # libSigNetSim is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>. """ This file ... """ from numpy import amin, amax, linspace, interp from libsignetsim.data.ExperimentalData import ExperimentalData class ListOfExperimentalData(dict): def __init__(self): self.currentId = 0 def add(self, experimental_data): self.update({self.currentId: experimental_data}) self.currentId += 1 def readNuML(self, list_of_data): for data in list_of_data.getContents(): exp_data = ExperimentalData() exp_data.readNuML(data) self.add(exp_data) def writeNuML(self, composite_value): for data in list(self.values()): data.writeNuML(composite_value) def getMaxTime(self): max_time = 0 for data in list(self.values()): if data.t > max_time: max_time = data.t return max_time def getSpecies(self): """ Returns an array of sbml ids""" species = [] for data in list(self.values()): species.append(data.name) return list(set(species)) def getTimes(self): times = [] for data in list(self.values()): times.append(data.t) return times def getByVariable(self): result = {} for species in self.getSpecies(): result.update({species: []}) for data in list(self.values()): result[data.name].append(data) return result def getValuesOfSpecies(self): values = {} for data in list(self.values()): if data.name not in list(values.keys()): values.update({data.name: []}) values[data.name].append(data.value) return values def getTimesOfSpecies(self): times = {} for data in list(self.values()): if data.name not in list(times.keys()): times.update({data.name: []}) times[data.name].append(data.t) return times def getValues(self): values = {} for data in list(self.values()): if data.name not in list(values.keys()): values.update({data.name: []}) values[data.name].append((data.t, data.value)) return values def interpolate(self, size=101): new_experimental_data = {} new_currentId = 0 list_species = list(set([data.name for data in list(self.values())])) for species in list_species: times = [] values = [] for data in list(self.values()): if data.name == species: times.append(data.t) values.append(data.value) if amin(times) != amax(times): times_interpolation = linspace(amin(times), amax(times), size) values_interpolation = interp(times_interpolation, times, values) for i_data, data in enumerate(values_interpolation): t_data = ExperimentalData() t_data.readDB(species, times_interpolation[i_data], data) new_experimental_data.update({new_currentId: t_data}) new_currentId += 1 else: t_data = ExperimentalData() t_data.readDB(species, times[0], values[0]) new_experimental_data.update({new_currentId: t_data}) new_currentId += 1 dict.clear(self) dict.update(self, new_experimental_data) self.currentId = new_currentId def getVariables(self): variables = [] for data in list(self.values()): variables.append(data.name) return list(set(variables))
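# Hedged standalone sketch (editor's addition): the core of interpolate()
# above is numpy's linspace/interp pair, resampling scattered (t, value)
# points for one species onto a uniform time grid. The data points here are
# made up; the names are already imported at the top of this module.
def _interpolation_demo(size=101):
    times = [0.0, 2.0, 10.0]
    values = [1.0, 3.0, 5.0]
    grid = linspace(amin(times), amax(times), size)  # uniform time grid
    return interp(grid, times, values)               # piecewise-linear resample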
vincent-noel/libSigNetSim
libsignetsim/data/ListOfExperimentalData.py
Python
gpl-3.0
3,811
#!/usr/bin/env python # Copyright (c) 2008-14 Qtrac Ltd. All rights reserved. # This program or module is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 2 of the License, or # version 3 of the License, or (at your option) any later version. It is # provided for educational purposes and is distributed in the hope that # it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See # the GNU General Public License for more details. from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future_builtins import * from PyQt4.QtCore import (QRegExp, Qt) from PyQt4.QtCore import pyqtSignal as Signal from PyQt4.QtGui import (QCheckBox, QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QMessageBox, QRegExpValidator, QSpinBox) class NumberFormatDlg(QDialog): changed = Signal() def __init__(self, format, parent=None): super(NumberFormatDlg, self).__init__(parent) self.setAttribute(Qt.WA_DeleteOnClose) self.format = format self.create_widgets() self.layout_widgets() self.create_connections() self.setWindowTitle("Set Number Format (Modeless)") def create_widgets(self): punctuationRe = QRegExp(r"[ ,;:.]") self.thousandsLabel = QLabel("&Thousands separator") self.thousandsEdit = QLineEdit(self.format["thousandsseparator"]) self.thousandsLabel.setBuddy(self.thousandsEdit) self.thousandsEdit.setMaxLength(1) self.thousandsEdit.setValidator( QRegExpValidator(punctuationRe, self)) self.decimalMarkerLabel = QLabel("Decimal &marker") self.decimalMarkerEdit = QLineEdit(self.format["decimalmarker"]) self.decimalMarkerLabel.setBuddy(self.decimalMarkerEdit) self.decimalMarkerEdit.setMaxLength(1) self.decimalMarkerEdit.setValidator( QRegExpValidator(punctuationRe, self)) self.decimalMarkerEdit.setInputMask("X") self.decimalPlacesLabel = QLabel("&Decimal places") self.decimalPlacesSpinBox = QSpinBox() self.decimalPlacesLabel.setBuddy(self.decimalPlacesSpinBox) self.decimalPlacesSpinBox.setRange(0, 6) self.decimalPlacesSpinBox.setValue(self.format["decimalplaces"]) self.redNegativesCheckBox = QCheckBox("&Red negative numbers") self.redNegativesCheckBox.setChecked(self.format["rednegatives"]) self.buttonBox = QDialogButtonBox(QDialogButtonBox.Apply| QDialogButtonBox.Close) def layout_widgets(self): grid = QGridLayout() grid.addWidget(self.thousandsLabel, 0, 0) grid.addWidget(self.thousandsEdit, 0, 1) grid.addWidget(self.decimalMarkerLabel, 1, 0) grid.addWidget(self.decimalMarkerEdit, 1, 1) grid.addWidget(self.decimalPlacesLabel, 2, 0) grid.addWidget(self.decimalPlacesSpinBox, 2, 1) grid.addWidget(self.redNegativesCheckBox, 3, 0, 1, 2) grid.addWidget(self.buttonBox, 4, 0, 1, 2) self.setLayout(grid) def create_connections(self): self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect( self.apply) self.buttonBox.rejected.connect(self.reject) def apply(self): thousands = unicode(self.thousandsEdit.text()) decimal = unicode(self.decimalMarkerEdit.text()) if thousands == decimal: QMessageBox.warning(self, "Format Error", "The thousands separator and the decimal marker " "must be different.") self.thousandsEdit.selectAll() self.thousandsEdit.setFocus() return if len(decimal) == 0: QMessageBox.warning(self, "Format Error", "The decimal marker may not be empty.") self.decimalMarkerEdit.selectAll() self.decimalMarkerEdit.setFocus() return 
self.format["thousandsseparator"] = thousands self.format["decimalmarker"] = decimal self.format["decimalplaces"] = ( self.decimalPlacesSpinBox.value()) self.format["rednegatives"] = ( self.redNegativesCheckBox.isChecked()) self.changed.emit()
manashmndl/LearningPyQt
pyqt/chap05/numberformatdlg2.py
Python
mit
4,502
#!/usr/bin/env python # -*- coding: utf-8 -*- from sys import getrefcount import sys import copy class Leaf(object): """Leaf of an red-black tree.""" def __init__(self): self.color = "BLACK" self.parent = None def insert(self, interval): node = RBIntervalNode(interval, "RED", self.parent) if self.parent.right == self: self.parent.right = node else: self.parent.left = node return [node] def s(self, depth): """DEBUG ONLY: Print informations about this leaf.""" if not self.parent: dire = "ROOT" elif self is self.parent.left: dire = "L" else: dire = "R" s = "{0}{3} -{4}- : {1} - {2}\n".format(" "*depth, "LEAF", self.color, depth, dire) return s def search(self, interval): return [] def read_hit(self, interval): pass class RBIntervalNode(object): """Base element of a red-black tree. Describe a genomic interval.""" def __init__(self, root=None, color="BLACK", parent=None, left=None, right=None): self.root = root self.color = color self.parent = parent if not left: self.left = Leaf() else: self.left = left self.left.parent = self if not right: self.right = Leaf() else: self.right = right self.right.parent = self self.nprev = None # predecessor self.previnit = None # end position of prev self.nnext = None # successor self.nextinit = None # start position of next self.cutnext = False # Multiple next node? self.cutprev = False # Multiple prev node? self.int_cover = [] # intervals covered by at least one read self.num_hit = 0 # read mapped to this block self.int_len = 0 # length of this node (without intron) def read_hit(self, interval): """Test the parameter falls within this interval and update values accordingly.""" #print >> sys.stderr, "INTERVALLO:", interval if interval[0] < self.root[0]: self.left.read_hit(interval) if interval[1] > self.root[1]: self.right.read_hit(interval) if interval[1] > self.root[0] and interval[0] < self.root[1]: cover_interval = [interval[0], interval[1]] if cover_interval[0] < self.root[0]: cover_interval[0] = self.root[0] if cover_interval[1] > self.root[1]: cover_interval[1] = self.root[1] self.int_cover.append(cover_interval) self.int_cover.sort(key=lambda tup : tup[0]) y = [self.int_cover[0]] for x in self.int_cover[1:]: if y[-1][1] < x[0]: y.append(x) elif y[-1][1] == x[0]: y[-1][1] = x[1] elif y[-1][1] < x[1]: y[-1][1] = x[1] self.int_cover = y self.num_hit += 1 def search(self, interval): """Test if this interval falls within the parameter and return the correct nodes describing it.""" ret_int = [] if interval[1] < self.root[0]: ret_int += self.left.search(interval) elif self.root[1] < interval[0]: ret_int += self.right.search(interval) else: if interval[0] < self.root[0]: new_interval = [interval[0], self.root[0]-1] interval[0] = self.root[0] ret_int += self.left.search(new_interval) ret_int += self.search(interval) elif interval[1] > self.root[1]: new_interval = [self.root[1]+1, interval[1]] interval[1] = self.root[1] ret_int += self.search(interval) ret_int += self.right.search(new_interval) elif interval == self.root: ret_int.append(self) return ret_int def insert(self, interval): """Insert interval in this subtree.""" ret_int = [] if self.root is None: # First node self.root = interval return [self.root] elif self.root == interval: return [self] elif self.root[0] > interval[1]: # interval < self if self.left.__class__ is Leaf().__class__: self.left = RBIntervalNode(interval, "RED", self) return [self.left] else: return self.left.insert(interval) elif self.root[1] < interval[0]: # interval > self if self.right.__class__ is Leaf().__class__: self.right = 
RBIntervalNode(interval, "RED", self) return [self.right] else: return self.right.insert(interval) else: # interval e self is sovrappongono in qualche modo if interval[0] < self.root[0]: new_interval = [self.root[0], interval[1]] interval[1] = self.root[0]-1 # Questo inserimento ricade nella prima casistica. # self.prev viene modificato. ret_int += self.insert(interval) # Questo inserimento ricade nell'ultima casistica. # se new_interval != self.root self.prev e self.succ # vengono modificati ret_int += self.insert(new_interval) return ret_int elif interval[1] > self.root[1]: new_interval = [interval[0], self.root[1]] interval[0] = self.root[1]+1 # Questo inserimento non dovrebbe inserire nulla ret_int += self.insert(new_interval) ret_int += self.insert(interval) return ret_int else: if interval[0] == self.root[0]: # interval inizia dove inizia self.root e finisce prima di # self.root self.root[0] = interval[1] interval[1] = interval[1] -1 if interval[0] > interval[1]: return [self] if self.left.__class__ is Leaf().__class__: self.left = RBIntervalNode(interval, "RED", self) if self.prev: self.prev.succ = self.left self.left.prev = self.prev self.left.cutprev = self.cutprev self.left.succ = self self.prev = None self.cutprev = False ret_int = [self.left] else: self.prev = None ret_int = self.left.insert(interval) ret_int[0].cutprev = self.cutprev ret_int[-1].succ = self self.cutprev = False if interval[1] == self.root[1]: # interval finisce dove finisce self.root ma inizia dopo di # self.root self.root[1] = interval[0] interval[0] = interval[0] +1 if interval[1] < interval[0]: return [self] if self.right.__class__ is Leaf().__class__: self.right = RBIntervalNode(interval, "RED", self) if self.succ: self.succ.prev = self.right self.right.succ = self.succ self.right.cutnext = self.cutnext self.right.prev = self self.succ = None self.cutnext = False ret_int.append(self.right) else: self.succ = None ret_int = self.right.insert(interval) ret_int[-1].cutnext = self.cutnext ret_int[0].prev = self self.cutnext = False return ret_int if self.root[0] < interval[0] < interval[1] < self.root[1]: # interval cade in mezzo a self.root left_interval = [self.root[0], interval[0]-1] right_interval = [interval[1]+1, self.root[1]] # non è necessario modificare self.root in quanto # le seguenti chiamate ricorsive vengono effettuate sullo # stesso nodo e ne modificano il valore ret_int += self.insert(left_interval) ret_int += self.insert(right_interval) return ret_int return ret_int def __str__(self): return "ELEMENT: {0}".format(self.root) def s(self, depth): """DEBUG ONLY: Print informations about this leaf.""" if not self.parent: dire = "ROOT" elif self is self.parent.left: dire = "L" else: dire = "R" s = "{0}{3} -{4}- : {1} - {2}\n".format(" "*depth, self, self.color, depth, dire) s += self.left.s(depth+1) s += self.right.s(depth+1) return s def __eq__(self, other): return self.root == other.root def __ne__(self, other): return self.root != other.root @property def prevsucc(self): return [self.prev, self.succ] @property def succ(self): """Return the successor node.""" return self.nnext @succ.setter def succ(self, value): """Set the successor node. 
""" if value is not None and self.nnext is not None: if self.nextinit != value.root[0]: self.cutnext = True self.nnext = value if self.nnext: self.nextinit = self.nnext.root[0] else: self.nextinit = None @property def prev(self): """Return the predecessor node.""" return self.nprev @prev.setter def prev(self, value): """Set the predecessor node.""" if value is not None and self.nprev is not None: if self.previnit != value.root[1]: self.cutprev = True self.nprev = value if self.nprev: self.previnit = self.nprev.root[1] else: self.previnit = None @property def grandparent(self): """Return the grandparent, if present.""" if self.parent: return self.parent.parent else: return None def get_max( self ): if self.right.__class__ is Leaf( ).__class__: return self.root[ 1 ] else: return self.right.get_max( ) class RBIntervalTree(object): """Red-black interval tree.""" def __init__(self): self.root = None def read_hit(self, interval): """Test if the parameter is present in this tree and update values accordingly. """ if not self.root: return else: self.root.read_hit(interval) pass def rbinsert(self, interval): """Insert interval in this tree.""" added_nodes = [] if not self.root: self.root = RBIntervalNode(interval, "BLACK") added_nodes = [self.root] else: added_nodes = self.root.insert(interval) for node in added_nodes: while node is not self.root and node and node.parent.color is "RED": if node.parent is node.grandparent.left: y = node.grandparent.right if y.color is "RED": # Recolor node.parent.color = "BLACK" y.color = "BLACK" node.grandparent.color = "RED" node = node.grandparent else: if node is node.parent.right: # Rotate left node.parent.right = node.left node.parent.right.parent = node.parent node.left = node.parent node.parent = node.grandparent node.parent.left = node node.left.parent = node else: node = node.parent p = node.parent if p.parent is None: self.root = node # Rotate right p.left = node.right p.left.parent = p node.right = p node.parent = p.parent p.parent = node if node.parent is not None: if p is node.parent.right: node.parent.right = node else: node.parent.left = node node.color = "BLACK" p.color = "RED" node = node.parent else: y = node.grandparent.left if y.color is "RED": # Recolor node.parent.color = "BLACK" y.color = "BLACK" node.grandparent.color = "RED" node = node.grandparent else: if node is node.parent.left: # Rotate right node.parent.left = node.right node.parent.left.parent = node.parent node.right = node.parent node.parent = node.grandparent node.parent.right = node node.right.parent = node else: node = node.parent p = node.parent if p.parent is None: self.root = node # Rotate left p.right = node.left p.right.parent = p node.left = p node.parent = p.parent p.parent = node if node.parent is not None: if p is node.parent.right: node.parent.right = node else: node.parent.left = node node.color = "BLACK" p.color = "RED" node = node.parent self.root.color = "BLACK" return added_nodes def search(self, interval): """Search interval in this tree and return the nodes that describe it.""" return self.root.search(interval) def __str__(self): if not self.root: return "" return self.root.s(0) def get_max( self ): return self.root.get_max( )
AlgoLab/PIntron-scripts
cutfiller/rbtree.py
Python
agpl-3.0
15,824
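For orientation, a minimal usage sketch for the tree above; it assumes the file is importable as rbtree, that the Leaf class from its earlier part provides the same s()/insert() hooks referenced here, and that interval bounds are inclusive integer pairs.

# Hedged usage sketch for RBIntervalTree; the file name is an assumption.
from rbtree import RBIntervalTree

tree = RBIntervalTree()
for iv in ([40, 50], [10, 20], [70, 80]):   # disjoint intervals
    added = tree.rbinsert(iv)               # nodes created by this insert
    print([str(node) for node in added])    # e.g. ['ELEMENT: [40, 50]']
print(tree)                                 # indented dump of the red-black tree
print(tree.get_max())                       # 80, the largest right endpoint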
import base64 from datetime import datetime import requests class TwitterConnection: """ Twitter API client for searching tweets :cvar timeout: Requests timeout in seconds :cvar tweet_api_url: API URL for searching tweets :ivar session: Twitter authorized session (connection) """ timeout = 5 tweet_api_url = 'https://api.twitter.com/1.1/search/tweets.json' def __init__(self, api_key, api_secret, session=None): """ Start client by setting up the session for communication with Twitter API """ self.session = session or requests.Session() self._start_session(api_key, api_secret) def _start_session(self, api_key, api_secret): """Start authorized session with Twitter API""" secret = '{}:{}'.format(api_key, api_secret) secret64 = base64.b64encode(secret.encode('ascii')).decode('ascii') headers = { 'Authorization': 'Basic {}'.format(secret64), 'Host': 'api.twitter.com', } r = self.session.post('https://api.twitter.com/oauth2/token', headers=headers, data={'grant_type': 'client_credentials'}, timeout=self.timeout) r.raise_for_status() bearer_token = r.json()['access_token'] def bearer_auth(req): req.headers['Authorization'] = 'Bearer ' + bearer_token return req self.session.auth = bearer_auth def get_tweets(self, params): """Request and return tweets from Twitter API with given params""" r = self.session.get(self.tweet_api_url, params=params, timeout=self.timeout) r.raise_for_status() return [Tweet(t) for t in reversed(r.json()['statuses'])] class Tweet: """ Twitter tweet data wrapper concentrating getters :cvar dformat: Twitter's tweet datetime format :cvar tweet_url: Tweet URL template (need to fill author and ID) :ivar data: Wrapper tweet data from JSON """ dformat = '%a %b %d %H:%M:%S +0000 %Y' tweet_url = 'https://twitter.com/{}/statuses/{}' def __init__(self, jsondata): """Construct new Tweet by providing data from JSON""" self.data = jsondata def __getitem__(self, key): """Get item directly from wrapped data""" return self.data[key] def __setitem__(self, key, value): """Set value to item directly from wrapped data""" self.data[key] = value def get_id(self): """Get tweet ID""" return self.data['id'] def get_text(self): """Get full text of the tweet""" return self.data['text'] def get_nretweets(self): """Get number of retweets of tweet""" return self.data['retweet_count'] def get_author_name(self): """Get full name of author of tweet""" return self.data['user']['name'] def get_author_nick(self): """Get nick (username) of author of tweet""" return self.data['user']['screen_name'] def get_nfollows(self): """Get number of followers of author of tweet""" return self.data['user']['followers_count'] def get_created(self): """Get datetime when tweet was created""" return datetime.strptime(self.data['created_at'], self.dformat) def get_url(self): """Get url of tweet""" return self.tweet_url.format(self.get_author_nick(), self.get_id()) def is_retweet(self): """Check if this tweet is just retweet of another tweet""" return 'retweeted_status' in self.data def get_entities_of_type(self, type): """Get list of tweet entities of desired type""" return self.data.get('entities', {}).get(type, [])
MarekSuchanek/PYT-TwitterWall
twitterwall/common.py
Python
mit
3,887
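A hedged usage sketch for the client above; the credentials are placeholders, and q/count/lang are standard Twitter search parameters rather than anything defined in this file.

# Hypothetical credentials; real ones come from the Twitter developer console.
conn = TwitterConnection(api_key='<key>', api_secret='<secret>')
tweets = conn.get_tweets({'q': '#python', 'count': 5, 'lang': 'en'})
for tweet in tweets:
    if not tweet.is_retweet():
        print('{} (@{}) at {}: {}'.format(tweet.get_author_name(),
                                          tweet.get_author_nick(),
                                          tweet.get_created(),
                                          tweet.get_url()))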
""" Attempt to generate templates for module reference with Sphinx To include extension modules, first identify them as valid in the ``_uri2path`` method, then handle them in the ``_parse_module_with_import`` script. Notes ----- This parsing is based on import and introspection of modules. Previously functions and classes were found by parsing the text of .py files. Extension modules should be discovered and included as well. This is a modified version of a script originally shipped with the PyMVPA project, then adapted for use first in NIPY and then in skimage. PyMVPA is an MIT-licensed project. """ # Stdlib imports import os import re from inspect import getmodule from types import BuiltinFunctionType # suppress print statements (warnings for empty files) DEBUG = True class ApiDocWriter(object): ''' Class for automatic detection and parsing of API docs to Sphinx-parsable reST format''' # only separating first two levels rst_section_levels = ['*', '=', '-', '~', '^'] def __init__(self, package_name, rst_extension='.txt', package_skip_patterns=None, module_skip_patterns=None, other_defines = True ): ''' Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for searching by these regexps. If is None, gives default. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If is None, gives default. Default is: ['\.setup$', '\._'] other_defines : {True, False}, optional Whether to include classes and functions that are imported in a particular module but not defined there. 
''' if package_skip_patterns is None: package_skip_patterns = ['\\.tests$'] if module_skip_patterns is None: module_skip_patterns = ['\\.setup$', '\\._'] self.package_name = package_name self.rst_extension = rst_extension self.package_skip_patterns = package_skip_patterns self.module_skip_patterns = module_skip_patterns self.other_defines = other_defines def get_package_name(self): return self._package_name def set_package_name(self, package_name): ''' Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True ''' # It's also possible to imagine caching the module parsing here self._package_name = package_name root_module = self._import(package_name) self.root_path = root_module.__path__[-1] self.written_modules = None package_name = property(get_package_name, set_package_name, None, 'get/set package_name') def _import(self, name): ''' Import namespace package ''' mod = __import__(name) components = name.split('.') for comp in components[1:]: mod = getattr(mod, comp) return mod def _get_object_name(self, line): ''' Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass' ''' name = line.split()[1].split('(')[0].strip() # in case we have classes which are not derived from object # ie. old style classes return name.rstrip(':') def _uri2path(self, uri): ''' Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist') ''' if uri == self.package_name: return os.path.join(self.root_path, '__init__.py') path = uri.replace(self.package_name + '.', '') path = path.replace('.', os.path.sep) path = os.path.join(self.root_path, path) # XXX maybe check for extensions as well? if os.path.exists(path + '.py'): # file path += '.py' elif os.path.exists(os.path.join(path, '__init__.py')): path = os.path.join(path, '__init__.py') else: return None return path def _path2uri(self, dirpath): ''' Convert directory path to uri ''' package_dir = self.package_name.replace('.', os.path.sep) relpath = dirpath.replace(self.root_path, package_dir) if relpath.startswith(os.path.sep): relpath = relpath[1:] return relpath.replace(os.path.sep, '.') def _parse_module(self, uri): ''' Parse module defined in *uri* ''' filename = self._uri2path(uri) if filename is None: print(filename, 'erk') # nothing that we could handle here. return ([],[]) f = open(filename, 'rt') functions, classes = self._parse_lines(f) f.close() return functions, classes def _parse_module_with_import(self, uri): """Look for functions and classes in an importable module. Parameters ---------- uri : str The name of the module to be parsed. This module needs to be importable. Returns ------- functions : list of str A list of (public) function names in the module. 
classes : list of str A list of (public) class names in the module. """ mod = __import__(uri, fromlist=[uri]) # find all public objects in the module. obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')] functions = [] classes = [] for obj_str in obj_strs: # find the actual object from its string representation if obj_str not in mod.__dict__: continue obj = mod.__dict__[obj_str] # Check if function / class defined in module if not self.other_defines and not getmodule(obj) == mod: continue # figure out if obj is a function or class if hasattr(obj, 'func_name') or \ isinstance(obj, BuiltinFunctionType): functions.append(obj_str) else: try: issubclass(obj, object) classes.append(obj_str) except TypeError: # not a function or class pass return functions, classes def _parse_lines(self, linesource): ''' Parse lines of text for functions and classes ''' functions = [] classes = [] for line in linesource: if line.startswith('def ') and line.count('('): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): functions.append(name) elif line.startswith('class '): # exclude private stuff name = self._get_object_name(line) if not name.startswith('_'): classes.append(name) else: pass functions.sort() classes.sort() return functions, classes def generate_api_doc(self, uri): '''Make autodoc documentation template string for a module Parameters ---------- uri : string python location of module - e.g 'sphinx.builder' Returns ------- head : string Module name, table of contents. body : string Function and class docstrings. ''' # get the names of all classes and functions functions, classes = self._parse_module_with_import(uri) if not len(functions) and not len(classes) and DEBUG: print('WARNING: Empty -', uri) # dbg # Make a shorter version of the uri that omits the package name for # titles uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' body = '' # Set the chapter title to read 'module' for all modules except for the # main packages if '.' in uri_short: title = 'Module: :mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[2] * len(title) else: title = ':mod:`' + uri_short + '`' head += title + '\n' + self.rst_section_levels[1] * len(title) head += '\n.. automodule:: ' + uri + '\n' head += '\n.. currentmodule:: ' + uri + '\n' body += '\n.. currentmodule:: ' + uri + '\n\n' for c in classes: body += '\n:class:`' + c + '`\n' \ + self.rst_section_levels[3] * \ (len(c)+9) + '\n\n' body += '\n.. autoclass:: ' + c + '\n' # must NOT exclude from index to keep cross-refs working body += ' :members:\n' \ ' :undoc-members:\n' \ ' :show-inheritance:\n' \ '\n' \ ' .. automethod:: __init__\n\n' head += '.. autosummary::\n\n' for f in classes + functions: head += ' ' + f + '\n' head += '\n' for f in functions: # must NOT exclude from index to keep cross-refs working body += f + '\n' body += self.rst_section_levels[3] * len(f) + '\n' body += '\n.. 
autofunction:: ' + f + '\n\n'

        return head, body

    def _survives_exclude(self, matchstr, match_type):
        ''' Returns True if *matchstr* does not match patterns

        ``self.package_name`` removed from front of string if present

        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> dw._survives_exclude('sphinx.okpkg', 'package')
        True
        >>> dw.package_skip_patterns.append('^\\.badpkg$')
        >>> dw._survives_exclude('sphinx.badpkg', 'package')
        False
        >>> dw._survives_exclude('sphinx.badpkg', 'module')
        True
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        True
        >>> dw.module_skip_patterns.append('^\\.badmod$')
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        False
        '''
        if match_type == 'module':
            patterns = self.module_skip_patterns
        elif match_type == 'package':
            patterns = self.package_skip_patterns
        else:
            raise ValueError('Cannot interpret match type "%s"'
                             % match_type)
        # Match to URI without package name
        L = len(self.package_name)
        if matchstr[:L] == self.package_name:
            matchstr = matchstr[L:]
        for pat in patterns:
            try:
                pat.search
            except AttributeError:
                pat = re.compile(pat)
            if pat.search(matchstr):
                return False

        return True

    def discover_modules(self):
        ''' Return module sequence discovered from ``self.package_name``


        Parameters
        ----------
        None

        Returns
        -------
        mods : sequence
            Sequence of module names within ``self.package_name``

        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> mods = dw.discover_modules()
        >>> 'sphinx.util' in mods
        True
        >>> dw.package_skip_patterns.append('\\.util$')
        >>> 'sphinx.util' in dw.discover_modules()
        False
        >>>
        '''
        modules = [self.package_name]
        # raw directory parsing
        for dirpath, dirnames, filenames in os.walk(self.root_path):
            # Check directory names for packages
            root_uri = self._path2uri(os.path.join(self.root_path,
                                                   dirpath))

            # Normally, we'd only iterate over dirnames, but since
            # dipy does not import a whole bunch of modules we'll
            # include those here as well (the *.py filenames).
            filenames = [f[:-3] for f in filenames if
                         f.endswith('.py') and
                         not f.startswith('__init__')]
            for filename in filenames:
                package_uri = '/'.join((dirpath, filename))

            for subpkg_name in dirnames + filenames:
                package_uri = '.'.join((root_uri, subpkg_name))
                package_path = self._uri2path(package_uri)
                if (package_path and
                        self._survives_exclude(package_uri, 'package')):
                    modules.append(package_uri)

        return sorted(modules)

    def write_modules_api(self, modules, outdir):
        # upper-level modules
        main_module = modules[0].split('.')[0]
        ulms = ['.'.join(m.split('.')[:2]) if m.count('.') >= 1
                else m.split('.')[0] for m in modules]

        from collections import OrderedDict
        module_by_ulm = OrderedDict()

        for v, k in zip(modules, ulms):
            if k in module_by_ulm:
                module_by_ulm[k].append(v)
            else:
                module_by_ulm[k] = [v]

        written_modules = []

        for ulm, mods in module_by_ulm.items():
            print("Generating docs for %s:" % ulm)
            document_head = []
            document_body = []

            for m in mods:
                print("  -> " + m)
                head, body = self.generate_api_doc(m)

                document_head.append(head)
                document_body.append(body)

            out_module = ulm + self.rst_extension
            outfile = os.path.join(outdir, out_module)
            fileobj = open(outfile, 'wt')

            fileobj.writelines(document_head + document_body)
            fileobj.close()
            written_modules.append(out_module)

        self.written_modules = written_modules

    def write_api_docs(self, outdir):
        """Generate API reST files.
        Parameters
        ----------
        outdir : string
            Directory name in which to store files
            We create automatic filenames for each module

        Returns
        -------
        None

        Notes
        -----
        Sets self.written_modules to list of written modules
        """
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        # compose list of modules
        modules = self.discover_modules()
        self.write_modules_api(modules, outdir)

    def write_index(self, outdir, froot='gen', relative_to=None):
        """Make a reST API index file from written files

        Parameters
        ----------
        outdir : string
            Directory to which to write generated index file
        froot : string, optional
            root (filename without extension) of filename to write to
            Defaults to 'gen'.  We add ``self.rst_extension``.
        relative_to : string
            path to which written filenames are relative.  This
            component of the written file path will be removed from
            outdir, in the generated index.  Default is None, meaning,
            leave path as it is.
        """
        if self.written_modules is None:
            raise ValueError('No modules written')
        # Get full filename path
        path = os.path.join(outdir, froot + self.rst_extension)
        # Path written into index is relative to rootpath
        if relative_to is not None:
            relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '')
        else:
            relpath = outdir
        idx = open(path, 'wt')
        w = idx.write
        w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')

        title = "API Reference"
        w(title + "\n")
        w("=" * len(title) + "\n\n")
        w('.. toctree::\n\n')
        for f in self.written_modules:
            w('   %s\n' % os.path.join(relpath, f))
        idx.close()
mdesco/dipy
doc/tools/apigen.py
Python
bsd-3-clause
17,931
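A sketch of how such a writer is typically driven from a documentation build script; the package name and output directory below are illustrative, not taken from the file above.

# Hypothetical driver: one .rst stub per module plus an index page.
docwriter = ApiDocWriter('dipy', rst_extension='.rst', other_defines=False)
docwriter.package_skip_patterns += ['\\.tests$']
docwriter.write_api_docs('reference')
docwriter.write_index('reference', froot='index',
                      relative_to='reference')    # toctree over the stubs
print('%d files written' % len(docwriter.written_modules))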
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: memset_dns_reload
author: "Simon Weald (@glitchcrab)"
version_added: "2.6"
short_description: Request reload of Memset's DNS infrastructure.
notes:
  - DNS reload requests are a best-effort service provided by Memset;
    these generally happen every 15 minutes by default, however you can
    request an immediate reload if later tasks rely on the records being
    created. An API key generated via the Memset customer control panel is
    required with the following minimum scope - I(dns.reload). If you wish
    to poll the job status to wait until the reload has completed, then
    I(job.status) is also required.
description:
    - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
options:
    api_key:
        required: true
        description:
            - The API key obtained from the Memset control panel.
    poll:
        default: false
        type: bool
        description:
            - Boolean value, if set will poll the reload job's status and return
              when the job has completed (unless the 30 second timeout is reached first).
              If the timeout is reached then the task will not be marked as failed, but
              stderr will indicate that the polling failed.
'''

EXAMPLES = '''
- name: submit DNS reload and poll.
  memset_dns_reload:
    api_key: 5eb86c9196ab03919abcf03857163741
    poll: True
  delegate_to: localhost
'''

RETURN = '''
---
memset_api:
  description: Raw response from the Memset API.
  returned: always
  type: complex
  contains:
    error:
      description: Whether the job ended in error state.
      returned: always
      type: bool
      sample: true
    finished:
      description: Whether the job completed before the result was returned.
      returned: always
      type: bool
      sample: true
    id:
      description: Job ID.
      returned: always
      type: str
      sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
    status:
      description: Job status.
      returned: always
      type: str
      sample: "DONE"
    type:
      description: Job type.
      returned: always
      type: str
      sample: "dns"
'''

from time import sleep

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.memset import memset_api_call


def poll_reload_status(api_key=None, job_id=None, payload=None):
    '''
    We poll the `job.status` endpoint every 5 seconds up to a
    maximum of 6 times. This is a relatively arbitrary choice of
    timeout, however requests rarely take longer than 15 seconds
    to complete.
    '''
    memset_api, stderr, msg = None, None, None
    payload['id'] = job_id

    api_method = 'job.status'
    _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)

    # poll with a bounded loop so an unfinished job cannot hang the task;
    # 6 attempts at 5-second intervals gives the documented 30 second ceiling.
    counter = 0
    while not response.json()['finished'] and counter < 6:
        sleep(5)
        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
        counter += 1
    if response.json()['error']:
        # the reload job was submitted but polling failed. Don't return this as an overall task failure.
        stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
    else:
        memset_api = response.json()
        msg = None

    return(memset_api, msg, stderr)


def reload_dns(args=None):
    '''
    DNS reloads are a single API call and therefore there's not
    much which can go wrong outside of auth errors.
    '''
    retvals, payload = dict(), dict()
    has_changed, has_failed = False, False
    memset_api, msg, stderr = None, None, None

    api_method = 'dns.reload'
    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

    if has_failed:
        # this is the first time the API is called; incorrect credentials will
        # manifest themselves at this point so we need to ensure the user is
        # informed of the reason.
        retvals['failed'] = has_failed
        retvals['memset_api'] = response.json()
        retvals['msg'] = msg
        return(retvals)

    # set changed to true if the reload request was accepted.
    has_changed = True
    memset_api = msg
    # empty msg var as we don't want to return the API's json response twice.
    msg = None

    if args['poll']:
        # hand off to the poll function.
        job_id = response.json()['id']
        memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)

    # assemble return variables.
    retvals['failed'] = has_failed
    retvals['changed'] = has_changed
    # use an explicit mapping rather than eval() to copy the optional values.
    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
        if value is not None:
            retvals[key] = value

    return(retvals)


def main():
    global module
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, type='str', no_log=True),
            poll=dict(required=False, default=False, type='bool')
        ),
        supports_check_mode=False
    )

    # populate the dict with the user-provided vars.
    args = dict()
    for key, arg in module.params.items():
        args[key] = arg

    retvals = reload_dns(args)

    if retvals['failed']:
        module.fail_json(**retvals)
    else:
        module.exit_json(**retvals)


if __name__ == '__main__':
    main()
alxgu/ansible
lib/ansible/modules/cloud/memset/memset_dns_reload.py
Python
gpl-3.0
5,850
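The bounded polling used by poll_reload_status() generalizes to a small helper; the sketch below is not part of the module, and check is any hypothetical callable that returns True once the job has finished.

from time import sleep

def wait_until(check, attempts=6, delay=5):
    # Worst-case wait is attempts * delay seconds (6 * 5 = 30 here,
    # matching the module's documented 30 second ceiling).
    for _ in range(attempts):
        sleep(delay)
        if check():
            return True
    return False  # timed out; the caller decides whether that is fatal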
import web login_form = web.form.Form( web.form.Textbox('usuario', web.form.notnull), ) search_form = web.form.Form( web.form.Textbox('buscar', web.form.notnull), ) useradd_form = web.form.Form( web.form.Textbox('username', web.form.notnull), web.form.Textbox('name', web.form.notnull), ) gradeadd_form = web.form.Form( web.form.Textbox('curso', web.form.notnull), ) groupadd_form = web.form.Form( web.form.Textbox('grupo', web.form.notnull), ) def studadd_form (grades, groups): f = web.form.Form( web.form.Textbox('nombre', web.form.notnull, size=64), web.form.Dropdown('curso', grades), web.form.Dropdown('grupo', groups), web.form.Textbox('tutor', size=64), web.form.Textbox('profesiones', size=64), #web.form.Textbox('hermano', size=12), web.form.Textbox('tel1', size=12), web.form.Textbox('tel2', size=12), web.form.Textbox('mail1',size=48), web.form.Textbox('mail2', size=48), ) return f def bookadd_form (grades, groups): f = web.form.Form( web.form.Textbox('titulo', web.form.notnull, size=64), web.form.Dropdown('curso', grades), web.form.Dropdown('grupo', groups), web.form.Textbox('editorial', size=32), web.form.Textbox('isbn', size=32), web.form.Textbox('precio', size=12), web.form.Textbox('stock', size=12), ) return f
vicnala/ampabooks
forms.py
Python
gpl-3.0
1,426
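A hedged sketch of how these web.py forms are used inside a request handler; the input dict stands in for web.input() and the sample values are invented.

# Validate posted data against useradd_form; f.d exposes the cleaned values.
f = useradd_form()
if f.validates({'username': 'jdoe', 'name': 'Jane Doe'}):
    print(f.d.username)   # 'jdoe'
else:
    print(f.render())     # HTML form with inline error messages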
import numpy as np
import spm1d


#(0) Load dataset:
# dataset = spm1d.data.uv0d.anova2onerm.Santa23()
dataset = spm1d.data.uv0d.anova2onerm.Southampton2onerm()
# dataset = spm1d.data.uv0d.anova2onerm.RSXLDrug()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D3x3()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D3x4()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D3x4A()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D3x5()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D4x4()
# dataset = spm1d.data.uv0d.anova2onerm.SPM1D4x5()
y,A,B,SUBJ = dataset.get_data()
print( dataset )


#(1) Conduct non-parametric test:
np.random.seed(0)
alpha = 0.05
snpmlist = spm1d.stats.nonparam.anova2onerm(y, A, B, SUBJ)
snpmilist = snpmlist.inference(alpha, iterations=200)
print( 'Non-parametric results:')
print( snpmilist )


#(2) Compare to parametric test:
spmlist = spm1d.stats.anova2onerm(y, A, B, SUBJ, equal_var=True)
spmilist = spmlist.inference(alpha)
print( 'Parametric results:')
print( spmilist )
0todd0000/spm1d
spm1d/examples/nonparam/0d/ex_anova2onerm.py
Python
gpl-3.0
1,031
# -*- coding: utf-8 -*- # Generated by Django 1.11.29 on 2021-07-30 11:54 from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [("release_changed", "0001_initial")] operations = [ migrations.AddField( model_name="releasechanged", name="label", field=models.CharField(default="daily", max_length=128), preserve_default=False, ), migrations.AlterField( model_name="releasechanged", name="vmtype", field=models.CharField( choices=[ ("CE", "spce"), ("PRO", "sppro"), ("CARRIER", "carrier"), ], max_length=7, ), ), migrations.AlterUniqueTogether( name="releasechanged", unique_together=set([("version", "vmtype", "label")]), ), ]
sipwise/repoapi
release_changed/migrations/0002_add_label.py
Python
gpl-3.0
980
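For orientation, the model state these operations imply; a sketch only, since the real model lives in release_changed/models.py and the version field's definition is assumed from 0001_initial.

from django.db import models

class ReleaseChanged(models.Model):
    version = models.CharField(max_length=128)   # assumed from 0001_initial
    label = models.CharField(max_length=128)     # added by this migration
    vmtype = models.CharField(
        max_length=7,
        choices=[("CE", "spce"), ("PRO", "sppro"), ("CARRIER", "carrier")],
    )

    class Meta:
        unique_together = (("version", "vmtype", "label"),)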
import collections

import numpy as np
import pandas as pd

import pickers as pck


def col_contains(value):
    def cell_contains_value(cell_list):
        return value in cell_list
    return cell_contains_value


class BaseFrame(pd.DataFrame):
    # Overriding the DataFrame constructor so that new instances
    # derived from this class take the type of the subclass
    @property
    def _constructor(self):
        return self.__class__

    # Support the propagation of attributes across data frames
    # from: https://github.com/pandas-dev/pandas/issues/2485#issuecomment-174577149
    def _combine_const(self, other, *args, **kwargs):
        return super(BaseFrame, self)._combine_const(other, *args, **kwargs).__finalize__(self)


class MatchesFrame(BaseFrame):
    """Compositional wrapper around a dataframe for a number of matches.
    Provides helpers specific to manipulating cricket match data.
    """
    def filter_team(self, team):
        return self.teams.apply(col_contains(team))

    def filter_umpire(self, umpire):
        return self.umpires.apply(col_contains(umpire))

    def won_matches(self):
        df = self
        won_matches = df[df.outcome.apply(lambda oc: 'winner' in oc)]
        return MatchesFrame(won_matches)

    def toss_winner_won(self):
        df = self.won_matches()
        toss_winner_won = df[df.toss.apply(pck.pick_winner) ==
                             df.outcome.apply(pck.pick_winner)]
        return MatchesFrame(toss_winner_won)

    def team_names(self):
        """List of teams who have at least one match in the matches."""
        all_teams = [(t, True) for tpair in self.teams for t in tpair]
        team_names = collections.OrderedDict(all_teams)
        return np.array(list(team_names.keys()))

    def team_innings(self, team_name):
        """Returns a series of innings of the team_name batting"""
        team_innings = [inn1 if inn1.attrs['batting'] == team_name
                        else inn2 if inn2.attrs['batting'] == team_name
                        else np.nan
                        for (inn1, inn2) in zip(self['1st innings_frame'],
                                                self['2nd innings_frame'])]
        return pd.Series(team_innings, index=self.index)
kochhar/cric
cric/wrappers.py
Python
agpl-3.0
2,225
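A hedged usage sketch; matches is assumed to be a DataFrame already carrying the columns the wrapper reads (teams, umpires, toss, outcome), and the pickle file name is hypothetical.

import pandas as pd
from cric.wrappers import MatchesFrame

matches = pd.read_pickle('matches.pkl')   # hypothetical source of match rows
mf = MatchesFrame(matches)
india = mf[mf.filter_team('India')]       # boolean mask over the team pairs
won = mf.won_matches()
toss_wins = mf.toss_winner_won()
print(float(len(toss_wins)) / len(won))   # share of wins taken by the toss winner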
from pandac.PandaModules import * from direct.gui.DirectGui import * from pandac.PandaModules import * from toontown.toonbase import ToontownGlobals from direct.showbase import DirectObject from direct.fsm import ClassicFSM, State from direct.fsm import State from direct.directnotify import DirectNotifyGlobal from otp.avatar.Avatar import teleportNotify import ToonAvatarDetailPanel from toontown.toonbase import TTLocalizer from toontown.hood import ZoneUtil globalTeleport = None def showTeleportPanel(avId, avName, avDisableName): global globalTeleport if globalTeleport != None: globalTeleport.cleanup() globalTeleport = None globalTeleport = ToonTeleportPanel(avId, avName, avDisableName) return def hideTeleportPanel(): global globalTeleport if globalTeleport != None: globalTeleport.cleanup() globalTeleport = None return def unloadTeleportPanel(): global globalTeleport if globalTeleport != None: globalTeleport.cleanup() globalTeleport = None return class ToonTeleportPanel(DirectFrame): notify = DirectNotifyGlobal.directNotify.newCategory('ToonTeleportPanel') def __init__(self, avId, avName, avDisableName): DirectFrame.__init__(self, pos=(-1.01, 0.1, -0.35), parent=base.a2dTopRight, image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.0, 1.0, 0.6), text='', text_wordwrap=13.5, text_scale=0.06, text_pos=(0.0, 0.18)) messenger.send('releaseDirector') self['image'] = DGG.getDefaultDialogGeom() self.avId = avId self.avName = avName self.avDisableName = avDisableName self.fsm = ClassicFSM.ClassicFSM('ToonTeleportPanel', [ State.State('off', self.enterOff, self.exitOff), State.State('begin', self.enterBegin, self.exitBegin), State.State('checkAvailability', self.enterCheckAvailability, self.exitCheckAvailability), State.State('notAvailable', self.enterNotAvailable, self.exitNotAvailable), State.State('ignored', self.enterIgnored, self.exitIgnored), State.State('notOnline', self.enterNotOnline, self.exitNotOnline), State.State('wentAway', self.enterWentAway, self.exitWentAway), State.State('self', self.enterSelf, self.exitSelf), State.State('unknownHood', self.enterUnknownHood, self.exitUnknownHood), State.State('unavailableHood', self.enterUnavailableHood, self.exitUnavailableHood), State.State('otherShard', self.enterOtherShard, self.exitOtherShard), State.State('teleport', self.enterTeleport, self.exitTeleport)], 'off', 'off') from toontown.friends import FriendInviter FriendInviter.hideFriendInviter() ToonAvatarDetailPanel.hideAvatarDetail() buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui') self.bOk = DirectButton(self, image=(buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')), relief=None, text=TTLocalizer.TeleportPanelOK, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0.0, 0.0, -0.1), command=self.__handleOk) self.bOk.hide() self.bCancel = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.TeleportPanelCancel, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0.0, 0.0, -0.1), command=self.__handleCancel) self.bCancel.hide() self.bYes = DirectButton(self, image=(buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')), relief=None, text=TTLocalizer.TeleportPanelYes, text_scale=0.05, text_pos=(0.0, -0.1), pos=(-0.15, 0.0, -0.15), command=self.__handleYes) self.bYes.hide() self.bNo = DirectButton(self, image=(buttons.find('**/CloseBtn_UP'), 
buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr')), relief=None, text=TTLocalizer.TeleportPanelNo, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0.15, 0.0, -0.15), command=self.__handleNo) self.bNo.hide() buttons.removeNode() self.accept(self.avDisableName, self.__handleDisableAvatar) self.show() self.fsm.enterInitialState() self.fsm.request('begin') return def cleanup(self): self.fsm.request('off') del self.fsm self.ignore(self.avDisableName) self.destroy() def enterOff(self): pass def exitOff(self): pass def enterBegin(self): myId = base.localAvatar.doId hasManager = hasattr(base.cr, 'playerFriendsManager') if self.avId == myId: self.fsm.request('self') elif self.avId in base.cr.doId2do: self.fsm.request('checkAvailability') elif base.cr.isFriend(self.avId): if base.cr.isFriendOnline(self.avId): self.fsm.request('checkAvailability') else: self.fsm.request('notOnline') elif hasManager and base.cr.playerFriendsManager.getAvHandleFromId(self.avId): id = base.cr.playerFriendsManager.findPlayerIdFromAvId(self.avId) info = base.cr.playerFriendsManager.getFriendInfo(id) if info: if info.onlineYesNo: self.fsm.request('checkAvailability') else: self.fsm.request('notOnline') else: self.fsm.request('wentAway') else: self.fsm.request('wentAway') def exitBegin(self): pass def enterCheckAvailability(self): myId = base.localAvatar.getDoId() base.cr.ttiFriendsManager.d_teleportQuery(self.avId) self['text'] = TTLocalizer.TeleportPanelCheckAvailability % self.avName self.accept('teleportResponse', self.__teleportResponse) self.bCancel.show() def exitCheckAvailability(self): self.ignore('teleportResponse') self.bCancel.hide() def enterNotAvailable(self): self['text'] = TTLocalizer.TeleportPanelNotAvailable % self.avName self.bOk.show() def exitNotAvailable(self): self.bOk.hide() def enterIgnored(self): self['text'] = TTLocalizer.TeleportPanelNotAvailable % self.avName self.bOk.show() def exitIgnored(self): self.bOk.hide() def enterNotOnline(self): self['text'] = TTLocalizer.TeleportPanelNotOnline % self.avName self.bOk.show() def exitNotOnline(self): self.bOk.hide() def enterWentAway(self): self['text'] = TTLocalizer.TeleportPanelWentAway % self.avName self.bOk.show() def exitWentAway(self): self.bOk.hide() def enterUnknownHood(self, hoodId): self['text'] = TTLocalizer.TeleportPanelUnknownHood % base.cr.hoodMgr.getFullnameFromId(hoodId) self.bOk.show() def exitUnknownHood(self): self.bOk.hide() def enterUnavailableHood(self, hoodId): self['text'] = TTLocalizer.TeleportPanelUnavailableHood % base.cr.hoodMgr.getFullnameFromId(hoodId) self.bOk.show() def exitUnavailableHood(self): self.bOk.hide() def enterSelf(self): self['text'] = TTLocalizer.TeleportPanelDenySelf self.bOk.show() def exitSelf(self): self.bOk.hide() def enterOtherShard(self, shardId, hoodId, zoneId): shardName = base.cr.getShardName(shardId) if shardName is None: self.fsm.request('notAvailable') return myShardName = base.cr.getShardName(base.localAvatar.defaultShard) pop = None for shard in base.cr.listActiveShards(): if shard[0] == shardId: pop = shard[2] self.bYes.show() self.bNo.show() if pop and pop > localAvatar.shardPage.midPop: self.notify.warning('Entering full shard: issuing performance warning') self['text'] = TTLocalizer.TeleportPanelBusyShard % {'avName': self.avName} self.bYes.hide() self.bNo.hide() self.bOk.show() else: self['text'] = TTLocalizer.TeleportPanelOtherShard % {'avName': self.avName, 'shardName': shardName, 'myShardName': myShardName} self.shardId = shardId self.hoodId = hoodId self.zoneId = zoneId 
return def exitOtherShard(self): self.bYes.hide() self.bNo.hide() def enterTeleport(self, shardId, hoodId, zoneId): shardName = base.cr.getShardName(shardId) if shardName is None: shardName = 'unknown' print 'enterTeleport: %r, %r, %r, %r, %r' % (shardId, shardName, hoodId, zoneId, self.avId) hoodsVisited = base.localAvatar.hoodsVisited canonicalHoodId = ZoneUtil.getCanonicalZoneId(hoodId) if hoodId == ToontownGlobals.MyEstate: teleportNotify.debug('enterTeleport: estate') if shardId == base.localAvatar.defaultShard: shardId = None place = base.cr.playGame.getPlace() place.requestTeleport(hoodId, zoneId, shardId, self.avId) unloadTeleportPanel() elif canonicalHoodId not in hoodsVisited + ToontownGlobals.HoodsAlwaysVisited: teleportNotify.debug('enterTeleport: unknownHood') self.fsm.request('unknownHood', [hoodId]) elif canonicalHoodId not in base.cr.hoodMgr.getAvailableZones(): print 'hoodId %d not ready' % hoodId self.fsm.request('unavailableHood', [hoodId]) else: if shardId == base.localAvatar.defaultShard: shardId = None teleportNotify.debug('enterTeleport: requesting teleport') place = base.cr.playGame.getPlace() place.requestTeleport(hoodId, zoneId, shardId, self.avId) unloadTeleportPanel() return def exitTeleport(self): pass def __handleOk(self): unloadTeleportPanel() def __handleCancel(self): unloadTeleportPanel() def __handleYes(self): self.fsm.request('teleport', [self.shardId, self.hoodId, self.zoneId]) def __handleNo(self): unloadTeleportPanel() def __teleportResponse(self, avId, available, shardId, hoodId, zoneId): teleportNotify.debug('__teleportResponse%s' % ((avId, available, shardId, hoodId, zoneId),)) if avId != self.avId: return if available == 0: teleportNotify.debug('__teleportResponse: not available') self.fsm.request('notAvailable') elif available == 2: teleportNotify.debug('__teleportResponse: ignored') self.fsm.request('ignored') elif shardId != base.localAvatar.defaultShard: teleportNotify.debug('__teleportResponse: otherShard') self.fsm.request('otherShard', [shardId, hoodId, zoneId]) else: teleportNotify.debug('__teleportResponse: teleport') self.fsm.request('teleport', [shardId, hoodId, zoneId]) def __handleDisableAvatar(self): self.fsm.request('wentAway')
Spiderlover/Toontown
toontown/toon/ToonTeleportPanel.py
Python
mit
11,675
from datetime import datetime, timedelta import json from django.conf import settings from django.contrib.sites.models import Site from django.core import mail from django.core.cache import cache import mock from nose.tools import eq_, nottest from kitsune.products.tests import ProductFactory, TopicFactory from kitsune.sumo.templatetags.jinja_helpers import urlparams from kitsune.sumo.tests import post, get, attrs_eq, MobileTestCase, MinimalViewTestCase from kitsune.sumo.tests import SumoPyQuery as pq, template_used from kitsune.sumo.urlresolvers import reverse from kitsune.users.tests import UserFactory, add_permission from kitsune.wiki.events import ( EditDocumentEvent, ReadyRevisionEvent, ReviewableRevisionInLocaleEvent, ApproveRevisionInLocaleEvent, get_diff_for) from kitsune.wiki.models import ( Document, Revision, HelpfulVote, HelpfulVoteMetadata) from kitsune.wiki.config import ( SIGNIFICANCES, MEDIUM_SIGNIFICANCE, ADMINISTRATION_CATEGORY, TROUBLESHOOTING_CATEGORY, CATEGORIES, CANNED_RESPONSES_CATEGORY, TEMPLATES_CATEGORY, TEMPLATE_TITLE_PREFIX) from kitsune.wiki.tasks import send_reviewed_notification from kitsune.wiki.tests import ( TestCaseBase, DocumentFactory, DraftRevisionFactory, RevisionFactory, ApprovedRevisionFactory, RedirectRevisionFactory, TranslatedRevisionFactory, LocaleFactory, new_document_data) READY_FOR_REVIEW_EMAIL_CONTENT = u"""\ %(user)s submitted a new revision to the document %(title)s. Fixing all the typos!!!!!11!!!one!!!! To review this revision, click the following link, or paste it into your \ browser's location bar: https://testserver/en-US/kb/%(slug)s/review/%(new_id)s?utm_campaign=\ wiki-ready-review&utm_medium=email&utm_source=notification -- Summary: %(summary)s -- Changes: %(diff)s -- Unsubscribe from these emails: https://testserver/en-US/unsubscribe/%(watcher)s?s=%(secret)s""" DOCUMENT_EDITED_EMAIL_CONTENT = u"""\ %(user)s created a new revision to the document %(title)s. Fixing all the typos!!!!!11!!!one!!!! To view this document's history, click the following link, or paste it \ into your browser's location bar: https://testserver/en-US/kb/%(slug)s/history?utm_campaign=wiki-edit&\ utm_medium=email&utm_source=notification -- Summary: %(summary)s -- Changes: %(diff)s -- Unsubscribe from these emails: https://testserver/en-US/unsubscribe/%(watcher)s?s=%(secret)s""" APPROVED_EMAIL_CONTENT = u"""\ %(reviewer)s has approved the revision to the document %(document_title)s. To view the updated document, click the following link, or paste it into \ your browser's location bar: https://testserver/en-US/kb/%(document_slug)s?utm_campaign=wiki-approved&\ utm_medium=email&utm_source=notification -- Summary: %(summary)s -- Changes: %(diff)s -- Unsubscribe from these emails: https://testserver/en-US/unsubscribe/%(watcher)s?s=%(secret)s""" class DocumentTests(TestCaseBase): """Tests for the Document template""" def setUp(self): super(DocumentTests, self).setUp() ProductFactory() def test_document_view(self): """Load the document view page and verify the title and content.""" r = ApprovedRevisionFactory(summary='search summary', content='Some text.') response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(r.document.title, doc('article h1.title').text()) eq_(pq(r.document.html)('div').text(), doc('#doc-content div').text()) # There's a canonical URL in the <head>. 
eq_(settings.CANONICAL_URL + r.document.get_absolute_url(), doc('link[rel=canonical]').attr('href')) # The summary is in <meta name="description"... eq_('search summary', doc('meta[name=description]').attr('content')) def test_english_document_no_approved_content(self): """Load an English document with no approved content.""" r = RevisionFactory(content='Some text.', is_approved=False) response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(r.document.title, doc('article h1.title').text()) eq_("This article doesn't have approved content yet.", doc('#doc-content').text()) def test_translation_document_no_approved_content(self): """Load a non-English document with no approved content, with a parent with no approved content either.""" r = RevisionFactory(content='Some text.', is_approved=False) d2 = DocumentFactory(parent=r.document, locale='fr', slug='french') RevisionFactory(document=d2, content='Moartext', is_approved=False) response = self.client.get(d2.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(d2.title, doc('article h1.title').text()) # Avoid depending on localization, assert just that there is only text # d.html would definitely have a <p> in it, at least. eq_(doc('#doc-content').html().strip(), doc('#doc-content').text()) def test_document_fallback_with_translation(self): """The document template falls back to English if translation exists but it has no approved revisions.""" r = ApprovedRevisionFactory(content='Test') d2 = DocumentFactory(parent=r.document, locale='fr', slug='french') RevisionFactory(document=d2, is_approved=False) url = reverse('wiki.document', args=[d2.slug], locale='fr') response = self.client.get(url) doc = pq(response.content) eq_(d2.title, doc('article h1.title').text()) # Fallback message is shown. eq_(1, len(doc('#doc-pending-fallback'))) # Removing this as it shows up in text(), and we don't want to depend # on its localization. doc('#doc-pending-fallback').remove() # Included content is English. eq_(pq(r.document.html).text(), doc('#doc-content').text()) def test_document_fallback_with_translation_english_slug(self): """The document template falls back to English if translation exists but it has no approved revisions, while visiting the English slug.""" r = ApprovedRevisionFactory(content='Test') d2 = DocumentFactory(parent=r.document, locale='fr', slug='french') RevisionFactory(document=d2, is_approved=False) url = reverse('wiki.document', args=[r.document.slug], locale='fr') response = self.client.get(url, follow=True) eq_('/fr/kb/french', response.redirect_chain[0][0]) doc = pq(response.content) # Fallback message is shown. eq_(1, len(doc('#doc-pending-fallback'))) # Removing this as it shows up in text(), and we don't want to depend # on its localization. doc('#doc-pending-fallback').remove() # Included content is English. eq_(pq(r.document.html).text(), doc('#doc-content').text()) def test_document_fallback_no_translation(self): """The document template falls back to English if no translation exists.""" r = ApprovedRevisionFactory(content='Some text.') url = reverse('wiki.document', args=[r.document.slug], locale='fr') response = self.client.get(url) doc = pq(response.content) eq_(r.document.title, doc('article h1.title').text()) # Removing this as it shows up in text(), and we don't want to depend # on its localization. doc('#doc-pending-fallback').remove() # Included content is English. 
eq_(pq(r.document.html)('div').text(), doc('#doc-content div').text()) def test_document_fallback_no_translation_not_ready_for_l10n(self): """Prompt to localize an article isn't shown when there is a pending localization.""" # Creating a revision not ready for localization r = ApprovedRevisionFactory(content='Some text.', is_ready_for_localization=False) url = reverse('wiki.document', args=[r.document.slug], locale='de') response = self.client.get(url) doc = pq(response.content) # Fallback message is not shown. eq_(0, len(doc('#doc-pending-fallback'))) def test_document_fallback_no_translation_ready_for_l10n(self): """Prompt to localize an article is shown when there are no pending localizations.""" # Creating a revision ready for localization r = ApprovedRevisionFactory(content='Some text.', is_ready_for_localization=True) url = reverse('wiki.document', args=[r.document.slug], locale='de') response = self.client.get(url) doc = pq(response.content) # Fallback message is shown. eq_(1, len(doc('#doc-pending-fallback'))) def test_redirect(self): """Make sure documents with REDIRECT directives redirect properly. Also check the backlink to the redirect page. """ target = DocumentFactory() target_url = target.get_absolute_url() # Ordinarily, a document with no approved revisions cannot have HTML, # but we shove it in manually here as a shortcut: redirect = RedirectRevisionFactory(target=target).document redirect_url = redirect.get_absolute_url() response = self.client.get(redirect_url, follow=True) self.assertRedirects( response, urlparams(target_url, redirectlocale=redirect.locale, redirectslug=redirect.slug)) self.assertContains(response, redirect_url + '?redirect=no') # There's a canonical URL in the <head>. doc = pq(response.content) eq_(settings.CANONICAL_URL + target_url, doc('link[rel=canonical]').attr('href')) def test_redirect_no_vote(self): """Make sure documents with REDIRECT directives have no vote form. 
""" target = DocumentFactory() redirect = RedirectRevisionFactory(target=target).document redirect_url = redirect.get_absolute_url() response = self.client.get(redirect_url + '?redirect=no') doc = pq(response.content) assert not doc('.document-vote') def test_redirect_from_nonexistent(self): """The template shouldn't crash or print a backlink if the "from" page doesn't exist.""" d = DocumentFactory() response = self.client.get(urlparams(d.get_absolute_url(), redirectlocale='en-US', redirectslug='nonexistent')) self.assertNotContains(response, 'Redirected from ') def test_watch_includes_csrf(self): """The watch/unwatch forms should include the csrf tag.""" u = UserFactory() self.client.login(username=u.username, password='testpass') d = DocumentFactory() resp = self.client.get(d.get_absolute_url()) doc = pq(resp.content) assert doc('#doc-watch input[type=hidden]') def test_non_localizable_translate_disabled(self): """Non localizable document doesn't show tab for 'Localize'.""" u = UserFactory() self.client.login(username=u.username, password='testpass') d = DocumentFactory(is_localizable=True) resp = self.client.get(d.get_absolute_url()) doc = pq(resp.content) assert 'Translate' in doc('#doc-tools li').text() # Make it non-localizable d.is_localizable = False d.save() resp = self.client.get(d.get_absolute_url()) doc = pq(resp.content) assert 'Localize' not in doc('#doc-tools li').text() def test_obsolete_hide_edit(self): """Make sure Edit sidebar link is hidden for obsolete articles.""" d = DocumentFactory(is_archived=True) r = self.client.get(d.get_absolute_url()) doc = pq(r.content) assert not doc('#doc-tabs li.edit') def test_obsolete_no_vote(self): """No voting on is_archived documents.""" d = DocumentFactory(is_archived=True) RevisionFactory(document=d, is_approved=True) response = self.client.get(d.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) assert not doc('.document-vote') def test_templates_noindex(self): """Document templates should have a noindex meta tag.""" # Create a document and verify there is no robots:noindex d = DocumentFactory() r = ApprovedRevisionFactory(document=d) response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('meta[name=robots]'))) # Convert the document to a template and verify robots:noindex d.category = TEMPLATES_CATEGORY d.title = TEMPLATE_TITLE_PREFIX + d.title d.save() # This page is cached cache.clear() response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(doc('meta[name=robots]')[0].attrib['content'], 'noindex') def test_archived_noindex(self): """Archived documents should have a noindex meta tag.""" # Create a document and verify there is no robots:noindex r = ApprovedRevisionFactory(content='Some text.') response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('meta[name=robots]'))) # Archive the document and verify robots:noindex d = r.document d.is_archived = True d.save() cache.clear() response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_('noindex', doc('meta[name=robots]')[0].attrib['content']) def test_administration_noindex(self): """Administration documents should have a noindex meta tag.""" # Create a document and verify there is no robots:noindex r = ApprovedRevisionFactory(content='Some text.') response = 
self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('meta[name=robots]'))) # Archive the document and verify robots:noindex d = r.document d.category = ADMINISTRATION_CATEGORY d.save() cache.clear() response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_('noindex', doc('meta[name=robots]')[0].attrib['content']) def test_canned_responses_noindex(self): """Canned response documents should have a noindex meta tag.""" # Create a document and verify there is no robots:noindex r = ApprovedRevisionFactory(content='Some text.') response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('meta[name=robots]'))) # Archive the document and verify robots:noindex d = r.document d.category = CANNED_RESPONSES_CATEGORY d.save() cache.clear() response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) eq_('noindex', doc('meta[name=robots]')[0].attrib['content']) def test_links_follow(self): """Links in kb should not have rel=nofollow""" r = ApprovedRevisionFactory(content='Some link http://test.com') response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) doc = pq(response.content) assert 'rel' not in doc('#doc-content a')[0].attrib def test_document_with_fallback_locale(self): """The document template falls back to fallback locale if there is custom wiki fallback mapping for the locale and the locale have no translation exists.""" # Create an English document and a es translated document en_rev = ApprovedRevisionFactory(is_ready_for_localization=True) trans_doc = DocumentFactory(parent=en_rev.document, locale='es') trans_rev = ApprovedRevisionFactory(document=trans_doc) # Mark the created revision as the current revision for the document trans_doc.current_revision = trans_rev trans_doc.save() # Get the ca version of the document. # Resolve to the ca version # because ca has es set in FALLBACK_LOCALES in wiki/config.py url = reverse('wiki.document', args=[en_rev.document.slug], locale='ca') response = self.client.get(url) doc = pq(response.content) eq_(trans_doc.title, doc('article h1.title').text()) # Display fallback message to the user. eq_(1, len(doc('#doc-pending-fallback'))) # Check Translate article is showing in the side tools bar assert 'Translate Article' in doc('#editing-tools-sidebar').text() # Removing this as it shows up in text(), and we don't want to depend # on its localization. 
doc('#doc-pending-fallback').remove() # Check that content is available in es eq_(pq(trans_doc.html)('div').text(), doc('#doc-content div').text()) def test_document_share_link_escape(self): """Ensure that the share link isn't escaped.""" r = ApprovedRevisionFactory( content='Test', document__share_link='https://www.example.org', ) response = self.client.get(r.document.get_absolute_url()) doc = pq(response.content) eq_(doc('.wiki-doc .share-link a').attr('href'), 'https://www.example.org') class MobileArticleTemplate(MobileTestCase): def setUp(self): super(MobileArticleTemplate, self).setUp() ProductFactory() def test_document_view(self): """Verify mobile template doesn't 500.""" r = ApprovedRevisionFactory(content='Some text.') response = self.client.get(r.document.get_absolute_url()) eq_(200, response.status_code) assert template_used(response, 'wiki/mobile/document.html') def test_document_share_link_escape(self): """Ensure that the share link isn't escaped.""" r = ApprovedRevisionFactory( content='Test', document__share_link='https://www.example.org', ) response = self.client.get(r.document.get_absolute_url()) doc = pq(response.content) eq_(doc('#wiki-doc .share-link a').attr('href'), 'https://www.example.org') class MinimalArticleTemplate(MinimalViewTestCase): def setUp(self): super(MinimalArticleTemplate, self).setUp() ProductFactory() def test_document_share_link_escape(self): """Ensure that the share link isn't escaped.""" r = ApprovedRevisionFactory( content='Test', document__share_link='https://www.example.org', ) response = self.client.get(self.get_minimal_url(r.document)) doc = pq(response.content) eq_(doc('#wiki-doc .share-link a').attr('href'), 'https://www.example.org') class RevisionTests(TestCaseBase): """Tests for the Revision template""" def setUp(self): super(RevisionTests, self).setUp() self.client.logout() def test_revision_view(self): """Load the revision view page and verify the title and content.""" d = _create_document() r = d.current_revision r.created = datetime(2011, 1, 1) r.reviewed = datetime(2011, 1, 2) r.readied_for_localization = datetime(2011, 1, 3) r.save() url = reverse('wiki.revision', args=[d.slug, r.id]) response = self.client.get(url) eq_(200, response.status_code) doc = pq(response.content) eq_('Revision id: %s' % r.id, doc('div.revision-info li').first().text()) eq_(d.title, doc('h1.title').text()) eq_(pq(r.content_parsed)('div').text(), doc('#doc-content div').text()) eq_('Created:\n Jan 1, 2011, 12:00:00 AM', doc('.revision-info li')[1].text_content().strip()) eq_('Reviewed:\n Jan 2, 2011, 12:00:00 AM', doc('.revision-info li')[5].text_content().strip()) # is reviewed? eq_('Yes', doc('.revision-info li').eq(4).find('span').text()) # is current revision? 
eq_('Yes', doc('.revision-info li').eq(8).find('span').text()) @mock.patch.object(ReadyRevisionEvent, 'fire') def test_mark_as_ready_POST(self, fire): """HTTP POST to mark a revision as ready for l10n.""" u = UserFactory() add_permission(u, Revision, 'mark_ready_for_l10n') self.client.login(username=u.username, password='testpass') r = ApprovedRevisionFactory( is_ready_for_localization=False, significance=MEDIUM_SIGNIFICANCE) url = reverse('wiki.mark_ready_for_l10n_revision', args=[r.document.slug, r.id]) response = self.client.post(url, data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(200, response.status_code) r2 = Revision.objects.get(pk=r.pk) assert fire.called assert r2.is_ready_for_localization assert r2.readied_for_localization eq_(r2.readied_for_localization_by, u) eq_(r2.document.latest_localizable_revision, r2) @mock.patch.object(ReadyRevisionEvent, 'fire') def test_mark_as_ready_GET(self, fire): """HTTP GET to mark a revision as ready for l10n must fail.""" r = ApprovedRevisionFactory(is_ready_for_localization=False) u = UserFactory() add_permission(u, Revision, 'mark_ready_for_l10n') self.client.login(username=u.username, password='testpass') url = reverse('wiki.mark_ready_for_l10n_revision', args=[r.document.slug, r.id]) response = self.client.get(url, data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(405, response.status_code) r2 = Revision.objects.get(pk=r.pk) assert not fire.called assert not r2.is_ready_for_localization @mock.patch.object(ReadyRevisionEvent, 'fire') def test_mark_as_ready_no_perm(self, fire): """Mark a revision as ready for l10n without perm must fail.""" r = ApprovedRevisionFactory() u = UserFactory() self.client.login(username=u.username, password='testpass') url = reverse('wiki.mark_ready_for_l10n_revision', args=[r.document.slug, r.id]) response = self.client.post(url, data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(403, response.status_code) r2 = Revision.objects.get(pk=r.pk) assert not fire.called assert not r2.is_ready_for_localization @mock.patch.object(ReadyRevisionEvent, 'fire') def test_mark_as_ready_no_login(self, fire): """Mark a revision as ready for l10n without login must fail.""" r = ApprovedRevisionFactory(is_ready_for_localization=False) url = reverse('wiki.mark_ready_for_l10n_revision', args=[r.document.slug, r.id]) response = self.client.post(url, data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(403, response.status_code) r2 = Revision.objects.get(pk=r.pk) assert not fire.called assert not r2.is_ready_for_localization @mock.patch.object(ReadyRevisionEvent, 'fire') def test_mark_as_ready_no_approval(self, fire): """Mark an unapproved revision as ready for l10n must fail.""" r = RevisionFactory(is_approved=False, is_ready_for_localization=False) u = UserFactory() add_permission(u, Revision, 'mark_ready_for_l10n') self.client.login(username=u.username, password='testpass') url = reverse('wiki.mark_ready_for_l10n_revision', args=[r.document.slug, r.id]) response = self.client.post(url, data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(400, response.status_code) r2 = Revision.objects.get(pk=r.pk) assert not fire.called assert not r2.is_ready_for_localization class NewDocumentTests(TestCaseBase): """Tests for the New Document template""" def setUp(self): super(NewDocumentTests, self).setUp() u = UserFactory() self.client.login(username=u.username, password='testpass') def test_new_document_GET_with_perm(self): """HTTP GET to new document URL renders the form.""" self.client.login(username='admin', password='testpass') 
response = self.client.get(reverse('wiki.new_document')) eq_(200, response.status_code) doc = pq(response.content) eq_(1, len(doc('#document-form input[name="title"]'))) def test_new_document_form_defaults(self): """Verify that new document form defaults are correct.""" self.client.login(username='admin', password='testpass') response = self.client.get(reverse('wiki.new_document')) doc = pq(response.content) # TODO: Do we want to re-implement the initial product # checked? Maybe add a column to the table and use that to # figure out which are initial? # eq_(1, len(doc('input[name="products"][checked=checked]'))) eq_(None, doc('input[name="tags"]').attr('required')) eq_('checked', doc('input#id_allow_discussion').attr('checked')) eq_(None, doc('input#id_allow_discussion').attr('required')) @mock.patch.object(ReviewableRevisionInLocaleEvent, 'fire') def test_new_document_POST(self, ready_fire): """HTTP POST to new document URL creates the document.""" self.client.login(username='admin', password='testpass') data = new_document_data() response = self.client.post(reverse('wiki.new_document'), data, follow=True) d = Document.objects.get(title=data['title']) eq_([('/en-US/kb/%s/history' % d.slug, 302)], response.redirect_chain) eq_(settings.WIKI_DEFAULT_LANGUAGE, d.locale) eq_(data['category'], d.category) r = d.revisions.all()[0] eq_(data['keywords'], r.keywords) eq_(data['summary'], r.summary) eq_(data['content'], r.content) assert ready_fire.called @mock.patch.object(ReviewableRevisionInLocaleEvent, 'fire') @mock.patch.object(Site.objects, 'get_current') def test_new_document_other_locale(self, get_current, ready_fire): """Make sure we can create a document in a non-default locale.""" get_current.return_value.domain = 'testserver' self.client.login(username='admin', password='testpass') data = new_document_data() doc_locale = 'es' self.client.post(reverse('wiki.new_document', locale=doc_locale), data, follow=True) d = Document.objects.get(title=data['title']) eq_(doc_locale, d.locale) assert ready_fire.called def test_new_document_POST_empty_title(self): """Trigger required field validation for title.""" self.client.login(username='admin', password='testpass') data = new_document_data() data['title'] = '' response = self.client.post(reverse('wiki.new_document'), data, follow=True) doc = pq(response.content) ul = doc('#document-form > ul.errorlist') eq_(1, len(ul)) eq_('Please provide a title.', ul('li').text()) def test_new_document_POST_empty_content(self): """Trigger required field validation for content.""" self.client.login(username='admin', password='testpass') data = new_document_data() data['content'] = '' response = self.client.post(reverse('wiki.new_document'), data, follow=True) doc = pq(response.content) ul = doc('#document-form > ul.errorlist') eq_(1, len(ul)) eq_('Please provide content.', ul('li').text()) def test_new_document_POST_invalid_category(self): """Try to create a new document with an invalid category value.""" self.client.login(username='admin', password='testpass') data = new_document_data() data['category'] = 963 response = self.client.post(reverse('wiki.new_document'), data, follow=True) doc = pq(response.content) ul = doc('#document-form > ul.errorlist') eq_(1, len(ul)) assert ('Select a valid choice. 963 is not one of the available ' 'choices.' in ul('li').text()) def test_new_document_missing_category(self): """Test the DocumentForm's category validation. 
        Submit the form without a category set, and it should complain, even
        though it's not a strictly required field (because it cannot be set
        for translations).

        """
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        del data['category']
        response = self.client.post(reverse('wiki.new_document'), data,
                                    follow=True)
        self.assertContains(response, 'Please choose a category.')

    def test_new_document_POST_invalid_product(self):
        """Try to create a new document with an invalid product."""
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        data['products'] = ['l337']
        response = self.client.post(reverse('wiki.new_document'), data,
                                    follow=True)
        doc = pq(response.content)
        ul = doc('#document-form > ul.errorlist')
        eq_(1, len(ul))
        eq_('Select a valid choice. l337 is not one of the available choices. '
            'Please select at least one product.',
            ul('li').text())

    def test_slug_collision_validation(self):
        """Trying to create a document with an existing locale/slug should
        show a validation error."""
        d = _create_document()
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        data['slug'] = d.slug
        response = self.client.post(reverse('wiki.new_document'), data)
        eq_(200, response.status_code)
        doc = pq(response.content)
        ul = doc('#document-form > ul.errorlist')
        eq_(1, len(ul))
        eq_('Document with this Slug and Locale already exists.',
            ul('li').text())

    def test_title_collision_validation(self):
        """Trying to create a document with an existing locale/title should
        show a validation error."""
        d = _create_document()
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        data['title'] = d.title
        response = self.client.post(reverse('wiki.new_document'), data)
        eq_(200, response.status_code)
        doc = pq(response.content)
        ul = doc('#document-form > ul.errorlist')
        eq_(1, len(ul))
        eq_('Document with this Title and Locale already exists.',
            ul('li').text())

    @mock.patch.object(Site.objects, 'get_current')
    def test_slug_3_chars(self, get_current):
        """Make sure we can create a slug with only 3 characters."""
        get_current.return_value.domain = 'testserver'
        self.client.login(username='admin', password='testpass')
        data = new_document_data()
        data['slug'] = 'ask'
        response = self.client.post(reverse('wiki.new_document'), data)
        eq_(302, response.status_code)
        eq_('ask', Document.objects.order_by('-id')[0].slug)


class NewRevisionTests(TestCaseBase):
    """Tests for the New Revision template"""

    def setUp(self):
        super(NewRevisionTests, self).setUp()
        rev = ApprovedRevisionFactory(document__topics=[])
        self.d = rev.document
        self.user = UserFactory()
        self.client.login(username=self.user.username, password='testpass')

    def test_new_revision_GET_logged_out(self):
        """Creating a revision without being logged in redirects to the
        login page."""
        self.client.logout()
        response = self.client.get(reverse('wiki.edit_document',
                                           args=[self.d.slug]))
        eq_(302, response.status_code)

    def test_new_revision_GET_with_perm(self):
        """HTTP GET to new revision URL renders the form."""
        response = self.client.get(reverse('wiki.edit_document',
                                           args=[self.d.slug]))
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(1, len(doc('#revision-form textarea[name="content"]')))
        comment = doc('#id_comment')[0]
        assert 'value' not in comment.attrib
        eq_('255', comment.attrib['maxlength'])

    def test_new_revision_GET_based_on(self):
        """HTTP GET to new revision URL based on another revision.

        This case should render the form with the fields pre-populated
        with the based-on revision info.
""" r = Revision(document=self.d, keywords='ky1, kw2', summary='the summary', content='<div>The content here</div>', creator_id=UserFactory().id) r.save() add_permission(self.user, Revision, 'edit_keywords') response = self.client.get(reverse('wiki.new_revision_based_on', args=[self.d.slug, r.id])) eq_(200, response.status_code) doc = pq(response.content) eq_(doc('#id_keywords')[0].value, r.keywords) eq_(doc('#id_summary')[0].value.strip(), r.summary) eq_(doc('#id_content')[0].value.strip(), r.content) @mock.patch.object(Site.objects, 'get_current') @mock.patch.object(settings._wrapped, 'TIDINGS_CONFIRM_ANONYMOUS_WATCHES', False) def test_new_revision_POST_document_with_current(self, get_current): """HTTP POST to new revision URL creates the revision on a document. The document in this case already has a current_revision, therefore the document document fields are not editable. Also assert that the edited and reviewable notifications go out. """ get_current.return_value.domain = 'testserver' # Sign up for notifications: edit_watch = EditDocumentEvent.notify('sam@example.com', self.d) edit_watch.activate().save() review_user = UserFactory(email='joe@example.com') add_permission(review_user, Revision, 'review_revision') reviewable_watch = ReviewableRevisionInLocaleEvent.notify(review_user, locale='en-US') reviewable_watch.activate().save() reviewable_watch_no_permission = ReviewableRevisionInLocaleEvent.notify( UserFactory(), locale='en-US') reviewable_watch_no_permission.activate().save() # Edit a document: response = self.client.post( reverse('wiki.edit_document', args=[self.d.slug]), {'summary': 'A brief summary', 'content': 'The article content', 'keywords': 'keyword1 keyword2', 'comment': 'Fixing all the typos!!!!!11!!!one!!!!', 'based_on': self.d.current_revision.id, 'form': 'rev'}) eq_(302, response.status_code) eq_(2, self.d.revisions.count()) new_rev = self.d.revisions.order_by('-id')[0] eq_(self.d.current_revision, new_rev.based_on) if new_rev.based_on is not None: diff = get_diff_for(new_rev.based_on.document, new_rev.based_on, new_rev) else: diff = '' # No based_on, so diff wouldn't make sense. # Assert notifications fired and have the expected content: eq_(2, len(mail.outbox)) attrs_eq( mail.outbox[0], subject=u'%s is ready for review (%s)' % ( self.d.title, new_rev.creator), body=READY_FOR_REVIEW_EMAIL_CONTENT % { 'user': self.user.profile.name, 'title': self.d.title, 'slug': self.d.slug, 'new_id': new_rev.id, 'summary': new_rev.summary, 'diff': diff, 'watcher': reviewable_watch.pk, 'secret': reviewable_watch.secret }, to=['joe@example.com']) attrs_eq( mail.outbox[1], subject=u'%s was edited by %s' % ( self.d.title, new_rev.creator), body=DOCUMENT_EDITED_EMAIL_CONTENT % { 'user': self.user.profile.name, 'title': self.d.title, 'slug': self.d.slug, 'watcher': edit_watch.pk, 'secret': edit_watch.secret, 'summary': new_rev.summary, 'diff': diff, }, to=['sam@example.com']) @mock.patch.object(ReviewableRevisionInLocaleEvent, 'fire') @mock.patch.object(EditDocumentEvent, 'fire') @mock.patch.object(Site.objects, 'get_current') def test_new_revision_POST_document_without_current( self, get_current, edited_fire, ready_fire): """HTTP POST to new revision URL creates the revision on a document. The document in this case doesn't have a current_revision, therefore the document fields are open for editing. 
""" get_current.return_value.domain = 'testserver' self.d.current_revision = None self.d.save() data = new_document_data() data['form'] = 'rev' response = self.client.post(reverse('wiki.edit_document', args=[self.d.slug]), data) eq_(302, response.status_code) eq_(2, self.d.revisions.count()) new_rev = self.d.revisions.order_by('-id')[0] # There are no approved revisions, so it's based_on nothing: eq_(None, new_rev.based_on) assert edited_fire.called assert ready_fire.called def test_edit_document_POST_removes_old_tags(self): """Changing the tags on a document removes the old tags from that document.""" self.d.current_revision = None self.d.save() topics = [TopicFactory(), TopicFactory(), TopicFactory()] self.d.topics.add(*topics) eq_(self.d.topics.count(), len(topics)) new_topics = [topics[0], TopicFactory()] data = new_document_data(t.id for t in new_topics) data['form'] = 'doc' self.client.post(reverse('wiki.edit_document', args=[self.d.slug]), data) topic_ids = self.d.topics.values_list('id', flat=True) eq_(2, len(topic_ids)) assert new_topics[0].id in topic_ids assert new_topics[1].id in topic_ids @mock.patch.object(Site.objects, 'get_current') def test_new_form_maintains_based_on_rev(self, get_current): """Revision.based_on should be the rev that was current when the Edit button was clicked, even if other revisions happen while the user is editing.""" get_current.return_value.domain = 'testserver' _test_form_maintains_based_on_rev( self.client, self.d, 'wiki.edit_document', {'summary': 'Windy', 'content': 'gerbils', 'form': 'rev'}, locale=None) def _test_new_revision_warning(self, doc): """When editing based on current revision, we should show a warning if there are newer unapproved revisions.""" # Create a new revision that is at least 1 second newer than current created = datetime.now() + timedelta(seconds=1) r = RevisionFactory(document=doc, created=created) # Verify there is a warning box response = self.client.get( reverse('wiki.edit_document', locale=doc.locale, args=[doc.slug])) assert len(pq(response.content)('.user-messages .warning')) # Verify there is no warning box if editing the latest unreviewed response = self.client.get( reverse('wiki.new_revision_based_on', locale=doc.locale, args=[doc.slug, r.id])) assert not len(pq(response.content)('div.warning-box')) # Create a newer unreviewed revision and now warning shows created = created + timedelta(seconds=1) RevisionFactory(document=doc, created=created) response = self.client.get( reverse('wiki.new_revision_based_on', locale=doc.locale, args=[doc.slug, r.id])) assert len(pq(response.content)('.user-messages .warning')) def test_new_revision_warning(self,): """When editing based on current revision, we should show a warning if there are newer unapproved revisions.""" self._test_new_revision_warning(self.d) def test_new_revision_warning_l10n(self,): """When translating based on current revision, we should show a warning if there are newer unapproved revisions.""" # Make the en-US revision ready for l10n first r = self.d.current_revision r.is_ready_for_localization = True r.save() # Create the localization. 
l10n = DocumentFactory(parent=self.d, locale='es') r = RevisionFactory(document=l10n, is_approved=True) l10n.current_revision = r l10n.save() self._test_new_revision_warning(l10n) def test_keywords_require_permission(self): """Test keywords require permission.""" doc = self.d old_rev = doc.current_revision u = UserFactory() self.client.login(username=u.username, password='testpass') # Edit the document: response = self.client.post( reverse('wiki.edit_document', args=[doc.slug]), {'summary': 'A brief summary', 'content': 'The article content', 'keywords': 'keyword1 keyword2', 'based_on': old_rev.id, 'form': 'rev'}) eq_(302, response.status_code) # Keywords should remain the same as in old revision. new_rev = Revision.objects.filter(document=doc).order_by('-id')[0] eq_(old_rev.keywords, new_rev.keywords) # Grant the permission. add_permission(u, Revision, 'edit_keywords') # Edit the document: response = self.client.post( reverse('wiki.edit_document', args=[doc.slug]), {'summary': 'A brief summary', 'content': 'The article content', 'keywords': 'keyword1 keyword2', 'based_on': old_rev.id, 'form': 'rev'}) eq_(302, response.status_code) # Keywords should be updated now new_rev = Revision.objects.filter(document=doc).order_by('-id')[0] eq_('keyword1 keyword2', new_rev.keywords) def test_draft_button(self): rev = ApprovedRevisionFactory(is_ready_for_localization=True, document__is_localizable=True) doc = rev.document # Check the Translation page has Save Draft Button trans_url = reverse('wiki.translate', locale='bn', args=[doc.slug]) trans_resp = self.client.get(trans_url) eq_(200, trans_resp.status_code) trans_content = pq(trans_resp.content) eq_(0, len(trans_content('.user-messages li'))) eq_(2, len(trans_content('.submit .btn-draft'))) def test_restore_draft_revision(self): draft = DraftRevisionFactory(creator=self.user) trans_url = reverse('wiki.translate', locale=draft.locale, args=[draft.document.slug]) trans_resp = self.client.get(trans_url) trans_content = pq(trans_resp.content) # Check user message is shown there eq_(1, len(trans_content('.user-messages li'))) # Check there are two buttons for restore and discard eq_(2, len(trans_content('.user-messages .info form .btn'))) # Restore with the draft data draft_request = {'restore': 'Restore'} trans_resp = self.client.get(trans_url, draft_request) trans_content = pq(trans_resp.content) # No user message is shown there eq_(0, len(trans_content('.user-messages li'))) # Check title, slug, keywords, content etc are restored eq_(draft.title, trans_content('#id_title').val()) eq_(draft.slug, trans_content('#id_slug').val()) eq_(draft.keywords, trans_content('#id_keywords').val()) eq_(draft.summary, trans_content('#id_summary').text()) eq_(draft.content, trans_content('#id_content').text()) eq_(draft.based_on.id, int(trans_content('#id_based_on').val())) @nottest def test_restore_draft_revision_with_older_based_on(self): """Test restoring a draft which is based on an old based on""" draft = DraftRevisionFactory(creator=self.user) rev1 = draft.based_on doc = draft.document # Create another revision in the parent doc rev2 = ApprovedRevisionFactory(document=doc, is_ready_for_localization=True) # Check this rev2 revision content is the translation page trans_url = reverse('wiki.translate', locale=draft.locale, args=[doc.slug]) trans_resp = self.client.get(trans_url) trans_content = pq(trans_resp.content) eq_(rev2.content, trans_content('.content textarea').text()) # Now Restore the draft which is based on the past revision draft_request = {'restore': 
'Restore'}
        cache.clear()
        trans_resp = self.client.get(trans_url, draft_request)
        trans_content = pq(trans_resp.content)
        eq_(rev1.content, trans_content('.content textarea').text())
        # As the draft based on the old revision is restored, the latest
        # revision content should not be there
        assert rev2.content != trans_content('.content textarea').text()

    def test_draft_restoring_works_while_updating_translation(self):
        trans_rev = TranslatedRevisionFactory()
        trans = trans_rev.document
        doc = trans.parent
        user = trans_rev.creator
        assert doc.allows(user, 'create_revision')
        self.client.login(username=user.username, password='testpass')
        # Create a draft revision.
        # As the user will not see the title and slug, they should be blank.
        draft = DraftRevisionFactory(
            document=doc, creator=user, locale=trans.locale,
            based_on=trans_rev.based_on, title='', slug='')
        # Now restore the draft
        draft_request = {'restore': 'Restore'}
        trans_url = reverse('wiki.translate', locale=trans.locale,
                            args=[doc.slug])
        trans_resp = self.client.get(trans_url, draft_request)
        trans_content = pq(trans_resp.content)
        # Check the data is restored
        eq_(draft.keywords, trans_content('#id_keywords').val())
        eq_(draft.summary, trans_content('#id_summary').text())
        eq_(draft.content, trans_content('#id_content').text())
        eq_(draft.based_on.id, int(trans_content('#id_based_on').val()))

    def test_warning_showing_while_new_revision(self):
        """Check that a warning is shown if there is a newer updated
        revision."""
        # Create a translation
        rev = TranslatedRevisionFactory()
        trans = rev.document
        doc = trans.parent
        # Create a draft revision
        draft = DraftRevisionFactory(
            creator=self.user, locale=trans.locale, document=doc,
            based_on=rev.based_on)
        # Create another revision in the English document
        rev2 = ApprovedRevisionFactory(document=doc,
                                       is_ready_for_localization=True)
        # Create a revision in the translated article which is based on the
        # later revision
        RevisionFactory(based_on=rev2, document=trans)
        # Now restore the draft
        draft_request = {'restore': 'Restore'}
        trans_url = reverse('wiki.translate', locale=draft.locale,
                            args=[doc.slug])
        trans_resp = self.client.get(trans_url, draft_request)
        trans_content = pq(trans_resp.content)
        # Check there is a warning message
        eq_(1, len(trans_content('.user-messages li.draft-warning')))


class HistoryTests(TestCaseBase):
    """Test the history listing of a document."""

    def setUp(self):
        super(HistoryTests, self).setUp()
        self.client.login(username='admin', password='testpass')

    def test_history_noindex(self):
        """Document history should have a noindex meta tag."""
        # Create a document and verify its history page gets robots noindex
        r = ApprovedRevisionFactory(content='Some text.')
        response = get(self.client, 'wiki.document_revisions',
                       args=[r.document.slug])
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_('noindex, nofollow', doc('meta[name=robots]')[0].attrib['content'])

    def test_history_category_appears(self):
        """Document history should show the category on the page"""
        category = CATEGORIES[1]
        r = ApprovedRevisionFactory(content='Some text.',
                                    document__category=category[0])
        response = get(self.client, 'wiki.document_revisions',
                       args=[r.document.slug])
        eq_(200, response.status_code)
        self.assertContains(response, category[1])

    def test_translation_history_with_english_slug(self):
        """A request with the en-US slug but a translated locale should
        redirect to the translation's history"""
        doc = DocumentFactory(locale=settings.WIKI_DEFAULT_LANGUAGE)
        trans = DocumentFactory(parent=doc, locale='bn', slug='bn_trans_slug')
        ApprovedRevisionFactory(document=trans)
        # Get the page with the en-US slug
        url = 
reverse('wiki.document_revisions', args=[doc.slug], locale=trans.locale) response = self.client.get(url) # Check redirection happens eq_(302, response.status_code) url = '/bn/kb/bn_trans_slug/history' eq_(url, response['Location']) def test_translation_history_with_english_slug_while_no_trans(self): """Request in en-US slug but untranslated locale should raise 404""" doc = DocumentFactory(locale=settings.WIKI_DEFAULT_LANGUAGE) url = reverse('wiki.document_revisions', args=[doc.slug], locale='bn') response = self.client.get(url) # Check raises 404 error eq_(404, response.status_code) class DocumentEditTests(TestCaseBase): """Test the editing of document level fields.""" def setUp(self): super(DocumentEditTests, self).setUp() self.d = _create_document() u = UserFactory() add_permission(u, Document, 'change_document') self.client.login(username=u.username, password='testpass') def test_can_save_document_with_translations(self): """Make sure we can save a document with translations.""" # Create a translation _create_document(title='Document Prueba', parent=self.d, locale='es') # Make sure is_localizable hidden field is rendered response = get(self.client, 'wiki.edit_document', args=[self.d.slug]) eq_(200, response.status_code) doc = pq(response.content) is_localizable = doc('input[name="is_localizable"]') eq_(1, len(is_localizable)) eq_('True', is_localizable[0].attrib['value']) # And make sure we can update the document data = new_document_data() new_title = 'A brand new title' data.update(title=new_title) data.update(form='doc') data.update(is_localizable='True') response = post(self.client, 'wiki.edit_document', data, args=[self.d.slug]) eq_(200, response.status_code) doc = Document.objects.get(pk=self.d.pk) eq_(new_title, doc.title) def test_change_slug_case(self): """Changing the case of some letters in the slug should work.""" data = new_document_data() new_slug = 'Test-Document' data.update(slug=new_slug) data.update(form='doc') response = post(self.client, 'wiki.edit_document', data, args=[self.d.slug]) eq_(200, response.status_code) doc = Document.objects.get(pk=self.d.pk) eq_(new_slug, doc.slug) def test_change_title_case(self): """Changing the case of some letters in the title should work.""" data = new_document_data() new_title = 'TeST DoCuMent' data.update(title=new_title) data.update(form='doc') response = post(self.client, 'wiki.edit_document', data, args=[self.d.slug]) eq_(200, response.status_code) doc = Document.objects.get(pk=self.d.pk) eq_(new_title, doc.title) def test_archive_permission_off(self): """Shouldn't be able to change is_archive bit without permission.""" u = UserFactory() add_permission(u, Document, 'change_document') self.client.login(username=u.username, password='testpass') data = new_document_data() # Try to set is_archived, even though we shouldn't have permission to: data.update(form='doc', is_archived='on') response = post(self.client, 'wiki.edit_document', data, args=[self.d.slug]) eq_(200, response.status_code) doc = Document.objects.get(pk=self.d.pk) assert not doc.is_archived # TODO: Factor with test_archive_permission_off. 
def test_archive_permission_on(self):
        """Should be able to change the is_archived bit with permission."""
        u = UserFactory()
        add_permission(u, Document, 'change_document')
        add_permission(u, Document, 'archive_document')
        self.client.login(username=u.username, password='testpass')
        data = new_document_data()
        data.update(form='doc', is_archived='on')
        response = post(self.client, 'wiki.edit_document', data,
                        args=[self.d.slug])
        eq_(200, response.status_code)
        doc = Document.objects.get(pk=self.d.pk)
        assert doc.is_archived

    @mock.patch.object(EditDocumentEvent, 'notify')
    def test_watch_article_from_edit_page(self, notify_on_edit):
        """Make sure we can watch the article when submitting an edit."""
        data = new_document_data()
        data['form'] = 'rev'
        data['notify-future-changes'] = 'Yes'
        response = post(self.client, 'wiki.edit_document', data,
                        args=[self.d.slug])
        eq_(200, response.status_code)
        assert notify_on_edit.called

    @mock.patch.object(EditDocumentEvent, 'notify')
    def test_not_watch_article_from_edit_page(self, notify_on_edit):
        """Make sure editing an article does not cause a watch."""
        data = new_document_data()
        data['form'] = 'rev'
        response = post(self.client, 'wiki.edit_document', data,
                        args=[self.d.slug])
        eq_(200, response.status_code)
        assert not notify_on_edit.called


class DocumentListTests(TestCaseBase):
    """Tests for the All and Category template"""

    def setUp(self):
        super(DocumentListTests, self).setUp()
        self.locale = settings.WIKI_DEFAULT_LANGUAGE
        self.doc = _create_document(locale=self.locale)
        _create_document(locale=self.locale, title='Another one')
        # Create a document in a different locale to make sure it doesn't
        # show up
        _create_document(parent=self.doc, locale='es')

    def test_category_list(self):
        """Verify the category documents list view."""
        response = self.client.get(reverse('wiki.category',
                                           args=[self.doc.category]))
        doc = pq(response.content)
        cat = self.doc.category
        eq_(Document.objects.filter(category=cat, locale=self.locale).count(),
            len(doc('#document-list ul.documents li')))

    def test_all_list(self):
        """Verify the all documents list view."""
        response = self.client.get(reverse('wiki.all_documents'))
        doc = pq(response.content)
        eq_(Document.objects.filter(locale=self.locale).count(),
            len(doc('#document-list ul.documents li')))


class DocumentRevisionsTests(TestCaseBase):
    """Tests for the Document Revisions template"""

    def test_document_revisions_list(self):
        """Verify the document revisions list view."""
        creator = UserFactory()
        d = DocumentFactory()
        ApprovedRevisionFactory(document=d)
        RevisionFactory(
            summary="a tweak", content='lorem ipsum dolor',
            keywords='kw1 kw2', document=d, reviewed=None, creator=creator)
        r2 = RevisionFactory(
            summary="another tweak", content='lorem dimsum dolor',
            keywords='kw1 kw2', document=d, reviewed=None, creator=creator)
        response = self.client.get(reverse('wiki.document_revisions',
                                           args=[d.slug]))
        eq_(200, response.status_code)
        doc = pq(response.content)
        eq_(4, len(doc('#revision-list li')))
        # Verify there is no Review link
        eq_(0, len(doc('#revision-list div.status a')))
        eq_('Unreviewed',
            doc('#revision-list li:not(.header) div.status').first().text())

        # Log in as a user with permission to review
        reviewer = UserFactory()
        add_permission(reviewer, Revision, 'review_revision')
        self.client.login(username=reviewer.username, password='testpass')
        response = self.client.get(reverse('wiki.document_revisions',
                                           args=[d.slug]))
        eq_(200, response.status_code)
        doc = pq(response.content)
        # Verify there are Review links now
        eq_(2, len(doc('#revision-list div.status a')))
        eq_('Review',
doc('#revision-list li:not(.header) div.status').first().text()) # Verify edit revision link eq_('/en-US/kb/{slug}/edit/{rev_id}'.format(slug=d.slug, rev_id=r2.id), doc('#revision-list div.edit a')[0].attrib['href']) def test_revisions_ready_for_l10n(self): """Verify that the ready for l10n icon is only present on en-US.""" d = _create_document() user = UserFactory() r1 = RevisionFactory( summary="a tweak", content='lorem ipsum dolor', keywords='kw1 kw2', document=d, creator=user) d2 = _create_document(locale='es') RevisionFactory( summary="a tweak", content='lorem ipsum dolor', keywords='kw1 kw2', document=d2, creator=user) response = self.client.get(reverse('wiki.document_revisions', args=[r1.document.slug])) eq_(200, response.status_code) doc = pq(response.content) eq_(1, len(doc('#revision-list li.header div.l10n'))) response = self.client.get(reverse('wiki.document_revisions', args=[d2.slug], locale='es')) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('#revision-list div.l10n-head'))) class ReviewRevisionTests(TestCaseBase): """Tests for Review Revisions and Translations""" def setUp(self): super(ReviewRevisionTests, self).setUp() self.document = _create_document() user_ = UserFactory() self.revision = Revision(summary="lipsum", content='<div>Lorem {for mac}Ipsum{/for} ' 'Dolor</div>', keywords='kw1 kw2', document=self.document, creator=user_) self.revision.save() self.user = UserFactory() add_permission(self.user, Revision, 'review_revision') add_permission(self.user, Document, 'edit_needs_change') self.client.login(username=self.user.username, password='testpass') def test_fancy_renderer(self): """Make sure it renders the whizzy new wiki syntax.""" # The right branch of the template renders only when there's no current # revision. self.document.current_revision = None self.document.save() response = get(self.client, 'wiki.review_revision', args=[self.document.slug, self.revision.id]) # Does the {for} syntax seem to have rendered? assert pq(response.content)('span[class=for]') @mock.patch.object(send_reviewed_notification, 'delay') @mock.patch.object(Site.objects, 'get_current') @mock.patch.object(settings._wrapped, 'TIDINGS_CONFIRM_ANONYMOUS_WATCHES', False) def test_approve_revision(self, get_current, reviewed_delay): """Verify revision approval with proper notifications.""" # TODO: This isn't a great unit test. The problem here is that # the unit test code duplicates the code it's testing. So if # the code is bad, it'll be bad in both places and that's not # particularly helpful. Probably better to change the test so # that it sets up the data correctly, then compares the output # with hard-coded expected output. get_current.return_value.domain = 'testserver' # Subscribe to approvals: watch = ApproveRevisionInLocaleEvent.notify('joe@example.com', locale='en-US') watch.activate().save() # Subscribe the approver to approvals so we can assert (by counting the # mails) that he didn't get notified. 
        ApproveRevisionInLocaleEvent.notify(self.user,
                                            locale='en-US').activate().save()

        # Approve something:
        significance = SIGNIFICANCES[0][0]
        response = post(self.client, 'wiki.review_revision',
                        {'approve': 'Approve Revision',
                         'significance': significance,
                         'comment': 'something',
                         'needs_change': True,
                         'needs_change_comment': 'comment'},
                        args=[self.document.slug, self.revision.id])
        eq_(200, response.status_code)
        r = Revision.objects.get(pk=self.revision.id)
        eq_(significance, r.significance)
        assert r.reviewed
        assert r.is_approved
        assert r.document.needs_change
        assert not r.is_ready_for_localization
        eq_('comment', r.document.needs_change_comment)

        # Verify that the revision creator is now in contributors
        assert r.creator in self.document.contributors.all()

        # The "reviewed" mail should be sent to the creator, and the
        # "approved" mail should be sent to any subscribers:
        reviewed_delay.assert_called_with(r, r.document, 'something')

        if r.based_on is not None:
            old_rev = r.document.current_revision
        else:
            old_rev = r.document.revisions.filter(
                is_approved=True).order_by('-created')[1]
        diff = get_diff_for(r.document, old_rev, r)
        expected_body = (APPROVED_EMAIL_CONTENT % {
            'reviewer': r.reviewer.profile.name,
            'document_title': self.document.title,
            'document_slug': self.document.slug,
            'watcher': watch.pk,
            'secret': watch.secret,
            'summary': old_rev.summary,
            'diff': diff,
            'content': r.content})
        eq_(1, len(mail.outbox))
        attrs_eq(mail.outbox[0],
                 subject=(u'{0} ({1}) has a new approved revision ({2})'
                          .format(self.document.title, self.document.locale,
                                  self.user.username)),
                 body=expected_body,
                 to=['joe@example.com'])

    def test_approve_and_ready_for_l10n_revision(self):
        """Verify revision approval with ready for l10n."""
        add_permission(self.user, Revision, 'mark_ready_for_l10n')
        # Approve something:
        significance = SIGNIFICANCES[1][0]
        response = post(self.client, 'wiki.review_revision',
                        {'approve': 'Approve Revision',
                         'significance': significance,
                         'comment': 'something',
                         'needs_change': True,
                         'needs_change_comment': 'comment',
                         'is_ready_for_localization': True},
                        args=[self.document.slug, self.revision.id])
        eq_(200, response.status_code)
        r = Revision.objects.get(pk=self.revision.id)
        assert r.is_ready_for_localization
        eq_(r.reviewer, r.readied_for_localization_by)
        eq_(r.reviewed, r.readied_for_localization)

    @mock.patch.object(send_reviewed_notification, 'delay')
    @mock.patch.object(Site.objects, 'get_current')
    def test_reject_revision(self, get_current, delay):
        """Verify revision rejection."""
        get_current.return_value.domain = 'testserver'
        comment = 'no good'
        response = post(self.client, 'wiki.review_revision',
                        {'reject': 'Reject Revision', 'comment': comment},
                        args=[self.document.slug, self.revision.id])
        eq_(200, response.status_code)
        r = Revision.objects.get(pk=self.revision.pk)
        assert r.reviewed
        assert not r.is_approved
        delay.assert_called_with(r, r.document, comment)

        # Verify that the revision creator is not in contributors
        assert r.creator not in r.document.contributors.all()

    @mock.patch.object(send_reviewed_notification, 'delay')
    @mock.patch.object(Site.objects, 'get_current')
    def test_reject_with_needs_change(self, get_current, delay):
        """Verify the needs_change bit isn't changed when rejecting."""
        get_current.return_value.domain = 'testserver'
        comment = 'no good'
        d = self.document
        d.needs_change = True
        d.needs_change_comment = comment
        d.save()
        response = post(self.client, 'wiki.review_revision',
                        {'reject': 'Reject Revision', 'comment': comment},
                        args=[d.slug, self.revision.id])
        eq_(200, response.status_code)
        r = 
Revision.objects.get(pk=self.revision.pk) assert r.reviewed assert not r.is_approved d = Document.objects.get(pk=d.pk) assert d.needs_change eq_(comment, d.needs_change_comment) def test_review_without_permission(self): """Make sure unauthorized users can't review revisions.""" u = UserFactory() self.client.login(username=u.username, password='testpass') response = post(self.client, 'wiki.review_revision', {'reject': 'Reject Revision'}, args=[self.document.slug, self.revision.id]) eq_(403, response.status_code) def test_review_as_l10n_leader(self): """Reviewing a revision as an l10n leader should work.""" u = UserFactory() l10n = LocaleFactory(locale='en-US') l10n.leaders.add(u) self.client.login(username=u.username, password='testpass') response = post(self.client, 'wiki.review_revision', {'reject': 'Reject Revision'}, args=[self.document.slug, self.revision.id]) eq_(200, response.status_code) def test_review_as_l10n_reviewer(self): """Reviewing a revision as an l10n reviewer should work.""" u = UserFactory() l10n = LocaleFactory(locale='en-US') l10n.reviewers.add(u) self.client.login(username=u.username, password='testpass') response = post(self.client, 'wiki.review_revision', {'reject': 'Reject Revision'}, args=[self.document.slug, self.revision.id]) eq_(200, response.status_code) def test_review_logged_out(self): """Make sure logged out users can't review revisions.""" self.client.logout() response = post(self.client, 'wiki.review_revision', {'reject': 'Reject Revision'}, args=[self.document.slug, self.revision.id]) redirect = response.redirect_chain[0] eq_(302, redirect[1]) eq_('/{0}{1}?next=/en-US/kb/test-document/review/{2}' .format(settings.LANGUAGE_CODE, settings.LOGIN_URL, str(self.revision.id)), redirect[0]) @mock.patch.object(Site.objects, 'get_current') def test_review_translation(self, get_current): """Make sure it works for localizations as well.""" get_current.return_value.domain = 'testserver' doc = self.document user = UserFactory() # Create the translated document based on the current revision doc_es = _create_document(locale='es', parent=doc) rev_es1 = doc_es.current_revision rev_es1.based_on = doc.current_revision rev_es1.save() # Add a new revision to the parent and set it as the current one rev = ApprovedRevisionFactory( summary="another tweak", content='lorem dimsum dolor', significance=SIGNIFICANCES[0][0], keywords='kw1 kw2', document=doc, creator=user, based_on=self.revision) rev.save() # Create a new translation based on the new current revision rev_es2 = RevisionFactory( summary="lipsum", content='<div>Lorem {for mac}Ipsum{/for} Dolor</div>', keywords='kw1 kw2', document=doc_es, creator=user, based_on=doc.current_revision) # Whew, now render the review page self.client.login(username='admin', password='testpass') url = reverse('wiki.review_revision', locale='es', args=[doc_es.slug, rev_es2.id]) response = self.client.get(url, follow=True) eq_(200, response.status_code) doc = pq(response.content) diff_heading = doc('div.revision-diff h3').text() assert str(rev_es1.based_on.id) in diff_heading assert str(rev.id) in diff_heading # And finally, approve the translation response = self.client.post(url, {'approve': 'Approve Translation', 'comment': 'something'}, follow=True) eq_(200, response.status_code) d = Document.objects.get(pk=doc_es.id) r = Revision.objects.get(pk=rev_es2.id) eq_(d.current_revision, r) assert r.reviewed assert r.is_approved def test_review_translation_of_unapproved_parent(self): """Translate unapproved English document a 2nd time. 
Reviewing a revision of a translation when the English document does not have a current revision should fall back to the latest English revision. """ en_revision = RevisionFactory(is_approved=False) # Create the translated document based on the current revision es_document = DocumentFactory(locale='es', parent=en_revision.document) # Create first revision RevisionFactory(document=es_document, is_approved=True) es_revision = RevisionFactory( document=es_document, reviewed=None, is_approved=False, reviewer=None) # Now render the review page self.client.login(username='admin', password='testpass') url = reverse('wiki.review_revision', args=[es_document.slug, es_revision.id]) response = self.client.get(url, follow=True) eq_(200, response.status_code) doc = pq(response.content) # There's no 'Recent English Changes' <details> section eq_(3, len(doc('details'))) eq_('Approved English version:', doc('#content-fields h3').eq(0).text()) rev_message = doc('#content-fields p').eq(0).text() assert 'by %s' % en_revision.creator.username in rev_message def test_review_translation_of_rejected_parent(self): """Translate rejected English document a 2nd time. Reviewing a revision of a translation when the English document has only rejected revisions should show a message. """ user = UserFactory() en_revision = RevisionFactory(is_approved=False, reviewer=user, reviewed=datetime.now()) # Create the translated document based on the current revision es_document = DocumentFactory(locale='es', parent=en_revision.document) # Create first revision RevisionFactory(document=es_document, is_approved=True) es_revision = RevisionFactory( document=es_document, reviewed=None, is_approved=False, reviewer=None) # Now render the review page self.client.login(username='admin', password='testpass') url = reverse('wiki.review_revision', args=[es_document.slug, es_revision.id]) response = self.client.get(url, follow=True) eq_(200, response.status_code) doc = pq(response.content) # There's no 'Recent English Changes' <details> section eq_(3, len(doc('details'))) eq_('The English version has no approved content to show.', doc('details .warning-box').text()) def test_default_significance(self): """Verify the default significance is MEDIUM_SIGNIFICANCE.""" response = get(self.client, 'wiki.review_revision', args=[self.document.slug, self.revision.id]) eq_(200, response.status_code) doc = pq(response.content) eq_(MEDIUM_SIGNIFICANCE, int(doc('input[name=significance][checked]')[0].attrib['value'])) def test_self_approve_without_revision_contributors(self): """Verify review page when self approving and no other contributors. Textarea for approve/defer message should not be included in the page. """ rev = RevisionFactory(is_approved=False) u = rev.creator add_permission(u, Revision, 'review_revision') self.client.login(username=u.username, password='testpass') response = get(self.client, 'wiki.review_revision', args=[rev.document.slug, rev.id]) eq_(200, response.status_code) doc = pq(response.content) eq_(0, len(doc('textarea[name="comment"]'))) def test_self_approve_with_revision_contributors(self): """Verify review page when self approving and other contributors. Textarea for approve/defer message should be included in the page. 
""" rev1 = RevisionFactory(is_approved=False) rev2 = RevisionFactory(is_approved=False, document=rev1.document) u = rev2.creator add_permission(u, Revision, 'review_revision') self.client.login(username=u.username, password='testpass') response = get(self.client, 'wiki.review_revision', args=[rev2.document.slug, rev2.id]) eq_(200, response.status_code) doc = pq(response.content) eq_(2, len(doc('textarea[name="comment"]'))) label = doc('div.message label').text() assert rev1.creator.username in label assert u.username not in label def test_review_past_revision(self): """Verify that its not possible to review a revision older than the current revision""" r1 = RevisionFactory(is_approved=False) r2 = RevisionFactory(document=r1.document, is_approved=True) r1.document.current_revision = r2 r1.document.save() u = UserFactory() add_permission(u, Revision, 'review_revision') self.client.login(username=u.username, password='testpass') # Get the data of the document response = get(self.client, 'wiki.review_revision', args=[r1.document.slug, r1.id]) eq_(200, response.status_code) message1 = 'A newer revision has already been reviewed.' message2 = ('This revision is outdated, but there is a new revision available. ' 'Please review the latest revision.') # While there is no unapproved revision after the current revision. doc = pq(response.content) doc_content = doc('#review-revision').text() assert message1 in doc_content assert message2 not in doc_content # While there is Unapproved revision after the Current Revision RevisionFactory(document=r1.document, is_approved=False) response = get(self.client, 'wiki.review_revision', args=[r1.document.slug, r1.id]) doc = pq(response.content) doc_content = doc('#review-revision').text() assert message1 not in doc_content assert message2 in doc_content def test_revision_comments(self): """Verify that reviewing revision comment and past revision comments are showing""" d = self.document # Create 7 Revisions in the Document revs = [RevisionFactory(document=d, is_approved=False, comment="test-{0}".format(i)) for i in range(7)] # Create a user with Review permission and login with the user u = UserFactory() add_permission(u, Revision, 'review_revision') self.client.login(username=u.username, password='testpass') # Review the latest revision and Get the data of the document response = get(self.client, 'wiki.review_revision', args=[d.slug, revs[6].id]) eq_(200, response.status_code) # Check there is comment of the revision that is being reviewed doc = pq(response.content) doc_content = doc('#review-revision').text() assert revs[6].comment in doc_content # Check that the Plural message is shown when there are multiple revision comments subject = doc('.unreviewed-revision').text() message = 'Unreviewed Revisions:' assert message in subject # Check *Revision Comment* text is in <label> to to be bolded text = doc('ul.revision-comment li label')[0].text_content() eq_('Revision Comment:', text) # Check whether past revisions Comments are there # As the comments are reversed means that the latest ones comment will be at 1st # the 2nd latest ones comment will be at second and like that. 
        # so revs[5]'s comment comes first and revs[1]'s comment comes last.
        revision_comment = doc('ul.revision-comment li')
        assert revs[5].comment in revision_comment[0].text_content()
        assert revs[4].comment in revision_comment[1].text_content()
        assert revs[3].comment in revision_comment[2].text_content()
        assert revs[2].comment in revision_comment[3].text_content()
        assert revs[1].comment in revision_comment[4].text_content()
        # Verify that at most the 5 most recent revision comments are shown;
        # the 6th revision comment is not there
        assert revs[0].comment not in revision_comment.text()

        # Check that there is no comment from revisions older than the
        # current revision, and no comment from the current revision itself
        revs[4].reviewed = datetime.now()
        revs[4].is_approved = True
        revs[4].save()
        d.current_revision = revs[4]
        d.save()
        response = get(self.client, 'wiki.review_revision',
                       args=[d.slug, revs[6].id])
        doc = pq(response.content)
        revision_comment = doc('ul.revision-comment li').text()
        assert revs[5].comment in revision_comment
        assert revs[4].comment not in revision_comment
        assert revs[3].comment not in revision_comment
        assert revs[2].comment not in revision_comment
        assert revs[1].comment not in revision_comment
        assert revs[0].comment not in revision_comment
        # Check that the singular message is shown when there is a single
        # revision comment
        subject = doc('.unreviewed-revision').text()
        message = 'Unreviewed Revision:'
        assert message in subject


class CompareRevisionTests(TestCaseBase):
    """Tests for comparing revisions"""

    def setUp(self):
        super(CompareRevisionTests, self).setUp()
        self.document = _create_document()
        self.revision1 = self.document.current_revision
        u = UserFactory()
        self.revision2 = Revision(summary="lipsum",
                                  content='<div>Lorem Ipsum Dolor</div>',
                                  keywords='kw1 kw2',
                                  document=self.document, creator=u)
        self.revision2.save()

        u = UserFactory()
        self.client.login(username=u.username, password='testpass')

    def test_compare_revisions(self):
        """Compare two revisions"""
        url = reverse('wiki.compare_revisions', args=[self.document.slug])
        query = {'from': self.revision1.id, 'to': self.revision2.id}
        url = urlparams(url, **query)
        response = self.client.get(url)
        eq_(200, response.status_code)

    def test_compare_revisions_invalid_to_int(self):
        """Provide an invalid 'to' int for revision ids."""
        url = reverse('wiki.compare_revisions', args=[self.document.slug])
        query = {'from': '', 'to': 'invalid'}
        url = urlparams(url, **query)
        response = self.client.get(url)
        eq_(404, response.status_code)

    def test_compare_revisions_invalid_from_int(self):
        """Provide an invalid 'from' int for revision ids."""
        url = reverse('wiki.compare_revisions', args=[self.document.slug])
        query = {'from': 'invalid', 'to': ''}
        url = urlparams(url, **query)
        response = self.client.get(url)
        eq_(404, response.status_code)

    def test_compare_revisions_missing_query_param(self):
        """Try to compare two revisions with a missing query string param."""
        url = reverse('wiki.compare_revisions', args=[self.document.slug])
        query = {'from': self.revision1.id}
        url = urlparams(url, **query)
        response = self.client.get(url)
        eq_(404, response.status_code)

        url = reverse('wiki.compare_revisions', args=[self.document.slug])
        query = {'to': self.revision1.id}
        url = urlparams(url, **query)
        response = self.client.get(url)
        eq_(404, response.status_code)


class TranslateTests(TestCaseBase):
    """Tests for the Translate page"""

    def setUp(self):
        super(TranslateTests, self).setUp()
        self.d = _create_document()
        self.user = UserFactory()
        self.client.login(username=self.user.username,
password='testpass') def test_translate_GET_logged_out(self): """Try to create a translation while logged out.""" self.client.logout() url = reverse('wiki.translate', locale='es', args=[self.d.slug]) response = self.client.get(url) eq_(302, response.status_code) def test_translate_GET_with_perm(self): """HTTP GET to translate URL renders the form.""" url = reverse('wiki.translate', locale='es', args=[self.d.slug]) response = self.client.get(url) eq_(200, response.status_code) doc = pq(response.content) eq_(1, len(doc('form textarea[name="content"]'))) assert 'value' not in doc('#id_comment')[0].attrib eq_('checked', doc('#id_allow_discussion')[0].attrib['checked']) def test_translate_disallow(self): """HTTP GET to translate URL returns 400 when not localizable.""" self.d.is_localizable = False self.d.save() url = reverse('wiki.translate', locale='es', args=[self.d.slug]) response = self.client.get(url) eq_(400, response.status_code) def test_invalid_document_form(self): """Make sure we handle invalid document form without a 500.""" url = reverse('wiki.translate', locale='es', args=[self.d.slug]) data = _translation_data() data['slug'] = '' # Invalid slug response = self.client.post(url, data) eq_(200, response.status_code) def test_invalid_revision_form(self): """When creating a new translation, an invalid revision form shouldn't result in a new Document being created.""" url = reverse('wiki.translate', locale='es', args=[self.d.slug]) data = _translation_data() data['content'] = '' # Content is required response = self.client.post(url, data) eq_(200, response.status_code) eq_(0, self.d.translations.count()) @mock.patch.object(ReviewableRevisionInLocaleEvent, 'fire') @mock.patch.object(EditDocumentEvent, 'fire') @mock.patch.object(Site.objects, 'get_current') def test_first_translation_to_locale(self, get_current, edited_fire, ready_fire): """Create the first translation of a doc to new locale.""" get_current.return_value.domain = 'testserver' url = reverse('wiki.translate', locale='es', args=[self.d.slug]) data = _translation_data() response = self.client.post(url, data) eq_(302, response.status_code) new_doc = Document.objects.get(slug=data['slug']) eq_('es', new_doc.locale) eq_(data['title'], new_doc.title) eq_(self.d, new_doc.parent) rev = new_doc.revisions.all()[0] eq_(data['keywords'], rev.keywords) eq_(data['summary'], rev.summary) eq_(data['content'], rev.content) assert edited_fire.called assert ready_fire.called def _create_and_approve_first_translation(self): """Returns the revision.""" # First create the first one with test above self.test_first_translation_to_locale() # Approve the translation rev_es = Revision.objects.filter(document__locale='es')[0] rev_es.is_approved = True rev_es.save() return rev_es @mock.patch.object(ReviewableRevisionInLocaleEvent, 'fire') @mock.patch.object(EditDocumentEvent, 'fire') def test_another_translation_to_locale(self, edited_fire, ready_fire): """Create the second translation of a doc.""" rev_es = self._create_and_approve_first_translation() # Create and approve a new en-US revision rev_enUS = Revision(summary="lipsum", content='lorem ipsum dolor sit amet new', significance=SIGNIFICANCES[0][0], keywords='kw1 kw2', document=self.d, creator_id=UserFactory().id, is_ready_for_localization=True, is_approved=True) rev_enUS.save() # Verify the form renders with correct content url = reverse('wiki.translate', locale='es', args=[self.d.slug]) response = self.client.get(url) doc = pq(response.content) eq_(rev_es.content, doc('#id_content').text()) 
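        # The editable textarea is prefilled with the existing es translation;
        # the read-only English panel should show the new source revision.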
eq_(rev_enUS.content, doc('#content-fields textarea[readonly]').text()) eq_(2, len(doc('.recent-revisions li'))) # Post the translation and verify data = _translation_data() data['content'] = 'loremo ipsumo doloro sito ameto nuevo' response = self.client.post(url, data) eq_(302, response.status_code) eq_('/es/kb/un-test-articulo/history', response['location']) doc = Document.objects.get(slug=data['slug']) rev = doc.revisions.filter(content=data['content'])[0] eq_(data['keywords'], rev.keywords) eq_(data['summary'], rev.summary) eq_(data['content'], rev.content) assert not rev.is_approved assert edited_fire.called assert ready_fire.called @mock.patch.object(Site.objects, 'get_current') def test_translate_form_maintains_based_on_rev(self, get_current): """Revision.based_on should be the rev that was current when the Translate button was clicked, even if other revisions happen while the user is editing.""" get_current.return_value.domain = 'testserver' _test_form_maintains_based_on_rev(self.client, self.d, 'wiki.translate', _translation_data(), locale='es') def test_translate_update_doc_only(self): """Submitting the document form should update document. No new revisions should be created.""" add_permission(self.user, Document, 'change_document') rev_es = self._create_and_approve_first_translation() url = reverse('wiki.translate', locale='es', args=[self.d.slug]) data = _translation_data() new_title = 'Un nuevo titulo' data['title'] = new_title data['form'] = 'doc' response = self.client.post(url, data) eq_(302, response.status_code) eq_('/es/kb/un-test-articulo/edit?opendescription=1', response['location']) revisions = rev_es.document.revisions.all() eq_(1, revisions.count()) # No new revisions d = Document.objects.get(id=rev_es.document.id) eq_(new_title, d.title) # Title is updated def test_translate_update_rev_only(self): """Submitting the revision form should create a new revision. 
No document fields should be updated.""" rev_es = self._create_and_approve_first_translation() orig_title = rev_es.document.title url = reverse('wiki.translate', locale='es', args=[self.d.slug]) data = _translation_data() new_title = 'Un nuevo titulo' data['title'] = new_title data['form'] = 'rev' response = self.client.post(url, data) eq_(302, response.status_code) eq_('/es/kb/un-test-articulo/history', response['location']) revisions = rev_es.document.revisions.all() eq_(2, revisions.count()) # New revision is created d = Document.objects.get(id=rev_es.document.id) eq_(orig_title, d.title) # Title isn't updated def test_translate_form_content_fallback(self): """If there are existing but unapproved translations, prefill content with latest.""" self.test_first_translation_to_locale() url = reverse('wiki.translate', locale='es', args=[self.d.slug]) response = self.client.get(url) doc = pq(response.content) document = Document.objects.filter(locale='es')[0] existing_rev = document.revisions.all()[0] eq_(existing_rev.content, doc('#id_content').text()) def test_translate_based_on(self): """Test translating based on a non-current revision.""" # Create the base revision base_rev = self._create_and_approve_first_translation() # Create a new current revision r = ApprovedRevisionFactory(document=base_rev.document) d = Document.objects.get(pk=base_rev.document.id) eq_(r, base_rev.document.current_revision) url = reverse('wiki.new_revision_based_on', locale='es', args=[d.slug, base_rev.id]) response = self.client.get(url) eq_(200, response.status_code) doc = pq(response.content) eq_(doc('#id_content')[0].value.strip(), base_rev.content) def test_translate_rejected_parent(self): """Translate view of rejected English document shows warning.""" user = UserFactory() en_revision = RevisionFactory(is_approved=False, reviewer=user, reviewed=datetime.now()) url = reverse('wiki.translate', locale='es', args=[en_revision.document.slug]) response = self.client.get(url) doc = pq(response.content) assert doc('.user-messages .warning').text() def test_skip_unready_when_first_translation(self): """Never offer an unready-for-localization revision as initial translation source text.""" # Create an English document all ready to translate: en_doc = DocumentFactory(is_localizable=True) ApprovedRevisionFactory( document=en_doc, is_ready_for_localization=True, content='I am the ready!') ApprovedRevisionFactory(document=en_doc, is_ready_for_localization=False) url = reverse('wiki.translate', locale='de', args=[en_doc.slug]) response = self.client.get(url) self.assertContains(response, 'I am the ready!') def test_skip_unready_when_not_first_translation(self): """Never offer an unready-for-localization revision as diff text when bringing an already translated article up to date.""" # Create an initial translated revision so the version of the template # with the English-to-English diff shows up: initial_rev = TranslatedRevisionFactory(is_approved=True) doc = initial_rev.document en_doc = doc.parent ready = ApprovedRevisionFactory(document=en_doc, is_ready_for_localization=True) ApprovedRevisionFactory(document=en_doc, is_ready_for_localization=False) url = reverse('wiki.translate', locale=doc.locale, args=[en_doc.slug]) response = self.client.get(url) eq_(200, response.status_code) # Get the link to the rev on the right side of the diff: to_link = pq(response.content)('.revision-diff h3 a')[1].attrib['href'] assert to_link.endswith('/%s' % ready.pk) def test_translate_no_update_based_on(self): """Test translating based on a 
non-current revision.""" # Set up the base es revision base_es_rev = self._create_and_approve_first_translation() es_doc = base_es_rev.document enUS_doc = es_doc.parent base_es_rev.based_on = enUS_doc.current_revision base_es_rev.save() # Create a new current revision on the parent document. r = ApprovedRevisionFactory(document=es_doc.parent, is_ready_for_localization=True) url = reverse('wiki.edit_document', locale='es', args=[es_doc.slug]) data = _translation_data() data['form'] = 'rev' data['based_on'] = enUS_doc.current_revision_id # Passing no-update will create a new revision based on the same one # as the older revision. data['no-update'] = 'Yes' self.client.post(url, data) new_es_rev = es_doc.revisions.order_by('-id')[0] eq_(base_es_rev.based_on_id, new_es_rev.based_on_id) # Not passing no-update will create a new revision based on the latest # approved and ready for l10n en-US revision. del data['no-update'] self.client.post(url, data) new_es_rev = es_doc.revisions.order_by('-id')[0] eq_(r.id, new_es_rev.based_on_id) def test_show_translations_page(self): en = settings.WIKI_DEFAULT_LANGUAGE en_doc = DocumentFactory(locale=en, slug='english-slug') DocumentFactory(locale='de', parent=en_doc) url = reverse('wiki.show_translations', locale=settings.WIKI_DEFAULT_LANGUAGE, args=[en_doc.slug]) r = self.client.get(url) doc = pq(r.content) translated_locales = doc(".translated_locale") eq_(translated_locales.length, 2) eq_("English (en-US)", doc(".translated_locale").first().text()) eq_("Deutsch (de)", doc(".translated_locale:eq(1)").text()) def test_keywords_dont_require_permission(self): """Test keywords don't require permission when translating.""" old_rev = self._create_and_approve_first_translation() doc = old_rev.document u = UserFactory() self.client.login(username=u.username, password='testpass') # Edit the document: response = self.client.post( reverse('wiki.edit_document', args=[doc.slug], locale=doc.locale), {'summary': 'A brief summary', 'content': 'The article content', 'keywords': 'keyword1 keyword2', 'based_on': doc.parent.current_revision_id, 'form': 'rev'}) eq_(302, response.status_code) # Keywords should be updated new_rev = Revision.objects.filter(document=doc).order_by('-id')[0] eq_('keyword1 keyword2', new_rev.keywords) def _test_form_maintains_based_on_rev(client, doc, view, post_data, locale=None): """Confirm that the based_on value set in the revision created by an edit or translate form is the current_revision of the document as of when the form was first loaded, even if other revisions have been approved in the meantime.""" response = client.get(reverse(view, locale=locale, args=[doc.slug])) orig_rev = doc.current_revision eq_(orig_rev.id, int(pq(response.content)('input[name=based_on]').attr('value'))) # While Fred is editing the above, Martha approves a new rev: ApprovedRevisionFactory(document=doc) # Then Fred saves his edit: post_data_copy = {'based_on': orig_rev.id} post_data_copy.update(post_data) # Don't mutate arg. 
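    # Fred's POST still carries the based_on id captured when the form was
    # first rendered, so his new revision should point at orig_rev rather
    # than Martha's newer approved revision.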
response = client.post(reverse(view, locale=locale, args=[doc.slug]), data=post_data_copy) eq_(302, response.status_code) fred_rev = Revision.objects.all().order_by('-id')[0] eq_(orig_rev, fred_rev.based_on) class DocumentWatchTests(TestCaseBase): """Tests for un/subscribing to document edit notifications.""" def setUp(self): super(DocumentWatchTests, self).setUp() self.document = _create_document() ProductFactory() self.user = UserFactory() self.client.login(username=self.user.username, password='testpass') def test_watch_GET_405(self): """Watch document with HTTP GET results in 405.""" response = get(self.client, 'wiki.document_watch', args=[self.document.slug]) eq_(405, response.status_code) def test_unwatch_GET_405(self): """Unwatch document with HTTP GET results in 405.""" response = get(self.client, 'wiki.document_unwatch', args=[self.document.slug]) eq_(405, response.status_code) def test_watch_unwatch(self): """Watch and unwatch a document.""" # Subscribe response = post(self.client, 'wiki.document_watch', args=[self.document.slug]) eq_(200, response.status_code) assert EditDocumentEvent.is_notifying(self.user, self.document), ( 'Watch was not created') # Unsubscribe response = post(self.client, 'wiki.document_unwatch', args=[self.document.slug]) eq_(200, response.status_code) assert not EditDocumentEvent.is_notifying(self.user, self.document), ( 'Watch was not destroyed') class LocaleWatchTests(TestCaseBase): """Tests for un/subscribing to a locale's ready for review emails.""" def setUp(self): super(LocaleWatchTests, self).setUp() self.user = UserFactory() self.client.login(username=self.user, password='testpass') def test_watch_GET_405(self): """Watch document with HTTP GET results in 405.""" response = get(self.client, 'wiki.locale_watch') eq_(405, response.status_code) def test_unwatch_GET_405(self): """Unwatch document with HTTP GET results in 405.""" response = get(self.client, 'wiki.locale_unwatch') eq_(405, response.status_code) def test_watch_and_unwatch_by_locale(self): """Watch and unwatch a locale.""" # Subscribe response = post(self.client, 'wiki.locale_watch') eq_(200, response.status_code) assert ReviewableRevisionInLocaleEvent.is_notifying(self.user, locale='en-US') # Unsubscribe response = post(self.client, 'wiki.locale_unwatch') eq_(200, response.status_code) assert not ReviewableRevisionInLocaleEvent.is_notifying(self.user, locale='en-US') def test_watch_and_unwatch_by_locale_and_product(self): # Subscribe response = post(self.client, 'wiki.locale_watch', args=['firefox-os']) eq_(200, response.status_code) assert ReviewableRevisionInLocaleEvent.is_notifying( self.user, locale='en-US', product='firefox-os') # Unsubscribe response = post(self.client, 'wiki.locale_unwatch', args=['firefox-os']) eq_(200, response.status_code) assert not ReviewableRevisionInLocaleEvent.is_notifying( self.user, locale='en-US', product='firefox-os') class ArticlePreviewTests(TestCaseBase): """Tests for preview view and template.""" def setUp(self): super(ArticlePreviewTests, self).setUp() u = UserFactory() self.client.login(username=u.username, password='testpass') def test_preview_GET_405(self): """Preview with HTTP GET results in 405.""" response = get(self.client, 'wiki.preview') eq_(405, response.status_code) def test_preview(self): """Preview the wiki syntax content.""" d = _create_document() response = post(self.client, 'wiki.preview', { 'content': '=Test Content=', 'slug': d.slug, 'locale': d.locale, }) eq_(200, response.status_code) doc = pq(response.content) eq_('Test 
Content', doc('#doc-content h1').text()) def test_preview_locale(self): """Preview the wiki syntax content.""" # Create a test document and translation. d = _create_document() _create_document(title='Prueba', parent=d, locale='es') # Preview content that links to it and verify link is in locale. url = reverse('wiki.preview', locale='es') response = self.client.post(url, { 'content': '[[Test Document]]', 'slug': d.slug, 'locale': d.locale, }) eq_(200, response.status_code) doc = pq(response.content) link = doc('#doc-content a') eq_('Prueba', link.text()) eq_('/es/kb/prueba', link[0].attrib['href']) class HelpfulVoteTests(TestCaseBase): def setUp(self): super(HelpfulVoteTests, self).setUp() self.document = _create_document() ProductFactory() def test_vote_yes(self): """Test voting helpful.""" r = self.document.current_revision user_ = UserFactory() referrer = 'http://google.com/?q=test' query = 'test' self.client.login(username=user_.username, password='testpass') response = post(self.client, 'wiki.document_vote', {'helpful': 'Yes', 'revision_id': r.id, 'referrer': referrer, 'query': query}, args=[self.document.slug]) eq_(200, response.status_code) votes = HelpfulVote.objects.filter(revision=r, creator=user_) eq_(1, votes.count()) assert votes[0].helpful metadata = HelpfulVoteMetadata.objects.values_list('key', 'value') eq_(2, len(metadata)) metadata_dict = dict((k, v) for (k, v) in metadata) eq_(referrer, metadata_dict['referrer']) eq_(query, metadata_dict['query']) def test_vote_no(self): """Test voting not helpful.""" r = self.document.current_revision user_ = UserFactory() referrer = 'inproduct' query = '' self.client.login(username=user_.username, password='testpass') response = post(self.client, 'wiki.document_vote', {'not-helpful': 'No', 'revision_id': r.id, 'referrer': referrer, 'query': query}, args=[self.document.slug]) eq_(200, response.status_code) votes = HelpfulVote.objects.filter(revision=r, creator=user_) eq_(1, votes.count()) assert not votes[0].helpful metadata = HelpfulVoteMetadata.objects.values_list('key', 'value') eq_(1, len(metadata)) metadata_dict = dict((k, v) for (k, v) in metadata) eq_(referrer, metadata_dict['referrer']) def test_vote_anonymous(self): """Test that voting works for anonymous user.""" r = self.document.current_revision referrer = 'search' query = 'cookies' response = post(self.client, 'wiki.document_vote', {'helpful': 'Yes', 'revision_id': r.id, 'referrer': referrer, 'query': query}, args=[self.document.slug]) eq_(200, response.status_code) votes = HelpfulVote.objects.filter(revision=r, creator=None) votes = votes.exclude(anonymous_id=None) eq_(1, votes.count()) assert votes[0].helpful metadata = HelpfulVoteMetadata.objects.values_list('key', 'value') eq_(2, len(metadata)) metadata_dict = dict((k, v) for (k, v) in metadata) eq_(referrer, metadata_dict['referrer']) eq_(query, metadata_dict['query']) def test_vote_ajax(self): """Test voting via ajax.""" r = self.document.current_revision referrer = '' query = '' url = reverse('wiki.document_vote', args=[self.document.slug]) response = self.client.post( url, data={'helpful': 'Yes', 'revision_id': r.id, 'referrer': referrer, 'query': query}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') eq_(200, response.status_code) eq_('{"message": "Glad to hear it &mdash; thanks for the feedback!"}', response.content) votes = HelpfulVote.objects.filter(revision=r, creator=None) votes = votes.exclude(anonymous_id=None) eq_(1, votes.count()) assert votes[0].helpful metadata = HelpfulVoteMetadata.objects.values_list('key', 
'value') eq_(0, len(metadata)) def test_helpfulvotes_graph_async_yes(self): r = self.document.current_revision response = post(self.client, 'wiki.document_vote', {'helpful': 'Yes', 'revision_id': r.id}, args=[self.document.slug]) eq_(200, response.status_code) resp = get(self.client, 'wiki.get_helpful_votes_async', args=[r.document.slug]) eq_(200, resp.status_code) data = json.loads(resp.content) eq_(1, len(data['datums'])) assert 'yes' in data['datums'][0] assert 'no' in data['datums'][0] def test_helpfulvotes_graph_async_no(self): r = self.document.current_revision response = post(self.client, 'wiki.document_vote', {'helpful': 'No', 'revision_id': r.id}, args=[self.document.slug]) eq_(200, response.status_code) resp = get(self.client, 'wiki.get_helpful_votes_async', args=[r.document.slug]) eq_(200, resp.status_code) data = json.loads(resp.content) eq_(1, len(data['datums'])) assert 'yes' in data['datums'][0] assert 'no' in data['datums'][0] def test_helpfulvotes_graph_async_no_votes(self): r = self.document.current_revision resp = get(self.client, 'wiki.get_helpful_votes_async', args=[r.document.slug]) eq_(200, resp.status_code) data = json.loads(resp.content) eq_(0, len(data['datums'])) class SelectLocaleTests(TestCaseBase): """Test the locale selection page""" def setUp(self): super(SelectLocaleTests, self).setUp() self.d = _create_document() u = UserFactory() self.client.login(username=u.username, password='testpass') def test_page_renders_locales(self): """Load the page and verify it contains all the locales for l10n.""" response = get(self.client, 'wiki.select_locale', args=[self.d.slug]) eq_(200, response.status_code) doc = pq(response.content) eq_(len(settings.LANGUAGE_CHOICES), # All Locals including ' en-US'. len(doc('#select-locale ul.locales li'))) class RevisionDeleteTestCase(TestCaseBase): def test_delete_revision_without_permissions(self): """Deleting a revision without permissions sends 403.""" u = UserFactory() doc = DocumentFactory() rev = ApprovedRevisionFactory(document=doc) self.client.login(username=u.username, password='testpass') response = get(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) eq_(403, response.status_code) response = post(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) eq_(403, response.status_code) def test_delete_revision_logged_out(self): """Deleting a revision while logged out redirects to login.""" doc = DocumentFactory() rev = RevisionFactory(document=doc) response = get(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) redirect = response.redirect_chain[0] eq_(302, redirect[1]) eq_('/%s%s?next=/en-US/kb/%s/revision/%s/delete' % (settings.LANGUAGE_CODE, settings.LOGIN_URL, doc.slug, rev.id), redirect[0]) response = post(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) redirect = response.redirect_chain[0] eq_(302, redirect[1]) eq_('/%s%s?next=/en-US/kb/%s/revision/%s/delete' % (settings.LANGUAGE_CODE, settings.LOGIN_URL, doc.slug, rev.id), redirect[0]) def _test_delete_revision_with_permission(self): doc = DocumentFactory() rev1 = ApprovedRevisionFactory(document=doc) rev2 = ApprovedRevisionFactory(document=doc) response = get(self.client, 'wiki.delete_revision', args=[doc.slug, rev2.id]) eq_(200, response.status_code) response = post(self.client, 'wiki.delete_revision', args=[doc.slug, rev2.id]) eq_(200, response.status_code) assert not Revision.objects.filter(pk=rev2.id).exists() assert Revision.objects.filter(pk=rev1.id).exists() def test_delete_revision_with_permission(self): """Deleting 
a revision with permissions should work.""" u = UserFactory() add_permission(u, Revision, 'delete_revision') self.client.login(username=u.username, password='testpass') self._test_delete_revision_with_permission() def test_delete_revision_as_l10n_leader(self): """Deleting a revision as l10n leader should work.""" u = UserFactory() l10n = LocaleFactory(locale='en-US') l10n.leaders.add(u) self.client.login(username=u.username, password='testpass') self._test_delete_revision_with_permission() def test_delete_revision_as_l10n_reviewer(self): """Deleting a revision as l10n reviewer should work.""" u = UserFactory() l10n = LocaleFactory(locale='en-US') l10n.reviewers.add(u) self.client.login(username=u.username, password='testpass') self._test_delete_revision_with_permission() def test_delete_current_revision(self): """Deleting the current_revision of a document should update the current_revision to previous version.""" doc = DocumentFactory() rev1 = ApprovedRevisionFactory(document=doc) rev2 = ApprovedRevisionFactory(document=doc) u = UserFactory() add_permission(u, Revision, 'delete_revision') self.client.login(username=u.username, password='testpass') eq_(rev2, doc.current_revision) res = post(self.client, 'wiki.delete_revision', args=[doc.slug, rev2.id]) eq_(res.status_code, 200) doc = Document.objects.get(pk=doc.pk) eq_(rev1, doc.current_revision) def test_delete_only_revision(self): """If there is only one revision, it can't be deleted.""" u = UserFactory() add_permission(u, Revision, 'delete_revision') self.client.login(username=u.username, password='testpass') # Create document with only 1 revision doc = DocumentFactory() rev = RevisionFactory(document=doc) # Confirm page should show the message response = get(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) eq_(200, response.status_code) eq_('Unable to delete only revision of the document', pq(response.content)('h1.title').text()) # POST should return bad request and revision should still exist response = post(self.client, 'wiki.delete_revision', args=[doc.slug, rev.id]) eq_(400, response.status_code) Revision.objects.get(id=rev.id) class ApprovedWatchTests(TestCaseBase): """Tests for un/subscribing to revision approvals.""" def setUp(self): super(ApprovedWatchTests, self).setUp() self.user = UserFactory() self.client.login(username=self.user.username, password='testpass') def test_watch_GET_405(self): """Watch with HTTP GET results in 405.""" response = get(self.client, 'wiki.approved_watch') eq_(405, response.status_code) def test_unwatch_GET_405(self): """Unwatch with HTTP GET results in 405.""" response = get(self.client, 'wiki.approved_unwatch') eq_(405, response.status_code) def test_watch_unwatch(self): """Watch and unwatch a document.""" locale = 'es' # Subscribe response = post(self.client, 'wiki.approved_watch', locale=locale) eq_(200, response.status_code) assert ApproveRevisionInLocaleEvent.is_notifying( self.user, locale=locale) # Unsubscribe response = post(self.client, 'wiki.approved_unwatch', locale=locale) eq_(200, response.status_code) assert not ApproveRevisionInLocaleEvent.is_notifying( self.user, locale=locale) class DocumentDeleteTestCase(TestCaseBase): """Tests for document delete.""" def setUp(self): super(DocumentDeleteTestCase, self).setUp() self.document = DocumentFactory() self.user = UserFactory(username='testuser') def test_delete_document_without_permissions(self): """Deleting a document without permissions sends 403.""" self.client.login(username='testuser', password='testpass') response = 
get(self.client, 'wiki.document_delete', args=[self.document.slug]) eq_(403, response.status_code) response = post(self.client, 'wiki.document_delete', args=[self.document.slug]) eq_(403, response.status_code) def test_delete_document_logged_out(self): """Deleting a document while logged out redirects to login.""" response = get(self.client, 'wiki.document_delete', args=[self.document.slug]) redirect = response.redirect_chain[0] eq_(302, redirect[1]) eq_('/%s%s?next=/en-US/kb/%s/delete' % (settings.LANGUAGE_CODE, settings.LOGIN_URL, self.document.slug), redirect[0]) response = post(self.client, 'wiki.document_delete', args=[self.document.slug]) redirect = response.redirect_chain[0] eq_(302, redirect[1]) eq_('/%s%s?next=/en-US/kb/%s/delete' % (settings.LANGUAGE_CODE, settings.LOGIN_URL, self.document.slug), redirect[0]) def test_document_as_l10n_leader(self): """Deleting a document as l10n leader should work.""" l10n = LocaleFactory(locale='en-US') l10n.leaders.add(self.user) self._test_delete_document_with_permission() def test_document_as_l10n_reviewer(self): """Deleting a document as l10n leader should NOT work.""" l10n = LocaleFactory(locale='en-US') l10n.reviewers.add(self.user) self.test_delete_document_without_permissions() def _test_delete_document_with_permission(self): self.client.login(username='testuser', password='testpass') response = get(self.client, 'wiki.document_delete', args=[self.document.slug]) eq_(200, response.status_code) response = post(self.client, 'wiki.document_delete', args=[self.document.slug]) eq_(0, Document.objects.filter(pk=self.document.id).count()) def test_revision_with_permission(self): """Deleting a document with delete_document permission should work.""" add_permission(self.user, Document, 'delete_document') self._test_delete_document_with_permission() class RecentRevisionsTest(TestCaseBase): def setUp(self): self.u1 = UserFactory() self.u2 = UserFactory() eq_(Document.objects.count(), 0) _create_document(title='1', rev_kwargs={'creator': self.u1}) _create_document(title='2', rev_kwargs={ 'creator': self.u1, 'created': datetime(2013, 3, 1, 0, 0, 0, 0), }) _create_document( title='3', locale='de', rev_kwargs={'creator': self.u2}) _create_document( title='4', locale='fr', rev_kwargs={'creator': self.u2}) _create_document( title='5', locale='fr', rev_kwargs={'creator': self.u2}) self.url = reverse('wiki.revisions') def test_basic(self): res = self.client.get(self.url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 5) def test_locale_filtering(self): url = urlparams(self.url, locale='fr') res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 2) url = urlparams(self.url, locale='de') res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 1) def test_bad_locale(self): """A bad locale should not filter anything.""" url = urlparams(self.url, locale='asdf') res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 5) def test_user_filtering(self): url = urlparams(self.url, users=self.u1.username) res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 2) def test_date_filtering(self): url = urlparams(self.url, start='2013-03-02') res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) 
eq_(len(doc('#revisions-fragment ul li:not(.header)')), 4) url = urlparams(self.url, end='2013-03-02') res = self.client.get(url) eq_(res.status_code, 200) doc = pq(res.content) eq_(len(doc('#revisions-fragment ul li:not(.header)')), 1) # TODO: This should be a factory subclass def _create_document(title='Test Document', parent=None, locale=settings.WIKI_DEFAULT_LANGUAGE, doc_kwargs={}, rev_kwargs={}): d = DocumentFactory( title=title, html='<div>Lorem Ipsum</div>', category=TROUBLESHOOTING_CATEGORY, locale=locale, parent=parent, is_localizable=True, **doc_kwargs) d.save() r = ApprovedRevisionFactory( document=d, keywords='key1, key2', summary='lipsum', content='<div>Lorem Ipsum</div>', significance=SIGNIFICANCES[0][0], is_ready_for_localization=True, comment="Good job!", **rev_kwargs) r.created = r.created - timedelta(days=10) r.save() return d def _translation_data(): return { 'title': 'Un Test Articulo', 'slug': 'un-test-articulo', 'keywords': 'keyUno, keyDos, keyTres', 'summary': 'lipsumo', 'content': 'loremo ipsumo doloro sito ameto'}
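
# Editor's sketch (not part of the original file): one way to address the
# "factory subclass" TODO above using factory_boy's post-generation hooks.
# The class name and the hook below are hypothetical; the defaults simply
# mirror what _create_document() sets up today, with **kwargs playing the
# role of rev_kwargs.
import factory


class LocalizedDocumentFactory(DocumentFactory):
    """Hypothetical factory equivalent of _create_document()."""
    title = 'Test Document'
    html = '<div>Lorem Ipsum</div>'
    category = TROUBLESHOOTING_CATEGORY
    locale = settings.WIKI_DEFAULT_LANGUAGE
    is_localizable = True

    @factory.post_generation
    def approved_revision(obj, create, extracted, **kwargs):
        # Attach the backdated, ready-for-l10n revision that
        # _create_document() creates by hand.
        if create:
            rev = ApprovedRevisionFactory(
                document=obj,
                keywords='key1, key2',
                summary='lipsum',
                content='<div>Lorem Ipsum</div>',
                significance=SIGNIFICANCES[0][0],
                is_ready_for_localization=True,
                **kwargs)
            rev.created = rev.created - timedelta(days=10)
            rev.save()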
anushbmx/kitsune
kitsune/wiki/tests/test_templates.py
Python
bsd-3-clause
121,308
# Extends the Calculadora base class with an interactive input routine.
from calculadora_tests import Calculadora


class CalculadoraHP(Calculadora):

    def obter_entradas(self):
        # Prompt for the operator sign of the desired operation.
        valor = input('Digite o sinal da operação desejada: ')
        self.sinal = valor
        # Prompt for the first number.
        valor = input('Digite o primeiro número: ')
        self.entrada = int(valor)
        # Prompt for the second number.
        valor = input('Digite o segundo número: ')
        self.entrada2 = int(valor)
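
# Editor's usage sketch (not in the original file). The Calculadora base
# class lives in calculadora_tests and its calculation interface is not shown
# here, so only obter_entradas() is exercised; reading .sinal / .entrada /
# .entrada2 afterwards relies on the attributes set above.
if __name__ == '__main__':
    calc = CalculadoraHP()
    calc.obter_entradas()  # e.g. '+', 2, 3
    print(calc.sinal, calc.entrada, calc.entrada2)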
renzon/reqgithub
calculadora_extendida.py
Python
mit
373
"""Configuration for ACLs.""" # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. # Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd. # Copyright (C) 2015--2019 The Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import netaddr from os_ken.ofproto import ether from faucet import valve_of from faucet import valve_acl from faucet.valve_of import MATCH_FIELDS, OLD_MATCH_FIELDS from faucet.conf import Conf, test_config_condition, InvalidConfigError from faucet.valve_table import wildcard_table class ACL(Conf): """Contains the state for an ACL, including the configuration. ACL Config ACLs are configured under the 'acls' configuration block. The acls block contains a dictionary of individual acls each keyed by its name. Each acl contains a list of rules, a packet will have the first matching rule applied to it. Each rule is a dictionary containing the single key 'rule' with the value the matches and actions for the rule. The matches are key/values based on the ryu RESTFul API. The key 'actions' contains a dictionary with keys/values as follows: * allow (int): if 1 allow the packet to continue through the Faucet \ pipeline, if 0 drop the packet. * force_port_vlan (int): if 1, do not verify the VLAN/port association \ for this packet and override any VLAN ACL on the forced VLAN. * meter (str): meter to apply to the packet * output (dict): used to output a packet directly. details below. 
* cookie (int): set flow cookie to this value on this flow The output action contains a dictionary with the following elements: * tunnel (dict): the tunnel formation, creates a tunnel from the applied port(s) \ to the specified destination * port (int or string): the port to output the packet to * ports (list): a list of the ports (int or string) to output the packet to * set_fields (list): a list of fields to set with values * pop_vlans: (int): pop the packet vlan before outputting * vlan_vid: (int): push the vlan vid on the packet when outputting * vlan_vids: (list): push the list of vlans on the packet when outputting, with option eth_type * swap_vid (int): rewrite the vlan vid of the packet when outputting * failover (dict): Output with a failover port (experimental) """ defaults = { 'rules': None, 'exact_match': False, 'dot1x_assigned': False, } defaults_types = { 'rules': list, 'exact_match': bool, 'dot1x_assigned': bool, } rule_types = { 'cookie': int, 'actions': dict, 'description': str, } actions_types = { 'meter': str, 'mirror': (str, int), 'output': (dict, list), 'allow': int, 'force_port_vlan': int, 'ct': dict, } output_actions_types = { 'tunnel': dict, 'port': (str, int), 'ports': list, 'failover': dict, 'set_fields': list, 'pop_vlans': int, 'swap_vid': int, 'vlan_vid': int, 'vlan_vids': list, } ct_action_types = { 'flags': int, 'alg': int, 'table': int, 'zone': int, 'zone_src': int, 'clear': bool, 'nat': dict, } ct_action_nat_types = { 'flags': int, 'range_ipv4_min': str, 'range_ipv4_max': str, 'range_ipv6_min': str, 'range_ipv6_max': str, 'range_proto_min': int, 'range_proto_max': int } tunnel_types = { 'type': (str, None), 'tunnel_id': (str, int, None), 'dp': str, 'port': (str, int, None), 'exit_instructions': (list, None), 'maintain_encapsulation': bool, 'bi_directional': bool, 'reverse': bool, } mutable_attrs = frozenset(['tunnel_sources']) def __init__(self, _id, dp_id, conf): self.rules = [] self.exact_match = None self.dot1x_assigned = None self.meter = False self.matches = {} self.set_fields = set() self._ports_resolved = False # Tunnel info maintains the tunnel output information for each tunnel rule self.tunnel_dests = {} # Tunnel sources is a list of the sources in the network for this ACL self.tunnel_sources = {} # Tunnel rules is the rules for each tunnel in the ACL for each source self.dyn_tunnel_rules = {} self.dyn_reverse_tunnel_rules = {} for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS): self.rule_types.update({match: (str, int) for match in match_fields}) conf = copy.deepcopy(conf) if isinstance(conf, dict): rules = conf.get('rules', []) elif isinstance(conf, list): rules = conf conf = {} else: raise InvalidConfigError( 'ACL conf is an invalid type %s' % _id) conf['rules'] = [] for rule in rules: normalized_rule = rule if isinstance(rule, dict): normalized_rule = rule.get('rule', rule) if normalized_rule is None: normalized_rule = {k: v for k, v in rule.items() if v is not None} test_config_condition(not isinstance(normalized_rule, dict), ( 'ACL rule is %s not %s (%s)' % (type(normalized_rule), dict, rules))) conf['rules'].append(normalized_rule) super().__init__(_id, dp_id, conf) def finalize(self): self._ports_resolved = True super().finalize() def check_config(self): test_config_condition( not self.rules, 'no rules found for ACL %s' % self._id) for rule in self.rules: self._check_conf_types(rule, self.rule_types) for rule_field, rule_conf in rule.items(): if rule_field == 'cookie': test_config_condition( rule_conf < 0 or rule_conf > 2**16, 'rule 
cookie value must be 0-2**16') elif rule_field == 'actions': test_config_condition( not rule_conf, 'Missing rule actions in ACL %s' % self._id) self._check_conf_types(rule_conf, self.actions_types) for action_name, action_conf in rule_conf.items(): if action_name == 'output': if isinstance(action_conf, (list, tuple)): # New ordered format for subconf in action_conf: # Make sure only one specified action per list element test_config_condition( len(subconf) > 1, 'ACL ordered output must have only one action per element') # Ensure correct action format self._check_conf_types(subconf, self.output_actions_types) else: # Old format self._check_conf_types( action_conf, self.output_actions_types) elif action_name == 'ct': self._check_conf_types(action_conf, self.ct_action_types) # if clear set, make sure nothing else is if 'clear' in action_conf and action_conf['clear']: test_config_condition( len(action_conf) != 1, "no other parameters can be set when 'clear' set on " "conntrack ACL") else: test_config_condition( 'table' not in action_conf, "required parameter 'table' not set for conntrack ACL") test_config_condition( 'zone' not in action_conf, "required parameter 'zone' not set for conntrack ACL") if 'nat' in action_conf: self._check_conf_types(action_conf['nat'], self.ct_action_nat_types) def build(self, meters, vid, port_num): """Check that ACL can be built from config.""" self.matches = {} self.set_fields = set() self.meter = False if self.rules: try: ofmsgs = valve_acl.build_acl_ofmsgs( [self], wildcard_table, [valve_of.goto_table(wildcard_table)], [valve_of.goto_table(wildcard_table)], 2**16 - 1, meters, self.exact_match, vlan_vid=vid, port_num=port_num) except (netaddr.core.AddrFormatError, KeyError, ValueError) as err: raise InvalidConfigError from err test_config_condition(not ofmsgs, 'OF messages is empty') for ofmsg in ofmsgs: try: valve_of.verify_flowmod(ofmsg) except (KeyError, ValueError) as err: raise InvalidConfigError from err except Exception as err: raise err if valve_of.is_flowmod(ofmsg): apply_actions = [] for inst in ofmsg.instructions: if valve_of.is_apply_actions(inst): apply_actions.extend(inst.actions) elif valve_of.is_meter(inst): self.meter = True for action in apply_actions: if valve_of.is_set_field(action): self.set_fields.add(action.key) for match, value in ofmsg.match.items(): has_mask = isinstance(value, (tuple, list)) if has_mask or match not in self.matches: self.matches[match] = has_mask for tunnel_rules in self.tunnel_dests.values(): if 'exit_instructions' in tunnel_rules: exit_inst = tunnel_rules['exit_instructions'] try: ofmsgs = valve_acl.build_tunnel_ofmsgs( exit_inst, wildcard_table, 1) except (netaddr.core.AddrFormatError, KeyError, ValueError) as err: raise InvalidConfigError from err test_config_condition(not ofmsgs, 'OF messages is empty') for ofmsg in ofmsgs: try: valve_of.verify_flowmod(ofmsg) except (KeyError, ValueError) as err: raise InvalidConfigError from err except Exception as err: raise err if valve_of.is_flowmod(ofmsg): apply_actions = [] for inst in ofmsg.instructions: if valve_of.is_apply_actions(inst): apply_actions.extend(inst.actions) elif valve_of.is_meter(inst): self.meter = True for action in apply_actions: if valve_of.is_set_field(action): self.set_fields.add(action.key) for match, value in ofmsg.match.items(): has_mask = isinstance(value, (tuple, list)) if has_mask or match not in self.matches: self.matches[match] = has_mask return (self.matches, self.set_fields, self.meter) def get_meters(self): """Yield meters for each rule in 
ACL""" for rule in self.rules: if 'actions' not in rule or 'meter' not in rule['actions']: continue yield rule['actions']['meter'] def get_mirror_destinations(self): """Yield mirror destinations for each rule in ACL""" for rule in self.rules: if 'actions' not in rule or 'mirror' not in rule['actions']: continue yield rule['actions']['mirror'] def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tunnel_objects): """Resolve output actions in the ordered list format""" result = [] for action in output_list: for key, value in action.items(): if key == 'tunnel': tunnel = value # Fetch tunnel items from the tunnel output dict test_config_condition( 'dp' not in tunnel, 'ACL (%s) tunnel DP not defined' % self._id) tunnel_dp = tunnel['dp'] tunnel_port = tunnel.get('port', None) tunnel_id = tunnel.get('tunnel_id', None) tunnel_type = tunnel.get('type', 'vlan') tunnel_exit_instructions = tunnel.get('exit_instructions', []) tunnel_direction = tunnel.get('bi_directional', False) tunnel_maintain = tunnel.get('maintain_encapsulation', False) tunnel_reverse = tunnel.get('reverse', False) test_config_condition( tunnel_reverse and tunnel_direction, ('Tunnel ACL %s cannot contain values for the fields' '`bi_directional` and `reverse` at the same time' % self._id)) # Resolve the tunnel items dst_dp, dst_port, tunnel_id = resolve_tunnel_objects( tunnel_dp, tunnel_port, tunnel_id) # Compile the tunnel into an easy-access dictionary tunnel_dict = { 'dst_dp': dst_dp, 'dst_port': dst_port, 'tunnel_id': tunnel_id, 'type': tunnel_type, 'exit_instructions': tunnel_exit_instructions, 'bi_directional': tunnel_direction, 'maintain_encapsulation': tunnel_maintain, 'reverse': tunnel_reverse, } self.tunnel_dests[tunnel_id] = tunnel_dict result.append({key: tunnel_id}) elif key == 'port': port_name = value port = resolve_port_cb(port_name) test_config_condition( not port, 'ACL (%s) output port undefined in DP: %s' % (self._id, self.dp_id)) result.append({key: port}) elif key == 'ports': resolved_ports = [ resolve_port_cb(p) for p in value] test_config_condition( None in resolved_ports, 'ACL (%s) output port(s) not defined in DP: %s' % (self._id, self.dp_id)) result.append({key: resolved_ports}) elif key == 'failover': failover = value test_config_condition(not isinstance(failover, dict), ( 'failover is not a dictionary')) failover_dict = {} for failover_name, failover_values in failover.items(): if failover_name == 'ports': resolved_ports = [ resolve_port_cb(p) for p in failover_values] test_config_condition( None in resolved_ports, 'ACL (%s) failover port(s) not defined in DP: %s' % ( self._id, self.dp_id)) failover_dict[failover_name] = resolved_ports else: failover_dict[failover_name] = failover_values result.append({key: failover_dict}) else: result.append(action) return result def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_objects): """Resolve the values for output actions in the ACL""" if isinstance(action_conf, (list, tuple)): return self._resolve_ordered_output_ports( action_conf, resolve_port_cb, resolve_tunnel_objects) result = {} test_config_condition( 'vlan_vid' in action_conf and 'vlan_vids' in action_conf, 'ACL %s has both vlan_vid and vlan_vids defined' % self._id) test_config_condition( 'port' in action_conf and 'ports' in action_conf, 'ACL %s has both port and ports defined' % self._id) for output_action, output_action_values in action_conf.items(): if output_action == 'tunnel': tunnel = output_action_values # Fetch tunnel items from the tunnel output 
dict test_config_condition( 'dp' not in tunnel, 'ACL (%s) tunnel DP not defined' % self._id) tunnel_dp = tunnel['dp'] tunnel_port = tunnel.get('port', None) tunnel_id = tunnel.get('tunnel_id', None) tunnel_type = tunnel.get('type', 'vlan') tunnel_exit_instructions = tunnel.get('exit_instructions', []) tunnel_direction = tunnel.get('bi_directional', False) tunnel_maintain = tunnel.get('maintain_encapsulation', False) tunnel_reverse = tunnel.get('reverse', False) test_config_condition( tunnel_reverse and tunnel_direction, ('Tunnel ACL %s cannot contain values for the fields' '`bi_directional` and `reverse` at the same time' % self._id)) # Resolve the tunnel items dst_dp, dst_port, tunnel_id = resolve_tunnel_objects( tunnel_dp, tunnel_port, tunnel_id) # Compile the tunnel into an easy-access dictionary tunnel_dict = { 'dst_dp': dst_dp, 'dst_port': dst_port, 'tunnel_id': tunnel_id, 'type': tunnel_type, 'exit_instructions': tunnel_exit_instructions, 'bi_directional': tunnel_direction, 'maintain_encapsulation': tunnel_maintain, 'reverse': tunnel_reverse, } self.tunnel_dests[tunnel_id] = tunnel_dict result[output_action] = tunnel_id elif output_action == 'port': port_name = output_action_values port = resolve_port_cb(port_name) test_config_condition( not port, ('ACL (%s) output port undefined in DP: %s' % (self._id, self.dp_id)) ) result[output_action] = port elif output_action == 'ports': resolved_ports = [ resolve_port_cb(p) for p in output_action_values] test_config_condition( None in resolved_ports, ('ACL (%s) output port(s) not defined in DP: %s' % (self._id, self.dp_id)) ) result[output_action] = resolved_ports elif output_action == 'failover': failover = output_action_values test_config_condition(not isinstance(failover, dict), ( 'failover is not a dictionary')) result[output_action] = {} for failover_name, failover_values in failover.items(): if failover_name == 'ports': resolved_ports = [ resolve_port_cb(p) for p in failover_values] test_config_condition( None in resolved_ports, ('ACL (%s) failover port(s) not defined in DP: %s' % (self._id, self.dp_id)) ) result[output_action][failover_name] = resolved_ports else: result[output_action][failover_name] = failover_values else: result[output_action] = output_action_values return result def resolve_ports(self, resolve_port_cb, resolve_tunnel_objects): """Resolve the values for the actions of an ACL""" if self._ports_resolved: return for rule_conf in self.rules: if 'actions' in rule_conf: actions_conf = rule_conf['actions'] resolved_actions = {} test_config_condition(not isinstance(actions_conf, dict), ( 'actions value is not a dictionary')) for action_name, action_conf in actions_conf.items(): if action_name == 'mirror': resolved_port = resolve_port_cb(action_conf) test_config_condition( resolved_port is None, ('ACL (%s) mirror port is not defined in DP: %s' % (self._id, self.dp_id)) ) resolved_actions[action_name] = resolved_port elif action_name == 'output': resolved_action = self._resolve_output_ports( action_conf, resolve_port_cb, resolve_tunnel_objects) resolved_actions[action_name] = resolved_action else: resolved_actions[action_name] = action_conf rule_conf['actions'] = resolved_actions self._ports_resolved = True def requires_reverse_tunnel(self, tunnel_id): """Returns true if the tunnel requires a reverse pathway""" return self.tunnel_dests[tunnel_id]['bi_directional'] def get_num_tunnels(self): """Returns the number of tunnels specified in the ACL""" num_tunnels = 0 for rule_conf in self.rules: if 
self.does_rule_contain_tunnel(rule_conf): output_conf = rule_conf['actions']['output'] if isinstance(output_conf, list): for action in output_conf: for key in action: if key == 'tunnel': num_tunnels += 1 else: if 'tunnel' in output_conf: num_tunnels += 1 return num_tunnels def get_tunnel_rules(self, tunnel_id): """Return the list of rules that apply a specific tunnel ID""" rules = [] for rule_conf in self.rules: if self.does_rule_contain_tunnel(rule_conf): output_conf = rule_conf['actions']['output'] if isinstance(output_conf, (list, tuple)): for action in output_conf: for key, value in action.items(): if key == 'tunnel' and value == tunnel_id: rules.append(rule_conf) continue else: if output_conf['tunnel'] == tunnel_id: rules.append(rule_conf) return rules @staticmethod def does_rule_contain_tunnel(rule_conf): """Return true if the ACL rule contains a tunnel""" if 'actions' in rule_conf: if 'output' in rule_conf['actions']: output_conf = rule_conf['actions']['output'] if isinstance(output_conf, (list, tuple)): for action in output_conf: for key in action: if key == 'tunnel': return True else: if 'tunnel' in output_conf: return True return False def is_tunnel_acl(self): """Return true if the ACL contains a tunnel""" if self.tunnel_dests: return True for rule_conf in self.rules: if self.does_rule_contain_tunnel(rule_conf): return True return False @staticmethod def _tunnel_source_id(source): """Return ID for a tunnel source.""" return tuple(sorted(source.items())) def add_tunnel_source(self, dp_name, port, reverse=False, bi_directional=False): """Add a source dp/port pair for the tunnel ACL""" source = {'dp': dp_name, 'port': port, 'reverse': reverse, 'bi_directional': bi_directional} source_id = self._tunnel_source_id(source) self.tunnel_sources[source_id] = source for _id in self.tunnel_dests: self.dyn_tunnel_rules.setdefault(_id, {}) self.dyn_reverse_tunnel_rules.setdefault(_id, {}) def verify_tunnel_rules(self): """Make sure that matches & set fields are configured correctly to handle tunnels""" if 'eth_type' not in self.matches: self.matches['eth_type'] = False if 'in_port' not in self.matches: self.matches['in_port'] = False if 'vlan_vid' not in self.matches: self.matches['vlan_vid'] = False if 'vlan_vid' not in self.set_fields: self.set_fields.add('vlan_vid') if 'vlan_pcp' not in self.matches: self.matches['vlan_pcp'] = False if 'vlan_pcp' not in self.set_fields: self.set_fields.add('vlan_pcp') def update_reverse_tunnel_rules(self, curr_dp, source_id, tunnel_id, out_port, output_table): """Update the tunnel rulelist for when the output port has changed (reverse direction)""" if not self.requires_reverse_tunnel(tunnel_id): return False dst_dp = self.tunnel_sources[source_id]['dp'] src_dp = self.tunnel_dests[tunnel_id]['dst_dp'] prev_list = self.dyn_reverse_tunnel_rules[tunnel_id].get(source_id, []) new_list = [] if curr_dp == src_dp and curr_dp != dst_dp: # SRC DP: vlan_vid, vlan_pcp, actions=[out_port] # NOTE: For the bi_directional reverse tunnel, we assume that # the packet already has the required encapsulation new_list = [{'port': out_port}] elif curr_dp == dst_dp and curr_dp != src_dp: # DST DP: vlan_vid, vlan_pcp, actions=[pop_vlans, output] new_list = [{'pop_vlans': 1}] if out_port is None: # DP dest tunnel, so we fall through into the eth_dst output table new_list.append({'goto': output_table.table_id}) else: # Tunnel has port specified, so output to destination new_list.append({'port': out_port}) elif curr_dp == src_dp and curr_dp == dst_dp: # SINGLE DP: actions=[pop_vlans, 
out_port] new_list = [{'pop_vlans': 1}] if out_port is None: # DP dest tunnel, so we fall through into the eth_dst output table new_list.extend([{'goto': output_table.table_id}]) else: # Tunnel has port specified, so output to destination new_list.extend([{'port': out_port}]) else: # TRANSIT DP: vlan_vid, vlan_pcp, actions=[output] new_list = [{'port': out_port}] if new_list != prev_list: self.dyn_reverse_tunnel_rules[tunnel_id][source_id] = new_list return True return True def update_source_tunnel_rules(self, curr_dp, source_id, tunnel_id, out_port, output_table): """Update the tunnel rulelist for when the output port has changed""" src_dp = self.tunnel_sources[source_id]['dp'] dst_dp = self.tunnel_dests[tunnel_id]['dst_dp'] prev_list = self.dyn_tunnel_rules[tunnel_id].get(source_id, []) new_list = [] pcp_flag = valve_of.PCP_TUNNEL_FLAG if self.tunnel_dests[tunnel_id]['reverse']: pcp_flag = valve_of.PCP_TUNNEL_REVERSE_DIRECTION_FLAG if curr_dp == src_dp and curr_dp != dst_dp: # SRC DP: in_port, actions=[push_vlan, output, pop_vlans] # Ideally, we would be able to detect if the tunnel has an `allow` action clause. # However, this is difficult as a single ACL can have multiple rules using the same # tunnel, but with one instance requiring the `allow` clause and another, not. # This means it is easier to always append the `pop_vlans` in assumption that the # `allow` action does exist, and then optimize/reduce the redundant rules before # outputting the flowrule. # We also set the tunnel VLAN header with a PCP value indicating that we are in # the tunnel, which will save the VLANs from being reserved. new_list = [ {'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]}, {'set_fields': [{'vlan_pcp': pcp_flag}]}, {'port': out_port}, {'pop_vlans': 1}] elif curr_dp == dst_dp and curr_dp != src_dp: # DST DP: in_port, vlan_vid, actions=[pop_vlan, additional_instructions, output] # If exit_instructions are applied, then we want to pop off the tunnel # VLAN header, then apply the additional instructions, then output if self.tunnel_dests[tunnel_id]['maintain_encapsulation']: # We wish to maintain tunnel encapsulation before outputting # So do not add the pop_vlans rule new_list = [] else: new_list = [{'pop_vlans': 1}] exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', []) new_list.extend(copy.copy(list(exit_instructions))) if out_port is None: # DP dest tunnel, so we fall through into the eth_dst output table new_list.append({'goto': output_table.table_id}) else: # Tunnel has port specified, so output to destination new_list.append({'port': out_port}) elif curr_dp == src_dp and curr_dp == dst_dp: # SINGLE DP: in_port, actions=[additional_instructions, out_port] exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', []) new_list.extend(copy.copy(list(exit_instructions))) if self.tunnel_dests[tunnel_id].get('maintain_encapsulation', False): # Maintain encapsulation implies we want the tunnel VID on the packet, # so ensure it is purposefully put onto the packet, even when # there would originally be no need to push on a tunnel VID new_list.extend([ {'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]}, {'set_fields': [{'vlan_pcp': pcp_flag}]}]) if out_port is None: # DP dest tunnel, so we fall through into the eth_dst output table new_list.extend([{'goto': output_table.table_id}]) else: # Tunnel has port specified, so output to destination new_list.extend([{'port': out_port}]) else: # TRANSIT DP: in_port, vlan_vid, actions=[output] 
new_list = [{'port': out_port}] if new_list != prev_list: self.dyn_tunnel_rules[tunnel_id][source_id] = new_list return True return True # NOTE: 802.1x steals the port ACL table. PORT_ACL_8021X = ACL( 'port_acl_8021x', 0, {'rules': [ {'eth_type': 1, 'eth_src': '01:02:03:04:05:06', 'actions': {'output': { 'port': valve_of.ofp.OFPP_LOCAL, 'set_fields': [ {'eth_src': '01:02:03:04:05:06'}, {'eth_dst': '01:02:03:04:05:06'}]}}}]}) PORT_ACL_8021X.build({}, None, 1) MAB_ACL_8021X = ACL( 'mab_acl_8021x', 0, {'rules': [{ 'eth_type': valve_of.ether.ETH_TYPE_IP, 'eth_src': '01:02:03:04:05:06', 'ip_proto': valve_of.inet.IPPROTO_UDP, 'udp_src': 68, 'udp_dst': 67, 'actions': {'output': {'port': valve_of.ofp.OFPP_LOCAL}}}]}) MAB_ACL_8021X.build({}, None, 1)
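
# --- Illustrative only (editor's sketch, not part of the original module) ---
# A minimal ACL constructed the same way as PORT_ACL_8021X/MAB_ACL_8021X
# above, exercising actions documented in the ACL class docstring: 'allow'
# plus the ordered 'output' list format. The match values below are made up
# for illustration, and resolve_ports()/build() would still have to be run
# against a real DP config before this ACL could produce flows.
EXAMPLE_ACL = ACL(
    'example_acl', 0,
    {'rules': [
        {'eth_type': valve_of.ether.ETH_TYPE_IP,
         'ipv4_src': '10.0.0.0/24',
         'actions': {
             'allow': 1,
             'output': [
                 {'vlan_vid': 100},    # push VLAN 100 on the way out
                 {'port': 1}]}},       # then output to port 1
        {'actions': {'allow': 0}}]})   # everything else: drop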
REANNZ/faucet
faucet/acl.py
Python
apache-2.0
33,575
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# <pep8 compliant>

import os

import bpy
import mathutils
import bpy_extras.io_utils

from progress_report import ProgressReport, ProgressReportSubstep


def name_compat(name):
    if name is None:
        return 'None'
    else:
        return name.replace(' ', '_')


def mesh_triangulate(me):
    import bmesh
    bm = bmesh.new()
    bm.from_mesh(me)
    bmesh.ops.triangulate(bm, faces=bm.faces)
    bm.to_mesh(me)
    bm.free()


def write_mtl(scene, filepath, path_mode, copy_set, mtl_dict):
    from mathutils import Color, Vector

    world = scene.world
    if world:
        world_amb = world.ambient_color
    else:
        world_amb = Color((0.0, 0.0, 0.0))

    source_dir = os.path.dirname(bpy.data.filepath)
    dest_dir = os.path.dirname(filepath)

    with open(filepath, "w", encoding="utf8", newline="\n") as f:
        fw = f.write

        fw('# Blender MTL File: %r\n' % (os.path.basename(bpy.data.filepath) or "None"))
        fw('# Material Count: %i\n' % len(mtl_dict))

        mtl_dict_values = list(mtl_dict.values())
        mtl_dict_values.sort(key=lambda m: m[0])

        # Write material/image combinations we have used.
        # Using mtl_dict.values() directly gives un-predictable order.
        for mtl_mat_name, mat, face_img in mtl_dict_values:
            # Get the Blender data for the material and the image.
            # Having an image named None will make a bug, don't do it :)

            fw('\nnewmtl %s\n' % mtl_mat_name)  # Define a new material: matname_imgname

            if mat:
                use_mirror = mat.raytrace_mirror.use and mat.raytrace_mirror.reflect_factor != 0.0

                # convert from blenders spec to 0 - 1000 range.
                if mat.specular_shader == 'WARDISO':
                    tspec = (0.4 - mat.specular_slope) / 0.0004
                else:
                    tspec = (mat.specular_hardness - 1) / 0.51
                fw('Ns %.6f\n' % tspec)
                del tspec

                # Ambient
                if use_mirror:
                    fw('Ka %.6f %.6f %.6f\n' % (mat.raytrace_mirror.reflect_factor * mat.mirror_color)[:])
                else:
                    fw('Ka %.6f %.6f %.6f\n' % (mat.ambient, mat.ambient, mat.ambient))  # Do not use world color!
                fw('Kd %.6f %.6f %.6f\n' % (mat.diffuse_intensity * mat.diffuse_color)[:])  # Diffuse
                fw('Ks %.6f %.6f %.6f\n' % (mat.specular_intensity * mat.specular_color)[:])  # Specular
                # Emission, not in original MTL standard but seems pretty common, see T45766.
                # XXX Blender has no color emission, it's using diffuse color instead...
                fw('Ke %.6f %.6f %.6f\n' % (mat.emit * mat.diffuse_color)[:])
                if hasattr(mat, "raytrace_transparency") and hasattr(mat.raytrace_transparency, "ior"):
                    fw('Ni %.6f\n' % mat.raytrace_transparency.ior)  # Refraction index
                else:
                    fw('Ni %.6f\n' % 1.0)
                fw('d %.6f\n' % mat.alpha)  # Alpha (obj uses 'd' for dissolve)

                # See http://en.wikipedia.org/wiki/Wavefront_.obj_file for whole list of values...
                # Note that mapping is rather fuzzy sometimes, trying to do our best here.
                if mat.use_shadeless:
                    fw('illum 0\n')  # ignore lighting
                elif mat.specular_intensity == 0:
                    fw('illum 1\n')  # no specular.
                elif use_mirror:
                    if mat.use_transparency and mat.transparency_method == 'RAYTRACE':
                        if mat.raytrace_mirror.fresnel != 0.0:
                            fw('illum 7\n')  # Reflection, Transparency, Ray trace and Fresnel
                        else:
                            fw('illum 6\n')  # Reflection, Transparency, Ray trace
                    elif mat.raytrace_mirror.fresnel != 0.0:
                        fw('illum 5\n')  # Reflection, Ray trace and Fresnel
                    else:
                        fw('illum 3\n')  # Reflection and Ray trace
                elif mat.use_transparency and mat.transparency_method == 'RAYTRACE':
                    fw('illum 9\n')  # 'Glass' transparency and no Ray trace reflection... fuzzy matching, but...
                else:
                    fw('illum 2\n')  # light normally
            else:
                # Write a dummy material here?
                fw('Ns 0\n')
                fw('Ka %.6f %.6f %.6f\n' % world_amb[:])  # Ambient, uses mirror color,
                fw('Kd 0.8 0.8 0.8\n')
                fw('Ks 0.8 0.8 0.8\n')
                fw('d 1\n')  # No alpha
                fw('illum 2\n')  # light normally

            # Write images!
            if face_img:  # We have an image on the face!
                filepath = face_img.filepath
                if filepath:  # may be '' for generated images
                    # write relative image path
                    filepath = bpy_extras.io_utils.path_reference(filepath, source_dir, dest_dir,
                                                                  path_mode, "", copy_set, face_img.library)
                    fw('map_Kd %s\n' % filepath)  # Diffuse mapping image
                    del filepath
                else:
                    # so we write the material's image.
                    face_img = None

            if mat:  # No face image. If we have a material, search for MTex image.
                image_map = {}
                # backwards so topmost are highest priority
                for mtex in reversed(mat.texture_slots):
                    if mtex and mtex.texture and mtex.texture.type == 'IMAGE':
                        image = mtex.texture.image
                        if image:
                            # texface overrides others
                            if (mtex.use_map_color_diffuse and (face_img is None) and
                                    (mtex.use_map_warp is False) and (mtex.texture_coords != 'REFLECTION')):
                                image_map["map_Kd"] = (mtex, image)
                            if mtex.use_map_ambient:
                                image_map["map_Ka"] = (mtex, image)
                            # this is the Spec intensity channel but Ks stands for specular Color
                            '''
                            if mtex.use_map_specular:
                                image_map["map_Ks"] = (mtex, image)
                            '''
                            if mtex.use_map_color_spec:  # specular color
                                image_map["map_Ks"] = (mtex, image)
                            if mtex.use_map_hardness:  # specular hardness/glossiness
                                image_map["map_Ns"] = (mtex, image)
                            if mtex.use_map_alpha:
                                image_map["map_d"] = (mtex, image)
                            if mtex.use_map_translucency:
                                image_map["map_Tr"] = (mtex, image)
                            if mtex.use_map_normal:
                                image_map["map_Bump"] = (mtex, image)
                            if mtex.use_map_displacement:
                                image_map["disp"] = (mtex, image)
                            if mtex.use_map_color_diffuse and (mtex.texture_coords == 'REFLECTION'):
                                image_map["refl"] = (mtex, image)
                            if mtex.use_map_emit:
                                image_map["map_Ke"] = (mtex, image)

                for key, (mtex, image) in sorted(image_map.items()):
                    filepath = bpy_extras.io_utils.path_reference(image.filepath, source_dir, dest_dir,
                                                                  path_mode, "", copy_set, image.library)
                    options = []
                    if key == "map_Bump":
                        if mtex.normal_factor != 1.0:
                            options.append('-bm %.6f' % mtex.normal_factor)
                    if mtex.offset != Vector((0.0, 0.0, 0.0)):
                        options.append('-o %.6f %.6f %.6f' % mtex.offset[:])
                    if mtex.scale != Vector((1.0, 1.0, 1.0)):
                        options.append('-s %.6f %.6f %.6f' % mtex.scale[:])
                    if options:
                        fw('%s %s %s\n' % (key, " ".join(options), repr(filepath)[1:-1]))
                    else:
                        fw('%s %s\n' % (key, repr(filepath)[1:-1]))


def test_nurbs_compat(ob):
    if ob.type != 'CURVE':
        return False

    for nu in ob.data.splines:
        if nu.point_count_v == 1 and nu.type != 'BEZIER':  # not a surface and not bezier
            return True

    return False


def write_nurb(fw, ob, ob_mat):
    tot_verts = 0
    cu = ob.data

    # use negative indices
    for nu in cu.splines:
        if nu.type == 'POLY':
            DEG_ORDER_U = 1
        else:
            DEG_ORDER_U = nu.order_u - 1  # odd but tested to be correct

        if nu.type == 'BEZIER':
            print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
            continue

        if nu.point_count_v > 1:
            print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
            continue

        if len(nu.points) <= DEG_ORDER_U:
            print("\tWarning, order_u is lower than vert count, skipping:", ob.name)
            continue

        pt_num = 0
        do_closed = nu.use_cyclic_u
        do_endpoints = (do_closed == 0) and nu.use_endpoint_u

        for pt in nu.points:
            fw('v %.6f %.6f %.6f\n' % (ob_mat * pt.co.to_3d())[:])
            pt_num += 1
        tot_verts += pt_num

        fw('g %s\n' % (name_compat(ob.name)))  # name_compat(ob.getData(1)) could use the data name too
        fw('cstype bspline\n')  # not ideal, hard coded
        fw('deg %d\n' % DEG_ORDER_U)  # not used for curves but most files have it still

        curve_ls = [-(i + 1) for i in range(pt_num)]

        # 'curv' keyword
        if do_closed:
            if DEG_ORDER_U == 1:
                pt_num += 1
                curve_ls.append(-1)
            else:
                pt_num += DEG_ORDER_U
                curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]

        fw('curv 0.0 1.0 %s\n' % (" ".join([str(i) for i in curve_ls])))  # Blender has no U and V values for the curve

        # 'parm' keyword
        tot_parm = (DEG_ORDER_U + 1) + pt_num
        tot_parm_div = float(tot_parm - 1)
        parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]

        if do_endpoints:  # end points, force param
            for i in range(DEG_ORDER_U + 1):
                parm_ls[i] = 0.0
                parm_ls[-(1 + i)] = 1.0

        fw("parm u %s\n" % " ".join(["%.6f" % i for i in parm_ls]))

        fw('end\n')

    return tot_verts


def write_file(filepath, objects, scene,
               EXPORT_TRI=False,
               EXPORT_EDGES=False,
               EXPORT_SMOOTH_GROUPS=False,
               EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
               EXPORT_NORMALS=False,
               EXPORT_UV=True,
               EXPORT_MTL=True,
               EXPORT_APPLY_MODIFIERS=True,
               EXPORT_BLEN_OBS=True,
               EXPORT_GROUP_BY_OB=False,
               EXPORT_GROUP_BY_MAT=False,
               EXPORT_KEEP_VERT_ORDER=False,
               EXPORT_POLYGROUPS=False,
               EXPORT_CURVE_AS_NURBS=True,
               EXPORT_GLOBAL_MATRIX=None,
               EXPORT_PATH_MODE='AUTO',
               progress=ProgressReport(),
               ):
    """
    Basic write function. The context and options must be already set.
    This can be accessed externally, e.g.

    write('c:\\test\\foobar.obj', Blender.Object.GetSelected())  # Using default options.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = mathutils.Matrix()

    def veckey3d(v):
        return round(v.x, 4), round(v.y, 4), round(v.z, 4)

    def veckey2d(v):
        return round(v[0], 4), round(v[1], 4)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see which groups are assigned to a given face.
        We use a frequency system in order to sort out the name because a given vertex can
        belong to two or more groups at the same time. To find the right name for the face
        we list all the possible vertex group names with their frequency and then sort by
        frequency in descending order. The top element is the one shared by the highest
        number of vertices in the face's group.
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight

        if weightDict:
            return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
        else:
            return '(null)'

    with ProgressReportSubstep(progress, 2, "OBJ Export path: %r" % filepath, "OBJ Export Finished") as subprogress1:
        with open(filepath, "w", encoding="utf8", newline="\n") as f:
            fw = f.write

            # Write Header
            fw('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
            fw('# www.blender.org\n')

            # Tell the obj file what material file to use.
            if EXPORT_MTL:
                mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
                # filepath can contain non utf8 chars, use repr
                fw('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])

            # Initialize totals, these are updated each object
            totverts = totuvco = totno = 1

            face_vert_index = 1

            # A Dict of Materials
            # (material.name, image.name):matname_imagename
            # matname_imagename has gaps removed.
            mtl_dict = {}
            # Used to reduce the usage of matname_texname materials, which can become annoying in case of
            # repeated exports/imports, yet keeping unique mat names per keys!
            # mtl_name: (material.name, image.name)
            mtl_rev_dict = {}

            copy_set = set()

            # Get all meshes
            subprogress1.enter_substeps(len(objects))
            for i, ob_main in enumerate(objects):
                # ignore dupli children
                if ob_main.parent and ob_main.parent.dupli_type in {'VERTS', 'FACES'}:
                    # XXX
                    subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
                    continue

                obs = [(ob_main, ob_main.matrix_world)]
                if ob_main.dupli_type != 'NONE':
                    # XXX
                    print('creating dupli_list on', ob_main.name)
                    ob_main.dupli_list_create(scene)

                    obs += [(dob.object, dob.matrix) for dob in ob_main.dupli_list]

                    # XXX debug print
                    print(ob_main.name, 'has', len(obs) - 1, 'dupli children')

                subprogress1.enter_substeps(len(obs))
                for ob, ob_mat in obs:
                    with ProgressReportSubstep(subprogress1, 6) as subprogress2:
                        uv_unique_count = no_unique_count = 0

                        # Nurbs curve support
                        if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
                            ob_mat = EXPORT_GLOBAL_MATRIX * ob_mat
                            totverts += write_nurb(fw, ob, ob_mat)
                            continue
                        # END NURBS

                        try:
                            me = ob.to_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW', calc_tessface=False)
                        except RuntimeError:
                            me = None

                        if me is None:
                            continue

                        me.transform(EXPORT_GLOBAL_MATRIX * ob_mat)

                        if EXPORT_TRI:
                            # _must_ do this first since it re-allocs arrays
                            mesh_triangulate(me)

                        if EXPORT_UV:
                            faceuv = len(me.uv_textures) > 0
                            if faceuv:
                                uv_texture = me.uv_textures.active.data[:]
                                uv_layer = me.uv_layers.active.data[:]
                        else:
                            faceuv = False

                        me_verts = me.vertices[:]

                        # Make our own list so it can be sorted to reduce context switching
                        face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]
                        # faces = [ f for f in me.tessfaces ]

                        if EXPORT_EDGES:
                            edges = me.edges
                        else:
                            edges = []

                        if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is something to write
                            # clean up
                            bpy.data.meshes.remove(me)
                            continue  # don't bother with this mesh.

                        if EXPORT_NORMALS and face_index_pairs:
                            me.calc_normals_split()
                            # No need to call me.free_normals_split later, as this mesh is deleted anyway!

                        loops = me.loops

                        if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
                            smooth_groups, smooth_groups_tot = me.calc_smooth_groups(EXPORT_SMOOTH_GROUPS_BITFLAGS)
                            if smooth_groups_tot <= 1:
                                smooth_groups, smooth_groups_tot = (), 0
                        else:
                            smooth_groups, smooth_groups_tot = (), 0

                        materials = me.materials[:]
                        material_names = [m.name if m else None for m in materials]

                        # avoid bad index errors
                        if not materials:
                            materials = [None]
                            material_names = [name_compat(None)]

                        # Sort by Material, then images
                        # so we don't over context switch in the obj file.
                        if EXPORT_KEEP_VERT_ORDER:
                            pass
                        else:
                            if faceuv:
                                if smooth_groups:
                                    sort_func = lambda a: (a[0].material_index,
                                                           hash(uv_texture[a[1]].image),
                                                           smooth_groups[a[1]] if a[0].use_smooth else False)
                                else:
                                    sort_func = lambda a: (a[0].material_index,
                                                           hash(uv_texture[a[1]].image),
                                                           a[0].use_smooth)
                            elif len(materials) > 1:
                                if smooth_groups:
                                    sort_func = lambda a: (a[0].material_index,
                                                           smooth_groups[a[1]] if a[0].use_smooth else False)
                                else:
                                    sort_func = lambda a: (a[0].material_index,
                                                           a[0].use_smooth)
                            else:
                                # no materials
                                if smooth_groups:
                                    sort_func = lambda a: smooth_groups[a[1] if a[0].use_smooth else False]
                                else:
                                    sort_func = lambda a: a[0].use_smooth

                            face_index_pairs.sort(key=sort_func)

                            del sort_func

                        # Set the default mat to no material and no image.
                        contextMat = 0, 0  # Can never be this, so we will label a new material the first chance we get.
                        contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

                        if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
                            name1 = ob.name
                            name2 = ob.data.name
                            if name1 == name2:
                                obnamestring = name_compat(name1)
                            else:
                                obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))

                            if EXPORT_BLEN_OBS:
                                fw('o %s\n' % obnamestring)  # Write Object name
                            else:  # if EXPORT_GROUP_BY_OB:
                                fw('g %s\n' % obnamestring)

                        subprogress2.step()

                        # Vert
                        for v in me_verts:
                            fw('v %.6f %.6f %.6f\n' % v.co[:])

                        subprogress2.step()

                        # UV
                        if faceuv:
                            # in case removing some of these don't get defined.
                            uv = f_index = uv_index = uv_key = uv_val = uv_ls = None

                            uv_face_mapping = [None] * len(face_index_pairs)

                            uv_dict = {}
                            uv_get = uv_dict.get
                            for f, f_index in face_index_pairs:
                                uv_ls = uv_face_mapping[f_index] = []
                                for uv_index, l_index in enumerate(f.loop_indices):
                                    uv = uv_layer[l_index].uv
                                    # include the vertex index in the key so we don't share UV's between vertices,
                                    # allowed by the OBJ spec but can cause issues for other importers, see: T47010.

                                    # this works too, shared UV's for all verts
                                    #~ uv_key = veckey2d(uv)
                                    uv_key = loops[l_index].vertex_index, veckey2d(uv)

                                    uv_val = uv_get(uv_key)
                                    if uv_val is None:
                                        uv_val = uv_dict[uv_key] = uv_unique_count
                                        fw('vt %.4f %.4f\n' % uv[:])
                                        uv_unique_count += 1
                                    uv_ls.append(uv_val)

                            del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
                            # Only need uv_unique_count and uv_face_mapping

                        subprogress2.step()

                        # NORMAL, Smooth/Non smoothed.
                        if EXPORT_NORMALS:
                            no_key = no_val = None
                            normals_to_idx = {}
                            no_get = normals_to_idx.get
                            loops_to_normals = [0] * len(loops)
                            for f, f_index in face_index_pairs:
                                for l_idx in f.loop_indices:
                                    no_key = veckey3d(loops[l_idx].normal)
                                    no_val = no_get(no_key)
                                    if no_val is None:
                                        no_val = normals_to_idx[no_key] = no_unique_count
                                        fw('vn %.4f %.4f %.4f\n' % no_key)
                                        no_unique_count += 1
                                    loops_to_normals[l_idx] = no_val
                            del normals_to_idx, no_get, no_key, no_val
                        else:
                            loops_to_normals = []

                        if not faceuv:
                            f_image = None

                        subprogress2.step()

                        # XXX
                        if EXPORT_POLYGROUPS:
                            # Retrieve the list of vertex groups
                            vertGroupNames = ob.vertex_groups.keys()
                            if vertGroupNames:
                                currentVGroup = ''
                                # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
                                vgroupsMap = [[] for _i in range(len(me_verts))]
                                for v_idx, v_ls in enumerate(vgroupsMap):
                                    v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]

                        for f, f_index in face_index_pairs:
                            f_smooth = f.use_smooth
                            if f_smooth and smooth_groups:
                                f_smooth = smooth_groups[f_index]
                            f_mat = min(f.material_index, len(materials) - 1)

                            if faceuv:
                                tface = uv_texture[f_index]
                                f_image = tface.image

                            # MAKE KEY
                            if faceuv and f_image:  # Object is always true.
                                key = material_names[f_mat], f_image.name
                            else:
                                key = material_names[f_mat], None  # No image, use None instead.

                            # Write the vertex group
                            if EXPORT_POLYGROUPS:
                                if vertGroupNames:
                                    # find what vertex group the face belongs to
                                    vgroup_of_face = findVertexGroupName(f, vgroupsMap)
                                    if vgroup_of_face != currentVGroup:
                                        currentVGroup = vgroup_of_face
                                        fw('g %s\n' % vgroup_of_face)

                            # CHECK FOR CONTEXT SWITCH
                            if key == contextMat:
                                pass  # Context already switched, don't do anything
                            else:
                                if key[0] is None and key[1] is None:
                                    # Write a null material, since we know the context has changed.
                                    if EXPORT_GROUP_BY_MAT:
                                        # can be mat_image or (null)
                                        fw("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name)))
                                    if EXPORT_MTL:
                                        fw("usemtl (null)\n")  # mat, image

                                else:
                                    mat_data = mtl_dict.get(key)
                                    if not mat_data:
                                        # First add to global dict so we can export to mtl
                                        # Then write mtl

                                        # Make new names from the mat and image name,
                                        # converting any spaces to underscores with name_compat.

                                        # If there is no image, don't bother adding it to the name.
                                        # Try to avoid as much as possible adding texname (or other things)
                                        # to the mtl name (see [#32102])...
mtl_name = "%s" % name_compat(key[0]) if mtl_rev_dict.get(mtl_name, None) not in {key, None}: if key[1] is None: tmp_ext = "_NONE" else: tmp_ext = "_%s" % name_compat(key[1]) i = 0 while mtl_rev_dict.get(mtl_name + tmp_ext, None) not in {key, None}: i += 1 tmp_ext = "_%3d" % i mtl_name += tmp_ext mat_data = mtl_dict[key] = mtl_name, materials[f_mat], f_image mtl_rev_dict[mtl_name] = key if EXPORT_GROUP_BY_MAT: # can be mat_image or (null) fw("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0])) if EXPORT_MTL: fw("usemtl %s\n" % mat_data[0]) # can be mat_image or (null) contextMat = key if f_smooth != contextSmooth: if f_smooth: # on now off if smooth_groups: f_smooth = smooth_groups[f_index] fw('s %d\n' % f_smooth) else: fw('s 1\n') else: # was off now on fw('s off\n') contextSmooth = f_smooth f_v = [(vi, me_verts[v_idx], l_idx) for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))] fw('f') if faceuv: if EXPORT_NORMALS: for vi, v, li in f_v: fw(" %d/%d/%d" % (totverts + v.index, totuvco + uv_face_mapping[f_index][vi], totno + loops_to_normals[li], )) # vert, uv, normal else: # No Normals for vi, v, li in f_v: fw(" %d/%d" % (totverts + v.index, totuvco + uv_face_mapping[f_index][vi], )) # vert, uv face_vert_index += len(f_v) else: # No UV's if EXPORT_NORMALS: for vi, v, li in f_v: fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li])) else: # No Normals for vi, v, li in f_v: fw(" %d" % (totverts + v.index)) fw('\n') subprogress2.step() # Write edges. if EXPORT_EDGES: for ed in edges: if ed.is_loose: fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1])) # Make the indices global rather then per mesh totverts += len(me_verts) totuvco += uv_unique_count totno += no_unique_count # clean up bpy.data.meshes.remove(me) if ob_main.dupli_type != 'NONE': ob_main.dupli_list_clear() subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name) subprogress1.leave_substeps() subprogress1.step("Finished exporting geometry, now exporting materials") # Now we have all our materials, save them if EXPORT_MTL: write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict) # copy all collected files. bpy_extras.io_utils.path_reference_copy(copy_set) def _write(context, filepath, EXPORT_TRI, # ok EXPORT_EDGES, EXPORT_SMOOTH_GROUPS, EXPORT_SMOOTH_GROUPS_BITFLAGS, EXPORT_NORMALS, # ok EXPORT_UV, # ok EXPORT_MTL, EXPORT_APPLY_MODIFIERS, # ok EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER, EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS, EXPORT_SEL_ONLY, # ok EXPORT_ANIMATION, EXPORT_GLOBAL_MATRIX, EXPORT_PATH_MODE, # Not used ): with ProgressReport(context.window_manager) as progress: base_name, ext = os.path.splitext(filepath) context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension scene = context.scene # Exit edit mode before exporting, so current object states are exported properly. if bpy.ops.object.mode_set.poll(): bpy.ops.object.mode_set(mode='OBJECT') orig_frame = scene.frame_current # Export an animation? if EXPORT_ANIMATION: scene_frames = range(scene.frame_start, scene.frame_end + 1) # Up to and including the end frame. else: scene_frames = [orig_frame] # Dont export an animation. # Loop through all frames in the scene and export. progress.enter_substeps(len(scene_frames)) for frame in scene_frames: if EXPORT_ANIMATION: # Add frame to the filepath. 
context_name[2] = '_%.6d' % frame scene.frame_set(frame, 0.0) if EXPORT_SEL_ONLY: objects = context.selected_objects else: objects = scene.objects full_path = ''.join(context_name) # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad. # EXPORT THE FILE. progress.enter_substeps(1) write_file(full_path, objects, scene, EXPORT_TRI, EXPORT_EDGES, EXPORT_SMOOTH_GROUPS, EXPORT_SMOOTH_GROUPS_BITFLAGS, EXPORT_NORMALS, EXPORT_UV, EXPORT_MTL, EXPORT_APPLY_MODIFIERS, EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER, EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS, EXPORT_GLOBAL_MATRIX, EXPORT_PATH_MODE, progress, ) progress.leave_substeps() scene.frame_set(orig_frame, 0.0) progress.leave_substeps() """ Currently the exporter lacks these features: * multiple scene export (only active scene is written) * particles """ def save(context, filepath, *, use_triangles=False, use_edges=True, use_normals=False, use_smooth_groups=False, use_smooth_groups_bitflags=False, use_uvs=True, use_materials=True, use_mesh_modifiers=True, use_blen_objects=True, group_by_object=False, group_by_material=False, keep_vertex_order=False, use_vertex_groups=False, use_nurbs=True, use_selection=True, use_animation=False, global_matrix=None, path_mode='AUTO' ): _write(context, filepath, EXPORT_TRI=use_triangles, EXPORT_EDGES=use_edges, EXPORT_SMOOTH_GROUPS=use_smooth_groups, EXPORT_SMOOTH_GROUPS_BITFLAGS=use_smooth_groups_bitflags, EXPORT_NORMALS=use_normals, EXPORT_UV=use_uvs, EXPORT_MTL=use_materials, EXPORT_APPLY_MODIFIERS=use_mesh_modifiers, EXPORT_BLEN_OBS=use_blen_objects, EXPORT_GROUP_BY_OB=group_by_object, EXPORT_GROUP_BY_MAT=group_by_material, EXPORT_KEEP_VERT_ORDER=keep_vertex_order, EXPORT_POLYGROUPS=use_vertex_groups, EXPORT_CURVE_AS_NURBS=use_nurbs, EXPORT_SEL_ONLY=use_selection, EXPORT_ANIMATION=use_animation, EXPORT_GLOBAL_MATRIX=global_matrix, EXPORT_PATH_MODE=path_mode, ) return {'FINISHED'}
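# --- Usage sketch (added for illustration; not part of the original add-on).
# From Blender's Python console the module-level save() entry point can be
# driven directly; the output path below is a hypothetical example, and every
# keyword argument mirrors one of the EXPORT_* options of _write() above:
#
#   from io_scene_obj import export_obj
#   export_obj.save(bpy.context, "/tmp/scene.obj",
#                   use_selection=False, use_normals=True)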
Microvellum/Fluid-Designer
win64-vc/2.78/scripts/addons/io_scene_obj/export_obj.py
Python
gpl-3.0
38,130
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import os import re import time #Apache License Header ASF_LICENSE_HEADER = ''' # Copyright 2011 The Apache Software Foundation # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' # A Python replacement for java.util.Properties # Based on http://code.activestate.com/recipes # /496795-a-python-replacement-for-javautilproperties/ class Properties(object): def __init__(self, props=None): self._props = {} self._origprops = {} self._keymap = {} self.othercharre = re.compile(r'(?<!\\)(\s*\=)|(?<!\\)(\s*\:)') self.othercharre2 = re.compile(r'(\s*\=)|(\s*\:)') self.bspacere = re.compile(r'\\(?!\s$)') def __parse(self, lines): lineno = 0 i = iter(lines) for line in i: lineno += 1 line = line.strip() if not line: continue if line[0] == '#': continue escaped = False sepidx = -1 flag = 0 m = self.othercharre.search(line) if m: first, last = m.span() start, end = 0, first flag = 1 wspacere = re.compile(r'(?<![\\\=\:])(\s)') else: if self.othercharre2.search(line): wspacere = re.compile(r'(?<![\\])(\s)') start, end = 0, len(line) m2 = wspacere.search(line, start, end) if m2: first, last = m2.span() sepidx = first elif m: first, last = m.span() sepidx = last - 1 while line[-1] == '\\': nextline = i.next() nextline = nextline.strip() lineno += 1 line = line[:-1] + nextline if sepidx != -1: key, value = line[:sepidx], line[sepidx + 1:] else: key, value = line, '' self.process_pair(key, value) def process_pair(self, key, value): """ Adds or overrides the property with the given key. 
""" oldkey = key oldvalue = value keyparts = self.bspacere.split(key) strippable = False lastpart = keyparts[-1] if lastpart.find('\\ ') != -1: keyparts[-1] = lastpart.replace('\\', '') elif lastpart and lastpart[-1] == ' ': strippable = True key = ''.join(keyparts) if strippable: key = key.strip() oldkey = oldkey.strip() oldvalue = self.unescape(oldvalue) value = self.unescape(value) self._props[key] = None if value is None else value.strip() if self._keymap.has_key(key): oldkey = self._keymap.get(key) self._origprops[oldkey] = None if oldvalue is None else oldvalue.strip() else: self._origprops[oldkey] = None if oldvalue is None else oldvalue.strip() self._keymap[key] = oldkey def unescape(self, value): newvalue = value if not value is None: newvalue = value.replace('\:', ':') newvalue = newvalue.replace('\=', '=') return newvalue def removeOldProp(self, key): if self._origprops.has_key(key): del self._origprops[key] pass def removeProp(self, key): if self._props.has_key(key): del self._props[key] pass def load(self, stream): if type(stream) is not file: raise TypeError, 'Argument should be a file object!' if stream.mode != 'r': raise ValueError, 'Stream should be opened in read-only mode!' try: self.fileName = os.path.abspath(stream.name) lines = stream.readlines() self.__parse(lines) except IOError: raise def get_property(self, key): return self._props.get(key, '') def propertyNames(self): return self._props.keys() def getPropertyDict(self): return self._props def __getitem__(self, name): return self.get_property(name) def __getattr__(self, name): try: return self.__dict__[name] except KeyError: if hasattr(self._props, name): return getattr(self._props, name) def sort_props(self): tmp_props = {} for key in sorted(self._props.iterkeys()): tmp_props[key] = self._props[key] self._props = tmp_props pass def sort_origprops(self): tmp_props = self._origprops.copy() self._origprops.clear() for key in sorted(tmp_props.iterkeys()): self._origprops[key] = tmp_props[key] pass def store(self, out, header=""): """ Write the properties list to the stream 'out' along with the optional 'header' This function will attempt to close the file handler once it's done. """ if out.mode[0] != 'w': raise ValueError, 'Steam should be opened in write mode!' try: out.write(''.join(('#', ASF_LICENSE_HEADER, '\n'))) out.write(''.join(('#', header, '\n'))) # Write timestamp tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime()) out.write(''.join(('#', tstamp, '\n'))) # Write properties from the pristine dictionary for prop, val in self._origprops.items(): if val is not None: out.write(''.join((prop, '=', val, '\n'))) except IOError: raise finally: if out: out.close() def store_ordered(self, out, header=""): """ Write the properties list to the stream 'out' along with the optional 'header' """ if out.mode[0] != 'w': raise ValueError, 'Steam should be opened in write mode!' try: out.write(''.join(('#', ASF_LICENSE_HEADER, '\n'))) out.write(''.join(('#', header, '\n'))) # Write timestamp tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime()) out.write(''.join(('#', tstamp, '\n'))) # Write properties from the pristine dictionary for key in sorted(self._origprops.iterkeys()): val = self._origprops[key] if val is not None: out.write(''.join((key, '=', val, '\n'))) out.close() except IOError: raise
zouzhberk/ambaridemo
demo-server/src/main/python/ambari_server/properties.py
Python
apache-2.0
7,241
import codecs from setuptools import setup VERSION = '0.2.0' def read_long_description(): long_desc = [] with codecs.open('README.rst', 'r', 'utf8') as longdesc: long_desc.append(longdesc.read()) with codecs.open('HISTORY.rst', 'r', 'utf8') as history: long_desc.append(history.read()) return u'\n\n'.join(long_desc) LONG_DESCRIPTION = read_long_description() setup( name='get_image_size', url='https://github.com/scardine/image_size', version=VERSION, long_description=LONG_DESCRIPTION, author='github.com/scardine', author_email=' ', license='MIT', py_modules=['get_image_size'], entry_points={ 'console_scripts': [ 'get-image-size = get_image_size:main', ], }, )
scardine/image_size
setup.py
Python
mit
776
#!/usr/bin/env python3 # License MIT # Copyright 2016-2021 Alex Winkler # Version 3.0.0 import discord from discord.ext import commands from dislash import * from ftsbot import secrets from ftsbot.cogs.antispam import antispam from ftsbot.cogs.channelmoderation import channelmoderation from ftsbot.cogs.presence import presence from ftsbot.cogs.rolecommands import rolecommands from ftsbot.cogs.textcommands import textcommands from ftsbot.cogs.wikicommands import wikicommands intents = discord.Intents.default() intents.members = True bot = commands.Bot(command_prefix='!fobot ', intents=intents, help_command=None) SlashClient(bot) bot.add_cog(antispam(bot)) bot.add_cog(channelmoderation(bot)) bot.add_cog(presence(bot)) bot.add_cog(rolecommands(bot)) bot.add_cog(textcommands(bot)) bot.add_cog(wikicommands(bot)) bot.run(secrets.token)
FO-nTTaX/Liquipedia-Discord-Bot
discordbot.py
Python
mit
847
# Copyright 2012 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import textwrap from heatclient.common import utils import heatclient.exc as exc def format_parameters(params): ''' Reformat parameters into dict of format expected by the API ''' parameters = {} if params: for count, p in enumerate(params.split(';'), 1): (n, v) = p.split('=') parameters[n] = v return parameters def _set_template_fields(hc, args, fields): if args.template_file: fields['template'] = json.loads(open(args.template_file).read()) elif args.template_url: fields['template_url'] = args.template_url elif args.template_object: template_body = hc.raw_request('GET', args.template_object) if template_body: fields['template'] = json.loads(template_body) else: raise exc.CommandError('Could not fetch template from %s' % args.template_object) else: raise exc.CommandError('Need to specify exactly one of ' '--template-file, --template-url ' 'or --template-object') @utils.arg('-f', '--template-file', metavar='<FILE>', help='Path to the template.') @utils.arg('-u', '--template-url', metavar='<URL>', help='URL of template.') @utils.arg('-o', '--template-object', metavar='<URL>', help='URL to retrieve template object (e.g from swift)') @utils.arg('-c', '--create-timeout', metavar='<TIMEOUT>', default=60, type=int, help='Stack creation timeout in minutes. 
Default: 60') @utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>', help='Parameter values used to create the stack.') @utils.arg('name', metavar='<STACK_NAME>', help='Name of the stack to create.') def do_create(hc, args): '''Create the stack''' fields = {'stack_name': args.name, 'timeoutmins': args.create_timeout, 'parameters': format_parameters(args.parameters)} _set_template_fields(hc, args, fields) hc.stacks.create(**fields) do_list(hc) @utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to delete.') def do_delete(hc, args): '''Delete the stack''' fields = {'stack_id': args.id} try: hc.stacks.delete(**fields) except exc.HTTPNotFound: raise exc.CommandError('Stack not found: %s' % args.id) else: do_list(hc) @utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to describe.') def do_describe(hc, args): '''Describe the stack''' fields = {'stack_id': args.id} try: stack = hc.stacks.get(**fields) except exc.HTTPNotFound: raise exc.CommandError('Stack not found: %s' % args.id) else: text_wrap = lambda d: '\n'.join(textwrap.wrap(d, 55)) link_format = lambda links: '\n'.join([l['href'] for l in links]) json_format = lambda js: json.dumps(js, indent=2) formatters = { 'description': text_wrap, 'template_description': text_wrap, 'stack_status_reason': text_wrap, 'parameters': json_format, 'outputs': json_format, 'links': link_format } utils.print_dict(stack.to_dict(), formatters=formatters) @utils.arg('-f', '--template-file', metavar='<FILE>', help='Path to the template.') @utils.arg('-u', '--template-url', metavar='<URL>', help='URL of template.') @utils.arg('-o', '--template-object', metavar='<URL>', help='URL to retrieve template object (e.g from swift)') @utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>', help='Parameter values used to create the stack.') @utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to update.') def do_update(hc, args): '''Update the stack''' fields = {'stack_id': args.id, 'parameters': format_parameters(args.parameters)} _set_template_fields(hc, args, fields) hc.stacks.update(**fields) do_list(hc) def do_list(hc, args={}): '''List the user's stacks''' kwargs = {} stacks = hc.stacks.list(**kwargs) field_labels = ['Name/ID', 'Status', 'Created'] fields = ['id', 'stack_status', 'creation_time'] formatters = { 'id': lambda row: '%s/%s' % (row.stack_name, row.id) } utils.print_list(stacks, fields, field_labels, formatters=formatters, sortby=2) @utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to get the template for.') def do_gettemplate(hc, args): '''Get the template''' fields = {'stack_id': args.id} try: template = hc.stacks.template(**fields) except exc.HTTPNotFound: raise exc.CommandError('Stack not found: %s' % args.id) else: print json.dumps(template, indent=2) @utils.arg('-u', '--template-url', metavar='<URL>', help='URL of template.') @utils.arg('-f', '--template-file', metavar='<FILE>', help='Path to the template.') @utils.arg('-o', '--template-object', metavar='<URL>', help='URL to retrieve template object (e.g from swift)') @utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>', help='Parameter values to validate.') def do_validate(hc, args): '''Validate a template with parameters''' fields = {'parameters': format_parameters(args.parameters)} _set_template_fields(hc, args, fields) validation = hc.stacks.validate(**fields) print json.dumps(validation, indent=2) # TODO only need to implement this once the server supports it #@utils.arg('-u', 
'--template-url', metavar='<URL>', # help='URL of template.') #@utils.arg('-f', '--template-file', metavar='<FILE>', # help='Path to the template.') #def do_estimate_template_cost(hc, args): # '''Returns the estimated monthly cost of a template''' # pass # # #@utils.arg('id', metavar='<NAME/ID>', # help='Name and ID of stack to show the events for.') #def do_event_list(hc, args): # '''List events for a stack''' # pass # # #@utils.arg('-r', '--resource', metavar='<RESOURCE_ID>', # help='ID of the resource to show the details for.') #@utils.arg('id', metavar='<NAME/ID>', # help='Name and ID of stack to show the resource for.') #def do_resource(hc, args): # '''Describe the resource''' # pass # # #@utils.arg('id', metavar='<NAME/ID>', # help='Name and ID of stack to show the resources for.') #def do_resource_list(hc, args): # '''Show list of resources belonging to a stack''' # pass # # #@utils.arg('id', metavar='<NAME/ID>', # help='Name and ID of stack to show the resource details for.') #def do_resource_list_details(hc, args): # '''Detailed view of resources belonging to a stack''' # pass
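

# --- Illustrative self-check (added; not part of the original module; the
# helper name is hypothetical). It shows the dict shape format_parameters()
# builds from the CLI's -P argument, assuming values contain no ';' or '='.
def _format_parameters_example():
    params = 'InstanceType=m1.small;KeyName=heat_key'
    expected = {'InstanceType': 'm1.small', 'KeyName': 'heat_key'}
    assert format_parameters(params) == expected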
radez/python-heatclient
heatclient/v1/shell.py
Python
apache-2.0
7,499
""" @summary: """ from argparse import ArgumentParser # parse command line arguments parser = ArgumentParser( description = """Computes position-specific conservation scores given a multiple sequence alignment.""" ) parser.add_argument( '-i', '--input', dest = 'msa', required = True, help = 'file containing the multiple sequence alignment' ) parser.add_argument( '-f', '--format', dest = 'format', required = True, default = 'fasta', choices = ['fasta', 'clustal'], help = 'format of the multiple sequence alignment' ) parser.add_argument( '-t', '--type', dest = 'type', required = False, default = 'Protein', choices = ['Protein', 'DNA', 'RNA'], help = 'type of sequences: DNA, RNA, Protein' ) parser.add_argument( '-o', '--output', dest = 'output', required = True, help = 'output filename' ) args = parser.parse_args() def read_fasta( filename ): """ """ # only sequences will be read in, ids are not of interest to this application seqs = [] with open( filename, 'rt' ) as f: cur_seq = '' for line in f.readlines(): if line.startswith( '>' ): # if cur_seq is not empty, it contains the preceding sequence if cur_seq != '': seqs.append( cur_seq.upper() ) # set cur_seq to empty for reading the current sequence in the else block cur_seq = '' else: cur_seq += line.strip() # add the last sequence seqs.append( cur_seq.upper() ) return seqs
computbiolgeek/seq_utils
src/score_residue_conservation.py
Python
gpl-3.0
1,637
#!/usr/bin/env python # test_homeDirectory.py vi:ts=4:sw=4:expandtab: # # Scalable Periodic LDAP Attribute Transmogrifier # Authors: # Nick Barkas <snb@threerings.net> # Based on ssh key helper tests by: # Landon Fuller <landonf@threerings.net> # Will Barton <wbb4@opendarwin.org> # # Copyright (c) 2006 Three Rings Design, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright owner nor the names of contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ LDAP Unit Tests """ from twisted.trial import unittest import ldap import os import splat from splat import plugin from splat.ldaputils.test import slapd from splat.ldaputils import client as ldapclient # Useful Constants from splat.test import DATA_DIR # Test Cases class HomeDirtestCase(unittest.TestCase): """ Test Splat Home Directory Helper """ # Return a valid options dictionary to parse. Note that these options are # not necessarily the same as the defaults specified in the various plugin # WriterContext classes. def _getDefaultOptions(self): # Ubuntu (and probably Debian and other linuxes) use /etc/skel instead # of /usr/share skel. 
skelDir = '/usr/share/skel' if (not os.path.isdir(skelDir)): if (os.path.isdir('/etc/skel')): skelDir = '/etc/skel' else: self.fail('Can not find a useable skeletal directory') return { 'home':'/home', 'minuid':'0', 'mingid':'0', 'skeldir':skelDir } def setUp(self): self.slapd = slapd.LDAPServer() self.conn = ldapclient.Connection(slapd.SLAPD_URI) self.options = self._getDefaultOptions() self.hc = plugin.HelperController('test', 'splat.helpers.homeDirectory', 5, 'dc=example,dc=com', '(objectClass=sshAccount)', False, self.options) self.entries = self.conn.search(self.hc.searchBase, ldap.SCOPE_SUBTREE, self.hc.searchFilter, self.hc.searchAttr) def tearDown(self): self.slapd.stop() def test_valid_options(self): """ Test Parsing of Valid Options """ options = self.options assert self.hc.helperClass.parseOptions(options) # Also make sure parser works when skeldir has not been defined del options['skeldir'] assert self.hc.helperClass.parseOptions(options) def test_invalid_options(self): """ Test Invalid Options """ # foo is not a valid option options = self.options options['foo'] = 'bar' self.assertRaises(splat.SplatError, self.hc.helperClass.parseOptions, options) def test_option_parse_home(self): """ Test Home Option Parser """ # Relative paths shouldn't be allowed for home options = self.options options['home'] = 'home' self.assertRaises(splat.SplatError, self.hc.helperClass.parseOptions, options) def test_option_parse_skeldir(self): """ Test Skel Directory Option Parser """ # Paths that don't exist should generate an exception options = self.options options['skeldir'] = '/asdf/jklh/qwer' self.assertRaises(splat.SplatError, self.hc.helperClass.parseOptions, options) def test_context(self): """ Test Context Consistency With Options """ context = self.hc.helperClass.parseOptions(self.options) self.assertEquals(context.home, '/home') self.assertEquals(context.minuid, 0) self.assertEquals(context.mingid, 0) def test_group_context(self): """ Test Group Context Consistency With Service Options """ filter = ldapclient.GroupFilter(slapd.BASEDN, ldap.SCOPE_SUBTREE, '(&(objectClass=groupOfUniqueNames)(cn=developers))', 'uniqueMember') self.hc.addGroup(filter) self.assertEquals(self.hc.groupsCtx[filter].home, '/home') self.assertEquals(self.hc.groupsCtx[filter].minuid, 0) self.assertEquals(self.hc.groupsCtx[filter].mingid, 0) def test_group_context_custom(self): """ Test Group Context Consistency With Group Specific Options """ options = self.options # Run parseOptions here to make sure the options dictionary is not # being modified by it. self.hc.helperClass.parseOptions(options) # Now update with a custom option for this group. options['minuid'] = '10' filter = ldapclient.GroupFilter(slapd.BASEDN, ldap.SCOPE_SUBTREE, '(&(objectClass=groupOfUniqueNames)(cn=developers))', 'uniqueMember') self.hc.addGroup(filter, options) self.assertEquals(self.hc.groupsCtx[filter].minuid, 10) self.assertEquals(self.hc.groupsCtx[filter].home, '/home') self.assertEquals(self.hc.groupsCtx[filter].mingid, 0)
threerings/splatd
splat/helpers/test/test_homeDirectory.py
Python
bsd-3-clause
6,191
# coding: utf-8 """ EVE Swagger Interface An OpenAPI for EVE Online OpenAPI spec version: 0.4.6 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class PostFleetsFleetIdMembersForbidden(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, error=None): """ PostFleetsFleetIdMembersForbidden - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'error': 'str' } self.attribute_map = { 'error': 'error' } self._error = error @property def error(self): """ Gets the error of this PostFleetsFleetIdMembersForbidden. Forbidden message :return: The error of this PostFleetsFleetIdMembersForbidden. :rtype: str """ return self._error @error.setter def error(self, error): """ Sets the error of this PostFleetsFleetIdMembersForbidden. Forbidden message :param error: The error of this PostFleetsFleetIdMembersForbidden. :type: str """ self._error = error def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, PostFleetsFleetIdMembersForbidden): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
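

# --- Illustrative round trip (added; not part of the generated client; the
# function name is hypothetical). Shows how the generated model is
# constructed, serialized and compared.
def _example_usage():
    err = PostFleetsFleetIdMembersForbidden(error='Forbidden message')
    assert err.to_dict() == {'error': 'Forbidden message'}
    assert err == PostFleetsFleetIdMembersForbidden(error='Forbidden message')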
minlexx/pyevemon
esi_client/models/post_fleets_fleet_id_members_forbidden.py
Python
gpl-3.0
3,027
# Copyright 2013 Daniel Narvaez # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import os import tty import termios devnull = open("/dev/null", "w") def ensure_dir(path): try: os.makedirs(path) except OSError: pass def getch(): fd = sys.stdin.fileno() tty_attributes = termios.tcgetattr(fd) try: tty.setraw(fd) return sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, tty_attributes)
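

# --- Usage sketch (added; not part of the original module). getch() reads a
# single keypress in raw mode without waiting for Enter, so it needs a real
# terminal; the directory path is a hypothetical example.
if __name__ == '__main__':
    ensure_dir('/tmp/osbuild-demo')
    print('Press any key to continue...')
    key = getch()
    print('Got: %r' % key)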
dnarvaez/osbuild
osbuild/utils.py
Python
apache-2.0
983
from __future__ import division import utm as UTM import pytest try: import numpy as np use_numpy = True except ImportError: use_numpy = False def assert_utm_equal(a, b): if use_numpy and isinstance(b[0], np.ndarray): assert np.allclose(a[0], b[0]) assert np.allclose(a[1], b[1]) else: assert a[0] == pytest.approx(b[0], abs=1) assert a[1] == pytest.approx(b[1], abs=1) assert a[2] == b[2] assert a[3].upper() == b[3].upper() def assert_latlon_equal(a, b): if use_numpy and isinstance(b[0], np.ndarray): assert np.allclose(a[0], b[0], rtol=1e-4, atol=1e-4) assert np.allclose(a[1], b[1], rtol=1e-4, atol=1e-4) else: assert a[0] == pytest.approx(b[0], 4) assert a[1] == pytest.approx(b[1], 4) known_values = [ # Aachen, Germany ( (50.77535, 6.08389), (294409, 5628898, 32, "U"), {"northern": True}, ), # New York, USA ( (40.71435, -74.00597), (583960, 4507523, 18, "T"), {"northern": True}, ), # Wellington, New Zealand ( (-41.28646, 174.77624), (313784, 5427057, 60, "G"), {"northern": False}, ), # Capetown, South Africa ( (-33.92487, 18.42406), (261878, 6243186, 34, "H"), {"northern": False}, ), # Mendoza, Argentina ( (-32.89018, -68.84405), (514586, 6360877, 19, "h"), {"northern": False}, ), # Fairbanks, Alaska, USA ( (64.83778, -147.71639), (466013, 7190568, 6, "W"), {"northern": True}, ), # Ben Nevis, Scotland, UK ( (56.79680, -5.00601), (377486, 6296562, 30, "V"), {"northern": True}, ), # Latitude 84 ( (84, -5.00601), (476594, 9328501, 30, "X"), {"northern": True}, ), ] @pytest.mark.parametrize("latlon, utm, utm_kw", known_values) def test_from_latlon(latlon, utm, utm_kw): """from_latlon should give known result with known input""" result = UTM.from_latlon(*latlon) assert_utm_equal(utm, result) @pytest.mark.skipif(not use_numpy, reason="numpy not installed") @pytest.mark.parametrize("latlon, utm, utm_kw", known_values) def test_from_latlon_numpy(latlon, utm, utm_kw): result = UTM.from_latlon(*[np.array([x]) for x in latlon]) assert_utm_equal(utm, result) @pytest.mark.skipif(not use_numpy, reason="numpy not installed") def test_from_latlon_numpy_static(): lats = np.array([0.0, 3.0, 6.0]) lons = np.array([0.0, 1.0, 3.4]) result = UTM.from_latlon(lats, lons) assert_utm_equal( ( np.array( [166021.44317933032, 277707.83075574087, 544268.12794623] ), np.array([0.0, 331796.29167519242, 663220.7198366751]), 31, "N", ), result, ) @pytest.mark.parametrize("latlon, utm, utm_kw", known_values) def test_to_latlon(latlon, utm, utm_kw): """to_latlon should give known result with known input""" result = UTM.to_latlon(*utm) assert_latlon_equal(latlon, result) result = UTM.to_latlon(*utm[0:3], **utm_kw) assert_latlon_equal(latlon, result) @pytest.mark.skipif(not use_numpy, reason="numpy not installed") @pytest.mark.parametrize("latlon, utm, utm_kw", known_values) def test_to_latlon_numpy(latlon, utm, utm_kw): utm = [np.array([x]) for x in utm[:2]] + list(utm[2:]) result = UTM.to_latlon(*utm) assert_latlon_equal(latlon, result) @pytest.mark.skipif(not use_numpy, reason="numpy not installed") def test_to_latlon_numpy_static(): result = UTM.to_latlon( np.array([166021.44317933032, 277707.83075574087, 544268.12794623]), np.array([0.0, 331796.29167519242, 663220.7198366751]), 31, northern=True, ) assert_latlon_equal( (np.array([0.0, 3.0, 6.0]), np.array([0.0, 1.0, 3.4])), result ) def test_from_latlon_range_ok(): """from_latlon should work for good values""" for i in range(-8000, 8400): assert UTM.from_latlon(i / 100, 0) for i in range(-18000, 18000): assert UTM.from_latlon(0, i / 100) @pytest.mark.parametrize( "lat, 
lon", [ (-100, 0), (-80.1, 0), (84.1, 0), (100, 0), (0, -300), (0, -180.1), (0, 180.1), (0, 300), (-100, -300), (100, -300), (-100, 300), (100, 300), ], ) def test_from_latlon_range_fails(lat, lon): """from_latlon should fail with out-of-bounds input""" with pytest.raises(UTM.OutOfRangeError): UTM.from_latlon(lat, lon) @pytest.mark.parametrize( "lat, lon, force_zone_number, force_zone_letter", [(40.71435, -74.00597, 70, "T"), (40.71435, -74.00597, 18, "A")], ) def test_from_latlon_range_forced_fails( lat, lon, force_zone_number, force_zone_letter ): """from_latlon should fail with out-of-bounds input""" with pytest.raises(UTM.OutOfRangeError): UTM.from_latlon(lat, lon, force_zone_number, force_zone_letter) def test_to_latlon_range_ok(): """to_latlon should work for good values""" for i in range(100000, 999999, 1000): assert UTM.to_latlon(i, 5000000, 32, "U") for i in range(10, 10000000, 1000): assert UTM.to_latlon(500000, i, 32, "U") for i in range(1, 60): assert UTM.to_latlon(500000, 5000000, i, "U") for i in range(ord("C"), ord("X")): i = chr(i) if i != "I" and i != "O": UTM.to_latlon(500000, 5000000, 32, i) @pytest.mark.parametrize( "easting, northing, zone_number, zone_letter", [ (0, 5000000, 32, "U"), (99999, 5000000, 32, "U"), (1000000, 5000000, 32, "U"), (100000000000, 5000000, 32, "U"), (500000, -100000, 32, "U"), (500000, -1, 32, "U"), (500000, 10000001, 32, "U"), (500000, 50000000, 32, "U"), (500000, 5000000, 0, "U"), (500000, 5000000, 61, "U"), (500000, 5000000, 1000, "U"), (500000, 5000000, 32, "A"), (500000, 5000000, 32, "B"), (500000, 5000000, 32, "I"), (500000, 5000000, 32, "O"), (500000, 5000000, 32, "Y"), (500000, 5000000, 32, "Z"), ], ) def test_to_latlon_range_checks(easting, northing, zone_number, zone_letter): """to_latlon should fail with out-of-bounds input""" with pytest.raises(UTM.OutOfRangeError): UTM.to_latlon(0, 5000000, 32, "U") @pytest.mark.parametrize( "lat, lon, expected_number, expected_letter", [ # test inside: (56, 3, 32, "V"), (56, 6, 32, "V"), (56, 9, 32, "V"), (56, 11.999999, 32, "V"), (60, 3, 32, "V"), (60, 6, 32, "V"), (60, 9, 32, "V"), (60, 11.999999, 32, "V"), (63.999999, 3, 32, "V"), (63.999999, 6, 32, "V"), (63.999999, 9, 32, "V"), (63.999999, 11.999999, 32, "V"), # test left of: (55.999999, 2.999999, 31, "U"), (56, 2.999999, 31, "V"), (60, 2.999999, 31, "V"), (63.999999, 2.999999, 31, "V"), (64, 2.999999, 31, "W"), # test right of: (55.999999, 12, 33, "U"), (56, 12, 33, "V"), (60, 12, 33, "V"), (63.999999, 12, 33, "V"), (64, 12, 33, "W"), # test below: (55.999999, 3, 31, "U"), (55.999999, 6, 32, "U"), (55.999999, 9, 32, "U"), (55.999999, 11.999999, 32, "U"), (55.999999, 12, 33, "U"), # test above: (64, 3, 31, "W"), (64, 6, 32, "W"), (64, 9, 32, "W"), (64, 11.999999, 32, "W"), (64, 12, 33, "W"), ], ) def test_from_latlon_zones(lat, lon, expected_number, expected_letter): result = UTM.from_latlon(lat, lon) assert result[2] == expected_number assert result[3].upper() == expected_letter.upper() @pytest.mark.parametrize( "lat, lon, expected_number", [ (40, 0, 31), (40, 5.999999, 31), (40, 6, 32), (72, 0, 31), (72, 5.999999, 31), (72, 6, 31), (72, 8.999999, 31), (72, 9, 33), ], ) def test_limits(lat, lon, expected_number): assert UTM.from_latlon(lat, lon)[2] == expected_number @pytest.mark.parametrize( "zone_number, zone_letter", [ (10, "C"), (10, "X"), (10, "p"), (10, "q"), (20, "X"), (1, "X"), (60, "e"), ], ) def test_valid_zones(zone_number, zone_letter): # should not raise any exceptions assert UTM.check_valid_zone(zone_number, zone_letter) is 
None @pytest.mark.parametrize( "zone_number, zone_letter", [(-100, "C"), (20, "I"), (20, "O"), (0, "O")] ) def test_invalid_zones(zone_number, zone_letter): with pytest.raises(UTM.OutOfRangeError): UTM.check_valid_zone(zone_number, zone_letter) @pytest.mark.parametrize( "lat, lon, utm, utm_kw, expected_number, expected_letter", [ (40.71435, -74.00597, 19, "T", 19, "T"), (40.71435, -74.00597, 17, "T", 17, "T"), (40.71435, -74.00597, 18, "u", 18, "U"), (40.71435, -74.00597, 18, "S", 18, "S"), ], ) def test_force_zone(lat, lon, utm, utm_kw, expected_number, expected_letter): # test forcing zone ranges # NYC should be zone 18T result = UTM.from_latlon(lat, lon, utm, utm_kw) assert result[2] == expected_number assert result[3].upper() == expected_letter.upper() def assert_equal_lon(result, expected_lon): _, lon = UTM.to_latlon(*result[:4], strict=False) assert lon == pytest.approx(expected_lon, abs=0.001) def test_force_east(): # Force point just west of anti-meridian to east zone 1 assert_equal_lon(UTM.from_latlon(0, 179.9, 1, "N"), 179.9) def test_force_west(): # Force point just east of anti-meridian to west zone 60 assert_equal_lon(UTM.from_latlon(0, -179.9, 60, "N"), -179.9) def test_version(): assert isinstance(UTM.__version__, str) and "." in UTM.__version__
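

# --- Added round-trip check (not part of the original test module), using the
# known Aachen coordinates from `known_values` above.
def test_roundtrip_aachen():
    easting, northing, zone, letter = UTM.from_latlon(50.77535, 6.08389)
    assert_latlon_equal(
        (50.77535, 6.08389), UTM.to_latlon(easting, northing, zone, letter)
    )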
Turbo87/utm
test/test_utm.py
Python
mit
10,049
''' Created on Dec 3, 2013 @author: bogdan requires python3 ''' import os, sys, re from collections import defaultdict import math import md060graphonoLev class clCrossLevenshtein(object): ''' classdocs ''' def __init__(self, SFInA, SFInB, SLangIDa, SLangIDb): ''' Constructor ''' SNameIName , SNameIExt = os.path.splitext(SFInA) # generate the debug using the first file name SFDebug = SNameIName + '-md050crosslevenshtein.debug' # FDebug = open(SFDebug, 'w') # debug file for each of the input files.. LWordsA = self.readWordList(SFInA) LWordsB = self.readWordList(SFInB) # graphonological object with phonological features over graphemes # OGraphonolev = md060graphonoLev.clGraphonolev(Debug = True, DebugFile = SFDebug) OGraphonolev = md060graphonoLev.clGraphonolev() LDistances = [] ICounter = 0 ICounterRec = 0 for (SWordA, SPoSA, IFrqA) in LWordsA: LenA = len(SWordA) try: LogFrqA = math.log(IFrqA) except: LogFrqA = 0 LCognates = [] LCognates1 = [] ICounter += 1 if ICounter % 1 == 0: sys.stderr.write(SWordA + ' ' + str(ICounter) + '\n') ''' # changed: for (SWordB, SPoSB, IFrqB) in LWordsB: LenB = len(SWordB) LenAve = (LenA + LenB) / 2 ILev = self.computeLevenshtein(SWordA, SWordB) ALevNorm = ILev/LenAve if ALevNorm <= 0.30: LCognates.append((ALevNorm, ILev, SWordB, SPoSB, IFrqB)) ''' for (SWordB, SPoSB, IFrqB) in LWordsB: # Lev0 is baseline Levenshtein distance # Lev1 is is graphonological Levenshtein distance (Lev0, Lev1, Lev0Norm, Lev1Norm) = OGraphonolev.computeLevenshtein(SWordA, SWordB, SLangIDa, SLangIDb) # if Lev0Norm <= 0.36: if Lev0Norm <= 0.4: LCognates.append((Lev0Norm, Lev0, SWordB, SPoSB, IFrqB)) # baseline Lev # if Lev1Norm <= 0.36: if Lev1Norm <= 0.4: LCognates1.append((Lev1Norm, Lev1, SWordB, SPoSB, IFrqB)) # graphonological Lev LDistances.append((SWordA, SPoSA, IFrqA, LCognates)) if (len(LCognates) > 0 or len(LCognates1) > 0): ICounterRec += 1 ACognPerCent = ICounterRec / ICounter # now restricted to writing only one cognate... 
# sys.stdout.write('\t{, %(ICounterRec)d, %(ICounter)d, %(SWordA)s, %(SPoSA)s, frq=%(IFrqA)d, ln=%(LogFrqA).2f, have-cognates: %(ACognPerCent).2f : \n' % locals()) sys.stdout.write('%(SWordA)s\t%(SPoSA)s\tfrq=%(IFrqA)d\t\n' % locals()) sys.stdout.flush() sys.stdout.write('BASELINE:\n') self.printCognate(LCognates, LogFrqA, SPoSA) # sys.stdout.write('\t') sys.stdout.write('GRAPHONOLOGICAL:\n') self.printCognate(LCognates1, LogFrqA, SPoSA) # removed (s) --> printCognate(s) in function call : simple production version sys.stdout.write('\n\n') sys.stdout.flush() ''' for (SWordA, SPoSA, IFrqA, LCognates) in LDistances: # sys.stdout.write('%(SWordA)s, %(SPoSA)s, %(IFrqA)d : \n' % locals()) for (ALevNorm, SWordB, SPoSB, IFrqB) in LCognates: # FDebug.write('\t %(ALevNorm)f, %(SWordB)s, %(SPoSB)s, %(IFrqB)d\n') pass ''' def printCognate(self, LCognates, LogFrqA, SPoSA): """ print only one cognate """ ICogRank = 0 ALevNormPrev = -1 for (ALevNorm, ILev, SWordB, SPoSB, IFrqB) in sorted(LCognates, reverse=False, key=lambda k: k[0]): if (ALevNorm != ALevNormPrev): ICogRank += 1 if ICogRank > 3: break ALevNormPrev = ALevNorm if ((SPoSB == SPoSA)): sys.stdout.write('%(ICogRank)d\t%(SWordB)s\t%(SPoSB)s\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) def printCognates(self, LCognates, LogFrqA, SPoSA): ICogRank = 0 ALevNormPrev = -1 for (ALevNorm, ILev, SWordB, SPoSB, IFrqB) in sorted(LCognates, reverse=False, key=lambda k: k[0]): # why reverse is False: starting from smallest try: LogFrqB = math.log(IFrqB) except: LogFrqB = 0 try: AFrqRange = min(LogFrqB, LogFrqA) / max(LogFrqB, LogFrqA) except: AFrqRange = 0 sys.stdout.write('\tFrqRange=' + str(AFrqRange) + '\t' + SPoSB + '\t' + SPoSA + '\t') if (ALevNorm != ALevNormPrev): ICogRank += 1 ALevNormPrev = ALevNorm if ((SPoSB == SPoSA) and (ICogRank == 1)): sys.stdout.write('--> %(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) sys.stdout.write('\nTRACE:\n') if ((SPoSB == SPoSA) and (AFrqRange > 0.5 )): # ICogRank += 1 # # sys.stdout.write('\t\trank=%(ICogRank)d, %(AFrqRange).3f, %(ILev).3f, %(ALevNorm).3f, %(SWordB)s, %(SPoSB)s, %(IFrqB)d, ln=%(LogFrqB).2f\n' % locals()) sys.stdout.write('%(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) ## temp: record a list of items ## break # record only one item... 
elif((SPoSB == SPoSA) and (AFrqRange <= 0.5)): sys.stdout.write('REJECTED-FRQ: %(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) elif((SPoSB != SPoSA) and (AFrqRange > 0.5 )): sys.stdout.write('REJECTED-POS: %(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) ## pass # sys.stdout.write('\t\t--, %(AFrqRange).3f, %(ILev).3f, %(ALevNorm).3f, %(SWordB)s, %(SPoSB)s, %(IFrqB)d, ln=%(LogFrqB).2f\n' % locals()) elif((SPoSB != SPoSA) and (AFrqRange <= 0.5 )): sys.stdout.write('REJECTED-FPS: %(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) else: sys.stdout.write('REJECTED-OTH: %(SWordB)s\t%(SPoSB)s\t%(IFrqB)d\tPhLev=%(ILev).2f\tPhLevNormLen=%(ALevNorm).2f\n' % locals()) # sys.stdout.write('\t}\n') sys.stdout.write('\n') sys.stdout.flush() def readWordList(self, SFIn): # modified to adjust to Czech LWords = [] for SLine in open(SFIn, 'rU'): try: SLine = SLine.rstrip() SLine = SLine.lstrip() LLine = re.split('[\t ]+', SLine) SWord = LLine[1] SPoS = LLine[2] IFrq = int(LLine[0]) except: continue LWords.append((SWord, SPoS, IFrq)) return LWords def computeLevenshteinLocal(self, s1, s2): l1 = len(s1) l2 = len(s2) matrix = [list(range(l1 + 1))] * (l2 + 1) for zz in range(l2 + 1): matrix[zz] = list(range(zz,zz + l1 + 1)) for zz in range(0,l2): for sz in range(0,l1): # here: 1. compare sets of features; add the minimal substitution score here... if s1[sz] == s2[zz]: matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz]) else: matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1) return matrix[l2][l1] if __name__ == '__main__': OCrossLevenshtein = clCrossLevenshtein(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) # dictionary1, dictionary2, langID1, langID2
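

# --- Illustrative check (added; not part of the original module; the function
# name is hypothetical). computeLevenshteinLocal() above is the classic
# dynamic-programming edit-distance matrix; the canonical 'kitten' ->
# 'sitting' pair needs 3 edits. `self` is unused by that method, so it can be
# exercised without constructing the heavyweight object.
def _levenshtein_example():
    dist = clCrossLevenshtein.computeLevenshteinLocal(None, 'kitten', 'sitting')
    assert dist == 3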
bogdanbabych/morphosyntax
src/s010cognatematch/md070crosslevenshteinPhonV05.py
Python
apache-2.0
8,375
# -*- coding: utf-8 -*-
"""
@attention: validation helper functions
@author: lizheng
@date: 2013-12-09
"""
import re


class VerifyError(Exception):
    pass


def vlen(s, min_l, max_l):
    if min_l <= len(s) <= max_l:
        return
    raise VerifyError, u"length out of range (%s,%s)" % (min_l, max_l)


def vemail(s, min_len=3, max_len=50):
    re_str = ur"^[-_A-Za-z0-9\.]+@([_A-Za-z0-9]+\.)+[A-Za-z0-9]{2,32}$"
    vlen(s, min_len, max_len)
    if not re.match(re_str, s):
        raise VerifyError, u"invalid email address format"


def vnick(value, min_len=2, max_len=12, check_ban=True):
    """
    @attention: validate a nickname
    """
    if check_ban:
        # banned keywords: "test" (Chinese and English), "account opening",
        # "commission", "handling fee", "Zhixuan" (product name)
        ban_keywords = [u'测试', u'test', u'开户', u'佣金', u'手续费', u'智选']
        for key in ban_keywords:
            if key in value:
                raise VerifyError, u"nickname must not contain the keyword %s!" % key

    re_str = u'^[\w\-\_\u4e00-\u9fa5]{%s,%s}$' % (min_len, max_len)
    if not re.match(re_str, value):
        raise VerifyError, u"nickname must be %s-%s Chinese characters, letters, digits, underscores or hyphens!" % (min_len, max_len)


def vpassword(value):
    '''
    @note: check whether the password is a weak password
    '''
    weak_password = [
        '000000', '111111', '11111111', '112233', '123123',
        '123321', '123456', '12345678', '654321', '666666',
        '888888', 'abcdef', 'abcabc', 'abc123', 'a1b2c3',
        'aaa111', '123qwe', 'qwerty', 'qweasd', 'password',
        'p@ssword', 'passwd', 'iloveyou', '5201314'
    ]
    if value in weak_password:
        raise VerifyError, u"your password is too simple! please choose a new one"

    use_char = set(list(value))
    if len(use_char) > 2:
        return
    raise VerifyError, u"your password is too simple! please choose a new one"
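

# --- Usage sketch (added; not part of the original module; the function name
# is hypothetical). Each validator returns None on success and raises
# VerifyError on failure.
def _validators_example():
    vemail(u'user@example.com')   # passes silently
    vnick(u'alice_01')            # passes silently
    try:
        vpassword('123456')       # on the weak-password list
    except VerifyError:
        pass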
lantianlz/zx
common/validators.py
Python
gpl-2.0
1,739
# Ardclient
#
# Author: Dan Keder <dan.keder@gmail.com>

import popen2
import struct
import sys


def hexdump(data):
    ''' Hexdump data. '''
    (fout, fin) = popen2.popen2("/usr/bin/hexdump -Cv")
    fin.write(data)
    fin.close()
    return fout.read()


def dump(filename, data):
    ''' Write data to a file. '''
    f = open(filename, "w")
    f.write(data)
    f.close()


MSG_INFO="Info"
MSG_ERROR="Error"
MSG_WARN="Warning"

def log(type, text, verbose=False):
    ''' Print message to stderr. '''
    if verbose or type == MSG_ERROR:
        msg = "[%s] %s\n" % (type, text)
        sys.stderr.write(msg)


def contents(filename):
    ''' Read contents of a file and return them. '''
    fd = open(filename, "r")
    try:
        return fd.read()
    finally:
        fd.close()
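

# --- Usage sketch (added; not part of the original module). hexdump() shells
# out to /usr/bin/hexdump, so it only works where that binary exists; the
# popen2 module also restricts this file to Python 2.
if __name__ == '__main__':
    log(MSG_INFO, "dumping two bytes", verbose=True)
    print hexdump("\x00\x01")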
dankeder/ardclient
ardclient/debug.py
Python
gpl-2.0
765
# -*- coding: utf-8 -*- # # This file is part of INSPIRE. # Copyright (C) 2014-2017 CERN. # # INSPIRE is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # INSPIRE is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with INSPIRE. If not, see <http://www.gnu.org/licenses/>. # # In applying this license, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """Bundles for forms used across INSPIRE.""" from __future__ import absolute_import, division, print_function from invenio_assets import NpmBundle from invenio_assets.filters import RequireJSFilter from inspirehep.modules.theme.bundles import js as _js js = NpmBundle( "js/forms/inspire-form-init.js", output="gen/inspire-form.%(version)s.js", filters=RequireJSFilter(exclude=[_js]), npm={ "eonasdan-bootstrap-datetimepicker": "~4.15.35", "typeahead.js": "~0.10.5", "bootstrap-multiselect": "~0.9.13", "moment": "~2.11.2", } ) css = NpmBundle( "scss/forms/form.scss", "node_modules/eonasdan-bootstrap-datetimepicker/build/css/bootstrap-datetimepicker.css", "node_modules/typeahead.js-bootstrap-css/typeaheadjs.css", "node_modules/bootstrap-multiselect/dist/css/bootstrap-multiselect.css", output='gen/inspire-form.%(version)s.css', depends='scss/forms/*.scss', filters="node-scss, cleancss", npm={ "typeahead.js-bootstrap-css": "~1.2.1" } )
kaplun/inspire-next
inspirehep/modules/forms/bundles.py
Python
gpl-3.0
1,950
from newf import Application, Response, ResponseRedirect def foo(request): return Response("<h1>Hello World!</h1>") def bar(request): return ResponseRedirect("/foo") def test_debug(request): raise Exception, 'I am the exception' urls = ( (r'^/foo$', foo), (r'^/bar$', bar), (r'^/test-debug$', test_debug), ) application = Application(urls, debug=True) if __name__ == '__main__': from wsgiref.simple_server import make_server server = make_server('', 8000, application) server.serve_forever()
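

# --- Usage note (added; not part of the original example). With the server
# running on port 8000, the routes wired above behave roughly like this
# (the exact redirect status depends on newf's ResponseRedirect):
#
#   curl http://localhost:8000/foo          -> <h1>Hello World!</h1>
#   curl -i http://localhost:8000/bar       -> redirect to /foo
#   curl http://localhost:8000/test-debug   -> the raised exception, rendered
#                                              because debug=True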
lucky/newf
example_app.py
Python
mit
559
#!/usr/bin/env python3 # more.py # # Simple implementation of Unix "more" command. # # Allows more than one file on command line. Checks all files before generating # output (note race condition here). # # AMJ # 2017-03-29 import argparse from shutil import get_terminal_size def file_exists (filename): """Determine if a file with the given name exists in the current directory and can be opened for reading. Returns: True iff it exists and can be opened, False otherwise. """ try: f = open (filename, 'r') f.close () return True except IOError: return False def missing_files (file_list): """Checks if all the files named in the list exist and can be opened. Returns: List of the files that cannot be opened, so empty list if all is well. """ naughty_files = [] for file in file_list: if not file_exists (file): naughty_files.append (file) return naughty_files def chomp (s): """Remove the last character from 's' and return the shortened string. Intended to remove end-of-line characters, but could also have a host of amusing uses. Returns: s, minus its last character, or an empty string if s was null. """ return s [:-1] def pager (text_to_display): """Display the text provided a screen at a time in the current terminal. Wait for any <CR> ended input before displaying next. """ lines_per_chunk = get_terminal_size ().lines - 2 number_of_chunks = len (text_to_display) // lines_per_chunk + 1 for chunk in range (number_of_chunks): for line in text_to_display [chunk * lines_per_chunk: chunk * lines_per_chunk + lines_per_chunk]: print (chomp (line)) input ('Press Return for More {0:.0%} '.format (chunk / number_of_chunks)) def print_files (file_list, verbose): """Print out the contents of all the files named in file_list. If verbose is True, format output a little with a header block and some separators. Otherwise, just output files as they are. Returns: A sense of a job well done. """ for file_name in file_list: output = [] if verbose: output.append ('\n\n') output.append ('**' + (len (file_name) * '*') + '**' + '\n') output.append ('* ' + file_name + ' *' + '\n') output.append ('**' + (len (file_name) * '*') + '**' + '\n') output.append ('\n\n') f = open (file_name, 'r') for line in f.readlines (): output.append (line) f.close () if verbose: output.append ('\n\n') output.append (80 * '-' + '\n') output.append ('\n\n') pager (output) # # Main Program # if __name__ == '__main__': parser = argparse.ArgumentParser () parser.add_argument ('-v', '--verbose', help = 'Enable Verbose Mode', action = 'store_true') parser.add_argument ('file_list', nargs = '+', help = 'List of Files to Display') args = parser.parse_args () missing = missing_files (args.file_list) if missing == []: print_files (args.file_list, args.verbose) else: for file in missing: print ('{:}: file not found.'.format (file))
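

# --- Illustrative check (added; not part of the original script; the function
# name is hypothetical). chomp() just drops the final character, which for
# readline()-style input is the trailing newline.
def _chomp_example():
    assert chomp('hello\n') == 'hello'
    assert chomp('') == ''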
TonyJenkins/cfs2160-python
03unix/more.py
Python
unlicense
3,318
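The pager above is reusable on its own. A short sketch, assuming the file is saved as more.py somewhere importable and that a README.txt exists (both names are hypothetical):

# Sketch: reuse the pager from another script.
from more import pager

with open ('README.txt') as f:
    pager (f.readlines ())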
# -*- coding: utf-8 -*-

'''
Aggregation plug-in to copy all FCS files under a specified FLOW element
to the user folder or to the session workspace for download.

@author: Aaron Ponti
'''

from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchSubCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClause
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClauseAttribute
from ch.systemsx.cisd.base.utilities import OSUtilities
import os
import subprocess
import sys
import re
import zipfile
import java.io.File
from ch.ethz.scu.obit.common.server.longrunning import LRCache
import uuid
from threading import Thread
import logging

_DEBUG = False


def touch(full_file):
    """Touches a file.
    """
    f = open(full_file, 'w')
    f.close()


def zip_folder(folder_path, output_path):
    """Zip the contents of an entire folder recursively. Please notice that
    empty sub-folders will NOT be included in the archive.
    """

    # Note: os.path.relpath() does not exist in Jython.
    # target = os.path.relpath(folder_path, start=os.path.dirname(folder_path))
    target = folder_path[folder_path.rfind(os.sep) + 1:]

    # Simple trick to build relative paths
    root_len = folder_path.find(target)

    try:

        # Open zip file (no compression)
        zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_STORED,
                                   allowZip64=True)

        # Now recurse into the folder
        for root, folders, files in os.walk(folder_path):

            # We do not process folders. This is only useful to store empty
            # folders to the archive, but 1) jython's zipfile implementation
            # throws:
            #
            #     Exception: [Errno 21] Is a directory <directory_name>
            #
            # when trying to write a directory to a zip file (in contrast to
            # Python's implementation) and 2) oBIT does not export empty
            # folders in the first place.

            # Build the relative directory path (current root)
            relative_dir_path = os.path.abspath(root)[root_len:]

            # If a folder contains no files (only sub-folders), its place in
            # the hierarchy would be lost in the archive, so we add a
            # placeholder file.
            if len(files) == 0:
                touch(os.path.join(root, '~'))
                files.append('~')

            # Include all files
            for file_name in files:

                # Full file path to add
                full_file_path = os.path.join(root, file_name)
                relative_file_path = os.path.join(relative_dir_path, file_name)

                # Workaround problem with file name encoding
                full_file_path = full_file_path.encode('latin-1')
                relative_file_path = relative_file_path.encode('latin-1')

                # Write to zip
                zip_file.write(full_file_path, relative_file_path,
                               zipfile.ZIP_STORED)

    except IOError, message:
        raise Exception(message)

    except OSError, message:
        raise Exception(message)

    except zipfile.BadZipfile, message:
        raise Exception(message)

    finally:
        zip_file.close()


class Mover():
    """
    Takes care of organizing the files to be copied to the user folder and
    performs the actual copying.
    """

    def __init__(self, task, collectionId, collectionType, expSampleId,
                 expSamplePermId, expSampleType, platePermId, plateType,
                 mode, userId, properties, logger):
        """Constructor

        task           : helper argument to define what to export.
        collectionId   : id of the collection.
        collectionType : type of the collection.
        expSampleId    : id of the experiment sample.
        expSamplePermId: permId of the experiment sample.
        expSampleType  : type of the experiment sample.
        platePermId    : permId of the plate to export (if applicable).
        plateType      : type of the plate to export (if applicable).
        mode           : "normal", or "zip".
If mode is "normal", the files will be copied to the user folder; if mode is "zip", the files will be packaged into a zip files and served for download via the browser. userId : user id. properties : plug-in properties. logger : logger. """ # Logger self._logger = logger # Inform if _DEBUG: self._logger.info("Mover called with parameters:\n" + \ " task = " + task + "\n" + " collectionId = " + collectionId + "\n" + " collectionType = " + collectionType + "\n" + " expSampleId = " + expSampleId + "\n" + " expSamplePermId = " + expSamplePermId + "\n" + " expSampleType = " + expSampleType + "\n" + " platePermId = " + platePermId + "\n" + " plateType = " + plateType + "\n" + " mode = " + mode + "\n" + " userId = " + userId + "\n" + " properties = " + str(properties) + "\n") # Store properties self._properties = properties # Store task self._task = task # Experiment identifier self._collectionId = collectionId # Experiment type self._collectionType = collectionType # Experiment sample id self._expSampleId = expSampleId # Experiment sample perm id self._expSamplePermId = expSamplePermId # Experiment type self._expSampleType = expSampleType # Entity id self._platePermId = platePermId # Entity type self._plateType = plateType # Get the EXPERIMENT SAMPLE object self._experimentSample = self._getFlowExperimentSample() if self._experimentSample is None: raise Exception("Could not retrieve experiment sample with permId " + \ self._expSamplePermId + ".") # Get the COLLECTION object self._experiment = searchService.getExperiment(self._collectionId) # Get the PLATE object self._plate = None if self._platePermId != "" and self._plateType != "": self._plate = self._retrieveSampleWithTypeAndPermId(self._platePermId, self._plateType) # Set all relevant entity types for current experiment type self._expSamplePrefix = self._expSampleType[0:self._expSampleType.find("_EXPERIMENT")] # Original experiment (sample) name originalExperimentSampleName = self._experimentSample.getPropertyValue("$NAME") # Experiment sample name (to be used in the output folder) self._experimentSampleName = self._expSampleId[self._expSampleId.rfind("/") + 1:] + \ "/" + originalExperimentSampleName # Inform self._logger.info("Experiment name: " + self._experimentSampleName) # Collection name (to be used in the output folder) self._collectionName = self._collectionId[self._collectionId.rfind("/") + 1:] # User folder: depending on the 'mode' settings, the user folder changes if mode == "normal": # Standard user folder self._userFolder = os.path.join(self._properties['base_dir'], \ userId, self._properties['export_dir']) elif mode == "zip": # Get the path to the user's Session Workspace sessionWorkspace = sessionWorkspaceProvider.getSessionWorkspace() # The user folder now will point to the Session Workspace self._userFolder = sessionWorkspace.absolutePath else: raise Exception("Bad value for argument 'mode' (" + mode + ")") if _DEBUG: self._logger.info("User folder for mode " + mode + " is " + self._userFolder) # Store the mode self._mode = mode # Make sure the use folder (with export subfolder) exists and has # the correct permissions if not os.path.isdir(self._userFolder): self._createDir(self._userFolder) # Root (collection) of the export folder self._rootExportPath = os.path.join(self._userFolder, self._collectionName) # Experiment full path within the root export path self._experimentPath = os.path.join(self._rootExportPath, self._experimentSampleName) # Current path: this is used to keep track of the path where to copy # files when 
        self._currentPath = ""

        # Message (in case of error)
        self._message = ""

        # Info
        if _DEBUG:
            self._logger.info("Target experiment folder: " + self._experimentPath)

        # Keep track of the number of copied files
        self._numCopiedFiles = 0

    # Public methods
    # =========================================================================

    def process(self):
        """
        Uses the information stored in the Mover object to reconstruct the
        structure of the experiment and copies it to the user folder. If the
        processing was successful, the method returns True. Otherwise,
        it returns False.
        """

        # Create the experiment (sample) folder in the user/export
        if not self._createRootAndExperimentFolder():
            self._message = "Could not create experiment folder " + \
                self._rootExportPath
            self._logger.error(self._message)
            return False

        self._logger.info("Starting copy...")

        # Now process depending on the task
        if self._task == "EXPERIMENT_SAMPLE":
            # Copy all datasets contained in this experiment
            return self._copyDataSetsForExperiment()

        elif self._task == "ALL_PLATES":
            # Copy all datasets for all plates in the experiment
            return self._copyDataSetsForPlates()

        elif self._task == "TUBESET":
            # Copy all datasets for all tubes in the experiment
            return self._copyDataSetsForTubes()

        elif self._task == "PLATE":
            # Copy all the datasets contained in the selected plate
            return self._copyDataSetsForPlate(self._plate)

        else:
            self._message = "Unknown task!"
            self._logger.error(self._message)
            return False

    def compressIfNeeded(self):
        """Compresses the exported experiment folder to a zip archive, but
        only if the mode was "zip".
        """

        if self._mode == "zip":
            zip_folder(self._rootExportPath, self.getZipArchiveFullPath())

    def getZipArchiveFullPath(self):
        """Return the full path of the zip archive (or "" if mode was "normal").
        """

        if self._mode == "zip":
            return self._rootExportPath + ".zip"

        return ""

    def getZipArchiveFileName(self):
        """Return the file name of the zip archive without path."""

        if self._mode == "zip":
            fullFile = java.io.File(self.getZipArchiveFullPath())
            return fullFile.getName()

        return ""

    def getErrorMessage(self):
        """
        Return the error message (in case process() returned failure)
        """
        return self._message

    def getNumberOfCopiedFiles(self):
        """
        Return the number of copied files.
        """
        return self._numCopiedFiles

    def getRelativeRootExperimentPath(self):
        """
        Return the experiment path relative to the user folder.
        """
        return userId + "/" + \
            self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]

    # Private methods
    # =========================================================================

    def _getFlowExperimentSample(self):
        """Find the {FLOW}_EXPERIMENT sample with given permId. Returns None
        if it cannot be found."""

        # Inform
        if _DEBUG:
            self._logger.info("Retrieving experiment sample of code " +
                              self._expSampleId + ", permId " +
                              self._expSamplePermId + " and type " +
                              self._expSampleType)

        # Search for the experiment sample with specified type and perm id
        sampleCriteria = SearchCriteria()
        sampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                self._expSampleType))
        sampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.PERM_ID,
                self._expSamplePermId))

        # Search
        samples = searchService.searchForSamples(sampleCriteria)

        if len(samples) == 0:
            self._message = "Could not retrieve " + self._expSampleType + \
                " sample with code " + self._expSampleId + ", permId " + \
                self._expSamplePermId + " and type " + self._expSampleType + "."
            self._logger.error(self._message)
            return None

        if _DEBUG:
            self._logger.info("Successfully returned sample " + self._expSampleId)

        # Return
        return samples[0]

    def _retrieveSampleWithTypeAndPermId(self, samplePermId, sampleType):
        """
        Retrieve a sample belonging to current experiment sample and
        collection having specified type and perm id.
        """

        # The sample is of type 'sampleType' and has perm id 'samplePermId'
        searchCriteria = SearchCriteria()
        searchCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                sampleType)
        )
        searchCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.PERM_ID,
                samplePermId)
        )

        # Now search
        samples = searchService.searchForSamples(searchCriteria)

        if len(samples) != 1:
            self._message = "Sample with perm id " + samplePermId + \
                " and type " + sampleType + " not found!"
            self._logger.error(self._message)
            return None

        # Return the sample
        return samples[0]

    def _retrieveAllSamplesWithType(self, sampleType):
        """
        Retrieve all samples belonging to current experiment sample and
        collection having specified type.
        """

        # The samples are of type 'sampleType'
        searchCriteria = SearchCriteria()
        searchCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                sampleType)
        )

        # The samples have parent _EXPERIMENT_SAMPLE
        expSampleCriteria = SearchCriteria()
        expSampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                self._expSampleType)
        )
        expSampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.PERM_ID,
                self._expSamplePermId)
        )
        searchCriteria.addSubCriteria(
            SearchSubCriteria.createSampleParentCriteria(expSampleCriteria)
        )

        # Now search
        samples = searchService.searchForSamples(searchCriteria)

        # Return the samples
        return samples

    def _retrieveAllSamplesWithTypeAndParentWithPermId(self, sampleType,
                                                       parentSamplePermId,
                                                       parentSampleType):
        """
        Retrieve all samples having specified type and a parent sample with
        given perm id and type.
        """

        if _DEBUG:
            self._logger.info("Retrieving samples of type " + sampleType +
                              " with parent sample with perm id " +
                              parentSamplePermId + " and type " +
                              parentSampleType)

        # The samples are of type 'sampleType'
        searchCriteria = SearchCriteria()
        searchCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                sampleType)
        )

        # The samples have given parent
        expSampleCriteria = SearchCriteria()
        expSampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.TYPE,
                parentSampleType)
        )
        expSampleCriteria.addMatchClause(
            MatchClause.createAttributeMatch(
                MatchClauseAttribute.PERM_ID,
                parentSamplePermId)
        )
        searchCriteria.addSubCriteria(
            SearchSubCriteria.createSampleParentCriteria(expSampleCriteria)
        )

        # Now search
        samples = searchService.searchForSamples(searchCriteria)

        # Return the samples
        return samples

    def _retrieveAllSamplesWithTypeAndParent(self, sampleType, parentSampleId,
                                             parentSampleType):
        """
        Retrieve all samples having specified type and a parent sample with
        given code and type.
""" if _DEBUG: self._logger.info("Retrieving samples of type " + sampleType + " with parent sample with id " + parentSampleId + " and type " + parentSampleType) # The samples are of type 'sampleType' searchCriteria = SearchCriteria() searchCriteria.addMatchClause( MatchClause.createAttributeMatch( MatchClauseAttribute.TYPE, sampleType) ) # The samples have given parent expSampleCriteria = SearchCriteria() expSampleCriteria.addMatchClause( MatchClause.createAttributeMatch( MatchClauseAttribute.TYPE, parentSampleType) ) expSampleCriteria.addMatchClause( MatchClause.createAttributeMatch( MatchClauseAttribute.CODE, parentSampleId) ) searchCriteria.addSubCriteria( SearchSubCriteria.createSampleParentCriteria(expSampleCriteria) ) # Now search samples = searchService.searchForSamples(searchCriteria) # Return the samples return samples def _copyDataSetsForExperiment(self): """ Copies all FCS files in the experiment to the user directory reconstructing the sample hierarchy. Plates will map to subfolders. Tubes will be at the experiment root. Returns True for success. In case of error, returns False and sets the error message in self._message -- to be retrieved with the getErrorMessage() method. """ # Copy all tubes - if they could not be copied, we abort if not self._copyDataSetsForTubes(): return False # Copy the plates if not self._copyDataSetsForPlates(): return False # Return success return True def _copyDataSetsForPlate(self, plate): """ Copy all FCS files for given plate in the experiment to the user directory. If the plate is not passed, it will be retrieved using self._entityId. The plate will map to a subfolder. Returns True for success. In case of error, returns False and sets the error message in self._message -- to be retrieved with the getErrorMessage() method. """ # Get plate code and name plateCode = plate.getCode() plateName = plate.getPropertyValue("$NAME") if _DEBUG: self._logger.info("Processing plate with name " + plateName) # Create a folder for the plate self._currentPath = os.path.join(self._experimentPath, plateName) self._createDir(self._currentPath) if _DEBUG: self._logger.info("Plate with name " + plateName + " will be exported to " + self._currentPath) # Get all datasets for the plate dataSets = self._getDataSetsForPlate(plateCode) if len(dataSets) == 0: self._message = "Could not retrieve datasets for plate with code " + plateCode + "." self._logger.error(self._message) return False # Get all fcs files for the datasets dataSetFiles = self._getFilesForDataSets(dataSets) if len(dataSetFiles) == 0: self._message = "Could not retrieve files for datasets from plate " + plateCode + "." self._logger.error(self._message) return False # Copy the files to the user folder (in the plate folder) for fcsFile in dataSetFiles: self._copyFile(fcsFile, self._currentPath) # Return success return True def _copyDataSetsForPlates(self): """ Copy all FCS files for the plates in the experiment to the user directory. Each plate will map to a subfolder. Returns True for success. In case of error, returns False and sets the error message in self._message -- to be retrieved with the getErrorMessage() method. 
""" # Get the plates (if some exist) plates = self._getAllPlates() if len(plates) == 0: return True # Reset the current target folder to the root of the experiment sample self._currentPath = self._experimentPath # Now iterate over the plates, retrieve their datasets and fcs files # and copy them to the plate subfolders for plate in plates: if not self._copyDataSetsForPlate(plate): self._message = "Could not retrieve datasets for plate." self._logger.error(self._message) return False # Return return True def _copyDataSetsForTubes(self): """ Copy all FCS files for the tubes in the experiment to the user directory. Tubes will be at the experiment root. Returns True for success. In case of error, returns False and sets the error message in self._message -- to be retrieved with the getErrorMessage() method. """ # Get the tubes (if some exist) tubes = self._getAllTubes() if len(tubes) == 0: return True # Reset the current target folder to the root of the experiment sample self._currentPath = self._experimentPath # Now iterate over the tubes and retrieve their datasets dataSets = [] for tube in tubes: tubeId = tube.getCode() dataSetsForSample = self._getDataSetForTube(tubeId) dataSets.extend(dataSetsForSample) if _DEBUG: self._logger.info("Found " + str(len(dataSets)) + " datasets") if len(dataSets) == 0: self._message = "Could not retrieve datasets for tubes in " \ "experiment with code " + self._experimentCode + "." self._logger.error(self._message) return False # Get all fcs files for the datasets dataSetFiles = self._getFilesForDataSets(dataSets) if _DEBUG: self._logger.info("Found " + str(len(dataSetFiles)) + " dataset files") if len(dataSetFiles) == 0: self._message = "Could not retrieve files for datasets from tubes." self._logger.error(self._message) return False if _DEBUG: self._logger.info("Exporting tubes to folder " + self._currentPath) # Copy the files for fcsFile in dataSetFiles: if _DEBUG: self._logger.info("Copying " + str(fcsFile) + " to " + str(self._currentPath)) self._copyFile(fcsFile, self._currentPath) # Return success return True def _copyFile(self, source, dstDir): """Copies the source file (with full path) to directory dstDir. We use a trick to preserve the NFSv4 ACLs: since copying the file loses them, we first touch the destination file to create it, and then we overwrite it. """ dstFile = os.path.join(dstDir, os.path.basename(source)) touch = "/usr/bin/touch" if OSUtilities.isMacOS() else "/bin/touch" subprocess.call([touch, dstFile]) subprocess.call(["/bin/cp", source, dstDir]) self._logger.info("Copying file " + source + " to " + dstDir) self._numCopiedFiles += 1 def _createDir(self, dirFullPath): """Creates the passed directory (with full path). """ if os.path.isdir(dirFullPath): if _DEBUG: self._logger.info("Folder " + dirFullPath + " already exists.") return os.makedirs(dirFullPath) def _createRootAndExperimentFolder(self): """ Create the experiment folder. Notice that it uses information already stored in the object, but this info is filled in in the constructor, so it is safe to assume it is there if nothing major went wrong. In this case, the method will return False and no folder will be created. Otherwise, the method returns True. Please notice that if the experiment folder already exists, _{digit} will be appended to the folder name, to ensure that the folder is unique. The updated folder name will be stored in the _rootExportPath property. 
""" # This should not happen if self._rootExportPath == "" or self._experimentPath == "": return False # Make sure that the experiment folder does not already exist expPath = self._experimentPath # Does the folder already exist? It if does, append an increasing # numeric index. if os.path.exists(expPath): counter = 1 ok = False while not ok: tmpPath = expPath + "_" + str(counter) if not os.path.exists(tmpPath): expPath = tmpPath ok = True else: counter += 1 # Update the root and experiment paths self._experimentPath = os.path.join(self._rootExportPath, expPath) # Create the root folder self._createDir(self._rootExportPath) # And now create the experiment folder (in the root folder) self._createDir(self._experimentPath) if _DEBUG: self._logger.info("Successfully created folder " + self._experimentPath) # Return success return True def _getDataSetsForPlate(self, platePermId): """ Return a list of datasets belonging to the plate with specified ID. If none are found, return []. """ # Get the wells wells = self._retrieveAllSamplesWithTypeAndParent( self._expSamplePrefix + "_WELL", platePermId, self._expSamplePrefix + "_PLATE") if len(wells) == 0: self._message = "Could not retrieve wells for plate with " \ "code " + plateCode + "." self._logger.error(self._message) return wells # Now iterate over the samples and retrieve their datasets dataSets = [] for well in wells: wellCode = well.getCode() dataSetsForWell = self._getDataSetForWell(wellCode) dataSets.extend(dataSetsForWell) if len(dataSets) == 0: self._message = "Could not retrieve datasets for wells in " \ "plate with code " + plateCode + " from experiment " \ "with code " + self._experimentCode + "." self._logger.error(self._message) # Return return dataSets def _getDataSetForWell(self, wellId=None): """ Get the datasets belonging to the well with specified code. If none are found, return []. If no wellId is given, it is assumed that the well is the passed entity with code self._entityId. """ if wellId is None: wellId = self._entityId # Set search criteria to retrieve the dataset contained in the well searchCriteria = SearchCriteria() wellCriteria = SearchCriteria() wellCriteria.addMatchClause( MatchClause.createAttributeMatch( MatchClauseAttribute.CODE, wellId) ) searchCriteria.addSubCriteria( SearchSubCriteria.createSampleCriteria( wellCriteria) ) dataSets = searchService.searchForDataSets(searchCriteria) if len(dataSets) == 0: self._message = "Could not retrieve datasets for well " \ "with code " + wellId + "." self._logger.error(self._message) # Return return dataSets def _getDataSetForTube(self, tubeCode): """ Get the datasets belonging to the tube with specified tube code. If none is found, return []. """ if _DEBUG: self._logger.info("Searching for tube with code " + tubeCode) # Set search criteria to retrieve the dataset contained in the tube searchCriteria = SearchCriteria() tubeCriteria = SearchCriteria() tubeCriteria.addMatchClause( MatchClause.createAttributeMatch( MatchClauseAttribute.CODE, tubeCode)) searchCriteria.addSubCriteria( SearchSubCriteria.createSampleCriteria( tubeCriteria) ) dataSets = searchService.searchForDataSets(searchCriteria) if _DEBUG: self._logger.info("Retrieved " + str(len(dataSets)) + \ " datasets for tube with code " + tubeCode) if len(dataSets) == 0: self._message = "Could not retrieve datasets for tube " \ "with code " + tubeCode + "." 
            self._logger.error(self._message)

        # Return
        return dataSets

    def _getFilesForDataSets(self, dataSets):
        """
        Get the list of FCS file paths that correspond to the input list
        of datasets. If no files are found, return [].
        """

        if len(dataSets) == 0:
            return []

        dataSetFiles = []
        for dataSet in dataSets:
            content = contentProvider.getContent(dataSet.getDataSetCode())
            nodes = content.listMatchingNodes("original", ".*\.fcs")
            if nodes is not None:
                for node in nodes:
                    fileName = node.tryGetFile()
                    if fileName is not None:
                        fileName = str(fileName)
                        if fileName.lower().endswith(".fcs"):
                            dataSetFiles.append(fileName)

        if len(dataSetFiles) == 0:
            self._message = "Could not retrieve dataset files!"
            self._logger.error(self._message)

        # Return the files
        return dataSetFiles

    def _getAllPlates(self):
        """
        Get all plates in the experiment. Returns [] if none are found.
        """

        # Set the sample type
        sampleType = self._expSamplePrefix + "_PLATE"

        if _DEBUG:
            self._logger.info("Finding all samples of type " + sampleType)

        # Retrieve all plates
        plates = self._retrieveAllSamplesWithTypeAndParentWithPermId(
            sampleType, self._expSamplePermId, self._expSampleType)

        if _DEBUG:
            self._logger.info("Found " + str(len(plates)) + " plates.")

        if len(plates) == 0:
            self._message = "No plates found!"
            self._logger.error(self._message)
            return []

        # Return the plates
        return plates

    def _getAllTubes(self):
        """
        Get all tubes in the experiment. Returns [] if none are found.
        """

        # Set the sample type
        sampleType = self._expSamplePrefix + "_TUBE"

        if _DEBUG:
            self._logger.info("Finding all samples of type " + sampleType)

        # Retrieve all tubes
        tubes = self._retrieveAllSamplesWithTypeAndParentWithPermId(
            sampleType, self._expSamplePermId, self._expSampleType)

        if _DEBUG:
            self._logger.info("Found " + str(len(tubes)) + " tubes.")

        if len(tubes) == 0:
            self._message = "No tubes found!"
            self._logger.error(self._message)

        # Return the tubes
        return tubes


# Parse properties file for custom settings
def parsePropertiesFile():
    """Parse properties file for custom plug-in settings."""

    filename = "../core-plugins/flow/4/dss/reporting-plugins/export_flow_datasets/plugin.properties"
    var_names = ['base_dir', 'export_dir']

    properties = {}
    try:
        fp = open(filename, "r")
    except:
        return properties

    try:
        for line in fp:
            line = re.sub('[ \'\"\n]', '', line)
            parts = line.split("=")
            if len(parts) == 2:
                if parts[0] in var_names:
                    properties[parts[0]] = parts[1]
    finally:
        fp.close()

    # Check that all variables were found
    if len(properties.keys()) == 0:
        return None

    found_vars = properties.keys()
    for var_name in var_names:
        if var_name not in found_vars:
            return None

    # Make sure that there are no Windows line endings
    for var_name in var_names:
        properties[var_name] = properties[var_name].replace('\r', '')

    # Everything found
    return properties


# Plug-in entry point
#
# Input parameters:
#
# uid            : job unique identifier (see below)
# task           : requested task
# collectionId   : collection identifier
# collectionType : collection type
# expSampleId    : experiment sample identifier
# expSamplePermId: experiment sample perm identifier
# expSampleType  : experiment sample type
# platePermId    : plate perm identifier (if applicable)
# plateType      : plate type (if applicable)
# mode           : requested mode of operation: one of 'normal', 'zip'.
#
# This method returns a table to the client with a different set of columns
# depending on whether the plug-in is called for the first time and the process
# is just started, or if it is queried for completion at a later time.
#
# At the end of the first call, a table with following columns is returned:
#
# uid      : unique identifier of the running plug-in
# completed: indicates whether the plug-in has finished. This is set to False
#            in the first call.
#
# Later calls return a table with the following columns:
#
# uid      : unique identifier of the running plug-in. This was returned to
#            the client in the first call and was passed on again as a
#            parameter. Here it is returned again to make sure that client
#            and server always know which task they are talking about.
# completed: True if the process has completed in the meanwhile, False if it
#            is still running.
# success  : True if the process completed successfully, False otherwise.
# message  : error message in case success was False.
# nCopiedFiles: total number of copied files.
# relativeExpFolder: folder to the copied folder relative to the root of the
#            export folder.
# zipArchiveFileName: file name of the zip in case compression was requested.
# mode     : requested mode of operation.
def aggregate(parameters, tableBuilder):

    # Get the ID of the call if it already exists
    uid = parameters.get("uid")

    if uid is None or uid == "":

        # Create a unique id
        uid = str(uuid.uuid4())

        # Add the table headers
        tableBuilder.addHeader("uid")
        tableBuilder.addHeader("completed")

        # Fill in relevant information
        row = tableBuilder.addRow()
        row.setCell("uid", uid)
        row.setCell("completed", False)

        # Launch the actual process in a separate thread
        thread = Thread(target=aggregateProcess,
                        args=(parameters, tableBuilder, uid))
        thread.start()

        # Return immediately
        return

    # The process is already running in a separate thread. We get current
    # results and return them
    resultToSend = LRCache.get(uid)
    if resultToSend is None:
        # This should not happen
        raise Exception("Could not retrieve results from result cache!")

    # Add the table headers
    tableBuilder.addHeader("uid")
    tableBuilder.addHeader("completed")
    tableBuilder.addHeader("success")
    tableBuilder.addHeader("message")
    tableBuilder.addHeader("nCopiedFiles")
    tableBuilder.addHeader("relativeExpFolder")
    tableBuilder.addHeader("zipArchiveFileName")
    tableBuilder.addHeader("mode")

    # Store current results in the table
    row = tableBuilder.addRow()
    row.setCell("uid", resultToSend["uid"])
    row.setCell("completed", resultToSend["completed"])
    row.setCell("success", resultToSend["success"])
    row.setCell("message", resultToSend["message"])
    row.setCell("nCopiedFiles", resultToSend["nCopiedFiles"])
    row.setCell("relativeExpFolder", resultToSend["relativeExpFolder"])
    row.setCell("zipArchiveFileName", resultToSend["zipArchiveFileName"])
    row.setCell("mode", resultToSend["mode"])


# Actual work process
def aggregateProcess(parameters, tableBuilder, uid):

    # Make sure to initialize and store the results. We need to have them since
    # most likely the client will try to retrieve them again before the process
    # is finished.
    resultToStore = {}
    resultToStore["uid"] = uid
    resultToStore["success"] = True
    resultToStore["completed"] = False
    resultToStore["message"] = ""
    resultToStore["nCopiedFiles"] = ""
    resultToStore["relativeExpFolder"] = ""
    resultToStore["zipArchiveFileName"] = ""
    resultToStore["mode"] = ""
    LRCache.set(uid, resultToStore)

    # Get path to containing folder
    # __file__ does not work (reliably) in Jython
    dbPath = "../core-plugins/flow/4/dss/reporting-plugins/export_flow_datasets"

    # Path to the logs subfolder
    logPath = os.path.join(dbPath, "logs")

    # Make sure the logs subfolder exists
    if not os.path.exists(logPath):
        os.makedirs(logPath)

    # Path for the log file
    logFile = os.path.join(logPath, "log.txt")

    # Set up logging
    logging.basicConfig(filename=logFile, level=logging.DEBUG,
                        format='%(asctime)-15s %(levelname)s: %(message)s')
    logger = logging.getLogger()

    # Get parameters from plugin.properties
    properties = parsePropertiesFile()
    if properties is None:
        msg = "Could not process plugin.properties"
        logger.error(msg)
        raise Exception(msg)

    if properties['base_dir'] == "" or properties['export_dir'] == "":
        msg = "Please set valid value for 'base_dir' and 'export_dir' in plugin.properties"
        logger.error(msg)
        raise Exception(msg)

    # Dump the incoming parameters to the log
    logger.info(str(parameters))

    # Get the task
    task = parameters["task"]

    # Get the collection identifier
    collectionId = parameters["collectionId"]

    # Get the collection type
    collectionType = parameters["collectionType"]

    # Get the experiment sample identifier
    expSampleId = parameters.get("expSampleId")

    # Get the experiment sample perm identifier
    expSamplePermId = parameters.get("expSamplePermId")

    # Get the experiment sample type
    expSampleType = parameters.get("expSampleType")

    # Get the plate perm identifier
    platePermId = parameters.get("platePermId")

    # Get the plate type
    plateType = parameters.get("plateType")

    # Get the mode
    mode = parameters.get("mode")

    # Info
    logger.info("Aggregation plug-in called with following parameters:")
    logger.info("task            = " + task)
    logger.info("collectionId    = " + collectionId)
    logger.info("collectionType  = " + collectionType)
    logger.info("expSampleId     = " + expSampleId)
    logger.info("expSamplePermId = " + expSamplePermId)
    logger.info("expSampleType   = " + expSampleType)
    logger.info("platePermId     = " + platePermId)
    logger.info("plateType       = " + plateType)
    logger.info("mode            = " + mode)
    logger.info("userId          = " + userId)

    logger.info("Aggregation plugin properties:")
    logger.info("properties = " + str(properties))

    # Consistency check: task must be one of a known set
    if task != "EXPERIMENT_SAMPLE" and \
            task != "ALL_PLATES" and \
            task != "PLATE" and \
            task != "TUBESET":
        msg = "The requested task " + task + " is not known!"
        logger.error(msg)
        raise Exception(msg)

    logger.info("Requested task: " + task)

    # Instantiate the Mover object - userId is a global variable
    # made available to the aggregation plug-in
    mover = Mover(task, collectionId, collectionType, expSampleId,
                  expSamplePermId, expSampleType, platePermId, plateType,
                  mode, userId, properties, logger)

    # Process
    success = mover.process()
    logger.info("Process ended with success = " + str(success))

    # Compress
    if mode == "zip":
        mover.compressIfNeeded()

    # Get some results info
    nCopiedFiles = mover.getNumberOfCopiedFiles()
    errorMessage = mover.getErrorMessage()
    relativeExpFolder = mover.getRelativeRootExperimentPath()
    zipFileName = mover.getZipArchiveFileName()

    # Update results and store them
    resultToStore["uid"] = uid
    resultToStore["completed"] = True
    resultToStore["success"] = success
    resultToStore["message"] = errorMessage
    resultToStore["nCopiedFiles"] = nCopiedFiles
    resultToStore["relativeExpFolder"] = relativeExpFolder
    resultToStore["zipArchiveFileName"] = zipFileName
    resultToStore["mode"] = mode
    LRCache.set(uid, resultToStore)

    # Email result to the user
    if success:

        subject = "Flow export: successfully processed requested data"

        if nCopiedFiles == 1:
            snip = "One file was "
        else:
            snip = str(nCopiedFiles) + " files were "

        if mode == "normal":
            body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
        else:
            body = snip + "successfully packaged for download: " + zipFileName

    else:
        subject = "Flow export: error processing request!"
        body = "Sorry, there was an error processing your request. " + \
               "Please send your administrator the following report:\n\n" + \
               "\"" + errorMessage + "\"\n"

    # Send
    try:
        mailService.createEmailSender().withSubject(subject).withBody(body).send()
    except:
        sys.stderr.write("export_flow_datasets: Failure sending email to user!")
aarpon/obit_flow_core_technology
core-plugins/flow/4/dss/reporting-plugins/export_flow_datasets/export_flow_datasets.py
Python
apache-2.0
43,482
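The aggregate/aggregateProcess pair above implements a simple long-running-job contract: the first call returns a `uid` with `completed = False`, and later calls with the same `uid` return the cached result row. A client-side sketch of that polling loop, where `call_aggregation_service` is a hypothetical helper that invokes the openBIS aggregation service and returns the first table row as a dict (the real client is the oBIT web UI):

# Sketch: client-side polling of the long-running export job above.
import time

def poll_export(call_aggregation_service, params, interval=2.0):
    result = call_aggregation_service(params)   # first call starts the job
    params = dict(params, uid=result["uid"])    # keep polling the same job
    while not result.get("completed"):
        time.sleep(interval)
        result = call_aggregation_service(params)
    return result                               # carries success, message, etc.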
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import frappe import frappe.utils from frappe.utils.oauth import get_oauth2_authorize_url, get_oauth_keys, login_via_oauth2, login_via_oauth2_id_token, login_oauth_user as _login_oauth_user, redirect_post_login import json from frappe import _ from frappe.auth import LoginManager from frappe.integrations.doctype.ldap_settings.ldap_settings import LDAPSettings from frappe.utils.password import get_decrypted_password from frappe.utils.html_utils import get_icon_html from frappe.integrations.oauth2_logins import decoder_compat no_cache = True def get_context(context): redirect_to = frappe.local.request.args.get("redirect-to") if frappe.session.user != "Guest": if not redirect_to: redirect_to = "/me" if frappe.session.data.user_type=="Website User" else "/desk" frappe.local.flags.redirect_location = redirect_to raise frappe.Redirect # get settings from site config context.no_header = True context.for_test = 'login.html' context["title"] = "Login" context["provider_logins"] = [] context["disable_signup"] = frappe.utils.cint(frappe.db.get_value("Website Settings", "Website Settings", "disable_signup")) providers = [i.name for i in frappe.get_all("Social Login Key", filters={"enable_social_login":1})] for provider in providers: client_id, base_url = frappe.get_value("Social Login Key", provider, ["client_id", "base_url"]) client_secret = get_decrypted_password("Social Login Key", provider, "client_secret") icon = get_icon_html(frappe.get_value("Social Login Key", provider, "icon"), small=True) if (get_oauth_keys(provider) and client_secret and client_id and base_url): context.provider_logins.append({ "name": provider, "provider_name": frappe.get_value("Social Login Key", provider, "provider_name"), "auth_url": get_oauth2_authorize_url(provider, redirect_to), "icon": icon }) context["social_login"] = True ldap_settings = LDAPSettings.get_ldap_client_settings() context["ldap_settings"] = ldap_settings login_name_placeholder = [_("Email address")] if frappe.utils.cint(frappe.get_system_settings("allow_login_using_mobile_number")): login_name_placeholder.append(_("Mobile number")) if frappe.utils.cint(frappe.get_system_settings("allow_login_using_user_name")): login_name_placeholder.append(_("Username")) context['login_name_placeholder'] = ' {0} '.format(_('or')).join(login_name_placeholder) return context @frappe.whitelist(allow_guest=True) def login_via_google(code, state): login_via_oauth2("google", code, state, decoder=decoder_compat) @frappe.whitelist(allow_guest=True) def login_via_github(code, state): login_via_oauth2("github", code, state) @frappe.whitelist(allow_guest=True) def login_via_facebook(code, state): login_via_oauth2("facebook", code, state, decoder=decoder_compat) @frappe.whitelist(allow_guest=True) def login_via_frappe(code, state): login_via_oauth2("frappe", code, state, decoder=decoder_compat) @frappe.whitelist(allow_guest=True) def login_via_office365(code, state): login_via_oauth2_id_token("office_365", code, state, decoder=decoder_compat) @frappe.whitelist(allow_guest=True) def login_oauth_user(data=None, provider=None, state=None, email_id=None, key=None, generate_login_token=False): if not ((data and provider and state) or (email_id and key)): frappe.respond_as_web_page(_("Invalid Request"), _("Missing parameters for login"), http_status_code=417) return _login_oauth_user(data, provider, state, email_id, key, generate_login_token) 
@frappe.whitelist(allow_guest=True) def login_via_token(login_token): sid = frappe.cache().get_value("login_token:{0}".format(login_token), expires=True) if not sid: frappe.respond_as_web_page(_("Invalid Request"), _("Invalid Login Token"), http_status_code=417) return frappe.local.form_dict.sid = sid frappe.local.login_manager = LoginManager() redirect_post_login(desk_user = frappe.db.get_value("User", frappe.session.user, "user_type")=="System User")
vjFaLk/frappe
frappe/www/login.py
Python
mit
4,110
# -*- coding: utf-8 -*-
from collections import defaultdict
import io
import logging
import operator
import os.path
import socket

from babelfish import Language
from pkg_resources import EntryPoint
import requests
from stevedore import EnabledExtensionManager, ExtensionManager

from .subtitle import compute_score, get_subtitle_path

logger = logging.getLogger(__name__)


class InternalExtensionManager(ExtensionManager):
    """Add support for internal entry points to the
    :class:`~stevedore.extension.ExtensionManager`

    Internal entry points are useful for libraries that ship their own plugins
    but still keep the entry point open.

    All other parameters are passed onwards to the
    :class:`~stevedore.extension.ExtensionManager` constructor.

    :param internal_entry_points: the internal providers
    :type internal_entry_points: list of :class:`~pkg_resources.EntryPoint`

    """
    def __init__(self, namespace, internal_entry_points, **kwargs):
        self.internal_entry_points = list(internal_entry_points)
        super(InternalExtensionManager, self).__init__(namespace, **kwargs)

    def _find_entry_points(self, namespace):
        return self.internal_entry_points + super(InternalExtensionManager, self)._find_entry_points(namespace)


provider_manager = InternalExtensionManager('subliminal.providers', [EntryPoint.parse(ep) for ep in (
    'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
    'napiprojekt = subliminal.providers.napiprojekt:NapiProjektProvider',
    'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
    'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
    'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
    'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
)])


class ProviderPool(object):
    """A pool of providers with the same API as a single
    :class:`~subliminal.providers.Provider`.

    It has a few extra features:

    * Lazy loads providers when needed and supports the :keyword:`with`
      statement to :meth:`terminate` the providers on exit.
    * Automatically discard providers on failure.

    :param providers: name of providers to use, if not all.
    :type providers: list
    :param dict provider_configs: provider configuration as keyword arguments
        per provider name to pass when instantiating the
        :class:`~subliminal.providers.Provider`.
""" def __init__(self, providers=None, provider_configs=None): #: Name of providers to use self.providers = providers or provider_manager.names() #: Provider configuration self.provider_configs = provider_configs or {} #: Initialized providers self.initialized_providers = {} #: Discarded providers self.discarded_providers = set() #: Dedicated :data:`provider_manager` as :class:`~stevedore.enabled.EnabledExtensionManager` self.manager = EnabledExtensionManager(provider_manager.namespace, lambda e: e.name in self.providers) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.terminate() def __getitem__(self, name): if name not in self.initialized_providers: logger.info('Initializing provider %s', name) provider = self.manager[name].plugin(**self.provider_configs.get(name, {})) provider.initialize() self.initialized_providers[name] = provider return self.initialized_providers[name] def __delitem__(self, name): if name not in self.initialized_providers: raise KeyError(name) try: logger.info('Terminating provider %s', name) self.initialized_providers[name].terminate() except (requests.Timeout, socket.timeout): logger.error('Provider %r timed out, improperly terminated', name) except: logger.exception('Provider %r terminated unexpectedly', name) del self.initialized_providers[name] def __iter__(self): return iter(self.initialized_providers) def list_subtitles(self, video, languages): """List subtitles. :param video: video to list subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to search for. :type languages: set of :class:`~babelfish.language.Language` :return: found subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` """ subtitles = [] for name in self.providers: # check discarded providers if name in self.discarded_providers: logger.debug('Skipping discarded provider %r', name) continue # check video validity if not self.manager[name].plugin.check(video): logger.info('Skipping provider %r: not a valid video', name) continue # check supported languages provider_languages = self.manager[name].plugin.languages & languages if not provider_languages: logger.info('Skipping provider %r: no language to search for', name) continue # list subtitles logger.info('Listing subtitles with provider %r and languages %r', name, provider_languages) try: provider_subtitles = self[name].list_subtitles(video, provider_languages) except (requests.Timeout, socket.timeout): logger.error('Provider %r timed out, discarding it', name) self.discarded_providers.add(name) continue except: logger.exception('Unexpected error in provider %r, discarding it', name) self.discarded_providers.add(name) continue subtitles.extend(provider_subtitles) return subtitles def download_subtitle(self, subtitle): """Download `subtitle`'s :attr:`~subliminal.subtitle.Subtitle.content`. :param subtitle: subtitle to download. :type subtitle: :class:`~subliminal.subtitle.Subtitle` :return: `True` if the subtitle has been successfully downloaded, `False` otherwise. 
:rtype: bool """ # check discarded providers if subtitle.provider_name in self.discarded_providers: logger.warning('Provider %r is discarded', subtitle.provider_name) return False logger.info('Downloading subtitle %r', subtitle) try: self[subtitle.provider_name].download_subtitle(subtitle) except (requests.Timeout, socket.timeout): logger.error('Provider %r timed out, discarding it', subtitle.provider_name) self.discarded_providers.add(subtitle.provider_name) return False except: logger.exception('Unexpected error in provider %r, discarding it', subtitle.provider_name) self.discarded_providers.add(subtitle.provider_name) return False # check subtitle validity if not subtitle.is_valid(): logger.error('Invalid subtitle') return False return True def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False, scores=None): """Download the best matching subtitles. :param subtitles: the subtitles to use. :type subtitles: list of :class:`~subliminal.subtitle.Subtitle` :param video: video to download subtitles for. :type video: :class:`~subliminal.video.Video` :param languages: languages to download. :type languages: set of :class:`~babelfish.language.Language` :param int min_score: minimum score for a subtitle to be downloaded. :param bool hearing_impaired: hearing impaired preference. :param bool only_one: download only one subtitle, not one per language. :param dict scores: scores to use, if `None`, the :attr:`~subliminal.video.Video.scores` from the video are used. :return: downloaded subtitles. :rtype: list of :class:`~subliminal.subtitle.Subtitle` """ # sort subtitles by score scored_subtitles = sorted([(s, compute_score(s.get_matches(video, hearing_impaired=hearing_impaired), video, scores=scores)) for s in subtitles], key=operator.itemgetter(1), reverse=True) # download best subtitles, falling back on the next on error downloaded_subtitles = [] for subtitle, score in scored_subtitles: # check score if score < min_score: logger.info('Score %d is below min_score (%d)', score, min_score) break # check downloaded languages if subtitle.language in set(s.language for s in downloaded_subtitles): logger.debug('Skipping subtitle: %r already downloaded', subtitle.language) continue # download logger.info('Downloading subtitle %r with score %d', subtitle, score) if self.download_subtitle(subtitle): downloaded_subtitles.append(subtitle) # stop when all languages are downloaded if set(s.language for s in downloaded_subtitles) == languages: logger.debug('All languages downloaded') break # stop if only one subtitle is requested if only_one: logger.debug('Only one subtitle downloaded') break return downloaded_subtitles def terminate(self): """Terminate all the :attr:`initialized_providers`.""" logger.debug('Terminating initialized providers') for name in list(self.initialized_providers): del self[name] def check_video(video, languages=None, age=None, undefined=False): """Perform some checks on the `video`. All the checks are optional. Return `False` if any of this check fails: * `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`. * `video` is older than `age`. * `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`. :param video: video to check. :type video: :class:`~subliminal.video.Video` :param languages: desired languages. :type languages: set of :class:`~babelfish.language.Language` :param datetime.timedelta age: maximum age of the video. 
    :param bool undefined: fail on existing undefined language.
    :return: `True` if the video passes the checks, `False` otherwise.
    :rtype: bool

    """
    # language test
    if languages and not (languages - video.subtitle_languages):
        logger.debug('All languages %r exist', languages)
        return False

    # age test
    if age and video.age > age:
        logger.debug('Video is older than %r', age)
        return False

    # undefined test
    if undefined and Language('und') in video.subtitle_languages:
        logger.debug('Undefined language found')
        return False

    return True


def list_subtitles(videos, languages, **kwargs):
    """List subtitles.

    The `videos` must pass the `languages` check of :func:`check_video`.

    All other parameters are passed onwards to the :class:`ProviderPool` constructor.

    :param videos: videos to list subtitles for.
    :type videos: set of :class:`~subliminal.video.Video`
    :param languages: languages to search for.
    :type languages: set of :class:`~babelfish.language.Language`
    :return: found subtitles per video.
    :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`

    """
    listed_subtitles = defaultdict(list)

    # check videos
    checked_videos = []
    for video in videos:
        if not check_video(video, languages=languages):
            logger.info('Skipping video %r', video)
            continue
        checked_videos.append(video)

    # return immediately if no video passed the checks
    if not checked_videos:
        return listed_subtitles

    # list subtitles
    with ProviderPool(**kwargs) as pool:
        for video in checked_videos:
            logger.info('Listing subtitles for %r', video)
            subtitles = pool.list_subtitles(video, languages - video.subtitle_languages)
            listed_subtitles[video].extend(subtitles)
            logger.info('Found %d subtitle(s)', len(subtitles))

    return listed_subtitles


def download_subtitles(subtitles, **kwargs):
    """Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.

    All other parameters are passed onwards to the :class:`ProviderPool` constructor.

    :param subtitles: subtitles to download.
    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`

    """
    with ProviderPool(**kwargs) as pool:
        for subtitle in subtitles:
            logger.info('Downloading subtitle %r', subtitle)
            pool.download_subtitle(subtitle)


def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, scores=None,
                            **kwargs):
    """List and download the best matching subtitles.

    The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.

    All other parameters are passed onwards to the :class:`ProviderPool` constructor.

    :param videos: videos to download subtitles for.
    :type videos: set of :class:`~subliminal.video.Video`
    :param languages: languages to download.
    :type languages: set of :class:`~babelfish.language.Language`
    :param int min_score: minimum score for a subtitle to be downloaded.
    :param bool hearing_impaired: hearing impaired preference.
    :param bool only_one: download only one subtitle, not one per language.
    :param dict scores: scores to use, if `None`, the :attr:`~subliminal.video.Video.scores` from the video are used.
    :return: downloaded subtitles per video.
    :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`

    """
    downloaded_subtitles = defaultdict(list)

    # check videos
    checked_videos = []
    for video in videos:
        if not check_video(video, languages=languages, undefined=only_one):
            logger.info('Skipping video %r', video)
            continue
        checked_videos.append(video)

    # return immediately if no video passed the checks
    if not checked_videos:
        return downloaded_subtitles

    # download best subtitles
    with ProviderPool(**kwargs) as pool:
        for video in checked_videos:
            logger.info('Downloading best subtitles for %r', video)
            subtitles = pool.download_best_subtitles(pool.list_subtitles(video, languages - video.subtitle_languages),
                                                     video, languages, min_score=min_score,
                                                     hearing_impaired=hearing_impaired, only_one=only_one,
                                                     scores=scores)
            logger.info('Downloaded %d subtitle(s)', len(subtitles))
            downloaded_subtitles[video].extend(subtitles)

    return downloaded_subtitles


def save_subtitles(video, subtitles, single=False, directory=None, encoding=None):
    """Save subtitles on filesystem.

    Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other
    subtitles with the same language are silently ignored.

    The extension used is `.lang.srt` by default or `.srt` if `single` is `True`, with `lang` being the IETF code for
    the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.

    :param video: video of the subtitles.
    :type video: :class:`~subliminal.video.Video`
    :param subtitles: subtitles to save.
    :type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
    :param bool single: save a single subtitle, default is to save one subtitle per language.
    :param str directory: path to directory where to save the subtitles, default is next to the video.
    :param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
    :return: the saved subtitles
    :rtype: list of :class:`~subliminal.subtitle.Subtitle`

    """
    saved_subtitles = []
    for subtitle in subtitles:
        # check content
        if subtitle.content is None:
            logger.error('Skipping subtitle %r: no content', subtitle)
            continue

        # check language
        if subtitle.language in set(s.language for s in saved_subtitles):
            logger.debug('Skipping subtitle %r: language already saved', subtitle)
            continue

        # create subtitle path
        subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language)
        if directory is not None:
            subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])

        # save content as is or in the specified encoding
        logger.info('Saving %r to %r', subtitle, subtitle_path)
        if encoding is None:
            with io.open(subtitle_path, 'wb') as f:
                f.write(subtitle.content)
        else:
            with io.open(subtitle_path, 'w', encoding=encoding) as f:
                f.write(subtitle.text)
        saved_subtitles.append(subtitle)

        # check single
        if single:
            break

    return saved_subtitles
duramato/SickRage
lib/subliminal/api.py
Python
gpl-3.0
17,713
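A short usage sketch for the helpers above. It assumes the subliminal package re-exports these functions together with `scan_video` (as mainline subliminal does); the media path and score threshold are hypothetical.

# Sketch: typical end-to-end use of the API above.
from babelfish import Language
from subliminal import download_best_subtitles, save_subtitles, scan_video

video = scan_video('/data/movies/Man.of.Steel.2013.720p.mkv')
best = download_best_subtitles({video}, {Language('eng')}, min_score=100)
save_subtitles(video, best[video], single=True)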
"""Test fixtures for unit tests only""" from typing import Dict import pytest import responses @pytest.fixture(scope="module") def project_token() -> str: """Project API token""" return "0" * 32 @pytest.fixture(scope="module") def project_urls() -> Dict[str, str]: """Different urls for different mock projects""" return { "bad_url": "https://redcap.badproject.edu/api", "long_project": "https://redcap.longproject.edu/api/", "simple_project": "https://redcap.simpleproject.edu/api/", "survey_project": "https://redcap.surveyproject.edu/api/", } # See here for docs: https://github.com/getsentry/responses#responses-as-a-pytest-fixture @pytest.fixture(scope="module") def mocked_responses() -> responses.RequestsMock: """Base fixture for all mocked responses""" with responses.RequestsMock() as resps: yield resps
redcap-tools/PyCap
tests/unit/conftest.py
Python
mit
889
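A sketch of a unit test consuming these fixtures; only the fixture wiring is taken from the code above, while the endpoint behaviour and JSON payload are hypothetical.

# Sketch: a unit test built on the fixtures above.
import requests
import responses

def test_simple_project_roundtrip(project_urls, mocked_responses):
    url = project_urls["simple_project"]
    mocked_responses.add(responses.POST, url,
                         json=[{"record_id": "1"}], status=200)
    resp = requests.post(url, data={"content": "record"})
    assert resp.json() == [{"record_id": "1"}]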
import zmq


class Sender():
    def __init__(self):
        self.port = '5555'
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.PAIR)
        self.socket.connect("tcp://localhost:%s" % self.port)

    def send(self, command):
        try:
            self.socket.send(command, flags=zmq.NOBLOCK)
        except zmq.ZMQError:
            # The peer is not ready to receive: silently drop the command
            # rather than blocking the caller.
            pass
uberspaceguru/GiantTetris
python/tetris_web/app/models.py
Python
mit
381
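The Sender above is one half of a PAIR socket. A minimal sketch of the matching receive side follows; the blocking loop is an assumption about how the consumer processes commands.

# Sketch: the receive side that the Sender above connects to.
import zmq

context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind("tcp://*:5555")        # the Sender connects to localhost:5555

while True:
    command = socket.recv()        # blocks until the Sender emits a command
    print(command)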
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Cloudbase Solutions Srl
#
# Author: Alessandro Pilotti <apilotti@cloudbasesolutions.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Hyper-V inspector.
"""

import mock

from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector
from ceilometer.tests import base as test_base


class TestHyperVInspection(test_base.TestCase):

    def setUp(self):
        self._inspector = hyperv_inspector.HyperVInspector()
        self._inspector._utils = mock.MagicMock()
        super(TestHyperVInspection, self).setUp()

    def test_inspect_instances(self):
        fake_name = 'fake_name'
        fake_uuid = 'fake_uuid'
        fake_instances = [(fake_name, fake_uuid)]
        self._inspector._utils.get_all_vms.return_value = fake_instances

        inspected_instances = list(self._inspector.inspect_instances())
        self.assertEqual(1, len(inspected_instances))
        self.assertEqual(fake_name, inspected_instances[0].name)
        self.assertEqual(fake_uuid, inspected_instances[0].UUID)

    def test_inspect_cpus(self):
        fake_instance_name = 'fake_instance_name'
        fake_host_cpu_clock = 1000
        fake_host_cpu_count = 2
        fake_cpu_clock_used = 2000
        fake_cpu_count = 3000
        fake_uptime = 4000

        fake_cpu_percent_used = (fake_cpu_clock_used /
                                 float(fake_host_cpu_clock * fake_cpu_count))
        fake_cpu_time = (long(fake_uptime * fake_cpu_percent_used) * 1000)

        self._inspector._utils.get_host_cpu_info.return_value = (
            fake_host_cpu_clock, fake_host_cpu_count)
        self._inspector._utils.get_cpu_metrics.return_value = (
            fake_cpu_clock_used, fake_cpu_count, fake_uptime)

        cpu_stats = self._inspector.inspect_cpus(fake_instance_name)

        self.assertEqual(fake_cpu_count, cpu_stats.number)
        self.assertEqual(fake_cpu_time, cpu_stats.time)

    def test_inspect_vnics(self):
        fake_instance_name = 'fake_instance_name'
        fake_rx_bytes = 1000
        fake_tx_bytes = 2000
        fake_element_name = 'fake_element_name'
        fake_address = 'fake_address'

        self._inspector._utils.get_vnic_metrics.return_value = [{
            'rx_bytes': fake_rx_bytes,
            'tx_bytes': fake_tx_bytes,
            'element_name': fake_element_name,
            'address': fake_address}]

        inspected_vnics = list(self._inspector.inspect_vnics(
            fake_instance_name))

        self.assertEqual(1, len(inspected_vnics))
        self.assertEqual(2, len(inspected_vnics[0]))

        inspected_vnic, inspected_stats = inspected_vnics[0]

        self.assertEqual(fake_element_name, inspected_vnic.name)
        self.assertEqual(fake_address, inspected_vnic.mac)
        self.assertEqual(fake_rx_bytes, inspected_stats.rx_bytes)
        self.assertEqual(fake_tx_bytes, inspected_stats.tx_bytes)

    def test_inspect_disks(self):
        fake_instance_name = 'fake_instance_name'
        fake_read_mb = 1000
        fake_write_mb = 2000
        fake_instance_id = "fake_fake_instance_id"
        fake_host_resource = "fake_host_resource"
        fake_device = {"instance_id": fake_instance_id,
                       "host_resource": fake_host_resource}

        self._inspector._utils.get_disk_metrics.return_value = [{
            'read_mb': fake_read_mb,
            'write_mb': fake_write_mb,
            'instance_id': fake_instance_id,
            'host_resource': fake_host_resource}]

        inspected_disks = list(self._inspector.inspect_disks(
            fake_instance_name))

        self.assertEqual(1, len(inspected_disks))
        self.assertEqual(2, len(inspected_disks[0]))

        inspected_disk, inspected_stats = inspected_disks[0]

        self.assertEqual(fake_device, inspected_disk.device)
        self.assertEqual(fake_read_mb * 1024, inspected_stats.read_bytes)
        self.assertEqual(fake_write_mb * 1024, inspected_stats.write_bytes)
citrix-openstack-build/ceilometer
tests/compute/virt/hyperv/test_inspector.py
Python
apache-2.0
4,550
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.compute import base
from tempest import exceptions
from tempest import test


class AbsoluteLimitsNegativeTestJSON(base.BaseV2ComputeTest):
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(AbsoluteLimitsNegativeTestJSON, cls).setUpClass()
        cls.client = cls.limits_client
        cls.server_client = cls.servers_client

    @test.attr(type=['negative', 'gate'])
    def test_max_image_meta_exceed_limit(self):
        # We should not create a vm with image meta over the maxImageMeta
        # limit. Get the max limit value first.
        max_meta = self.client.get_specific_absolute_limit('maxImageMeta')

        # Create server should fail, since we are passing more metadata
        # entries than the limit allows.
        max_meta_data = int(max_meta) + 1

        meta_data = {}
        for xx in range(max_meta_data):
            meta_data[str(xx)] = str(xx)

        self.assertRaises(exceptions.OverLimit,
                          self.server_client.create_server,
                          name='test', meta=meta_data,
                          flavor_ref=self.flavor_ref,
                          image_ref=self.image_ref)


class AbsoluteLimitsNegativeTestXML(AbsoluteLimitsNegativeTestJSON):
    _interface = 'xml'
BeenzSyed/tempest
tempest/api/compute/limits/test_absolute_limits_negative.py
Python
apache-2.0
1,868
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# django-yaat documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 7 12:10:33 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
import shlex

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'django-yaat'
copyright = '2015, Slapec'
author = 'Slapec'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-yaatdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'django-yaat.tex', 'django-yaat Documentation',
     'Slapec', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'django-yaat', 'django-yaat Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'django-yaat', 'django-yaat Documentation',
     author, 'django-yaat', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
pombredanne/django-yaat
docs/source/conf.py
Python
mit
9,320
# Copyright 2018 Google LLC
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.

import latindance


class XConstruct(latindance.Latinlike):
    def __init__(self, delegate):
        self._delegate = delegate
        self._key = None

    def name(self):
        return "X" + self._delegate.name()

    def variant_name(self):
        return "X" + self._delegate.variant_name()

    def variants(self):
        d = self._delegate.copy()
        for v in d.variants():
            d.variant = v
            dhl = d.hash_lengths()
            dl = d.lengths()
            # "Extending the Salsa20 nonce" asserts that Salsa20 takes a
            # 256-bit key, and doesn't specify how a 128-bit key would be
            # handled, so we simply force the key size to be the same as the
            # hash output length here.
            if dhl["output"] == dl["key"]:
                yield {
                    "cipher": self.name(),
                    "rounds": v["rounds"],
                    "delgatevariant": v,
                    "lengths": {
                        "key": dhl["key"],
                        "nonce": dhl["nonceoffset"] + dl["nonce"]}}

    def _setup_variant(self):
        self._delegate.variant = self.variant["delgatevariant"]

    def gen_output(self, key, nonce, offset):
        ks = self._delegate.copy()
        nl = ks.hash_lengths()["nonceoffset"]
        subkey = ks.hash(key=key, nonceoffset=nonce[:nl])
        return self._delegate.gen_output(key=subkey,
                                         nonce=nonce[nl:],
                                         offset=offset)
google/adiantum
python/xconstruct.py
Python
mit
1,523
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import time
import json
import random
import urllib
import logging
import argparse

import coloredlogs
from pyquery import PyQuery

BASEDIR = os.path.dirname(os.path.abspath(__name__))
OUTPUTDIR = os.path.join(BASEDIR, 'data/output')

coloredlogs.install()


class Topic(object):
    """Topic class is used for representing Topic on Zhihu"""

    def __init__(self, name, id_):
        """Init topic object with name and id

        :name: name of topic
        :id_: id of topic
        """
        self._name = name
        self._id = id_

    def __unicode__(self):
        return '[topic: %s (%d)]' % (self.name, self.id)

    def __repr__(self):
        return unicode(self)

    @property
    def name(self):
        return self._name

    @property
    def id(self):
        return self._id

    @property
    def url(self):
        return 'http://www.zhihu.com/topic/%d/questions' % self._id

    @property
    def filepath(self):
        return os.path.join(OUTPUTDIR, '%d.json' % self.id)

    @property
    def finished(self):
        return os.path.exists(self.filepath)

    def url_for_page(self, page_number):
        if page_number <= 1:
            return self.url
        return self.url + '?' + urllib.urlencode({'page': page_number})

    def get_question(self, item):
        subtopicdom = item.children('.subtopic a')
        subtopic = subtopicdom.text().strip()
        subtopicid = int(subtopicdom.attr('href').split('/')[2]) if subtopicdom.attr('href') else self.id
        titledom = item.children('.question-item-title a')
        title = titledom.text().strip()
        questionid = int(titledom.attr('href').split('/')[2])
        logging.debug('question: %s(%d)' % (title, questionid))
        return {
            'id': questionid,
            'title': title,
            'subtopic': {
                'title': subtopic,
                'id': subtopicid,
            },
        }

    def get_questions(self, page):
        logging.info('processing: %s (page %d)' % (self, page))
        url = self.url_for_page(page)
        logging.debug('fetching: %s' % url)
        items = PyQuery(url)('.feed-item')
        return [self.get_question(PyQuery(item)) for item in items]

    def persist(self, count=400):
        if self.finished:
            logging.info("skipped %s" % self)
            return
        page = 1
        questions = []
        logging.info("start fetching %s" % self)
        while len(questions) < count and page < 100:
            try:
                questions.extend(self.get_questions(page))
            except Exception, e:
                logging.error("failed to fetch and parse %s(page %d)" % (self, page))
                logging.exception(e)
                logging.debug("skipped %s(page %d)" % (self, page))
            finally:
                page += 1
                wait = random.randint(5, 20)
                logging.debug('wait for %d seconds' % wait)
                time.sleep(wait)
        if len(questions) == 0:
            logging.error("failed to fetch or parse %s" % self)
            return
        obj = {
            'id': self.id,
            'name': self.name,
            'questions': questions,
        }
        logging.info('saving data for %s' % self)
        logging.debug('writing path: %s' % self.filepath)
        with open(self.filepath, 'w') as f:
            json.dump(obj, f)


def readtopics(path):
    topics = []
    with open(path) as f:
        for l in f.readlines():
            l = l.decode('utf8').strip()
            if not l:
                continue
            topicpair = l.split()
            topics.append((topicpair[0], int(topicpair[1])))
    return topics


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", help="The file which contains the topics to be processed")
    args = parser.parse_args()
    if args.filename.strip():
        if not os.path.isdir(OUTPUTDIR):
            logging.debug('making output directory: %s' % OUTPUTDIR)
            os.mkdir(OUTPUTDIR)
        topics = readtopics(args.filename.strip())
        logging.info('%d topics to process' % len(topics))
        for tname, tid in topics:
            topic = Topic(tname, tid)
            topic.persist()
shanzi/thesiscode
topiccrawler.py
Python
bsd-2-clause
4,289
# -*- coding: utf-8 -*-
"""The QCOW image file-like object."""

import pyqcow

from dfvfs import dependencies
from dfvfs.file_io import file_object_io
from dfvfs.lib import errors
from dfvfs.resolver import resolver


dependencies.CheckModuleVersion(u'pyqcow')


class QcowFile(file_object_io.FileObjectIO):
  """Class that implements a file-like object using pyqcow."""

  def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec: the path specification (instance of path.PathSpec).

    Returns:
      A file-like object.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    if not path_spec.HasParent():
      raise errors.PathSpecError(
          u'Unsupported path specification without parent.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)
    qcow_file = pyqcow.file()
    qcow_file.open_file_object(file_object)
    return qcow_file

  def get_size(self):
    """Returns the size of the file-like object.

    Raises:
      IOError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError(u'Not opened.')

    return self._file_object.get_media_size()
jorik041/dfvfs
dfvfs/file_io/qcow_file_io.py
Python
apache-2.0
1,285
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi

from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound

from dace.util import getSite
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView

from lac.content.processes.services_processes.behaviors import (
    SeeImportService)
from lac.content.service import ImportService
from lac.utilities.utils import (
    ObjectRemovedException, generate_navbars)


@view_config(
    name='',
    context=ImportService,
    renderer='pontus:templates/views_templates/grid.pt',
    )
class SeeImportServiceView(BasicView):
    title = ''
    name = 'seeimportservice'
    behaviors = [SeeImportService]
    template = 'lac:views/services_processes/import_service/templates/see_import_service.pt'
    viewid = 'seeimportservice'

    def update(self):
        self.execute(None)
        result = {}
        try:
            navbars = generate_navbars(self, self.context, self.request)
        except ObjectRemovedException:
            return HTTPFound(self.request.resource_url(getSite(), ''))

        values = {'object': self.context,
                  'navbar_body': navbars['navbar_body']}
        body = self.content(args=values, template=self.template)['body']
        item = self.adapt_item(body, self.viewid)
        item['messages'] = navbars['messages']
        item['isactive'] = navbars['isactive']
        result.update(navbars['resources'])
        result['coordinates'] = {self.coordinates: [item]}
        return result


DEFAULTMAPPING_ACTIONS_VIEWS.update({SeeImportService: SeeImportServiceView})
ecreall/lagendacommun
lac/views/services_processes/import_service/see_service.py
Python
agpl-3.0
1,732
#     Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Module like __future__ for things that are changed between Python2 and Python3.

These are here to provide compatible fall-backs. This is required to run the
same code easily with both CPython2 and CPython3. Sometimes, we do not care
about the actual types, or API, but would rather just check for something to
be a "in (str, unicode)" rather than making useless version checks.

"""

# pylint: disable=C0103,W0622

# Work around for CPython 3.x renaming "long" to "int".
try:
    long = long  # @ReservedAssignment
except NameError:
    long = int  # @ReservedAssignment

# Work around for CPython 3.x renaming "unicode" to "str".
try:
    unicode = unicode  # @ReservedAssignment
except NameError:
    unicode = str  # @ReservedAssignment


def iterItems(d):
    try:
        return d.iteritems()
    except AttributeError:
        return d.items()


if unicode is str:
    raw_input = input  # @ReservedAssignment
else:
    raw_input = raw_input  # @ReservedAssignment

# pylint: disable=E0611
try:
    from urllib.request import urlretrieve
except ImportError:
    from urllib import urlretrieve
# pylint: enable=E0611

# For PyLint to be happy.
assert long
assert unicode
assert urlretrieve
wfxiang08/Nuitka
nuitka/__past__.py
Python
apache-2.0
1,988
#!/usr/bin/python
# ex:set fileencoding=utf-8:

from __future__ import unicode_literals

from django.contrib.auth import logout
from django.contrib.auth import login
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView

from djangobmf.views.mixins import AjaxMixin
from djangobmf.views.mixins import NextMixin
from djangobmf.views.mixins import ViewMixin

from .forms import BMFAuthenticationForm
from .forms import BMFPasswordChangeForm


class LogoutModal(AjaxMixin, TemplateView):
    template_name = 'djangobmf/account/modal_logout.html'


class LogoutView(TemplateView):
    template_name = "djangobmf/account/logout.html"

    def get(self, *args, **kwargs):
        logout(self.request)
        return super(LogoutView, self).get(*args, **kwargs)


class LoginView(FormView, NextMixin):
    form_class = BMFAuthenticationForm
    redirect_field_name = REDIRECT_FIELD_NAME
    template_name = 'djangobmf/account/login.html'

    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(LoginView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        if self.request.session.test_cookie_worked():
            self.request.session.delete_test_cookie()
            login(self.request, form.get_user())
            return super(LoginView, self).form_valid(form)
        else:  # pragma: no cover
            self.request.session.set_test_cookie()
            return super(LoginView, self).form_invalid(form)

    def get_success_url(self):
        return self.redirect_next('djangobmf:dashboard')

    def get(self, request, *args, **kwargs):
        self.request.session.set_test_cookie()
        return super(LoginView, self).get(request, *args, **kwargs)


class PasswordChange(ViewMixin, FormView):
    template_name = 'djangobmf/account/change_password.html'

    @method_decorator(never_cache)
    def dispatch(self, *args, **kwargs):
        return super(PasswordChange, self).dispatch(*args, **kwargs)

    def get_form(self, *args, **kwargs):
        return BMFPasswordChangeForm(user=self.request.user, **self.get_form_kwargs())

    def form_valid(self, form):
        # TODO add message
        return super(PasswordChange, self).form_valid(form)

    def get_success_url(self):
        return reverse('djangobmf:dashboard')
django-bmf/django-bmf
djangobmf/account/views.py
Python
bsd-3-clause
2,544
#!/usr/bin/env python

'''
@file ion/services/sa/instrument/test/test_int_data_acquisition_management_service.py
@author Maurice Manning
@test ion.services.sa.acquisition.DataAcquisitionManagementService integration test
'''

#from pyon.ion.endpoint import ProcessRPCClient
from pyon.public import log, IonObject, PRED, RT, OT
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.context import LocalContextMixin
from pyon.agent.agent import ResourceAgentClient, ResourceAgentEvent
from pyon.event.event import EventSubscriber

from coverage_model.parameter import ParameterContext
from coverage_model.parameter_types import QuantityType, ArrayType

from interface.services.icontainer_agent import ContainerAgentClient
from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
from interface.services.dm.idata_retriever_service import DataRetrieverServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.objects import ExternalDataProvider, ExternalDatasetModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution
from interface.objects import AgentCommand, ProcessDefinition, DataProduct

from ion.agents.instrument.instrument_agent import InstrumentAgentState
from ion.core.includes.mi import DriverEvent
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from ion.services.dm.utility.granule_utils import time_series_domain
from ion.services.dm.utility.granule_utils import RecordDictionaryTool

from gevent.event import AsyncResult, Event
from nose.plugins.attrib import attr
import unittest
import numpy as np
import os


class FakeProcess(LocalContextMixin):
    """
    A fake process used because the test case is not an ion process.
    """
    name = ''
    id = ''
    process_type = ''


@attr('INT', group='sa')
#@unittest.skip('Not done yet.')
class TestBulkIngest(IonIntegrationTestCase):

    EDA_MOD = 'ion.agents.data.external_dataset_agent'
    EDA_CLS = 'ExternalDatasetAgent'

    def setUp(self):
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        # Now create client to DataAcquisitionManagementService
        self.client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
        self.dataproductclient = DataProductManagementServiceClient(node=self.container.node)
        self.dams_client = DataAcquisitionManagementServiceClient(node=self.container.node)
        self.pubsub_client = PubsubManagementServiceClient(node=self.container.node)
        self.processdispatchclient = ProcessDispatcherServiceClient(node=self.container.node)
        self.data_retriever = DataRetrieverServiceClient(node=self.container.node)

        self._container_client = ContainerAgentClient(node=self.container.node, name=self.container.name)

        # Data async and subscription TODO: Replace with new subscriber
        self._finished_count = None
        #TODO: Switch to gevent.queue.Queue
        self._async_finished_result = AsyncResult()
        self._finished_events_received = []
        self._finished_event_subscriber = None
        self._start_finished_event_subscriber()
        self.addCleanup(self._stop_finished_event_subscriber)

        self.DVR_CONFIG = {}
        self.DVR_CONFIG = {
            'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
            'dvr_cls': 'SlocumDataHandler',
        }

        self._setup_resources()

        self.agent_config = {
            'driver_config': self.DVR_CONFIG,
            'stream_config': {},
            'agent': {'resource_id': self.EDA_RESOURCE_ID},
            'test_mode': True
        }

        datasetagent_instance_obj = IonObject(RT.ExternalDatasetAgentInstance,
                                              name='ExternalDatasetAgentInstance1',
                                              description='external data agent instance',
                                              handler_module=self.EDA_MOD,
                                              handler_class=self.EDA_CLS,
                                              dataset_driver_config=self.DVR_CONFIG,
                                              dataset_agent_config=self.agent_config)
        self.dataset_agent_instance_id = self.dams_client.create_external_dataset_agent_instance(
            external_dataset_agent_instance=datasetagent_instance_obj,
            external_dataset_agent_id=self.datasetagent_id,
            external_dataset_id=self.EDA_RESOURCE_ID)

        #TG: Setup/configure the granule logger to log granules as they're published
        pid = self.dams_client.start_external_dataset_agent_instance(self.dataset_agent_instance_id)

        dataset_agent_instance_obj = self.dams_client.read_external_dataset_agent_instance(self.dataset_agent_instance_id)
        print 'TestBulkIngest: Dataset agent instance obj: = ', dataset_agent_instance_obj

        # Start a resource agent client to talk with the instrument agent.
        self._ia_client = ResourceAgentClient('datasetagentclient', name=pid, process=FakeProcess())
        log.debug(" test_createTransformsThenActivateInstrument:: got ia client %s", str(self._ia_client))

    def create_logger(self, name, stream_id=''):
        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.processdispatchclient.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.processdispatchclient.schedule_process(process_definition_id=logger_procdef_id, configuration=configuration)

        return pid

    def _start_finished_event_subscriber(self):

        def consume_event(*args, **kwargs):
            log.debug('EventSubscriber event received: %s', str(args[0]))
            if args[0].description == 'TestingFinished':
                log.debug('TestingFinished event received')
                self._finished_events_received.append(args[0])
                if self._finished_count and self._finished_count == len(self._finished_events_received):
                    log.debug('Finishing test...')
                    self._async_finished_result.set(len(self._finished_events_received))
                    log.debug('Called self._async_finished_result.set({0})'.format(len(self._finished_events_received)))

        self._finished_event_subscriber = EventSubscriber(event_type='DeviceEvent', callback=consume_event)
        self._finished_event_subscriber.start()

    def _stop_finished_event_subscriber(self):
        if self._finished_event_subscriber:
            self._finished_event_subscriber.stop()
            self._finished_event_subscriber = None

    def tearDown(self):
        pass

    @unittest.skip('Update to agent refactor.')
    def test_slocum_data_ingest(self):

        HIST_CONSTRAINTS_1 = {}
        # Test instrument driver execute interface to start and stop streaming mode.
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)

        cmd = AgentCommand(command='initialize')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.INACTIVE)

        cmd = AgentCommand(command='go_active')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.IDLE)

        cmd = AgentCommand(command='run')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.OBSERVATORY)

        # Make sure the polling interval is appropriate for a test
        params = {
            'POLLING_INTERVAL': 3
        }
        self._ia_client.set_param(params)

        self._finished_count = 1

        cmd = AgentCommand(command='acquire_data')
        self._ia_client.execute(cmd)

        # Assert that data was received
        self._async_finished_result.get(timeout=15)
        self.assertTrue(len(self._finished_events_received) >= 1)

        cmd = AgentCommand(command='reset')
        retval = self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command='get_current_state')
        retval = self._ia_client.execute_agent(cmd)
        state = retval.result
        self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)

        #todo enable after Luke's mor to retrieve, right now must have the Time axis called 'time'
        #        replay_granule = self.data_retriever.retrieve_last_data_points(self.dataset_id, 10)
        #        rdt = RecordDictionaryTool.load_from_granule(replay_granule)
        #
        #        comp = rdt['date_pattern'] == numpy.arange(10) + 10
        #
        #        log.debug("TestBulkIngest: comp: %s", comp)
        #
        #        self.assertTrue(comp.all())

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)

    def _setup_resources(self):
        self.loggerpids = []

        # Create DataProvider
        dprov = ExternalDataProvider(institution=Institution(), contact=ContactInformation())
        dprov.contact.name = 'Christopher Mueller'
        dprov.contact.email = 'cmueller@asascience.com'

        # Create DataSetModel
        dataset_model = ExternalDatasetModel(name='slocum_model')
        dataset_model.datset_type = 'SLOCUM'
        dataset_model_id = self.dams_client.create_external_dataset_model(dataset_model)

        # Create ExternalDataset
        ds_name = 'slocum_test_dataset'
        dset = ExternalDataset(name=ds_name,
                               dataset_description=DatasetDescription(),
                               update_description=UpdateDescription(),
                               contact=ContactInformation())

        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        dset.dataset_description.parameters['temporal_dimension'] = None
        dset.dataset_description.parameters['zonal_dimension'] = None
        dset.dataset_description.parameters['meridional_dimension'] = None
        dset.dataset_description.parameters['vertical_dimension'] = None
        dset.dataset_description.parameters['variables'] = [
            'c_wpt_y_lmc', 'sci_water_cond', 'm_y_lmc',
            'u_hd_fin_ap_inflection_holdoff', 'sci_m_present_time',
            'm_leakdetect_voltage_forward', 'sci_bb3slo_b660_scaled',
            'c_science_send_all', 'm_gps_status', 'm_water_vx', 'm_water_vy',
            'c_heading', 'sci_fl3slo_chlor_units', 'u_hd_fin_ap_gain',
            'm_vacuum', 'u_min_water_depth', 'm_gps_lat', 'm_veh_temp',
            'f_fin_offset', 'u_hd_fin_ap_hardover_holdoff', 'c_alt_time',
            'm_present_time', 'm_heading', 'sci_bb3slo_b532_scaled',
            'sci_fl3slo_cdom_units', 'm_fin', 'x_cycle_overrun_in_ms',
            'sci_water_pressure', 'u_hd_fin_ap_igain',
            'sci_fl3slo_phyco_units', 'm_battpos', 'sci_bb3slo_b470_scaled',
            'm_lat', 'm_gps_lon', 'sci_ctd41cp_timestamp', 'm_pressure',
            'c_wpt_x_lmc', 'c_ballast_pumped', 'x_lmc_xy_source', 'm_lon',
            'm_avg_speed', 'sci_water_temp', 'u_pitch_ap_gain', 'm_roll',
            'm_tot_num_inflections', 'm_x_lmc', 'u_pitch_ap_deadband',
            'm_final_water_vy', 'm_final_water_vx', 'm_water_depth',
            'm_leakdetect_voltage', 'u_pitch_max_delta_battpos',
            'm_coulomb_amphr', 'm_pitch',
        ]

        ## Create the external dataset
        ds_id = self.dams_client.create_external_dataset(external_dataset=dset, external_dataset_model_id=dataset_model_id)
        ext_dprov_id = self.dams_client.create_external_data_provider(external_data_provider=dprov)

        # Register the ExternalDataset
        dproducer_id = self.dams_client.register_external_data_set(external_dataset_id=ds_id)

        ## Create the dataset agent
        datasetagent_obj = IonObject(RT.ExternalDatasetAgent,
                                     name='ExternalDatasetAgent1',
                                     description='external data agent',
                                     handler_module=self.EDA_MOD,
                                     handler_class=self.EDA_CLS)
        self.datasetagent_id = self.dams_client.create_external_dataset_agent(external_dataset_agent=datasetagent_obj, external_dataset_model_id=dataset_model_id)

        # Generate the data product and associate it to the ExternalDataset
        pdict = DatasetManagementService.get_parameter_dictionary_by_name('ctd_parsed_param_dict')
        streamdef_id = self.pubsub_client.create_stream_definition(name="temp", parameter_dictionary_id=pdict.identifier)

        dprod = IonObject(RT.DataProduct,
                          name='slocum_parsed_product',
                          description='parsed slocum product')

        self.dproduct_id = self.dataproductclient.create_data_product(data_product=dprod,
                                                                      stream_definition_id=streamdef_id)

        self.dams_client.assign_data_product(input_resource_id=ds_id, data_product_id=self.dproduct_id)

        # save the incoming slocum data
        self.dataproductclient.activate_data_product_persistence(self.dproduct_id)
        self.addCleanup(self.dataproductclient.suspend_data_product_persistence, self.dproduct_id)

        stream_ids, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True)
        stream_id = stream_ids[0]

        dataset_id, assn = self.rrclient.find_objects(subject=self.dproduct_id, predicate=PRED.hasDataset, object_type=RT.Dataset, id_only=True)
        self.dataset_id = dataset_id[0]

        pid = self.create_logger('slocum_parsed_product', stream_id)
        self.loggerpids.append(pid)

        self.DVR_CONFIG['dh_cfg'] = {
            'TESTING': True,
            'stream_id': stream_id,
            'param_dictionary': pdict.dump(),
            'data_producer_id': dproducer_id,  # CBM: Should this be put in the main body of the config - with mod & cls?
            'max_records': 20,
        }

        # Create the logger for receiving publications
        #self.create_stream_and_logger(name='slocum', stream_id=stream_id)

        # Create agent config.
        self.EDA_RESOURCE_ID = ds_id
        self.EDA_NAME = ds_name


class BulkIngestBase(object):
    """
    awkward, non-obvious test class! subclasses will implement data-specific
    methods and this test class will parse sample file and assert data was read.

    test_data_ingest: create resources and call...
        start_agent: starts agent and then call...
            start_listener: starts listeners for data, including one that when
                            granule is received calls...
                get_retrieve_client: asserts that callback had some data

    See replacement TestPreloadThenLoadDataset. A little more declarative and
    straight-forward, but much slower (requires preload).
    """
    def setUp(self):
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.pubsub_management = PubsubManagementServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.data_product_management = DataProductManagementServiceClient()
        self.data_acquisition_management = DataAcquisitionManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.process_dispatch_client = ProcessDispatcherServiceClient(node=self.container.node)
        self.resource_registry = self.container.resource_registry
        self.context_ids = self.build_param_contexts()
        self.setup_resources()

    def build_param_contexts(self):
        raise NotImplementedError('build_param_contexts must be implemented in child classes')

    def create_external_dataset(self):
        raise NotImplementedError('create_external_dataset must be implemented in child classes')

    def get_dvr_config(self):
        raise NotImplementedError('get_dvr_config must be implemented in child classes')

    def get_retrieve_client(self, dataset_id=''):
        raise NotImplementedError('get_retrieve_client must be implemented in child classes')

    def test_data_ingest(self):
        self.pdict_id = self.create_parameter_dict(self.name)
        self.stream_def_id = self.create_stream_def(self.name, self.pdict_id)
        self.data_product_id = self.create_data_product(self.name, self.description, self.stream_def_id)
        self.dataset_id = self.get_dataset_id(self.data_product_id)
        self.stream_id, self.route = self.get_stream_id_and_route(self.data_product_id)
        self.external_dataset_id = self.create_external_dataset()
        self.data_producer_id = self.register_external_dataset(self.external_dataset_id)
        self.start_agent()

    def create_parameter_dict(self, name=''):
        return self.dataset_management.create_parameter_dictionary(name=name, parameter_context_ids=self.context_ids, temporal_context='time')

    def create_stream_def(self, name='', pdict_id=''):
        return self.pubsub_management.create_stream_definition(name=name, parameter_dictionary_id=pdict_id)

    def create_data_product(self, name='', description='', stream_def_id=''):
        dp_obj = DataProduct(
            name=name,
            description=description,
            processing_level_code='Parsed_Canonical')

        data_product_id = self.data_product_management.create_data_product(data_product=dp_obj, stream_definition_id=stream_def_id)
        self.data_product_management.activate_data_product_persistence(data_product_id)
        self.addCleanup(self.data_product_management.suspend_data_product_persistence, data_product_id)
        return data_product_id

    def register_external_dataset(self, external_dataset_id=''):
        return self.data_acquisition_management.register_external_data_set(external_dataset_id=external_dataset_id)

    def get_dataset_id(self, data_product_id=''):
        dataset_ids, assocs = self.resource_registry.find_objects(subject=data_product_id, predicate='hasDataset', id_only=True)
        return dataset_ids[0]

    def get_stream_id_and_route(self, data_product_id):
        stream_ids, _ = self.resource_registry.find_objects(data_product_id, PRED.hasStream, RT.Stream, id_only=True)
        stream_id = stream_ids[0]
        route = self.pubsub_management.read_stream_route(stream_id)
        #self.create_logger(self.name, stream_id)
        return stream_id, route

    def start_agent(self):
        agent_config = {
            'driver_config': self.get_dvr_config(),
            'stream_config': {},
            'agent': {'resource_id': self.external_dataset_id},
            'test_mode': True
        }

        self._ia_pid = self.container.spawn_process(
            name=self.EDA_NAME,
            module=self.EDA_MOD,
            cls=self.EDA_CLS,
            config=agent_config)

        self._ia_client = ResourceAgentClient(self.external_dataset_id, process=FakeProcess())

        cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.RUN)
        self._ia_client.execute_agent(cmd)
        cmd = AgentCommand(command=DriverEvent.START_AUTOSAMPLE)
        self._ia_client.execute_resource(command=cmd)

        self.start_listener(self.dataset_id)

    def stop_agent(self):
        cmd = AgentCommand(command=DriverEvent.STOP_AUTOSAMPLE)
        self._ia_client.execute_resource(cmd)
        cmd = AgentCommand(command=ResourceAgentEvent.RESET)
        self._ia_client.execute_agent(cmd)
        self.container.terminate_process(self._ia_pid)

    def start_listener(self, dataset_id=''):
        dataset_modified = Event()

        # callback to use retrieve to get data from the coverage
        def cb(*args, **kwargs):
            self.get_retrieve_client(dataset_id=dataset_id)

        # callback to keep execution going once dataset has been fully ingested
        def cb2(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified, callback=cb, origin=dataset_id)
        es.start()

        es2 = EventSubscriber(event_type=OT.DeviceCommonLifecycleEvent, callback=cb2, origin='BaseDataHandler._acquire_sample')
        es2.start()

        self.addCleanup(es.stop)
        self.addCleanup(es2.stop)

        # let it go for up to 120 seconds, then stop the agent and reset it
        dataset_modified.wait(120)
        self.stop_agent()

    def create_logger(self, name, stream_id=''):
        # logger process
        producer_definition = ProcessDefinition(name=name+'_logger')
        producer_definition.executable = {
            'module': 'ion.processes.data.stream_granule_logger',
            'class': 'StreamGranuleLogger'
        }

        logger_procdef_id = self.process_dispatch_client.create_process_definition(process_definition=producer_definition)
        configuration = {
            'process': {
                'stream_id': stream_id,
            }
        }
        pid = self.process_dispatch_client.schedule_process(process_definition_id=logger_procdef_id, configuration=configuration)

        return pid


@attr('INT', group='sa')
class TestBulkIngest_Hypm_WPF_CTD(BulkIngestBase, IonIntegrationTestCase):

    def setup_resources(self):
        self.name = 'Bulk Data Ingest HYPM WPF CTD'
        self.description = 'Bulk Data Ingest HYPM WPF CTD Test'
        self.EDA_NAME = 'ExampleEDA'
        self.EDA_MOD = 'ion.agents.data.external_dataset_agent'
        self.EDA_CLS = 'ExternalDatasetAgent'

    def build_param_contexts(self):
        context_ids = []
        t_ctxt = ParameterContext('time', param_type=QuantityType(value_encoding=np.dtype('int64')))
        t_ctxt.uom = 'seconds since 01-01-1970'
        context_ids.append(self.dataset_management.create_parameter_context(name='time', parameter_context=t_ctxt.dump()))

        cnd_ctxt = ParameterContext('conductivity', param_type=ArrayType())
        cnd_ctxt.uom = 'mmho/cm'
        context_ids.append(self.dataset_management.create_parameter_context(name='conductivity', parameter_context=cnd_ctxt.dump()))

        temp_ctxt = ParameterContext('temperature', param_type=ArrayType())
        temp_ctxt.uom = 'degC'
        context_ids.append(self.dataset_management.create_parameter_context(name='temperature', parameter_context=temp_ctxt.dump()))

        press_ctxt = ParameterContext('pressure', param_type=ArrayType())
        press_ctxt.uom = 'decibars'
        context_ids.append(self.dataset_management.create_parameter_context(name='pressure', parameter_context=press_ctxt.dump()))

        oxy_ctxt = ParameterContext('oxygen', param_type=ArrayType())
        oxy_ctxt.uom = 'Hz'
        context_ids.append(self.dataset_management.create_parameter_context(name='oxygen', parameter_context=oxy_ctxt.dump()))

        return context_ids

    def create_external_dataset(self):
        ds_name = 'hypm_01_wfp_ctd_dataset'
        dset = ExternalDataset(name=ds_name,
                               dataset_description=DatasetDescription(),
                               update_description=UpdateDescription(),
                               contact=ContactInformation())

        dset.dataset_description.parameters['base_url'] = 'test_data'
        dset.dataset_description.parameters['list_pattern'] = 'C*.HEX'
        return self.data_acquisition_management.create_external_dataset(external_dataset=dset)

    def get_dvr_config(self):
        DVR_CONFIG = {
            'dvr_mod': 'ion.agents.data.handlers.hypm_data_handler',
            'dvr_cls': 'HYPMDataHandler',
            'dh_cfg': {
                'parser_mod': 'ion.agents.data.handlers.hypm_data_handler',
                'parser_cls': 'HYPM_01_WFP_CTDParser',
                'stream_id': self.stream_id,
                'stream_route': self.route,
                'stream_def': self.stream_def_id,
                'data_producer_id': self.data_producer_id,
                'max_records': 4,
                'TESTING': True,
            }
        }
        return DVR_CONFIG

    def get_retrieve_client(self, dataset_id=''):
        replay_data = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertIsNotNone(rdt['temperature'])
        #need to compare rdt from retrieve with the one from ingest somehow


@attr('INT', group='sa')
class TestBulkIngest_Slocum(BulkIngestBase, IonIntegrationTestCase):

    def setup_resources(self):
        self.name = 'Bulk Data Ingest Slocum'
        self.description = 'Bulk Data Ingest Slocum Test'
        self.EDA_NAME = 'ExampleEDA'
        self.EDA_MOD = 'ion.agents.data.external_dataset_agent'
        self.EDA_CLS = 'ExternalDatasetAgent'

    def build_param_contexts(self):
        # Every Slocum engineering parameter below uses the same float32
        # encoding and 'unknown' unit, so the contexts are built in one loop.
        param_names = [
            'c_wpt_y_lmc', 'sci_water_cond', 'm_y_lmc',
            'u_hd_fin_ap_inflection_holdoff', 'sci_m_present_time',
            'm_leakdetect_voltage_forward', 'sci_bb3slo_b660_scaled',
            'c_science_send_all', 'm_gps_status', 'm_water_vx', 'm_water_vy',
            'c_heading', 'sci_fl3slo_chlor_units', 'u_hd_fin_ap_gain',
            'm_vacuum', 'u_min_water_depth', 'm_gps_lat', 'm_veh_temp',
            'f_fin_offset', 'u_hd_fin_ap_hardover_holdoff', 'c_alt_time',
            'm_present_time', 'm_heading', 'sci_bb3slo_b532_scaled',
            'sci_fl3slo_cdom_units', 'm_fin', 'x_cycle_overrun_in_ms',
            'sci_water_pressure', 'u_hd_fin_ap_igain',
            'sci_fl3slo_phyco_units', 'm_battpos', 'sci_bb3slo_b470_scaled',
            'm_lat', 'm_gps_lon', 'sci_ctd41cp_timestamp', 'm_pressure',
            'c_wpt_x_lmc', 'c_ballast_pumped', 'x_lmc_xy_source', 'm_lon',
            'm_avg_speed', 'sci_water_temp', 'u_pitch_ap_gain', 'm_roll',
            'm_tot_num_inflections', 'm_x_lmc', 'u_pitch_ap_deadband',
            'm_final_water_vy', 'm_final_water_vx', 'm_water_depth',
            'm_leakdetect_voltage', 'u_pitch_max_delta_battpos',
            'm_coulomb_amphr', 'm_pitch',
        ]

        context_ids = []
        for name in param_names:
            t_ctxt = ParameterContext(name, param_type=QuantityType(value_encoding=np.dtype('float32')))
            t_ctxt.uom = 'unknown'
            context_ids.append(self.dataset_management.create_parameter_context(name=name, parameter_context=t_ctxt.dump()))
        return context_ids

    def create_external_dataset(self):
        ds_name = 'slocum_dataset'
        dset = ExternalDataset(name=ds_name,
                               dataset_description=DatasetDescription(),
                               update_description=UpdateDescription(),
                               contact=ContactInformation())

        dset.dataset_description.parameters['base_url'] = 'test_data/slocum'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-*-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = 'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        return self.data_acquisition_management.create_external_dataset(external_dataset=dset)

    def get_dvr_config(self):
        DVR_CONFIG = {
            'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
            'dvr_cls': 'SlocumDataHandler',
            'dh_cfg': {
                'stream_id': self.stream_id,
                'stream_route': self.route,
                'stream_def': self.stream_def_id,
                'data_producer_id': self.data_producer_id,
                'max_records': 4,
                'TESTING': True,
            }
        }
        return DVR_CONFIG

    def get_retrieve_client(self, dataset_id=''):
        replay_data = self.data_retriever.retrieve(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertIsNotNone(rdt['c_wpt_y_lmc'])
ooici/coi-services
ion/services/sa/acquisition/test/test_bulk_data_ingestion.py
Python
bsd-2-clause
43,234
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RLimsolve(RPackage):
    """Solving Linear Inverse Models.

    Functions that (1) find the minimum/maximum of a linear or quadratic
    function: min or max (f(x)), where f(x) = ||Ax-b||^2 or f(x) = sum(a_i*x_i)
    subject to equality constraints Ex=f and/or inequality constraints Gx>=h,
    (2) sample an underdetermined or overdetermined system Ex=f subject to
    Gx>=h, and if applicable Ax~=b, (3) solve a linear system Ax=B for the
    unknown x. It includes banded and tridiagonal linear systems."""

    homepage = "https://cloud.r-project.org/package=limSolve"
    url      = "https://cloud.r-project.org/src/contrib/limSolve_1.5.6.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/limSolve"

    version('1.5.6', sha256='b97ea9930383634c8112cdbc42f71c4e93fe0e7bfaa8f401921835cb44cb49a0')

    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-quadprog', type=('build', 'run'))
    depends_on('r-lpsolve', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
LLNL/spack
var/spack/repos/builtin/packages/r-limsolve/package.py
Python
lgpl-2.1
1,250
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import sys

import prettytable
import six


def _print(pt, order):
    if sys.version_info >= (3, 0):
        print(pt.get_string(sortby=order))
    else:
        print(str(pt.get_string(sortby=order)))


def print_dict(d, property="Property"):
    pt = prettytable.PrettyTable([property, 'Value'], caching=False)
    pt.align = 'l'
    for r in six.iteritems(d):
        pt.add_row(list(r))
    _print(pt, property)


def print_list(objs, fields, formatters=None, order_by=None, obj_is_dict=False,
               labels=None):
    if not labels:
        labels = {}
    for field in fields:
        if field not in labels:
            # No underscores (use spaces instead) and uppercase any ID's
            label = field.replace("_", " ").replace("id", "ID")
            # Uppercase anything else that's less than 3 chars
            if len(label) < 3:
                label = label.upper()
            # Capitalize each word otherwise
            else:
                label = ' '.join(word[0].upper() + word[1:]
                                 for word in label.split())
            labels[field] = label

    pt = prettytable.PrettyTable([labels[field] for field in fields],
                                 caching=False)
    # set the default alignment to left-aligned
    align = dict((labels[field], 'l') for field in fields)
    set_align = True
    for obj in objs:
        row = []
        for field in fields:
            if formatters and field in formatters:
                row.append(formatters[field](obj))
            else:
                # append the plain value exactly once per field (the original
                # flattened listing appended a stale `data` after the
                # formatter branch as well, which double-filled the row)
                if obj_is_dict:
                    data = obj.get(field, '')
                else:
                    data = getattr(obj, field, '')
                row.append(data)
                # set the alignment to right-aligned if it's a numeric
                if set_align and hasattr(data, '__int__'):
                    align[labels[field]] = 'r'
                    set_align = False
        pt.add_row(row)
    pt._align = align

    if not order_by:
        order_by = fields[0]
    order_by = labels[order_by]
    _print(pt, order_by)
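
# Editor's addition: a minimal, self-contained usage sketch. The Task tuple
# and its rows are made up for illustration; only the helpers above and the
# standard library are assumed.
if __name__ == '__main__':
    from collections import namedtuple

    Task = namedtuple('Task', ['task_id', 'status', 'retries'])
    tasks = [Task('a1', 'started', 2), Task('b2', 'pending', 0)]

    # Columns come out as "Task ID", "Status", "Retries"; the numeric column
    # is right-aligned and rows sort by the first field.
    print_list(tasks, ['task_id', 'status', 'retries'])
    print_dict({'name': 'demo', 'version': '0.1'})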
aria-tosca/aria-cli
aria_cli/print_utils.py
Python
apache-2.0
2,652
""" Copyright (c) 2013, SMART Technologies ULC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Copyright holder (SMART Technologies ULC) nor the names of its contributors (Joshua Henn) may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from region.transform import Transform, RegionBelow, ActionClick transforms = { Transform.CONTEXT_PREVIOUS: [ ActionClick(0,0) # Highlight the window ], \ Transform.CONTEXT_CURRENT: [], \ Transform.CONTEXT_NEXT: [ \ RegionBelow() # The next search region should be blow the current match ], \ Transform.CONTEXT_MATCH: [], \ Transform.CONTEXT_FINAL: [], \ Transform.CONTEXT_ENTITY: [] }
smysnk/sikuli-framework
examples/textedit/baseline/os/mac/TextEdit/TextEdit-1.py
Python
bsd-3-clause
2,077
# Configuration settings for electoral address GUI

from PyQt4.QtCore import *

import ElectoralAddress.Database as Database

organisationName = 'Land Information New Zealand'
applicationName = 'Electoral Address Loader'

_settings = None


def settings():
    global _settings
    if not _settings:
        _settings = QSettings(organisationName, applicationName)
    return _settings


def set(item, value):
    settings().setValue(item, value)


def get(item, default=''):
    value = str(settings().value(item, default))
    return value


def configureDatabase(dbmodule=Database):
    dbmodule.setHost(str(get('Database/host', dbmodule.host())))
    dbmodule.setPort(str(get('Database/port', dbmodule.port())))
    dbmodule.setDatabase(str(get('Database/database', dbmodule.database())))
    dbmodule.setUser(str(get('Database/user', dbmodule.user())))
    dbmodule.setPassword(str(get('Database/password', dbmodule.password())))
    dbmodule.setBdeSchema(str(get('Database/bdeSchema', dbmodule.bdeSchema())))
    dbmodule.setAddressSchema(str(get('Database/addressSchema', dbmodule.addressSchema())))


def setDatabaseConfiguration(host=None, port=None, database=None, user=None,
                             password=None, bdeSchema=None, addressSchema=None,
                             dbmodule=Database):
    if not host:
        host = dbmodule.host()
    if not port:
        # fixed: was "host = dbmodule.port()", which clobbered the host
        # and left port unset
        port = dbmodule.port()
    if not database:
        database = dbmodule.database()
    if not user:
        user = dbmodule.user()
    if not password:
        password = dbmodule.password()
    if not addressSchema:
        addressSchema = dbmodule.addressSchema()
    if not bdeSchema:
        bdeSchema = dbmodule.bdeSchema()
    set('Database/host', host)
    set('Database/port', port)
    set('Database/database', database)
    set('Database/user', user)
    set('Database/password', password)
    set('Database/addressSchema', addressSchema)
    set('Database/bdeSchema', bdeSchema)
    configureDatabase(dbmodule)
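
# Editor's addition: a minimal usage sketch (commented out so importing this
# module keeps its behavior). Typical start-up flow -- push any stored
# overrides into the Database module, then read one value back:
#
#   import ElectoralAddress.Gui.Config as Config
#   Config.configureDatabase()                    # apply saved settings
#   host = Config.get('Database/host', 'localhost')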
SPlanzer/AIMS
ElectoralAddress/Gui/Config.py
Python
bsd-3-clause
1,936
# Calculating user's salary according to the number of days given by user and
# capturing the output in a file

# Pseudocode
# Step 1  : User input from stdin, enter number of days
# Step 2  : Take the days as argument to the condition
# Step 3  : Check the condition that the number of days is valid, i.e. 1 to 366 inclusive
# Step 4  : If the condition is true calculate the result using 2^(n-1)
# Step 5  : Convert cents to dollars by dividing the result from step 4 by 100
# Step 6  : If the condition is false display a message saying you have entered invalid days
# Step 7  : Ask the user to re-enter a valid number of days
# Step 8  : Check the condition; if it is true produce the result as in step 4
# Step 9  : If it is false, ask the user to re-enter a valid number of days again and continue
# Step 10 : Once the condition is true break the loop and display the result
# Step 11 : Stop

# Programming
import math  # math calculation
import time


def main():
    filename = time.strftime('%A-%Y%m%d-%H-%M-%S') + '.txt'  # filename with system date-time format
    f = open(filename, 'w')  # opening the file with write 'w' permission
    f.write('+++++++++++++++++++++++++++++++++\n')  # decoration with +++
    print('+++++++++++++++++++++++++++++++++')  # decorating the console output
    days = int(input("Please enter number of days: "))  # asking for user input
    f.write('days: %s' % days)  # writing the user input 'days' to the text file
    if days > 0 and days <= 366:  # condition to check valid days in a year
        n = math.pow(2, days - 1)  # power function ... 2^(n-1) cents
        m = n / 100  # converting cents to dollars
        f.write('\n+++++++++++++++++++++++++++++++++\n')
        print('+++++++++++++++++++++++++++++++++')
        f.write(' Day    Today\'s Salary \n------ -----------------\n %s     $%s' % (days, m))  # %s-substitution to display output
        print(' Day    Today\'s Salary \n------ -----------------\n %s     $%s' % (days, m))
    else:
        while True:  # loop until the user enters valid data
            f.write("\nInvalid number of days\n")  # writing invalid number of days to the text file
            print("Invalid number of days\n")
            f.write('+++++++++++++++++++++++++++++++++\n')  # decoration with +++ in the text file
            print('+++++++++++++++++++++++++++++++++\n')
            days = int(input("Please enter number of days: "))  # asking for user input again
            f.write('days: %s' % days)
            if days > 0 and days <= 366:
                n = int(math.pow(2, days - 1))
                m = n / 100
                print('+++++++++++++++++++++++++++++++++\n')
                f.write('\n+++++++++++++++++++++++++++++++++\n')
                f.write(' Day    Today\'s Salary\n------ -------------------\n %s     $%s' % (days, m))
                print(' Day    Today\'s Salary\n------ -------------------\n %s     $%s' % (days, m))
                break  # breaking the loop
    f.close()  # close the file so the report is flushed to disk


main()  # calling main function
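
# Editor's addition (sketch): a quick sanity check of the 2^(n-1)-cents
# formula used above -- day 1 pays $0.01 and day 20 pays 2^19 cents = $5242.88.
def _salary_for_day(day):
    assert 1 <= day <= 366
    return math.pow(2, day - 1) / 100


assert _salary_for_day(1) == 0.01
assert _salary_for_day(20) == 5242.88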
stirumer/Python
Python #3/Salary_psuedo_program.py
Python
mit
3,401
#!/usr/bin/env python

# Copyright 2014 Roland Knall <rknall [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
This is a generic example, which produces pcap packages every n seconds, and
is configurable via extcap options.

@note
{
To use this script on Windows, please generate an extcap_example.bat inside
the extcap folder, with the following content:

-------
@echo off
<Path to python interpreter> <Path to script file> $*
-------

Windows is not able to execute Python scripts directly, which also goes for
all other script-based formats besides VBScript
}
"""

import os
import sys
import signal
import re
import argparse
import time
import struct
import binascii
from threading import Thread

ERROR_USAGE = 0
ERROR_ARG = 1
ERROR_INTERFACE = 2
ERROR_FIFO = 3

doExit = False
globalinterface = 0


def signalHandler(signal, frame):
    global doExit
    doExit = True


#### EXTCAP FUNCTIONALITY

"""@brief Extcap configuration
This method prints the extcap configuration, which will be picked up by the
interface in Wireshark to present an interface-specific configuration for
this extcap plugin
"""
def extcap_config(interface):
    args = []
    values = []

    args.append((0, '--delay', 'Time delay', 'Time delay between packages', 'integer', '{range=1,15}'))
    args.append((1, '--message', 'Message', 'Package message content', 'string', ''))
    args.append((2, '--verify', 'Verify', 'Verify package content', 'boolflag', ''))
    args.append((3, '--remote', 'Remote Channel', 'Remote Channel Selector', 'selector', ''))

    values.append((3, "if1", "Remote1", "true"))
    values.append((3, "if2", "Remote2", "false"))

    for arg in args:
        print ("arg {number=%d}{call=%s}{display=%s}{tooltip=%s}{type=%s}%s" % arg)

    for value in values:
        print ("value {arg=%d}{value=%s}{display=%s}{default=%s}" % value)


def extcap_interfaces():
    print ("interface {value=example1}{display=Example interface usage for extcap}")


def extcap_dlts(interface):
    if interface == 'example1':
        print ("dlt {number=147}{name=USER0}{display=Demo Implementation for Extcap}")


"""
### FAKE DATA GENERATOR

Extcap capture routine
This routine simulates a capture by any kind of user defined device. The
parameters are user specified and must be handled by the extcap.

The data captured inside this routine is fake, so change this routine to
present your own input data, or call your own capture program via Popen for
example. See for more details.
"""
def unsigned(n):
    return int(n) & 0xFFFFFFFF


def append_bytes(ba, blist):
    for c in range(0, len(blist)):
        ba.append(blist[c])
    return ba


def pcap_fake_header():
    header = bytearray()
    header = append_bytes(header, struct.pack('<L', int('a1b2c3d4', 16)))  # magic number
    header = append_bytes(header, struct.pack('<H', unsigned(2)))  # Pcap Major Version
    header = append_bytes(header, struct.pack('<H', unsigned(4)))  # Pcap Minor Version
    header = append_bytes(header, struct.pack('<I', int(0)))  # Timezone
    header = append_bytes(header, struct.pack('<I', int(0)))  # Accuracy of timestamps
    header = append_bytes(header, struct.pack('<L', int('0000ffff', 16)))  # Max Length of capture frame
    header = append_bytes(header, struct.pack('<L', unsigned(1)))  # Ethernet
    return header


# NOTE (editor): splitN was used but never defined in the original listing; a
# minimal helper is supplied here so ip_checksum() is runnable. It splits a
# hex string into n-character chunks.
def splitN(strN, n):
    return [strN[i:i + n] for i in range(0, len(strN), n)]


# Calculates and returns the IP checksum based on the given IP Header
def ip_checksum(iph):
    # split into 16-bit words (4 hex digits each)
    words = splitN(''.join(iph.split()), 4)
    csum = 0
    for word in words:
        csum += int(word, base=16)
    csum += (csum >> 16)
    csum = csum & 0xFFFF ^ 0xFFFF
    return csum


def pcap_fake_package(message):
    pcap = bytearray()
    # length = 14 bytes [ eth ] + 20 bytes [ ip ] + messagelength
    caplength = len(message) + 14 + 20
    timestamp = int(time.time())

    pcap = append_bytes(pcap, struct.pack('<L', unsigned(timestamp)))  # timestamp seconds
    pcap = append_bytes(pcap, struct.pack('<L', 0x00))  # timestamp nanoseconds
    pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength)))  # length captured
    pcap = append_bytes(pcap, struct.pack('<L', unsigned(caplength)))  # length in frame

    # ETH
    pcap = append_bytes(pcap, struct.pack('h', 0))  # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0))  # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0))  # source mac
    pcap = append_bytes(pcap, struct.pack('h', 0))  # dest mac
    pcap = append_bytes(pcap, struct.pack('h', 0))  # dest mac
    pcap = append_bytes(pcap, struct.pack('h', 0))  # dest mac
    pcap = append_bytes(pcap, struct.pack('<h', unsigned(8)))  # protocol (ip)

    # IP
    pcap = append_bytes(pcap, struct.pack('b', int('45', 16)))  # IP version
    pcap = append_bytes(pcap, struct.pack('b', int('0', 16)))  #
    pcap = append_bytes(pcap, struct.pack('>H', unsigned(len(message) + 20)))  # length of data + payload
    pcap = append_bytes(pcap, struct.pack('<H', int('0', 16)))  # Identification
    pcap = append_bytes(pcap, struct.pack('b', int('40', 16)))  # Don't fragment
    pcap = append_bytes(pcap, struct.pack('b', int('0', 16)))  # Fragment Offset
    pcap = append_bytes(pcap, struct.pack('b', int('40', 16)))  # TTL
    pcap = append_bytes(pcap, struct.pack('B', 0xFE))  # Protocol (0xFE: unspecified/experimental)
    pcap = append_bytes(pcap, struct.pack('<H', int('0000', 16)))  # Checksum
    pcap = append_bytes(pcap, struct.pack('>L', int('7F000001', 16)))  # Source IP
    pcap = append_bytes(pcap, struct.pack('>L', int('7F000001', 16)))  # Dest IP

    pcap = append_bytes(pcap, message)
    return pcap


def extcap_capture(interface, fifo, delay, verify, message, remote):
    global doExit

    signal.signal(signal.SIGINT, signalHandler)
    signal.signal(signal.SIGTERM, signalHandler)

    tdelay = delay if delay != 0 else 5

    try:
        os.stat(fifo)
    except OSError:
        doExit = True
        print ("Fifo does not exist, exiting!")

    fh = open(fifo, 'w+b', 0)
    fh.write(pcap_fake_header())

    while doExit == False:
        out = str("%s|%04X%s|%s" % (remote.strip(), len(message), message, verify))
        try:
            fh.write(pcap_fake_package(out))
            time.sleep(tdelay)
        except IOError:
            doExit = True

    fh.close()


####

def usage():
    print ("Usage: %s <--extcap-interfaces | --extcap-dlts | --extcap-interface | --extcap-config | --capture | --fifo>" % sys.argv[0])


if __name__ == '__main__':
    interface = ""

    # Capture options
    delay = 0
    message = ""

    parser = argparse.ArgumentParser(
        prog="Extcap Example",
        description="Extcap example program for python"
    )

    # Extcap Arguments
    parser.add_argument("--capture", help="Start the capture routine", action="store_true")
    parser.add_argument("--extcap-interfaces", help="Provide a list of interfaces to capture from", action="store_true")
    parser.add_argument("--extcap-interface", help="Provide the interface to capture from")
    parser.add_argument("--extcap-dlts", help="Provide a list of dlts for the given interface", action="store_true")
    parser.add_argument("--extcap-config", help="Provide a list of configurations for the given interface", action="store_true")
    parser.add_argument("--fifo", help="Use together with capture to provide the fifo to dump data to")

    # Interface Arguments
    parser.add_argument("--verify", help="Demonstrates a verification bool flag", action="store_true")
    parser.add_argument("--delay", help="Demonstrates an integer variable", type=int, default=0, choices=[0, 1, 2, 3, 4, 5])
    parser.add_argument("--remote", help="Demonstrates a selector choice", default="if1", choices=["if1", "if2"])
    parser.add_argument("--message", help="Demonstrates string variable", nargs='?', default="")

    args = parser.parse_args()

    if len(sys.argv) <= 1:
        parser.exit("No arguments given!")

    if args.extcap_interfaces == False and args.extcap_interface == None:
        parser.exit("An interface must be provided or the selection must be displayed")

    if args.extcap_interfaces == True or args.extcap_interface == None:
        extcap_interfaces()
        sys.exit(0)

    m = re.match('example(\d+)', args.extcap_interface)
    if not m:
        sys.exit(ERROR_INTERFACE)
    interface = m.group(1)

    message = args.message
    if args.message == None or len(args.message) == 0:
        message = "Extcap Test"

    if args.extcap_config:
        extcap_config(interface)
    elif args.extcap_dlts:
        extcap_dlts(interface)
    elif args.capture:
        if args.fifo is None:
            sys.exit(ERROR_FIFO)
        extcap_capture(interface, args.fifo, args.delay, args.verify, message, args.remote)
    else:
        usage()
        sys.exit(ERROR_USAGE)
sodzawic/tk
doc/extcap_example.py
Python
gpl-2.0
9,150
import unittest
import os

from auctionTheory.src import StoreTickData
from auctionTheory.tests import HTMLTestRunner


class RunTests(object):

    def executeTests(self):
        # get the directory path to output report file
        static_folder_root = os.path.dirname(os.path.dirname(__file__))

        # load all tests from the StoreTickData module
        search_tests = unittest.TestLoader().loadTestsFromModule(StoreTickData)

        # create a test suite from the loaded tests
        smoke_tests = unittest.TestSuite([search_tests])

        # open the report file
        outfile = open("Test.html", "w")

        # configure HTMLTestRunner options
        runner = HTMLTestRunner.HTMLTestRunner(
            stream=outfile,
            title='Test Report',
            description='Smoke Tests'
        )

        # run the suite using HTMLTestRunner
        runner.run(smoke_tests)
        outfile.close()  # make sure the report is flushed to disk


if __name__ == "__main__":
    obj = RunTests()
    obj.executeTests()
gullyy/auctionTheory
auctionTheory/tests/RunTests.py
Python
mit
1,058
import os.path

from wptserve.utils import isomorphic_decode


def main(request, response):
    header = [(b'Content-Type', b'text/html')]
    if b'test' in request.GET:
        with open(os.path.join(os.path.dirname(isomorphic_decode(__file__)),
                               u'blank.html'), u'r') as f:
            body = f.read()
        return (header, body)

    if b'sandbox' in request.GET:
        header.append((b'Content-Security-Policy',
                       b'sandbox %s' % request.GET[b'sandbox']))
    with open(os.path.join(os.path.dirname(isomorphic_decode(__file__)),
                           u'sandboxed-iframe-fetch-event-iframe.html'), u'r') as f:
        body = f.read()
    return (header, body)
scheib/chromium
third_party/blink/web_tests/external/wpt/service-workers/service-worker/resources/sandboxed-iframe-fetch-event-iframe.py
Python
bsd-3-clause
653
#! /usr/bin/env python
#
# ProjectEuler.net - problem #003
#
# Summary: The prime factors of 13195 are 5, 7, 13 and 29.
#          What is the largest prime factor of the number 600851475143 ?

import sys
from sets import Set
import math

n = 600851475143
queue = [n]
factors = Set([])


def debug(message):
    if debug_enabled:
        print message


# Fermat's factorization method.
# see http://de.wikipedia.org/wiki/Faktorisierungsmethode_von_Fermat
def factorize(input):
    x = math.sqrt(input)
    x = math.ceil(x)
    r = x*x - input
    debug("checking if %d is square" % r)
    while r not in squares:
        r = r + 2*x + 1
        x = x + 1
    y = math.sqrt(r)
    a = x + y
    b = x - y
    return a, b


def primesFound(x, y):
    factors.add(x)
    factors.add(y)
    debug("found that both %d and %d are primes" % (x, y))


def output(factors):
    print "The prime factors of %d are:" % n,
    for x in sorted(factors):
        print int(x),


def main():
    while len(queue) > 0:
        candidate = queue.pop()
        debug("checking %d" % candidate)
        (a, b) = factorize(candidate)
        if a == 1:
            primesFound(candidate, b)
        elif b == 1:
            primesFound(candidate, a)
        else:
            queue.append(a)
            queue.append(b)
            debug("%d can be factored into %d and %d" % (candidate, a, b))
    output(factors)


# generates all square numbers that are smaller than the number we're trying
# to find prime factors for. While this is kinda brute-force-ish it runs
# reasonably fast (< 0.5s on this machine.)
def genSquares():
    squares = Set([])
    i = 2
    result = 0
    while result < n:
        result = i * i
        i = i + 1
        squares.add(result)
    return squares


if __name__ == "__main__":
    if len(sys.argv) == 2 and sys.argv[1] == "-v":
        debug_enabled = True
    else:
        debug_enabled = False
    squares = genSquares()
    main()
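
# Editor's addition: a worked trace of the Fermat step on the summary's own
# example, 13195 (nothing beyond the code above is assumed):
#   ceil(sqrt(13195)) = 115; 115^2 - 13195 = 30  (not a square)
#   116^2 - 13195 = 261 and 117^2 - 13195 = 494  (not squares)
#   118^2 - 13195 = 729 = 27^2, so x = 118, y = 27
#   giving factors x + y = 145 and x - y = 91;
#   recursing on those yields 5 * 29 and 7 * 13 -- the primes 5, 7, 13, 29.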
olivereggert/euler
euler003.py
Python
mit
1,976
# -*- coding: utf-8 -*-
#
# PyService documentation build configuration file, created by
# sphinx-quickstart on Tue Nov  8 11:31:42 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyService'
copyright = u'2016, Photonios'
author = u'Photonios'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'PyService v0.1.0'

# A shorter title for the navigation bar.  Default is the same as html_title.
#
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}

# If false, no module index is generated.
#
# html_domain_indices = True

# If false, no index is generated.
#
# html_use_index = True

# If true, the index is split into individual pages for each letter.
#
# html_split_index = False

# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'PyServicedoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PyService.tex', u'PyService Documentation',
     u'Photonios', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False

# If true, show page references after internal links.
#
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
#
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
#
# latex_appendices = []

# It false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True

# If false, no module index is generated.
#
# latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pyservice', u'PyService Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#
# man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'PyService', u'PyService Documentation',
     author, 'PyService', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []

# If false, no module index is generated.
#
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
iLoveTux/pyservice
docs/source/conf.py
Python
gpl-2.0
9,807
import bpy
import bmesh
from math import radians


def get_bmesh(ob):
    me = ob.data
    if ob.mode == 'OBJECT':
        bm = bmesh.new()
        bm.from_mesh(me)
        return bm
    bm = bmesh.from_edit_mesh(me)
    return bm


def finalize_bmesh(bm, ob):
    me = ob.data
    if ob.mode == 'OBJECT':
        bm.to_mesh(me)
        bm.free()
        del bm
    else:
        bmesh.update_edit_mesh(me)


def get_sharp_edges(ob, bm, angle, sharp):
    """
    Return the sharp edges according to the 'sharp' argument
    angle: angle in degrees
    sharp:
        - True  = get edges with an angle greater than or equal to angle
        - False = get edges with an angle less than angle
    """
    edges = set()
    for e in bm.edges:
        if e.is_manifold:  # to exclude non manifold edges
            face0, face1 = e.link_faces
            if sharp:
                if face0.normal.angle(face1.normal) >= radians(angle):
                    edges.add(e)
            else:
                if face0.normal.angle(face1.normal) < radians(angle):
                    edges.add(e)
    return edges


C = bpy.context
ob = C.object

if ob.mode == 'EDIT':
    bpy.ops.mesh.select_mode(type='EDGE')

bm = get_bmesh(ob)
edges = get_sharp_edges(ob, bm, 30, True)
for e in bm.edges:
    e.select = e in edges
bm.select_flush(False)
finalize_bmesh(bm, ob)
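
# Editor's addition: the same helpers can invert the test -- selecting the
# smooth edges instead -- by passing sharp=False (angle is in degrees).
# Commented out so the script's behavior is unchanged; assumes an active
# mesh object, as above.
#
#   bm = get_bmesh(ob)
#   smooth = get_sharp_edges(ob, bm, 30, False)  # edges with angle < 30 degrees
#   for e in bm.edges:
#       e.select = e in smooth
#   bm.select_flush(False)
#   finalize_bmesh(bm, ob)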
BlenderShare/templates
TemplatesFiles/blender-2.79/bmesh/select_sharp.py
Python
gpl-3.0
1,483
""" Author: Ioana Butoi Date: Apr 2005 Project: Two robots play soccer against each other Each of them controls half of the arena. Each half of the court is colored differenly. Robot: Aibo ERS-7 """ from pyrobot.brain.behaviors.fsm import * from time import sleep import random matchBall = 25 matchGoal = 40 class RobotVsRobotSoccer(FSMBrain): def setup(self): camera = self.robot.camera[0] # goal filter camera.addFilter("match", 64, 104, 153, 30, 2)#blue camera.addFilter("blobify",2,255,255,0,1,1,1,)#blue # ball filter #camera.addFilter("matchRange",178,19,41,250,101,214,0)#pink ball camera.addFilter("match", 193, 27, 31,70)#red ball camera.addFilter("blobify",0,255,255,0,1,1,1,) def destroy(self): self.camera[0].clearFilters() class approachBall(State): """ When a ball is in sight, get close to it """ def onActivate(self): self.speed = 0.05 self.turnSpeed = 0.1 self.headMaxTurn = 0.3 print "APPROACH" def step(self): results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] centerX, centerY = (x1 + x2)/2, (y1 + y2)/2 if area > matchBall: pose = self.robot.ptz[0].pose # p,t,z,r; acts as a pointer # 1. center camera on ball # ---------------X direction------------------ diff = (centerX - (self.robot.camera.width/2)) if abs(diff) < (.1 * self.robot.camera.width): pass elif diff < 0: # negative is right, positive is left self.robot.ptz[0].pan( pose[0] + self.speed) else: self.robot.ptz[0].pan( pose[0] - self.speed) # ---------------Y direction------------------ diff = (centerY - self.robot.camera.height/2) if abs(diff) < .1 * self.robot.camera.height: pass elif diff < 0: # down self.robot.ptz[0].tilt( pose[1] + self.speed) # positive is left else: self.robot.ptz[0].tilt( pose[1] - self.speed) # negative is right # 2. get closer to ball if abs(pose[0]) > self.headMaxTurn: # 2.1 rotate so ball is in front of you if pose[0] > 0: self.robot.move(0,self.turnSpeed) self.robot.ptz[0].pan(pose[0] - self.speed) else: self.robot.move(0,-self.turnSpeed) self.robot.ptz[0].pan(pose[0] + self.speed) elif area<300: # 2.2 get closer to ball self.robot.move(0.7,0) else: # 3. when close enough kick it self.goto("lookForGoal") return else: self.goto("lostBall", centerX, centerY) return else: self.goto("searchDown") return else: self.goto("searchDown") return class lostBall(State): """ Ball moved out of my sight. 
I think I know where it went """ def onActivate(self): self.robot.move(0,0) self.speed = 0.1 print "LOST" def onGoto(self, args): self.ballCenterX = args[0] self.ballCenterY = args[1] def step(self): results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area > matchBall: self.goto("approachBall") return pose = self.robot.ptz[0].pose# p,t,z,r; acts as a pointer diffX = abs(self.ballCenterX - self.robot.camera.width/2) diffY = abs(self.ballCenterY - self.robot.camera.height/2) if diffX > diffY: turnDirVer = 0 # need to search horizontally if (self.ballCenterX > self.robot.camera.width/2): # right turnDirHor = -1 else: # left turnDirHor = 1 else: turnDirHor = 0 if (self.ballCenterY > self.robot.camera.width/2): # down turnDirVer = -1 else: turnDirVer = 1 self.robot.ptz[0].pan(pose[0]+ turnDirHor*self.speed) self.robot.ptz[0].tilt(pose[1] + turnDirVer*self.speed) if (((pose[0] == 1.0 or pose[0] == -1.0) and turnDirVer == 0) or ((pose[1] == -1.0 or pose[1] == 0.0) and turnDirHor == 0)): self.goto("searchDown") class searchDown(State): """ Searches for the ball by turning the head down """ def onActivate(self): self.robot.ptz[0].center() self.speed = 0.1 print "DOWN" def step(self): pose = self.robot.ptz[0].pose results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area > matchBall: self.goto("approachBall") return self.robot.ptz[0].tilt(pose[1] - self.speed) if (pose[1] < -0.9): self.goto("searchLeftRight") class searchLeftRight(State): """ Searches for the ball by turning the head left-right """ def onActivate(self): self.robot.ptz[0].tilt(-0.1) sleep(1) self.robot.ptz[0].pan(1.0) self.speed = 0.05 print "L - R" def step(self): pose = self.robot.ptz[0].pose results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area > matchBall: self.goto("approachBall") return self.robot.ptz[0].pan(pose[0] - self.speed) if (pose[0] == -1.0): self.goto("searchRightLeft") class searchRightLeft(State): """ Searches for the ball by turning the head right-left """ def onActivate(self): self.robot.ptz[0].tilt(-0.6) sleep(0.5) self.robot.ptz[0].pan(-1.0) self.speed = 0.05 print "R - L" def step(self): pose = self.robot.ptz[0].pose results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area > matchBall: self.goto("approachBall") return self.robot.ptz[0].pan(pose[0] + self.speed) if (pose[0] == 1.0): self.goto("searchDynamic") class searchDynamic(State): """ Searches for the ball by spinning in place """ def onActivate(self): self.robot.ptz[0].center() self.robot.ptz[0].tilt(-0.1) self.counter = 0 if random.random() > 0.5: # randomize the turn direction self.turnSpeed = 0.2 else: self.turnSpeed = -0.2 def step(self): results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area > matchBall: 
self.goto("approachBall") return self.counter +=1 if self.counter == 50:# after 360 change the angle self.robot.ptz[0].tilt(-0.5) self.robot.move(0.0,self.turnSpeed) class prepareToKick(State): def onActivate(self): self.robot.move(0,0) self.robot.playSound("mew") self.speed = 0.05 self.turnSpeed= 0.1 self.turnHeadMin = 0.4 self.turnHeadMax = 0.5 print "PREPARE TO KICK" def step(self): results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a blob in sight x1, y1, x2, y2, area = results[-1][0] if area> 50: # 1.center the image centerX, centerY = (x1 + x2)/2, (y1 + y2)/2 pose = self.robot.ptz[0].pose # p,t,z,r # ---------------X direction------------------ diff = (centerX - (self.robot.camera.width/2)) if abs(diff) < (.1 * self.robot.camera.width): pass elif diff < 0: # negative is right, positive is left self.robot.ptz[0].pan( pose[0] + self.speed) else: self.robot.ptz[0].pan( pose[0] - self.speed) # ---------------Y direction------------------ diff = (centerY - self.robot.camera.height/2) if abs(diff) < .1 * self.robot.camera.height: pass elif diff < 0: # down self.robot.ptz[0].tilt( pose[1] + self.speed) # positive is left else: self.robot.ptz[0].tilt( pose[1] - self.speed) # negative is right # 2. put your foor next to the ball if abs(pose[0]) >= self.turnHeadMin and abs(pose[0]) <= self.turnHeadMax: # 3.move close enough if (x2-x1 + y2-y1 < 140): self.robot.move(0.1,0.0) else: self.goto("kick") return elif abs(pose[0]) < self.turnHeadMin: if pose[0] < 0: self.robot.move(0,self.turnSpeed) else: self.robot.move(0,-self.turnSpeed) elif abs(pose[0]) > self.turnHeadMax: if pose[0] < 0: self.robot.move(0,-self.turnSpeed) else: self.robot.move(0,self.turnSpeed) else: self.goto("approachBall") else: self.goto("searchDown") class kick(State): def onActivate(self): self.robot.move(0,0) self.pose = self.robot.ptz[0].pose # p,t,z,r if (self.pose[0] > 0): self.leg = "left" else: self.leg = "right" self.robot.ptz[0].center() print "KICK" def step(self): # 1. stand on 3 legs: # move the weight on the oposite side of kicking self.robot.setPose(self.leg+" back leg",-0.3,0.2,0.8) sleep(1) self.robot.setPose(self.leg+" back leg",-0.5,0.15,0.7) sleep(1) # 2. kick self.robot.setPose(self.leg+ " front leg",0.7, 0.1, 0.3) sleep(1.0) # 3. bring leg to initial position self.robot.setPose(self.leg+" front leg rotator",0.0) sleep(1.0) # 4. 
distribute weight on all 4 legs self.robot.setPose(self.leg+" back leg ", -0.3,0.1,0.8) self.robot.move(0.1,0) self.goto("didYouScore") class lookForGoal(State): def onActivate(self): self.robot.move(0,0) self.p,self.t, self.z,self.r = self.robot.ptz[0].pose # remember where you were looking self.robot.ptz[0].center() if random.random() > 0.5: self.maxTurn = -1.0 self.speed = 0.1 else: self.maxTurn = 1.0 self.speed = -0.1 self.robot.ptz[0].pan(self.maxTurn) sleep(1) print "LOOK FOR GOAL" def step(self): results = self.robot.camera[0].filterResults pose = self.robot.ptz[0].pose if len(results) > 1 and len(results[1]) > 0: # need a match, and blobify at least if len(results[1][0]) == 5: # have a goal blob in sight x1, y1, x2, y2, area = results[1][0] if area >35: sleep(1) if abs(pose[0]) > 0.1: if pose[0] > 0: dir = -1 # left else: dir = 1 else: self.goto("findGoal",[self.p,self.t,self.z,self.r],0) return self.goto("findGoal", [self.p,self.t,self.z,self.r], dir) return if (pose[0] == -self.maxTurn): if random.random()>0.5:# randomize direction dir = 1 else: dir = -1 self.goto("findGoal",[self.p,self.t,self.z,self.r],dir) return self.robot.ptz[0].pan(pose[0] + self.speed) class findGoal(State): def onActivate(self): self.speed = 0.05 self.robot.move(0,0) self.headMaxTurn = 0.2 self.strafeSpeed = 0.5 self.turnSpeed = 0.1 self.counter = 0 print "FIND GOAL" def onGoto(self, args): self.robot.ptz[0].setPose(args[0]) sleep(1) self.dir = args[1] if self.dir == 0: self.goto("prepareToKick") def step(self): self.robot.strafe(0) self.robot.move(0,0) areab = 0 results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a ball blob in sight x1b, y1b, x2b, y2b, areab = results[-1][0] centerXb, centerYb = (x1b + x2b)/2, (y1b + y2b)/2 if len(results) > 1 and len(results[1]) > 0: # need a match, and blobify at least if len(results[1][0]) == 5: # have a goal blob in sight x1g, y1g, x2g, y2g, areag = results[1][0] centerXg, centerYg = (x1g + x2g)/2, (y1g + y2g)/2 if areab > 10: #self.matchBall: # see the ball and search for the goal pose = self.robot.ptz[0].pose # p,t,z,r # 1. 
center camera on ball # ---------------X direction------------------ diff = (centerXb - (self.robot.camera.width/2)) if abs(diff) < (.1 * self.robot.camera.width): pass elif diff < 0: # negative is right, positive is left self.robot.ptz[0].pan( pose[0] + self.speed) else: self.robot.ptz[0].pan( pose[0] - self.speed) # ---------------Y direction------------------ # keep ball at the bottom of the image if y1b < (.5*self.robot.camera.width): self.robot.ptz[0].tilt( pose[1] + .5*self.speed) # up elif y1b > .8*self.robot.camera.width: # don't want to lose ball self.robot.ptz[0].tilt(pose[1] - .5*self.speed) # down # keep the ball centered if abs(pose[0]) > self.headMaxTurn: # 2.1 rotate so ball is in front of you if pose[0] > 0: self.robot.move(0,self.turnSpeed) self.robot.ptz[0].pan(pose[0] - self.speed) else: self.robot.move(0,-self.turnSpeed) self.robot.ptz[0].pan(pose[0] + self.speed) if areag> 10:#self.matchGoal: # if you see the goal diff = (centerXg - (self.robot.camera.width/2)) if abs(diff) < (.1 * self.robot.camera.width): # if centered on x direction self.robot.strafe(0) self.goto("prepareToKick") return # rotate around the ball self.counter+=1 print self.counter, self.robot.strafe(self.strafeSpeed*self.dir) else: self.robot.strafe(0) self.goto("searchDown") class didYouScore(State): def onActivate(self): self.robot.move(0,0) self.robot.ptz[0].pan(0) self.robot.ptz[0].tilt(-0.1) self.speed = 0.05 self.speedHor = 0.05 self.tiltSpeed = 0.3 print "DID YOU SCORE" def step(self): pose = self.robot.ptz[0].pose results = self.robot.camera[0].filterResults if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a ball blob in sight x1b, y1b, x2b, y2b, areab = results[-1][0] if len(results) > 1 and len(results[1]) > 0: # need a match, and blobify at least if len(results[1][0]) == 5: # have a goal blob in sight x1g, y1g, x2g, y2g, areag = results[1][0] if areab> matchBall: if areag> matchGoal: if (y2g >= y2b) and \ ((x1b > x1g and x1b < x2g) or (x2b < x2g and x2b> x1g)): self.goto("moveTail") return self.goto("waitForBall") return else: if (abs(pose[0]) > 0.6): self.speedHor = -self.speedHor if pose[1] <= -0.8: self.goto("waitForBall") return self.robot.ptz[0].tilt(pose[1] - self.tiltSpeed) self.robot.ptz[0].pan(pose[0] - self.speedHor) class moveTail(State): def onActivate(self): self.robot.playSound("3yips") self.side = 1.0 self.robot.setPose("tail",self.side, 0) self.counter = 1 def step(self): self.side = -self.side self.robot.setPose("tail pan", self.side) self.counter +=1 if self.counter > 25: self.goto("waitForBall") class waitForBall(State): """ The robot looks around for the ball and then waits until the ball is on its side of the court. 
""" def onActivate(self): self.speed = 0.05 self.speedHor = 0.05 self.tiltSpeed = 0.3 print "WAIT FOR BALL" def step(self): results = self.robot.camera[0].filterResults pose = self.robot.ptz[0].pose if len(results) > 1 and len(results[-1]) > 0: # need a match, and blobify at least if len(results[-1][0]) == 5: # have a ball blob in sight x1b, y1b, x2b, y2b, areab = results[-1][0] if len(results) > 1 and len(results[1]) > 0: # need a match, and blobify at least if len(results[1][0]) == 5: # have a goal blob in sight x1g, y1g, x2g, y2g, areag = results[1][0] if areab> matchBall: # move the head so you can see the floor next to the ball if y2b > (.5*self.robot.camera.width): self.robot.ptz[0].tilt( pose[1] - .5*self.speed) # down if pose[1] < -0.8: # cannot move head anymore self.goto("approachBall") return elif y2b < .2*self.robot.camera.width: # don't want to lose ball self.robot.ptz[0].tilt(pose[1] - .5*self.speed) # up else: if areag> matchGoal: if y2g < y2b or x2b < x1g or x1b > x2g: self.goto("approachBall") return else: self.goto("approachBall") return else: # search for the ball if (abs(pose[0]) == 1.0): self.speedHor = -self.speedHor if pose[1] >= 0 or pose[1] <= -0.8: self.tiltSpeed = -self.tiltSpeed self.robot.ptz[0].tilt(pose[1] - self.tiltSpeed) self.robot.ptz[0].pan(pose[0] - self.speedHor) def INIT(engine): brain = RobotVsRobotSoccer(engine) brain.add(searchDown()) brain.add(searchLeftRight()) brain.add(searchRightLeft()) brain.add(approachBall()) brain.add(searchDynamic()) brain.add(lostBall()) brain.add(prepareToKick()) brain.add(findGoal()) brain.add(lookForGoal()) brain.add(kick()) brain.add(didYouScore()) brain.add(moveTail()) brain.add(waitForBall(1)) return brain
emilydolson/forestcat
pyrobot/plugins/brains/AiboSoccer.py
Python
agpl-3.0
21,594
""" Models for User Information (students, staff, etc) Migration Notes If you make changes to this model, be sure to create an appropriate migration file and check it in at the same time as your model changes. To do that, 1. Go to the edx-platform dir 2. ./manage.py lms schemamigration student --auto description_of_your_change 3. Add the migration file created in edx-platform/common/djangoapps/student/migrations/ """ from collections import defaultdict, OrderedDict from datetime import datetime, timedelta from functools import total_ordering import hashlib from importlib import import_module import json import logging from pytz import UTC from urllib import urlencode import uuid import analytics from config_models.models import ConfigurationModel from django.utils.translation import ugettext_lazy as _ from django.conf import settings from django.utils import timezone from django.contrib.auth.models import User from django.contrib.auth.hashers import make_password from django.contrib.auth.signals import user_logged_in, user_logged_out from django.db import models, IntegrityError, transaction from django.db.models import Count from django.db.models.signals import pre_save, post_save from django.dispatch import receiver, Signal from django.core.exceptions import ObjectDoesNotExist from django.utils.translation import ugettext_noop from django.core.cache import cache from django_countries.fields import CountryField import dogstats_wrapper as dog_stats_api from eventtracking import tracker from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey from simple_history.models import HistoricalRecords from south.modelsinspector import add_introspection_rules from track import contexts from xmodule_django.models import CourseKeyField, NoneToEmptyManager from certificates.models import GeneratedCertificate from course_modes.models import CourseMode import lms.lib.comment_client as cc from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, ECOMMERCE_DATE_FORMAT from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from util.model_utils import emit_field_changed_events, get_changed_fields_dict from util.query import use_read_replica_if_available from util.milestones_helpers import is_entrance_exams_enabled UNENROLL_DONE = Signal(providing_args=["course_enrollment", "skip_refund"]) log = logging.getLogger(__name__) AUDIT_LOG = logging.getLogger("audit") SessionStore = import_module(settings.SESSION_ENGINE).SessionStore # pylint: disable=invalid-name UNENROLLED_TO_ALLOWEDTOENROLL = 'from unenrolled to allowed to enroll' ALLOWEDTOENROLL_TO_ENROLLED = 'from allowed to enroll to enrolled' ENROLLED_TO_ENROLLED = 'from enrolled to enrolled' ENROLLED_TO_UNENROLLED = 'from enrolled to unenrolled' UNENROLLED_TO_ENROLLED = 'from unenrolled to enrolled' ALLOWEDTOENROLL_TO_UNENROLLED = 'from allowed to enroll to enrolled' UNENROLLED_TO_UNENROLLED = 'from unenrolled to unenrolled' DEFAULT_TRANSITION_STATE = 'N/A' TRANSITION_STATES = ( (UNENROLLED_TO_ALLOWEDTOENROLL, UNENROLLED_TO_ALLOWEDTOENROLL), (ALLOWEDTOENROLL_TO_ENROLLED, ALLOWEDTOENROLL_TO_ENROLLED), (ENROLLED_TO_ENROLLED, ENROLLED_TO_ENROLLED), (ENROLLED_TO_UNENROLLED, ENROLLED_TO_UNENROLLED), (UNENROLLED_TO_ENROLLED, UNENROLLED_TO_ENROLLED), (ALLOWEDTOENROLL_TO_UNENROLLED, ALLOWEDTOENROLL_TO_UNENROLLED), (UNENROLLED_TO_UNENROLLED, UNENROLLED_TO_UNENROLLED), (DEFAULT_TRANSITION_STATE, DEFAULT_TRANSITION_STATE) ) class AnonymousUserId(models.Model): """ This table 
contains user, course_Id and anonymous_user_id Purpose of this table is to provide user by anonymous_user_id. We generate anonymous_user_id using md5 algorithm, and use result in hex form, so its length is equal to 32 bytes. """ objects = NoneToEmptyManager() user = models.ForeignKey(User, db_index=True) anonymous_user_id = models.CharField(unique=True, max_length=32) course_id = CourseKeyField(db_index=True, max_length=255, blank=True) unique_together = (user, course_id) def anonymous_id_for_user(user, course_id, save=True): """ Return a unique id for a (user, course) pair, suitable for inserting into e.g. personalized survey links. If user is an `AnonymousUser`, returns `None` Keyword arguments: save -- Whether the id should be saved in an AnonymousUserId object. """ # This part is for ability to get xblock instance in xblock_noauth handlers, where user is unauthenticated. if user.is_anonymous(): return None cached_id = getattr(user, '_anonymous_id', {}).get(course_id) if cached_id is not None: return cached_id # include the secret key as a salt, and to make the ids unique across different LMS installs. hasher = hashlib.md5() hasher.update(settings.SECRET_KEY) hasher.update(unicode(user.id)) if course_id: hasher.update(course_id.to_deprecated_string().encode('utf-8')) digest = hasher.hexdigest() if not hasattr(user, '_anonymous_id'): user._anonymous_id = {} # pylint: disable=protected-access user._anonymous_id[course_id] = digest # pylint: disable=protected-access if save is False: return digest try: anonymous_user_id, __ = AnonymousUserId.objects.get_or_create( defaults={'anonymous_user_id': digest}, user=user, course_id=course_id ) if anonymous_user_id.anonymous_user_id != digest: log.error( u"Stored anonymous user id %r for user %r " u"in course %r doesn't match computed id %r", user, course_id, anonymous_user_id.anonymous_user_id, digest ) except IntegrityError: # Another thread has already created this entry, so # continue pass return digest def user_by_anonymous_id(uid): """ Return user by anonymous_user_id using AnonymousUserId lookup table. Do not raise `django.ObjectDoesNotExist` exception, if there is no user for anonymous_student_id, because this function will be used inside xmodule w/o django access. """ if uid is None: return None try: return User.objects.get(anonymoususerid__anonymous_user_id=uid) except ObjectDoesNotExist: return None class UserStanding(models.Model): """ This table contains a student's account's status. Currently, we're only disabling accounts; in the future we can imagine taking away more specific privileges, like forums access, or adding more specific karma levels or probationary stages. """ ACCOUNT_DISABLED = "disabled" ACCOUNT_ENABLED = "enabled" USER_STANDING_CHOICES = ( (ACCOUNT_DISABLED, u"Account Disabled"), (ACCOUNT_ENABLED, u"Account Enabled"), ) user = models.ForeignKey(User, db_index=True, related_name='standing', unique=True) account_status = models.CharField( blank=True, max_length=31, choices=USER_STANDING_CHOICES ) changed_by = models.ForeignKey(User, blank=True) standing_last_changed_at = models.DateTimeField(auto_now=True) class UserProfile(models.Model): """This is where we store all the user demographic fields. We have a separate table for this rather than extending the built-in Django auth_user. Notes: * Some fields are legacy ones from the first run of 6.002, from which we imported many users. * Fields like name and address are intentionally open ended, to account for international variations. 


class UserStanding(models.Model):
    """
    This table contains a student account's status.
    Currently, we're only disabling accounts; in the future we can imagine
    taking away more specific privileges, like forums access, or adding
    more specific karma levels or probationary stages.
    """
    ACCOUNT_DISABLED = "disabled"
    ACCOUNT_ENABLED = "enabled"
    USER_STANDING_CHOICES = (
        (ACCOUNT_DISABLED, u"Account Disabled"),
        (ACCOUNT_ENABLED, u"Account Enabled"),
    )

    user = models.ForeignKey(User, db_index=True, related_name='standing', unique=True)
    account_status = models.CharField(
        blank=True, max_length=31, choices=USER_STANDING_CHOICES
    )
    changed_by = models.ForeignKey(User, blank=True)
    standing_last_changed_at = models.DateTimeField(auto_now=True)


class UserProfile(models.Model):
    """This is where we store all the user demographic fields. We have a
    separate table for this rather than extending the built-in Django auth_user.

    Notes:
        * Some fields are legacy ones from the first run of 6.002, from which
          we imported many users.
        * Fields like name and address are intentionally open ended, to account
          for international variations. An unfortunate side-effect is that we
          cannot efficiently sort on last names, for instance.

    Replication:
        * Only the Portal servers should ever modify this information.
        * All fields are replicated into relevant Course databases

    Some of the fields are legacy ones that were captured during the initial
    MITx fall prototype.
    """
    # cache key format e.g user.<user_id>.profile.country = 'SG'
    PROFILE_COUNTRY_CACHE_KEY = u"user.{user_id}.profile.country"

    class Meta(object):
        db_table = "auth_userprofile"

    # CRITICAL TODO/SECURITY
    # Sanitize all fields.
    # This is not visible to other users, but could introduce holes later
    user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
    name = models.CharField(blank=True, max_length=255, db_index=True)
    meta = models.TextField(blank=True)  # JSON dictionary for future expansion
    courseware = models.CharField(blank=True, max_length=255, default='course.xml')

    # Location is no longer used, but is held here for backwards compatibility
    # for users imported from our first class.
    language = models.CharField(blank=True, max_length=255, db_index=True)
    location = models.CharField(blank=True, max_length=255, db_index=True)

    # Optional demographic data we started capturing from Fall 2012
    this_year = datetime.now(UTC).year
    VALID_YEARS = range(this_year, this_year - 120, -1)
    year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
    GENDER_CHOICES = (
        ('m', ugettext_noop('Male')),
        ('f', ugettext_noop('Female')),
        # Translators: 'Other' refers to the student's gender
        ('o', ugettext_noop('Other/Prefer Not to Say'))
    )
    gender = models.CharField(
        blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
    )

    # [03/21/2013] removed these, but leaving comment since there'll still be
    # p_se and p_oth in the existing data in db.
    # ('p_se', 'Doctorate in science or engineering'),
    # ('p_oth', 'Doctorate in another field'),
    LEVEL_OF_EDUCATION_CHOICES = (
        ('p', ugettext_noop('Doctorate')),
        ('m', ugettext_noop("Master's or professional degree")),
        ('b', ugettext_noop("Bachelor's degree")),
        ('a', ugettext_noop("Associate degree")),
        ('hs', ugettext_noop("Secondary/high school")),
        ('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
        ('el', ugettext_noop("Elementary/primary school")),
        # Translators: 'None' refers to the student's level of education
        ('none', ugettext_noop("No Formal Education")),
        # Translators: 'Other' refers to the student's level of education
        ('other', ugettext_noop("Other Education"))
    )
    level_of_education = models.CharField(
        blank=True, null=True, max_length=6, db_index=True,
        choices=LEVEL_OF_EDUCATION_CHOICES
    )
    mailing_address = models.TextField(blank=True, null=True)
    city = models.TextField(blank=True, null=True)
    country = CountryField(blank=True, null=True)
    goals = models.TextField(blank=True, null=True)
    allow_certificate = models.BooleanField(default=True)
    bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
    profile_image_uploaded_at = models.DateTimeField(null=True)

    @property
    def has_profile_image(self):
        """
        Convenience method that returns a boolean indicating whether or not
        this user has uploaded a profile image.
        """
        return self.profile_image_uploaded_at is not None

    @property
    def age(self):
        """
        Convenience method that returns the age given a year_of_birth.
        """
        year_of_birth = self.year_of_birth
        year = datetime.now(UTC).year
        if year_of_birth is not None:
            return year - year_of_birth
""" year_of_birth = self.year_of_birth year = datetime.now(UTC).year if year_of_birth is not None: return year - year_of_birth @property def level_of_education_display(self): """ Convenience method that returns the human readable level of education. """ if self.level_of_education: return self.__enumerable_to_display(self.LEVEL_OF_EDUCATION_CHOICES, self.level_of_education) @property def gender_display(self): """ Convenience method that returns the human readable gender. """ if self.gender: return self.__enumerable_to_display(self.GENDER_CHOICES, self.gender) def get_meta(self): # pylint: disable=missing-docstring js_str = self.meta if not js_str: js_str = dict() else: js_str = json.loads(self.meta) return js_str def set_meta(self, meta_json): # pylint: disable=missing-docstring self.meta = json.dumps(meta_json) def set_login_session(self, session_id=None): """ Sets the current session id for the logged-in user. If session_id doesn't match the existing session, deletes the old session object. """ meta = self.get_meta() old_login = meta.get('session_id', None) if old_login: SessionStore(session_key=old_login).delete() meta['session_id'] = session_id self.set_meta(meta) self.save() def requires_parental_consent(self, date=None, age_limit=None, default_requires_consent=True): """Returns true if this user requires parental consent. Args: date (Date): The date for which consent needs to be tested (defaults to now). age_limit (int): The age limit at which parental consent is no longer required. This defaults to the value of the setting 'PARENTAL_CONTROL_AGE_LIMIT'. default_requires_consent (bool): True if users require parental consent if they have no specified year of birth (default is True). Returns: True if the user requires parental consent. """ if age_limit is None: age_limit = getattr(settings, 'PARENTAL_CONSENT_AGE_LIMIT', None) if age_limit is None: return False # Return True if either: # a) The user has a year of birth specified and that year is fewer years in the past than the limit. # b) The user has no year of birth specified and the default is to require consent. # # Note: we have to be conservative using the user's year of birth as their birth date could be # December 31st. This means that if the number of years since their birth year is exactly equal # to the age limit then we have to assume that they might still not be old enough. year_of_birth = self.year_of_birth if year_of_birth is None: return default_requires_consent if date is None: age = self.age else: age = date.year - year_of_birth return age <= age_limit def __enumerable_to_display(self, enumerables, enum_value): """ Get the human readable value from an enumerable list of key-value pairs. """ return dict(enumerables)[enum_value] @classmethod def country_cache_key_name(cls, user_id): """Return cache key name to be used to cache current country. Args: user_id(int): Id of user. Returns: Unicode cache key """ return cls.PROFILE_COUNTRY_CACHE_KEY.format(user_id=user_id) @receiver(models.signals.post_save, sender=UserProfile) def invalidate_user_profile_country_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of country in UserProfile model. 
""" changed_fields = getattr(instance, '_changed_fields', {}) if 'country' in changed_fields: cache_key = UserProfile.country_cache_key_name(instance.user_id) cache.delete(cache_key) log.info("Country changed in UserProfile for %s, cache deleted", instance.user_id) @receiver(pre_save, sender=UserProfile) def user_profile_pre_save_callback(sender, **kwargs): """ Ensure consistency of a user profile before saving it. """ user_profile = kwargs['instance'] # Remove profile images for users who require parental consent if user_profile.requires_parental_consent() and user_profile.has_profile_image: user_profile.profile_image_uploaded_at = None # Cache "old" field values on the model instance so that they can be # retrieved in the post_save callback when we emit an event with new and # old field values. user_profile._changed_fields = get_changed_fields_dict(user_profile, sender) @receiver(post_save, sender=UserProfile) def user_profile_post_save_callback(sender, **kwargs): """ Emit analytics events after saving the UserProfile. """ user_profile = kwargs['instance'] # pylint: disable=protected-access emit_field_changed_events( user_profile, user_profile.user, sender._meta.db_table, excluded_fields=['meta'] ) @receiver(pre_save, sender=User) def user_pre_save_callback(sender, **kwargs): """ Capture old fields on the user instance before save and cache them as a private field on the current model for use in the post_save callback. """ user = kwargs['instance'] user._changed_fields = get_changed_fields_dict(user, sender) @receiver(post_save, sender=User) def user_post_save_callback(sender, **kwargs): """ Emit analytics events after saving the User. """ user = kwargs['instance'] # pylint: disable=protected-access emit_field_changed_events( user, user, sender._meta.db_table, excluded_fields=['last_login', 'first_name', 'last_name'], hidden_fields=['password'] ) class UserSignupSource(models.Model): """ This table contains information about users registering via Micro-Sites """ user = models.ForeignKey(User, db_index=True) site = models.CharField(max_length=255, db_index=True) def unique_id_for_user(user, save=True): """ Return a unique id for a user, suitable for inserting into e.g. personalized survey links. Keyword arguments: save -- Whether the id should be saved in an AnonymousUserId object. """ # Setting course_id to '' makes it not affect the generated hash, # and thus produce the old per-student anonymous id return anonymous_id_for_user(user, None, save=save) # TODO: Should be renamed to generic UserGroup, and possibly # Given an optional field for type of group class UserTestGroup(models.Model): users = models.ManyToManyField(User, db_index=True) name = models.CharField(blank=False, max_length=32, db_index=True) description = models.TextField(blank=True) class Registration(models.Model): ''' Allows us to wait for e-mail before user is registered. A registration profile is created when the user creates an account, but that account is inactive. Once the user clicks on the activation key, it becomes active. 


class Registration(models.Model):
    ''' Allows us to wait for e-mail before user is registered. A
        registration profile is created when the user creates an
        account, but that account is inactive. Once the user clicks
        on the activation key, it becomes active. '''

    class Meta(object):
        db_table = "auth_registration"

    user = models.ForeignKey(User, unique=True)
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)

    def register(self, user):
        # MINOR TODO: Switch to crypto-secure key
        self.activation_key = uuid.uuid4().hex
        self.user = user
        self.save()

    def activate(self):
        self.user.is_active = True
        self.user.save()


class PendingNameChange(models.Model):
    user = models.OneToOneField(User, unique=True, db_index=True)
    new_name = models.CharField(blank=True, max_length=255)
    rationale = models.CharField(blank=True, max_length=1024)


class PendingEmailChange(models.Model):
    user = models.OneToOneField(User, unique=True, db_index=True)
    new_email = models.CharField(blank=True, max_length=255, db_index=True)
    activation_key = models.CharField(('activation key'), max_length=32, unique=True, db_index=True)

    def request_change(self, email):
        """Request a change to a user's email.

        Implicitly saves the pending email change record.

        Arguments:
            email (unicode): The proposed new email for the user.

        Returns:
            unicode: The activation code to confirm the change.
        """
        self.new_email = email
        self.activation_key = uuid.uuid4().hex
        self.save()
        return self.activation_key


EVENT_NAME_ENROLLMENT_ACTIVATED = 'edx.course.enrollment.activated'
EVENT_NAME_ENROLLMENT_DEACTIVATED = 'edx.course.enrollment.deactivated'
EVENT_NAME_ENROLLMENT_MODE_CHANGED = 'edx.course.enrollment.mode_changed'
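
# --- Editor's illustrative sketch (not part of the original module) ---
# The intended activation flow for the two models above, assuming a freshly
# created, inactive `user`:
#
#     reg = Registration()
#     reg.register(user)          # stores a fresh 32-char uuid4 hex activation key
#     # ... user clicks the emailed link carrying reg.activation_key ...
#     reg.activate()              # flips user.is_active to True and saves
#
#     change = PendingEmailChange(user=user)
#     key = change.request_change('new@example.com')   # saved; `key` goes in the confirmation email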


class PasswordHistory(models.Model):
    """
    This model will keep track of past passwords that a user has used
    as well as providing constraints (e.g. can't reuse passwords)
    """
    user = models.ForeignKey(User)
    password = models.CharField(max_length=128)
    time_set = models.DateTimeField(default=timezone.now)

    def create(self, user):
        """
        This will copy over the current password, if any of the configuration has been turned on
        """

        if not (PasswordHistory.is_student_password_reuse_restricted() or
                PasswordHistory.is_staff_password_reuse_restricted() or
                PasswordHistory.is_password_reset_frequency_restricted() or
                PasswordHistory.is_staff_forced_password_reset_enabled() or
                PasswordHistory.is_student_forced_password_reset_enabled()):

            return

        self.user = user
        self.password = user.password
        self.save()

    @classmethod
    def is_student_password_reuse_restricted(cls):
        """
        Returns whether the configuration which limits password reuse has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE', 0
        )
        return min_diff_pw > 0

    @classmethod
    def is_staff_password_reuse_restricted(cls):
        """
        Returns whether the configuration which limits password reuse has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_diff_pw = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE', 0
        )
        return min_diff_pw > 0

    @classmethod
    def is_password_reset_frequency_restricted(cls):
        """
        Returns whether the configuration which limits the password reset frequency has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS'
        )
        return min_days_between_reset

    @classmethod
    def is_staff_forced_password_reset_enabled(cls):
        """
        Returns whether the configuration which forces password resets to occur has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_between_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_between_reset

    @classmethod
    def is_student_forced_password_reset_enabled(cls):
        """
        Returns whether the configuration which forces password resets to occur has been turned on
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False
        min_days_pw_reset = settings.ADVANCED_SECURITY_CONFIG.get(
            'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS'
        )
        return min_days_pw_reset

    @classmethod
    def should_user_reset_password_now(cls, user):
        """
        Returns whether a password has 'expired' and should be reset. Note there are two different
        expiry policies for staff and students
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return False

        days_before_password_reset = None
        if user.is_staff:
            if cls.is_staff_forced_password_reset_enabled():
                days_before_password_reset = \
                    settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS']
        elif cls.is_student_forced_password_reset_enabled():
            days_before_password_reset = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS']

        if days_before_password_reset:
            history = PasswordHistory.objects.filter(user=user).order_by('-time_set')
            time_last_reset = None

            if history:
                # first element should be the last time we reset password
                time_last_reset = history[0].time_set
            else:
                # no history, then let's take the date the user joined
                time_last_reset = user.date_joined

            now = timezone.now()

            delta = now - time_last_reset

            return delta.days >= days_before_password_reset

        return False

    @classmethod
    def is_password_reset_too_soon(cls, user):
        """
        Verifies that the password is not getting reset too frequently
        """
        if not cls.is_password_reset_frequency_restricted():
            return False

        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')

        if not history:
            return False

        now = timezone.now()

        delta = now - history[0].time_set

        return delta.days < settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']

    @classmethod
    def is_allowable_password_reuse(cls, user, new_password):
        """
        Verifies that the password adheres to the reuse policies
        """
        if not settings.FEATURES['ADVANCED_SECURITY']:
            return True

        if user.is_staff and cls.is_staff_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
        elif cls.is_student_password_reuse_restricted():
            min_diff_passwords_required = \
                settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
        else:
            min_diff_passwords_required = 0

        # just limit the result set to the number of different
        # passwords we need
        history = PasswordHistory.objects.filter(user=user).order_by('-time_set')[:min_diff_passwords_required]

        for entry in history:
            # be sure to re-use the same salt
            # NOTE, how the salt is serialized in the password field is dependent on the algorithm
            # in pbkdf2_sha256 [LMS] it's the 3rd element, in sha1 [unit tests] it's the 2nd element
            hash_elements = entry.password.split('$')
            algorithm = hash_elements[0]
            if algorithm == 'pbkdf2_sha256':
                hashed_password = make_password(new_password, hash_elements[2])
            elif algorithm == 'sha1':
                hashed_password = make_password(new_password, hash_elements[1])
            else:
                # This means we got something unexpected. We don't want to throw an exception, but
                # log as an error and basically allow any password reuse
                AUDIT_LOG.error('''
                                Unknown password hashing algorithm "{0}" found in existing password
                                hash, password reuse policy will not be enforced!!!
                                '''.format(algorithm))
                return True

            if entry.password == hashed_password:
                return False

        return True
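
# --- Editor's illustrative sketch (not part of the original module) ---
# Why is_allowable_password_reuse() splits on '$': Django serializes hashes as
# algorithm-prefixed strings, so the salt sits at a fixed index per algorithm.
# For example (values hypothetical):
#
#     'pbkdf2_sha256$20000$AbCdEf$3q2+7w=='.split('$')
#     # -> ['pbkdf2_sha256', '20000', 'AbCdEf', '3q2+7w==']   salt is element 2
#     'sha1$AbCdEf$0123456789abcdef'.split('$')
#     # -> ['sha1', 'AbCdEf', '0123456789abcdef']             salt is element 1
#
# Re-hashing the candidate password with the stored salt reduces the reuse
# check to a string comparison against entry.password.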


class LoginFailures(models.Model):
    """
    This model will keep track of failed login attempts
    """
    user = models.ForeignKey(User)
    failure_count = models.IntegerField(default=0)
    lockout_until = models.DateTimeField(null=True)

    @classmethod
    def is_feature_enabled(cls):
        """
        Returns whether the feature flag around this functionality has been set
        """
        return settings.FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS']

    @classmethod
    def is_user_locked_out(cls, user):
        """
        Static method to return whether a given user has his/her account locked out
        """
        try:
            record = LoginFailures.objects.get(user=user)
            if not record.lockout_until:
                return False

            now = datetime.now(UTC)
            until = record.lockout_until
            is_locked_out = until and now < until

            return is_locked_out
        except ObjectDoesNotExist:
            return False

    @classmethod
    def increment_lockout_counter(cls, user):
        """
        Ticks the failed attempt counter
        """
        record, _ = LoginFailures.objects.get_or_create(user=user)
        record.failure_count = record.failure_count + 1
        max_failures_allowed = settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED

        # did we go over the limit in attempts
        if record.failure_count >= max_failures_allowed:
            # yes, then store when this account is locked out until
            lockout_period_secs = settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS
            record.lockout_until = datetime.now(UTC) + timedelta(seconds=lockout_period_secs)

        record.save()

    @classmethod
    def clear_lockout_counter(cls, user):
        """
        Removes the lockout counters (normally called after a successful login)
        """
        try:
            entry = LoginFailures.objects.get(user=user)
            entry.delete()
        except ObjectDoesNotExist:
            return
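
# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of LoginFailures from a login view, assuming
# settings.MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED and
# settings.MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS are configured:
#
#     if LoginFailures.is_feature_enabled() and LoginFailures.is_user_locked_out(user):
#         ...                                          # reject, render "locked out" page
#     if password_ok:
#         LoginFailures.clear_lockout_counter(user)    # deletes the row entirely
#     else:
#         LoginFailures.increment_lockout_counter(user)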
""" # Unfortunately, Django's "group by"-style queries look super-awkward query = use_read_replica_if_available( super(CourseEnrollmentManager, self).get_query_set().filter(course_id=course_id, is_active=True).values( 'mode').order_by().annotate(Count('mode'))) total = 0 enroll_dict = defaultdict(int) for item in query: enroll_dict[item['mode']] = item['mode__count'] total += item['mode__count'] enroll_dict['total'] = total return enroll_dict def enrolled_and_dropped_out_users(self, course_id): """Return a queryset of Users in the course.""" return User.objects.filter( courseenrollment__course_id=course_id ) class CourseEnrollment(models.Model): """ Represents a Student's Enrollment record for a single Course. You should generally not manipulate CourseEnrollment objects directly, but use the classmethods provided to enroll, unenroll, or check on the enrollment status of a given student. We're starting to consolidate course enrollment logic in this class, but more should be brought in (such as checking against CourseEnrollmentAllowed, checking course dates, user permissions, etc.) This logic is currently scattered across our views. """ MODEL_TAGS = ['course_id', 'is_active', 'mode'] user = models.ForeignKey(User) course_id = CourseKeyField(max_length=255, db_index=True) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) # If is_active is False, then the student is not considered to be enrolled # in the course (is_enrolled() will return False) is_active = models.BooleanField(default=True) # Represents the modes that are possible. We'll update this later with a # list of possible values. mode = models.CharField(default="honor", max_length=100) objects = CourseEnrollmentManager() # Maintain a history of requirement status updates for auditing purposes history = HistoricalRecords() # cache key format e.g enrollment.<username>.<course_key>.mode = 'honor' COURSE_ENROLLMENT_CACHE_KEY = u"enrollment.{}.{}.mode" class Meta(object): unique_together = (('user', 'course_id'),) ordering = ('user', 'course_id') def __init__(self, *args, **kwargs): super(CourseEnrollment, self).__init__(*args, **kwargs) # Private variable for storing course_overview to minimize calls to the database. # When the property .course_overview is accessed for the first time, this variable will be set. self._course_overview = None def __unicode__(self): return ( "[CourseEnrollment] {}: {} ({}); active: ({})" ).format(self.user, self.course_id, self.created, self.is_active) @classmethod def get_or_create_enrollment(cls, user, course_key): """ Create an enrollment for a user in a class. By default *this enrollment is not active*. This is useful for when an enrollment needs to go through some sort of approval process before being activated. If you don't need this functionality, just call `enroll()` instead. Returns a CoursewareEnrollment object. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall) It is expected that this method is called from a method which has already verified the user authentication and access. """ # If we're passing in a newly constructed (i.e. not yet persisted) User, # save it to the database so that it can have an ID that we can throw # into our CourseEnrollment object. Otherwise, we'll get an # IntegrityError for having a null user_id. 


class CourseEnrollment(models.Model):
    """
    Represents a Student's Enrollment record for a single Course. You should
    generally not manipulate CourseEnrollment objects directly, but use the
    classmethods provided to enroll, unenroll, or check on the enrollment status
    of a given student.

    We're starting to consolidate course enrollment logic in this class, but
    more should be brought in (such as checking against CourseEnrollmentAllowed,
    checking course dates, user permissions, etc.) This logic is currently
    scattered across our views.
    """
    MODEL_TAGS = ['course_id', 'is_active', 'mode']

    user = models.ForeignKey(User)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)

    # If is_active is False, then the student is not considered to be enrolled
    # in the course (is_enrolled() will return False)
    is_active = models.BooleanField(default=True)

    # Represents the modes that are possible. We'll update this later with a
    # list of possible values.
    mode = models.CharField(default="honor", max_length=100)

    objects = CourseEnrollmentManager()

    # Maintain a history of requirement status updates for auditing purposes
    history = HistoricalRecords()

    # cache key format e.g enrollment.<username>.<course_key>.mode = 'honor'
    COURSE_ENROLLMENT_CACHE_KEY = u"enrollment.{}.{}.mode"

    class Meta(object):
        unique_together = (('user', 'course_id'),)
        ordering = ('user', 'course_id')

    def __init__(self, *args, **kwargs):
        super(CourseEnrollment, self).__init__(*args, **kwargs)

        # Private variable for storing course_overview to minimize calls to the database.
        # When the property .course_overview is accessed for the first time, this variable will be set.
        self._course_overview = None

    def __unicode__(self):
        return (
            "[CourseEnrollment] {}: {} ({}); active: ({})"
        ).format(self.user, self.course_id, self.created, self.is_active)

    @classmethod
    def get_or_create_enrollment(cls, user, course_key):
        """
        Create an enrollment for a user in a class. By default *this enrollment
        is not active*. This is useful for when an enrollment needs to go
        through some sort of approval process before being activated. If you
        don't need this functionality, just call `enroll()` instead.

        Returns a CourseEnrollment object.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        # If we're passing in a newly constructed (i.e. not yet persisted) User,
        # save it to the database so that it can have an ID that we can throw
        # into our CourseEnrollment object. Otherwise, we'll get an
        # IntegrityError for having a null user_id.
        assert isinstance(course_key, CourseKey)

        if user.id is None:
            user.save()

        try:
            enrollment, created = CourseEnrollment.objects.get_or_create(
                user=user,
                course_id=course_key,
            )

            # If we *did* just create a new enrollment, set some defaults
            if created:
                enrollment.mode = "honor"
                enrollment.is_active = False
                enrollment.save()
        except IntegrityError:
            log.info(
                (
                    "An integrity error occurred while getting-or-creating the enrollment "
                    "for course key %s and student %s. This can occur if two processes try to get-or-create "
                    "the enrollment at the same time and the database is set to REPEATABLE READ. We will try "
                    "committing the transaction and retrying."
                ),
                course_key, user
            )
            transaction.commit()
            enrollment = CourseEnrollment.objects.get(
                user=user,
                course_id=course_key,
            )

        return enrollment

    @classmethod
    def get_enrollment(cls, user, course_key):
        """Returns a CourseEnrollment object.

        Args:
            user (User): The user associated with the enrollment.
            course_id (CourseKey): The key of the course associated with the enrollment.

        Returns:
            Course enrollment object or None
        """
        try:
            return CourseEnrollment.objects.get(
                user=user,
                course_id=course_key
            )
        except cls.DoesNotExist:
            return None

    @classmethod
    def is_enrollment_closed(cls, user, course):
        """
        Returns a boolean value regarding whether the user has access to enroll in the course.
        Returns False if the enrollment has been closed.
        """
        # Disable the pylint error here, as per ormsbee. This local import was previously
        # in CourseEnrollment.enroll
        from courseware.access import has_access  # pylint: disable=import-error
        return not has_access(user, 'enroll', course)

    def update_enrollment(self, mode=None, is_active=None, skip_refund=False):
        """
        Updates an enrollment for a user in a class.  This includes options
        like changing the mode, toggling is_active True/False, etc.

        Also emits relevant events for analytics purposes.

        This saves immediately.
        """
        activation_changed = False
        # if is_active is None, then the call to update_enrollment didn't specify
        # any value, so just leave is_active as it is
        if self.is_active != is_active and is_active is not None:
            self.is_active = is_active
            activation_changed = True

        mode_changed = False
        # if mode is None, the call to update_enrollment didn't specify a new
        # mode, so leave as-is
        if self.mode != mode and mode is not None:
            self.mode = mode
            mode_changed = True

        if activation_changed or mode_changed:
            self.save()

        if activation_changed:
            if self.is_active:
                self.emit_event(EVENT_NAME_ENROLLMENT_ACTIVATED)
                dog_stats_api.increment(
                    "common.student.enrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )
            else:
                UNENROLL_DONE.send(sender=None, course_enrollment=self, skip_refund=skip_refund)
                self.emit_event(EVENT_NAME_ENROLLMENT_DEACTIVATED)
                dog_stats_api.increment(
                    "common.student.unenrollment",
                    tags=[u"org:{}".format(self.course_id.org),
                          u"offering:{}".format(self.course_id.offering),
                          u"mode:{}".format(self.mode)]
                )

        if mode_changed:
            # the user's default mode is "honor" and disabled for a course
            # mode change events will only be emitted when the user's mode changes from this
            self.emit_event(EVENT_NAME_ENROLLMENT_MODE_CHANGED)
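
    # --- Editor's illustrative sketch (not part of the original module) ---
    # What update_enrollment() above emits for each kind of change, assuming an
    # existing active "honor" enrollment:
    #
    #     enrollment.update_enrollment(mode="verified")   # saves; emits edx.course.enrollment.mode_changed
    #     enrollment.update_enrollment(is_active=False)   # saves; emits ...deactivated, sends UNENROLL_DONE
    #     enrollment.update_enrollment()                  # no-op: nothing saved, nothing emitted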
""" try: context = contexts.course_context_from_course_id(self.course_id) assert isinstance(self.course_id, CourseKey) data = { 'user_id': self.user.id, 'course_id': self.course_id.to_deprecated_string(), 'mode': self.mode, } with tracker.get_tracker().context(event_name, context): tracker.emit(event_name, data) if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY: tracking_context = tracker.get_tracker().resolve_context() analytics.track(self.user_id, event_name, { 'category': 'conversion', 'label': self.course_id.to_deprecated_string(), 'org': self.course_id.org, 'course': self.course_id.course, 'run': self.course_id.run, 'mode': self.mode, }, context={ 'ip': tracking_context.get('ip'), 'Google Analytics': { 'clientId': tracking_context.get('client_id') } }) except: # pylint: disable=bare-except if event_name and self.course_id: log.exception( u'Unable to emit event %s for user %s and course %s', event_name, self.user.username, # pylint: disable=no-member self.course_id, ) @classmethod def enroll(cls, user, course_key, mode="honor", check_access=False): """ Enroll a user in a course. This saves immediately. Returns a CoursewareEnrollment object. `user` is a Django User object. If it hasn't been saved yet (no `.id` attribute), this method will automatically save it before adding an enrollment for it. `course_key` is our usual course_id string (e.g. "edX/Test101/2013_Fall) `mode` is a string specifying what kind of enrollment this is. The default is 'honor', meaning honor certificate. Other options include 'professional', 'verified', 'audit', 'no-id-professional' and 'credit'. See CourseMode in common/djangoapps/course_modes/models.py. `check_access`: if True, we check that an accessible course actually exists for the given course_key before we enroll the student. The default is set to False to avoid breaking legacy code or code with non-standard flows (ex. beta tester invitations), but for any standard enrollment flow you probably want this to be True. Exceptions that can be raised: NonExistentCourseError, EnrollmentClosedError, CourseFullError, AlreadyEnrolledError. All these are subclasses of CourseEnrollmentException if you want to catch all of them in the same way. It is expected that this method is called from a method which has already verified the user authentication. Also emits relevant events for analytics purposes. """ # All the server-side checks for whether a user is allowed to enroll. try: course = CourseOverview.get_from_id(course_key) except CourseOverview.DoesNotExist: # This is here to preserve legacy behavior which allowed enrollment in courses # announced before the start of content creation. if check_access: log.warning(u"User %s failed to enroll in non-existent course %s", user.username, unicode(course_key)) raise NonExistentCourseError if check_access: if CourseEnrollment.is_enrollment_closed(user, course): log.warning( u"User %s failed to enroll in course %s because enrollment is closed", user.username, course_key.to_deprecated_string() ) raise EnrollmentClosedError if CourseEnrollment.objects.is_course_full(course): log.warning( u"User %s failed to enroll in full course %s", user.username, course_key.to_deprecated_string(), ) raise CourseFullError if CourseEnrollment.is_enrolled(user, course_key): log.warning( u"User %s attempted to enroll in %s, but they were already enrolled", user.username, course_key.to_deprecated_string() ) if check_access: raise AlreadyEnrolledError # User is allowed to enroll if they've reached this point. 

    @classmethod
    def enroll_by_email(cls, email, course_id, mode="honor", ignore_errors=True):
        """
        Enroll a user in a course given their email. This saves immediately.

        Note that enrolling by email is generally done in big batches and the
        error rate is high. For that reason, we suppress User lookup errors by
        default.

        Returns a CourseEnrollment object. If the User does not exist and
        `ignore_errors` is set to `True`, it will return None.

        `email` Email address of the User to add to enroll in the course.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")

        `mode` is a string specifying what kind of enrollment this is. The
               default is "honor", meaning honor certificate. Future options
               may include "audit", "verified_id", etc. Please don't use it
               until we have these mapped out.

        `ignore_errors` is a boolean indicating whether we should suppress
                        `User.DoesNotExist` errors (returning None) or let it
                        bubble up.

        It is expected that this method is called from a method which has already
        verified the user authentication and access.
        """
        try:
            user = User.objects.get(email=email)
            return cls.enroll(user, course_id, mode)
        except User.DoesNotExist:
            err_msg = u"Tried to enroll email {} into course {}, but user not found"
            log.error(err_msg.format(email, course_id))
            if ignore_errors:
                return None
            raise

    @classmethod
    def unenroll(cls, user, course_id, skip_refund=False):
        """
        Remove the user from a given course. If the relevant `CourseEnrollment`
        object doesn't exist, we log an error but don't throw an exception.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")

        `skip_refund` can be set to True to avoid the refund process.
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            record.update_enrollment(is_active=False, skip_refund=skip_refund)
        except cls.DoesNotExist:
            log.error(
                u"Tried to unenroll student %s from %s but they were not enrolled",
                user,
                course_id
            )

    @classmethod
    def unenroll_by_email(cls, email, course_id):
        """
        Unenroll a user from a course given their email. This saves immediately.
        User lookup errors are logged but will not throw an exception.

        `email` Email address of the User to unenroll from the course.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
        """
        try:
            user = User.objects.get(email=email)
            return cls.unenroll(user, course_id)
        except User.DoesNotExist:
            log.error(
                u"Tried to unenroll email %s from course %s, but user not found",
                email,
                course_id
            )

    @classmethod
    def is_enrolled(cls, user, course_key):
        """
        Returns True if the user is enrolled in the course (the entry must exist
        and it must have `is_active=True`). Otherwise, returns False.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")
        """
        if not user.is_authenticated():
            return False

        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_key)
            return record.is_active
        except cls.DoesNotExist:
            return False

    @classmethod
    def is_enrolled_by_partial(cls, user, course_id_partial):
        """
        Returns `True` if the user is enrolled in a course that starts with
        `course_id_partial`. Otherwise, returns False.

        Can be used to determine whether a student is enrolled in a course
        whose run name is unknown.

        `user` is a Django User object. If it hasn't been saved yet (no `.id`
               attribute), this method will automatically save it before
               adding an enrollment for it.

        `course_id_partial` (CourseKey) is missing the run component
        """
        assert isinstance(course_id_partial, CourseKey)
        assert not course_id_partial.run  # None or empty string
        course_key = SlashSeparatedCourseKey(course_id_partial.org, course_id_partial.course, '')
        querystring = unicode(course_key.to_deprecated_string())
        try:
            return CourseEnrollment.objects.filter(
                user=user,
                course_id__startswith=querystring,
                is_active=1
            ).exists()
        except cls.DoesNotExist:
            return False

    @classmethod
    def enrollment_mode_for_user(cls, user, course_id):
        """
        Returns the enrollment mode for the given user for the given course

        `user` is a Django User object
        `course_id` is our usual course_id string (e.g. "edX/Test101/2013_Fall")

        Returns (mode, is_active) where mode is the enrollment mode of the student
            and is_active is whether the enrollment is active.
        Returns (None, None) if the courseenrollment record does not exist.
        """
        try:
            record = CourseEnrollment.objects.get(user=user, course_id=course_id)
            return (record.mode, record.is_active)
        except cls.DoesNotExist:
            return (None, None)

    @classmethod
    def enrollments_for_user(cls, user):
        return CourseEnrollment.objects.filter(user=user, is_active=1)

    def is_paid_course(self):
        """
        Returns True, if course is paid
        """
        paid_course = CourseMode.is_white_label(self.course_id)
        if paid_course or CourseMode.is_professional_slug(self.mode):
            return True

        return False

    def activate(self):
        """Makes this `CourseEnrollment` record active. Saves immediately."""
        self.update_enrollment(is_active=True)

    def deactivate(self):
        """Makes this `CourseEnrollment` record inactive. Saves immediately. An
        inactive record means that the student is not enrolled in this course.
        """
        self.update_enrollment(is_active=False)

    def change_mode(self, mode):
        """Changes this `CourseEnrollment` record's mode to `mode`.  Saves immediately."""
        self.update_enrollment(mode=mode)

    def refundable(self):
        """
        For paid/verified certificates, students may receive a refund if they have
        a verified certificate and the deadline for refunds has not yet passed.
        """
        # In order to support manual refunds past the deadline, set can_refund on this object.
        # On unenrolling, the "UNENROLL_DONE" signal calls CertificateItem.refund_cert_callback(),
        # which calls this method to determine whether to refund the order.
        # This can't be set directly because refunds currently happen as a side-effect of unenrolling.
        # (side-effects are bad)
        if getattr(self, 'can_refund', None) is not None:
            return True

        # If the student has already been given a certificate they should not be refunded
        if GeneratedCertificate.certificate_for_student(self.user, self.course_id) is not None:
            return False

        # If it is after the refundable cutoff date they should not be refunded.
        refund_cutoff_date = self.refund_cutoff_date()
        if refund_cutoff_date and datetime.now(UTC) > refund_cutoff_date:
            return False

        course_mode = CourseMode.mode_for_course(self.course_id, 'verified')
        if course_mode is None:
            return False
        else:
            return True
""" try: attribute = self.attributes.get(namespace='order', name='order_number') # pylint: disable=no-member except ObjectDoesNotExist: return None order_number = attribute.value order = ecommerce_api_client(self.user).orders(order_number).get() refund_window_start_date = max( datetime.strptime(order['date_placed'], ECOMMERCE_DATE_FORMAT), self.course_overview.start.replace(tzinfo=None) ) return refund_window_start_date.replace(tzinfo=UTC) + EnrollmentRefundConfiguration.current().refund_window @property def username(self): return self.user.username @property def course(self): # Deprecated. Please use the `course_overview` property instead. return self.course_overview @property def course_overview(self): """ Returns a CourseOverview of the course to which this enrollment refers. Returns None if an error occurred while trying to load the course. Note: If the course is re-published within the lifetime of this CourseEnrollment object, then the value of this property will become stale. """ if not self._course_overview: try: self._course_overview = CourseOverview.get_from_id(self.course_id) except (CourseOverview.DoesNotExist, IOError): self._course_overview = None return self._course_overview def is_verified_enrollment(self): """ Check the course enrollment mode is verified or not """ return CourseMode.is_verified_slug(self.mode) @classmethod def is_enrolled_as_verified(cls, user, course_key): """ Check whether the course enrollment is for a verified mode. Arguments: user (User): The user object. course_key (CourseKey): The identifier for the course. Returns: bool """ enrollment = cls.get_enrollment(user, course_key) return ( enrollment is not None and enrollment.is_active and enrollment.is_verified_enrollment() ) @classmethod def cache_key_name(cls, user_id, course_key): """Return cache key name to be used to cache current configuration. Args: user_id(int): Id of user. course_key(unicode): Unicode of course key Returns: Unicode cache key """ return cls.COURSE_ENROLLMENT_CACHE_KEY.format(user_id, unicode(course_key)) @receiver(models.signals.post_save, sender=CourseEnrollment) @receiver(models.signals.post_delete, sender=CourseEnrollment) def invalidate_enrollment_mode_cache(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name """Invalidate the cache of CourseEnrollment model. """ cache_key = CourseEnrollment.cache_key_name( instance.user.id, unicode(instance.course_id) ) cache.delete(cache_key) class ManualEnrollmentAudit(models.Model): """ Table for tracking which enrollments were performed through manual enrollment. """ enrollment = models.ForeignKey(CourseEnrollment, null=True) enrolled_by = models.ForeignKey(User, null=True) enrolled_email = models.CharField(max_length=255, db_index=True) time_stamp = models.DateTimeField(auto_now_add=True, null=True) state_transition = models.CharField(max_length=255, choices=TRANSITION_STATES) reason = models.TextField(null=True) @classmethod def create_manual_enrollment_audit(cls, user, email, state_transition, reason, enrollment=None): """ saves the student manual enrollment information """ cls.objects.create( enrolled_by=user, enrolled_email=email, state_transition=state_transition, reason=reason, enrollment=enrollment ) @classmethod def get_manual_enrollment_by_email(cls, email): """ if matches returns the most recent entry in the table filtered by email else returns None. 
""" try: manual_enrollment = cls.objects.filter(enrolled_email=email).latest('time_stamp') except cls.DoesNotExist: manual_enrollment = None return manual_enrollment @classmethod def get_manual_enrollment(cls, enrollment): """ if matches returns the most recent entry in the table filtered by enrollment else returns None, """ try: manual_enrollment = cls.objects.filter(enrollment=enrollment).latest('time_stamp') except cls.DoesNotExist: manual_enrollment = None return manual_enrollment class CourseEnrollmentAllowed(models.Model): """ Table of users (specified by email address strings) who are allowed to enroll in a specified course. The user may or may not (yet) exist. Enrollment by users listed in this table is allowed even if the enrollment time window is past. """ email = models.CharField(max_length=255, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) auto_enroll = models.BooleanField(default=0) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) class Meta(object): unique_together = (('email', 'course_id'),) def __unicode__(self): return "[CourseEnrollmentAllowed] %s: %s (%s)" % (self.email, self.course_id, self.created) @classmethod def may_enroll_and_unenrolled(cls, course_id): """ Return QuerySet of students who are allowed to enroll in a course. Result excludes students who have already enrolled in the course. `course_id` identifies the course for which to compute the QuerySet. """ enrolled = CourseEnrollment.objects.users_enrolled_in(course_id=course_id).values_list('email', flat=True) return CourseEnrollmentAllowed.objects.filter(course_id=course_id).exclude(email__in=enrolled) @total_ordering class CourseAccessRole(models.Model): """ Maps users to org, courses, and roles. Used by student.roles.CourseRole and OrgRole. To establish a user as having a specific role over all courses in the org, create an entry without a course_id. """ objects = NoneToEmptyManager() user = models.ForeignKey(User) # blank org is for global group based roles such as course creator (may be deprecated) org = models.CharField(max_length=64, db_index=True, blank=True) # blank course_id implies org wide role course_id = CourseKeyField(max_length=255, db_index=True, blank=True) role = models.CharField(max_length=64, db_index=True) class Meta(object): unique_together = ('user', 'org', 'course_id', 'role') @property def _key(self): """ convenience function to make eq overrides easier and clearer. arbitrary decision that role is primary, followed by org, course, and then user """ return (self.role, self.org, self.course_id, self.user_id) def __eq__(self, other): """ Overriding eq b/c the django impl relies on the primary key which requires fetch. sometimes we just want to compare roles w/o doing another fetch. """ return type(self) == type(other) and self._key == other._key # pylint: disable=protected-access def __hash__(self): return hash(self._key) def __lt__(self, other): """ Lexigraphic sort """ return self._key < other._key # pylint: disable=protected-access def __unicode__(self): return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id) #### Helper methods for use from python manage.py shell and other classes. def get_user_by_username_or_email(username_or_email): """ Return a User object, looking up by email if username_or_email contains a '@', otherwise by username. Raises: User.DoesNotExist is lookup fails. 
""" if '@' in username_or_email: return User.objects.get(email=username_or_email) else: return User.objects.get(username=username_or_email) def get_user(email): user = User.objects.get(email=email) u_prof = UserProfile.objects.get(user=user) return user, u_prof def user_info(email): user, u_prof = get_user(email) print "User id", user.id print "Username", user.username print "E-mail", user.email print "Name", u_prof.name print "Location", u_prof.location print "Language", u_prof.language return user, u_prof def change_email(old_email, new_email): user = User.objects.get(email=old_email) user.email = new_email user.save() def change_name(email, new_name): _user, u_prof = get_user(email) u_prof.name = new_name u_prof.save() def user_count(): print "All users", User.objects.all().count() print "Active users", User.objects.filter(is_active=True).count() return User.objects.all().count() def active_user_count(): return User.objects.filter(is_active=True).count() def create_group(name, description): utg = UserTestGroup() utg.name = name utg.description = description utg.save() def add_user_to_group(user, group): utg = UserTestGroup.objects.get(name=group) utg.users.add(User.objects.get(username=user)) utg.save() def remove_user_from_group(user, group): utg = UserTestGroup.objects.get(name=group) utg.users.remove(User.objects.get(username=user)) utg.save() DEFAULT_GROUPS = { 'email_future_courses': 'Receive e-mails about future MITx courses', 'email_helpers': 'Receive e-mails about how to help with MITx', 'mitx_unenroll': 'Fully unenrolled -- no further communications', '6002x_unenroll': 'Took and dropped 6002x' } def add_user_to_default_group(user, group): try: utg = UserTestGroup.objects.get(name=group) except UserTestGroup.DoesNotExist: utg = UserTestGroup() utg.name = group utg.description = DEFAULT_GROUPS[group] utg.save() utg.users.add(User.objects.get(username=user)) utg.save() def create_comments_service_user(user): if not settings.FEATURES['ENABLE_DISCUSSION_SERVICE']: # Don't try--it won't work, and it will fill the logs with lots of errors return try: cc_user = cc.User.from_django_user(user) cc_user.save() except Exception: # pylint: disable=broad-except log = logging.getLogger("edx.discussion") # pylint: disable=redefined-outer-name log.error( "Could not create comments service user with id {}".format(user.id), exc_info=True ) # Define login and logout handlers here in the models file, instead of the views file, # so that they are more likely to be loaded when a Studio user brings up the Studio admin # page to login. These are currently the only signals available, so we need to continue # identifying and logging failures separately (in views). 


@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs):  # pylint: disable=unused-argument
    """Handler to log when logins have occurred successfully."""
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id))
    else:
        AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email))


@receiver(user_logged_out)
def log_successful_logout(sender, request, user, **kwargs):  # pylint: disable=unused-argument
    """Handler to log when logouts have occurred successfully."""
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.info(u"Logout - user.id: {0}".format(request.user.id))
    else:
        AUDIT_LOG.info(u"Logout - {0}".format(request.user))


@receiver(user_logged_in)
@receiver(user_logged_out)
def enforce_single_login(sender, request, user, signal, **kwargs):  # pylint: disable=unused-argument
    """
    Sets the current session id in the user profile,
    to prevent concurrent logins.
    """
    if settings.FEATURES.get('PREVENT_CONCURRENT_LOGINS', False):
        if signal == user_logged_in:
            key = request.session.session_key
        else:
            key = None
        if user:
            user.profile.set_login_session(key)


class DashboardConfiguration(ConfigurationModel):
    """Dashboard Configuration settings.

    Includes configuration options for the dashboard, which impact behavior and rendering for the application.
    """
    recent_enrollment_time_delta = models.PositiveIntegerField(
        default=0,
        help_text="The number of seconds in which a new enrollment is considered 'recent'. "
                  "Used to display notifications."
    )

    @property
    def recent_enrollment_seconds(self):
        return self.recent_enrollment_time_delta


class LinkedInAddToProfileConfiguration(ConfigurationModel):
    """
    LinkedIn Add to Profile Configuration

    This configuration enables the "Add to Profile" LinkedIn
    button on the student dashboard.  The button appears when
    users have a certificate available; when clicked,
    users are sent to the LinkedIn site with a pre-filled
    form allowing them to add the certificate to their
    LinkedIn profile.
    """

    MODE_TO_CERT_NAME = {
        "honor": _(u"{platform_name} Honor Code Certificate for {course_name}"),
        "verified": _(u"{platform_name} Verified Certificate for {course_name}"),
        "professional": _(u"{platform_name} Professional Certificate for {course_name}"),
        "no-id-professional": _(
            u"{platform_name} Professional Certificate for {course_name}"
        ),
    }

    company_identifier = models.TextField(
        help_text=_(
            u"The company identifier for the LinkedIn Add-to-Profile button "
            u"e.g. 0_0dPSPyS070e0HsE9HNz_13_d11_"
        )
    )

    # Deprecated
    dashboard_tracking_code = models.TextField(default="", blank=True)

    trk_partner_name = models.CharField(
        max_length=10,
        default="",
        blank=True,
        help_text=_(
            u"Short identifier for the LinkedIn partner used in the tracking code. "
            u"(Example: 'edx') "
            u"If no value is provided, tracking codes will not be sent to LinkedIn."
        )
    )

    def add_to_profile_url(self, course_key, course_name, cert_mode, cert_url, source="o", target="dashboard"):
        """Construct the URL for the "add to profile" button.

        Arguments:
            course_key (CourseKey): The identifier for the course.
            course_name (unicode): The display name of the course.
            cert_mode (str): The course mode of the user's certificate (e.g. "verified", "honor", "professional")
            cert_url (str): The download URL for the certificate.

        Keyword Arguments:
            source (str): Either "o" (for onsite/UI), "e" (for emails), or "m" (for mobile)
            target (str): An identifier for the occurrence of the button.

        """
""" params = OrderedDict([ ('_ed', self.company_identifier), ('pfCertificationName', self._cert_name(course_name, cert_mode).encode('utf-8')), ('pfCertificationUrl', cert_url), ('source', source) ]) tracking_code = self._tracking_code(course_key, cert_mode, target) if tracking_code is not None: params['trk'] = tracking_code return u'http://www.linkedin.com/profile/add?{params}'.format( params=urlencode(params) ) def _cert_name(self, course_name, cert_mode): """Name of the certification, for display on LinkedIn. """ return self.MODE_TO_CERT_NAME.get( cert_mode, _(u"{platform_name} Certificate for {course_name}") ).format( platform_name=settings.PLATFORM_NAME, course_name=course_name ) def _tracking_code(self, course_key, cert_mode, target): """Create a tracking code for the button. Tracking codes are used by LinkedIn to collect analytics about certifications users are adding to their profiles. The tracking code format is: &trk=[partner name]-[certificate type]-[date]-[target field] In our case, we're sending: &trk=edx-{COURSE ID}_{COURSE MODE}-{TARGET} If no partner code is configured, then this will return None, indicating that tracking codes are disabled. Arguments: course_key (CourseKey): The identifier for the course. cert_mode (str): The enrollment mode for the course. target (str): Identifier for where the button is located. Returns: unicode or None """ return ( u"{partner}-{course_key}_{cert_mode}-{target}".format( partner=self.trk_partner_name, course_key=unicode(course_key), cert_mode=cert_mode, target=target ) if self.trk_partner_name else None ) class EntranceExamConfiguration(models.Model): """ Represents a Student's entrance exam specific data for a single Course """ user = models.ForeignKey(User, db_index=True) course_id = CourseKeyField(max_length=255, db_index=True) created = models.DateTimeField(auto_now_add=True, null=True, db_index=True) updated = models.DateTimeField(auto_now=True, db_index=True) # if skip_entrance_exam is True, then student can skip entrance exam # for the course skip_entrance_exam = models.BooleanField(default=True) class Meta(object): unique_together = (('user', 'course_id'), ) def __unicode__(self): return "[EntranceExamConfiguration] %s: %s (%s) = %s" % ( self.user, self.course_id, self.created, self.skip_entrance_exam ) @classmethod def user_can_skip_entrance_exam(cls, user, course_key): """ Return True if given user can skip entrance exam for given course otherwise False. """ can_skip = False if is_entrance_exams_enabled(): try: record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key) can_skip = record.skip_entrance_exam except EntranceExamConfiguration.DoesNotExist: can_skip = False return can_skip class LanguageField(models.CharField): """Represents a language from the ISO 639-1 language set.""" def __init__(self, *args, **kwargs): """Creates a LanguageField. Accepts all the same kwargs as a CharField, except for max_length and choices. help_text defaults to a description of the ISO 639-1 set. """ kwargs.pop('max_length', None) kwargs.pop('choices', None) help_text = kwargs.pop( 'help_text', _("The ISO 639-1 language code for this language."), ) super(LanguageField, self).__init__( max_length=16, choices=settings.ALL_LANGUAGES, help_text=help_text, *args, **kwargs ) add_introspection_rules([], [r"^student\.models\.LanguageField"]) class LanguageProficiency(models.Model): """ Represents a user's language proficiency. 


class EntranceExamConfiguration(models.Model):
    """
    Represents a Student's entrance exam specific data for a single Course
    """

    user = models.ForeignKey(User, db_index=True)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(auto_now_add=True, null=True, db_index=True)
    updated = models.DateTimeField(auto_now=True, db_index=True)

    # if skip_entrance_exam is True, then student can skip entrance exam
    # for the course
    skip_entrance_exam = models.BooleanField(default=True)

    class Meta(object):
        unique_together = (('user', 'course_id'), )

    def __unicode__(self):
        return "[EntranceExamConfiguration] %s: %s (%s) = %s" % (
            self.user, self.course_id, self.created, self.skip_entrance_exam
        )

    @classmethod
    def user_can_skip_entrance_exam(cls, user, course_key):
        """
        Return True if the given user can skip the entrance exam for the given course,
        otherwise False.
        """
        can_skip = False
        if is_entrance_exams_enabled():
            try:
                record = EntranceExamConfiguration.objects.get(user=user, course_id=course_key)
                can_skip = record.skip_entrance_exam
            except EntranceExamConfiguration.DoesNotExist:
                can_skip = False
        return can_skip


class LanguageField(models.CharField):
    """Represents a language from the ISO 639-1 language set."""

    def __init__(self, *args, **kwargs):
        """Creates a LanguageField.

        Accepts all the same kwargs as a CharField, except for max_length and
        choices. help_text defaults to a description of the ISO 639-1 set.
        """
        kwargs.pop('max_length', None)
        kwargs.pop('choices', None)
        help_text = kwargs.pop(
            'help_text',
            _("The ISO 639-1 language code for this language."),
        )
        super(LanguageField, self).__init__(
            max_length=16,
            choices=settings.ALL_LANGUAGES,
            help_text=help_text,
            *args,
            **kwargs
        )

add_introspection_rules([], [r"^student\.models\.LanguageField"])


class LanguageProficiency(models.Model):
    """
    Represents a user's language proficiency.

    Note that we have not found a way to emit analytics change events by using signals directly on this
    model or on UserProfile. Therefore if you are changing LanguageProficiency values, it is important
    to go through the accounts API (AccountsView) defined in
    /edx-platform/openedx/core/djangoapps/user_api/accounts/views.py or its associated api method
    (update_account_settings) so that the events are emitted.
    """
    class Meta(object):
        unique_together = (('code', 'user_profile'),)

    user_profile = models.ForeignKey(UserProfile, db_index=True, related_name='language_proficiencies')
    code = models.CharField(
        max_length=16,
        blank=False,
        choices=settings.ALL_LANGUAGES,
        help_text=_("The ISO 639-1 language code for this language.")
    )


class CourseEnrollmentAttribute(models.Model):
    """
    Provide additional information about the user's enrollment.
    """
    enrollment = models.ForeignKey(CourseEnrollment, related_name="attributes")
    namespace = models.CharField(
        max_length=255,
        help_text=_("Namespace of enrollment attribute")
    )
    name = models.CharField(
        max_length=255,
        help_text=_("Name of the enrollment attribute")
    )
    value = models.CharField(
        max_length=255,
        help_text=_("Value of the enrollment attribute")
    )

    def __unicode__(self):
        """Unicode representation of the attribute. """
        return u"{namespace}:{name}, {value}".format(
            namespace=self.namespace,
            name=self.name,
            value=self.value,
        )

    @classmethod
    def add_enrollment_attr(cls, enrollment, data_list):
        """Delete all the enrollment attributes for the given enrollment and
        add new attributes.

        Args:
            enrollment (CourseEnrollment): 'CourseEnrollment' for which attributes are to be added
            data_list (list): list of dictionaries containing data to save
        """
        cls.objects.filter(enrollment=enrollment).delete()

        attributes = [
            cls(enrollment=enrollment, namespace=data['namespace'], name=data['name'], value=data['value'])
            for data in data_list
        ]
        cls.objects.bulk_create(attributes)

    @classmethod
    def get_enrollment_attributes(cls, enrollment):
        """Retrieve list of all enrollment attributes.

        Args:
            enrollment (CourseEnrollment): 'CourseEnrollment' for which the list is to be retrieved

        Returns: list

        Example:
        >>> CourseEnrollmentAttribute.get_enrollment_attributes(CourseEnrollment)
        [
            {
                "namespace": "credit",
                "name": "provider_id",
                "value": "hogwarts",
            },
        ]
        """
        return [
            {
                "namespace": attribute.namespace,
                "name": attribute.name,
                "value": attribute.value,
            }
            for attribute in cls.objects.filter(enrollment=enrollment)
        ]


class EnrollmentRefundConfiguration(ConfigurationModel):
    """
    Configuration for course enrollment refunds.
    """

    # TODO: Django 1.8 introduces a DurationField
    # (https://docs.djangoproject.com/en/1.8/ref/models/fields/#durationfield)
    # for storing timedeltas which uses MySQL's bigint for backing
    # storage. After we've completed the Django upgrade we should be
    # able to replace this field with a DurationField named
    # `refund_window` without having to run a migration or change
    # other code.
    refund_window_microseconds = models.BigIntegerField(
        default=1209600000000,
        help_text=_(
            "The window of time after enrolling during which users can be granted"
            " a refund, represented in microseconds. The default is 14 days."
        )
    )

    @property
    def refund_window(self):
        """Return the configured refund window as a `datetime.timedelta`."""
        return timedelta(microseconds=self.refund_window_microseconds)

    @refund_window.setter
    def refund_window(self, refund_window):
        """Set the current refund window to the given timedelta."""
        self.refund_window_microseconds = int(refund_window.total_seconds() * 1000000)
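
# --- Editor's illustrative sketch (not part of the original module) ---
# The refund_window property round-trips through microseconds, so setting and
# reading it are symmetric:
#
#     config = EnrollmentRefundConfiguration.current()
#     config.refund_window                       # timedelta(days=14) by default
#     config.refund_window = timedelta(days=7)   # stores 604800000000 microseconds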
iivic/BoiseStateX
common/djangoapps/student/models.py
Python
agpl-3.0
77,328
from django.db import models class PostQuerySet(models.QuerySet): def released(self): return self.filter(released=True) def not_released(self): return self.filter(released=False)
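# --- Illustrative usage sketch (the `Post` model below is hypothetical) ---
# A QuerySet subclass like PostQuerySet is typically attached to a model via
# `.as_manager()`, after which the custom filters chain like any queryset:
#
#     class Post(models.Model):
#         released = models.BooleanField(default=False)
#         objects = PostQuerySet.as_manager()
#
#     Post.objects.released()       # only released posts
#     Post.objects.not_released()   # only drafts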
arineto/arineto-website
apps/blog/managers.py
Python
mit
207
import judicious # judicious.register("http://127.0.0.1:5000") # judicious.register("https://imprudent.herokuapp.com") scenario = "A drug cartel has hidden 1 kilogram of cocaine somewhere on the person or in the luggage of a passenger on a commercial flight. Where do you think the cocaine might be hidden?" response = judicious.redact_illicit(scenario)
suchow/judicious
tests/test45.py
Python
mit
356
import os import csv import fnmatch import datetime import pandas as pd import win32com.client def num(s): try: if s == "" or s == "None" or str(float(s)) == "nan": return 0 return str(int(s)) except: return s def arrs_to_xlsx(filename, header=[], arr=[]): i = 1 xl = win32com.client.Dispatch('Excel.Application') wb = xl.Workbooks.Add() for x in range(0, len(header)): ws = wb.Worksheets(x+1) for i, cell in enumerate(header[x].split(',')): ws.Cells(1,i+1).Value = cell for i, row in enumerate(arr[x]): for j, cell in enumerate(row): ws.Cells(i+2,j+1).Value = str(cell) wb.Worksheets(1).Range("C:D").NumberFormat = "#,##0" wb.Worksheets(1).Columns.AutoFit() wb.Worksheets(1).Range("$A$1:$Z$3000").FormatConditions.Add(2, "", '=AND(ISNUMBER($C1),$C1<>$D1) ') wb.Worksheets(1).Range("$A$1:$Z$3000").FormatConditions(1).Interior.ColorIndex = 6 wb.Worksheets(1).Range("$A$1:$Z$3000").FormatConditions(1).StopIfTrue = False wb.Worksheets(2).Range("D:E").NumberFormat = "#,##0" wb.Worksheets(2).Columns.AutoFit() wb.Worksheets(2).Range("$A$1:$Z$3000").FormatConditions.Add(2, "", '=AND(ISNUMBER($D1),$D1<>$E1) ') wb.Worksheets(2).Range("$A$1:$Z$3000").FormatConditions(1).Interior.ColorIndex = 6 wb.Worksheets(2).Range("$A$1:$Z$3000").FormatConditions(1).StopIfTrue = False xl.DisplayAlerts = False wb.SaveAs(filename) xl.DisplayAlerts = True wb.Close(True) return def compare_prf_ins(src_df, des_df, mapper, recon_arr): for index, src_row in src_df.loc[(src_df.ins_type == "Option") & (src_df.written == 0)].iterrows(): if src_row.portfolio in mapper: des_tmp_df = des_df.loc[(des_df.portfolio == mapper[src_row.portfolio]) & (des_df.ins == src_row.ins) & (des_df.written == 0)] else: continue des_qty = 0 if des_tmp_df.empty else des_tmp_df.iloc[0].qty # if src_row["qty"] == des_qty: if True: des_df.loc[(des_df.portfolio == mapper[src_row.portfolio]) & (des_df.ins == src_row.ins), "written"] = 1 src_df.loc[(src_df.portfolio == src_row.portfolio) & (src_df.ins == src_row.ins), "written"] = 1 if src_row["source"] == "FA": recon_arr.append([src_row.ins, mapper[src_row.portfolio], "Option", int(src_row.qty), int(des_qty)]) else: recon_arr.append([src_row.ins, mapper[src_row.portfolio], "Option", int(des_qty), int(src_row.qty)]) return src_df, des_df def stock_recon(fa_df, mssd_df, recon_arr): fa_df.loc[fa_df.ins_type == "", "ins_type"] = "Stock" mssd_df.loc[mssd_df.ins_type == "", "ins_type"] = "Stock" fa_tmp_series = fa_df.loc[(fa_df.ins_type == "Stock") & (fa_df.portfolio != "EDD Alpha") & (fa_df.portfolio != "EDD Repo") & (fa_df.portfolio != "EDD Deltaone") ].groupby(["ins", "portfolio"])["qty"].sum() mssd_tmp_series = mssd_df.loc[(mssd_df.ins_type == "Stock") & (mssd_df.portfolio != "D1 Trading") & (mssd_df.portfolio != "D1 SS")].groupby(["ins", "portfolio"])["qty"].sum() result_df = pd.DataFrame(dict(fa = fa_tmp_series, msse = mssd_tmp_series)).reset_index() result_df.fillna(0, inplace=True) for index, row in result_df.iterrows(): ins_name = row["ins"] prf = row["portfolio"] fa_qty = int(row["fa"]) mssd_qty = int(row["msse"]) # if fa_qty == mssd_qty: if True: fa_df.loc[fa_df.ins == ins_name, "written"] = 1 mssd_df.loc[mssd_df.ins == ins_name, "written"] = 1 recon_arr.append([ins_name, prf, "Stock", fa_qty, mssd_qty]) return def main(): recon_arr = [] recon_sum_arr = [] mssd_short_arr = [] fa_file = "" mssd_file = "" client_file = "" # input_dir = "C:\\temp" input_dir = "U:\\MSS_FA_Recon" # input_dir = "D:\\Projects\\HTI\\option_exercise" for file in os.listdir(input_dir): if fnmatch.fnmatch(file, "FAPos_to_MSSD_*.csv"): 
if file > fa_file: fa_file = file if fnmatch.fnmatch(file, "MSSD_Position*.csv"): if file > mssd_file: mssd_file = file print(fa_file) print(mssd_file) today_date = fa_file[-12:-4] fa_df = pd.read_csv(os.path.join(input_dir, fa_file), header=None, names=["ins","portfolio","default","way","qty"]) mssd_df = pd.read_csv(os.path.join(input_dir, mssd_file), header=None, names=["ins","portfolio","default","way","qty"]) fa_df.loc[((fa_df.way == "S") & (fa_df.portfolio != "EDD SBL")) | ((fa_df.way == "B") & (fa_df.portfolio == "EDD SBL")), "qty"] = fa_df.qty*-1 mssd_df.loc[mssd_df.way == "S", "qty"] = mssd_df.qty*-1 fa_df = fa_df.drop(["way", "default"], 1) mssd_df = mssd_df.drop(["way", "default"], 1) fa_df["written"] = 0 fa_df["source"] = "FA" fa_df["ins_type"] = "" mssd_df["written"] = 0 mssd_df["source"] = "MSSD" mssd_df["ins_type"] = "" # fa_df = fa_df.append(d1sbl_df) stock_recon(fa_df, mssd_df, recon_arr) result_df = pd.concat([fa_df, mssd_df]) result_df = result_df.loc[result_df.written == 0].sort_values(by=["ins","source","portfolio"],ascending=[True,True,True]) # result_df.to_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "fa_mssd_recon_YYYYMMDD.csv".replace("YYYYMMDD", today_date)), columns=["source","ins","ins_type","portfolio","qty"], index=False) recon_df = pd.DataFrame(recon_arr, columns=["instrument","portfolio","prod_type","fa_qty","mssd_qty"]) recon_sum_df = recon_df.groupby(["prod_type","instrument"]).sum() for index, row in recon_sum_df.iterrows(): recon_sum_arr.append([index[1], index[0], row["fa_qty"], row["mssd_qty"]]) mssd_ss_df = mssd_df.loc[(mssd_df.ins_type == "Stock") & (mssd_df.qty < 0)] for index, row in mssd_ss_df.iterrows(): mssd_short_arr.append([row["portfolio"], row["ins"], row["qty"]]) arrs_to_xlsx(os.path.join(os.path.dirname(os.path.abspath(__file__)), "fa_mssd_recon_YYYYMMDD.xlsx".replace("YYYYMMDD", today_date)), ["instrument,prod_type,fa_qty,mssd_qty", "instrument,portfolio,prod_type,fa_qty,mssd_qty", "instrument,portfolio,mssd_qty"], [recon_sum_arr, recon_arr, mssd_short_arr]) # for index, row in result_df.iterrows(): # print row.source + ',' + row.ins + ',' + row.ins_type + ',' + row.portfolio + ',' + str(row.qty) return main()
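# --- Illustrative sketch (not part of the reconciliation script) ---
# The way/qty normalisation above negates sell-side quantities through a
# boolean mask, e.g. on a toy frame:
#
#     import pandas as pd
#     df = pd.DataFrame({"way": ["B", "S"], "qty": [100, 100]})
#     df.loc[df.way == "S", "qty"] = df.qty * -1
#     df.qty.tolist()  # -> [100, -100]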
frederick623/HTI
recon/fa_mssd_recon.py
Python
apache-2.0
5,991
# -*- coding: utf-8 -*- # This file is part of pygal # # A python svg graph plotting library # Copyright © 2012-2014 Kozea # # This library is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # This library is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with pygal. If not, see <http://www.gnu.org/licenses/>. """ Horizontal stacked graph """ from pygal.graph.horizontal import HorizontalGraph from pygal.graph.stackedbar import StackedBar class HorizontalStackedBar(HorizontalGraph, StackedBar): """Horizontal Stacked Bar graph"""
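# --- Illustrative usage sketch (not part of pygal itself) ---
# The class above only mixes HorizontalGraph into StackedBar; it is used like
# any other pygal chart:
#
#     import pygal
#     chart = pygal.HorizontalStackedBar()
#     chart.add('Series A', [1, 2, 3])
#     chart.add('Series B', [3, 2, 1])
#     svg = chart.render()  # the stacked bars, rendered horizontally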
mytliulei/DCNRobotInstallPackages
windows/win32/pygal-1.7.0/pygal/graph/horizontalstackedbar.py
Python
apache-2.0
1,002
import sys, re filename = sys.argv[1] + '/src/gui/image/qjpeghandler.pri' print " patching ", filename s = open(filename).read() s = re.sub(r'win32:\s*LIBS \+= libjpeg.lib', 'win32: LIBS += jpeg.lib', s) open(filename, "w").write(s) filename = sys.argv[1] + '/src/gui/image/qpnghandler.pri' print " patching ", filename s = open(filename).read() s = re.sub(r'win32:\s*LIBS \+= libpng.lib', 'win32: LIBS += libpng15.lib', s) open(filename, "w").write(s) filename = sys.argv[1] + '/src/gui/image/qtiffhandler.pri' print " patching ", filename s = open(filename).read() s = re.sub(r'win32:\s*LIBS \+= libtiff.lib', 'win32: LIBS += libtiff_i.lib', s) open(filename, "w").write(s) filename = sys.argv[1] + '/src/3rdparty/zlib_dependency.pri' print " patching ", filename s = open(filename).read() s = re.sub(r'else:\s*LIBS \+= zdll.lib', 'else: LIBS += zlib.lib', s) open(filename, "w").write(s) filename = sys.argv[1] + '/src/tools/bootstrap/bootstrap.pri' print " patching ", filename s = open(filename).read() s = re.sub(r'else:LIBS \+= zdll.lib', 'else:LIBS += zlib.lib', s) open(filename, "w").write(s)
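# --- Illustrative sketch (not part of the patch script) ---
# Each block above rewrites one library name inside a Qt .pri file, e.g.:
#
#     re.sub(r'win32:\s*LIBS \+= libjpeg.lib', 'win32: LIBS += jpeg.lib',
#            'win32: LIBS += libjpeg.lib')
#     # -> 'win32: LIBS += jpeg.lib'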
ukoethe/ilastik-build
patches/patch_qt.py
Python
gpl-2.0
1,169
'''
Created on Mar 18, 2012

@author: mchrzanowski
'''

from math import floor, sqrt
from ProjectEulerLibrary import isNumberPalindromic
from time import time

def main():

    rollingTotal = 1    # 1 ** 2 is special as it's palindromic, but it's only 1 number.
                        # so start the total at 1 to avoid special casing in the algo.
    palindromes = set([])

    LIMIT = 10 ** 8

    END = int(floor((sqrt(4 - 4 * 2 * (1 - LIMIT)) + 2) / 4))   # solve x ** 2 + (x - 1) ** 2 = LIMIT
    # this establishes the very last squared number
    # that, when added to the 2nd-to-last squared number,
    # produces a number < LIMIT.

    for i in xrange(2, END + 1):

        rollingTotal += i ** 2

        if rollingTotal < LIMIT and isNumberPalindromic(rollingTotal):
            palindromes.add(rollingTotal)

        copyOfTotal = rollingTotal

        for j in xrange(1, i - 1):      # the sum is of at least two values.
            copyOfTotal -= j ** 2
            if copyOfTotal < LIMIT and isNumberPalindromic(copyOfTotal):
                palindromes.add(copyOfTotal)

    print "Sum of all palindromes:", sum(palindromes)

if __name__ == '__main__':
    start = time()
    main()
    end = time()
    print "Runtime:", end - start, "seconds."
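# --- Illustrative sketch (assumed behavior of the imported helper) ---
# ProjectEulerLibrary is not shown in this file; isNumberPalindromic
# presumably behaves like a simple string-reversal check:
#
#     def isNumberPalindromic(n):
#         s = str(n)
#         return s == s[::-1]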
mchrzanowski/ProjectEuler
src/python/Problem125.py
Python
mit
1,532
import unittest from urh.awre.CommonRange import CommonRange class TestCommonRange(unittest.TestCase): def test_ensure_not_overlaps(self): test_range = CommonRange(start=4, length=8, value="12345678") self.assertEqual(test_range.end, 11) # no overlapping self.assertEqual(test_range, test_range.ensure_not_overlaps(0, 3)[0]) self.assertEqual(test_range, test_range.ensure_not_overlaps(20, 24)[0]) # overlapping on left result = test_range.ensure_not_overlaps(2, 6)[0] self.assertEqual(result.start, 6) self.assertEqual(result.end, 11) # overlapping on right result = test_range.ensure_not_overlaps(6, 14)[0] self.assertEqual(result.start, 4) self.assertEqual(result.end, 5) # full overlapping self.assertEqual(len(test_range.ensure_not_overlaps(3, 14)), 0) # overlapping in the middle result = test_range.ensure_not_overlaps(6, 9) self.assertEqual(len(result), 2) left, right = result[0], result[1] self.assertEqual(left.start, 4) self.assertEqual(left.end, 5) self.assertEqual(right.start, 10) self.assertEqual(right.end, 11)
jopohl/urh
tests/awre/test_common_range.py
Python
gpl-3.0
1,223
# This file is part of eventmq.
#
# eventmq is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option)
# any later version.
#
# eventmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eventmq. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`router` -- Router
=======================
Routes messages to workers (that are in named queues).
"""
from copy import copy
import json  # deserialize queues in on_inform. should be refactored
import logging
import signal

from eventmq.log import setup_logger, setup_wal_logger
from . import __version__
from . import conf, constants, exceptions, poller, receiver
from .constants import (
    CLIENT_TYPE, DISCONNECT, KBYE, PROTOCOL_VERSION, ROUTER_SHOW_SCHEDULERS,
    ROUTER_SHOW_WORKERS, STATUS
)
from .utils import tuplify
from .utils.classes import EMQdeque, HeartbeatMixin
from .utils.devices import generate_device_name
from .utils.messages import (
    fwd_emqp_router_message as fwdmsg,
    parse_router_message,
    send_emqp_router_message as sendmsg,
)
from .utils.settings import import_settings
from .utils.timeutils import monotonic, timestamp

logger = logging.getLogger(__name__)
wal_logger = logging.getLogger('eventmq-wal')


class Router(HeartbeatMixin):
    """
    A simple router of messages
    """

    def __init__(self, *args, **kwargs):
        super(Router, self).__init__(*args, **kwargs)  # Creates _meta

        setup_logger("eventmq")

        self.name = generate_device_name()
        logger.info('EventMQ Version {}'.format(__version__))
        logger.info('Initializing Router {}...'.format(self.name))

        self.poller = poller.Poller()

        self.incoming = receiver.Receiver()
        self.outgoing = receiver.Receiver()
        self.administrative_socket = receiver.Receiver()

        self.poller.register(self.incoming, poller.POLLIN)
        self.poller.register(self.outgoing, poller.POLLIN)
        self.poller.register(self.administrative_socket, poller.POLLIN)

        self.status = STATUS.ready

        #: Tracks the last time the worker queues were cleaned of dead workers
        self._meta['last_worker_cleanup'] = 0

        #: JobManager address by queue name. The lists here are Least Recently
        #: Used queues where a worker is popped off when given a job, and
        #: appended when one finishes. There is one entry per available
        #: worker slot, so you may see duplicate addresses.
        #:
        #: Example:
        #: {'default': ['w1', 'w2', 'w1', 'w4']}
        self.queues = {}

        #: List of queues by workers. Meta data about the worker such as the
        #: queue membership and timestamp of last message received are stored
        #: here.
        #:
        #: **Keys**
        #: * ``queues``: list() of queue names and priorities the worker
        #:   belongs to. e.g. (10, 'default')
        #: * ``hb``: monotonic timestamp of the last received message from
        #:   worker
        #: * ``available_slots``: int count of jobs this manager can still
        #:   process.
        self.workers = {}

        #: Message buffer. When messages can't be sent because there are no
        #: workers available to take the job
        self.waiting_messages = {}

        # Key: Queue.name, Value: # of messages sent to workers on that queue
        # Includes REQUESTS in flight but not REQUESTS queued
        self.processed_message_counts = {}

        # Same as above but Key: Worker.uuid
        self.processed_message_counts_by_worker = {}

        #: Tracks the last time the scheduler queue was cleaned out of dead
        #: schedulers
        self._meta['last_scheduler_cleanup'] = 0

        #: Queue for schedulers to use:
        self.scheduler_queue = []

        #: Scheduler clients. Clients are able to send SCHEDULE commands that
        #: need to be routed to a scheduler, which will keep track of time and
        #: run the job.
        #: Contains dictionaries:
        #:   self.schedulers[<scheduler_zmq_id>] = {
        #:       'hb': <last_recv_heartbeat>,
        #:   }
        self.schedulers = {}

        #: Latency tracking dictionary
        #: Key: msgid of msg each REQUEST received and forwarded to a worker
        #: Value: (timestamp, queue_name)
        self.job_latencies = {}

        #: Executed function tracking dictionary
        #: Key: msgid of msg each REQUEST received and forwarded to a worker
        #: Value: (function_name, queue_name)

        #: Set to True when the router should die.
        self.received_disconnect = False

        # Tests skip setting the signals.
        if not kwargs.pop('skip_signal', False):
            signal.signal(signal.SIGHUP, self.sighup_handler)
            signal.signal(signal.SIGUSR1, self.handle_pdb)

    def handle_pdb(self, sig, frame):
        import pdb
        pdb.Pdb().set_trace(frame)

    def start(self,
              frontend_addr=conf.FRONTEND_ADDR,
              backend_addr=conf.BACKEND_ADDR,
              administrative_addr=conf.ADMINISTRATIVE_ADDR):
        """
        Begin listening for connections on the provided connection strings

        Args:
            frontend_addr (str): connection string to listen for requests
            backend_addr (str): connection string to listen for workers
            administrative_addr (str): connection string to listen for emq-cli
                commands on.
        """
        self.status = STATUS.starting

        self.incoming.listen(frontend_addr)
        self.outgoing.listen(backend_addr)
        self.administrative_socket.listen(administrative_addr)

        self.status = STATUS.listening
        logger.info('Listening for requests on {}'.format(frontend_addr))
        logger.info('Listening for workers on {}'.format(backend_addr))
        logger.info('Listening for administrative commands on {}'.format(
            administrative_addr))

        self._start_event_loop()

    def _start_event_loop(self):
        """
        Starts the actual eventloop.
        Usually called by :meth:`Router.start`
        """
        while True:
            if self.received_disconnect:
                break

            now = monotonic()
            events = self.poller.poll()

            if events.get(self.administrative_socket) == poller.POLLIN:
                msg = self.administrative_socket.recv_multipart()

                if conf.SUPER_DEBUG:
                    logger.debug('ADMIN: {}'.format(msg))

                # ##############
                # Admin Commands
                # ##############
                if len(msg) > 4:
                    if msg[3] == DISCONNECT:
                        logger.info('Received DISCONNECT from administrator')
                        self.send_ack(
                            self.administrative_socket, msg[0], msg[4])
                        self.on_disconnect(msg[4], msg)
                    elif msg[3] == 'STATUS':
                        sendmsg(self.administrative_socket, msg[0], 'REPLY',
                                (self.get_status(),))
                    elif msg[3] == ROUTER_SHOW_WORKERS:
                        sendmsg(self.administrative_socket, msg[0], 'REPLY',
                                (self.get_workers_status(),))
                    elif msg[3] == ROUTER_SHOW_SCHEDULERS:
                        sendmsg(self.administrative_socket, msg[0], 'REPLY',
                                (self.get_schedulers_status(),))

            if events.get(self.incoming) == poller.POLLIN:
                msg = self.incoming.recv_multipart()
                self.handle_wal_log(msg)
                self.process_client_message(msg)

            if events.get(self.outgoing) == poller.POLLIN:
                msg = self.outgoing.recv_multipart()
                self.process_worker_message(msg)

            # TODO: Optimization: the calls to functions could be done in
            # another thread so they don't block the loop. synchronize
            if not conf.DISABLE_HEARTBEATS:
                # Send a HEARTBEAT if necessary
                if now - self._meta['last_sent_heartbeat'] >= \
                        conf.HEARTBEAT_INTERVAL:
                    self.send_workers_heartbeats()

                if now - self._meta['last_worker_cleanup'] >= 10:
                    # Loop through the next worker queue and clean up any dead
                    # ones so the next one is alive
                    self.clean_up_dead_workers()

                if now - self._meta['last_sent_scheduler_heartbeat'] >= \
                        conf.HEARTBEAT_INTERVAL:
                    self.send_schedulers_heartbeats()

                if now - self._meta['last_scheduler_cleanup'] >= 10:
                    self.clean_up_dead_schedulers()

    def reset_heartbeat_counters(self):
        """
        Reset all the counters for heartbeats back to 0
        """
        super(Router, self).reset_heartbeat_counters()

        # track the last time the router sent a heartbeat to the schedulers
        self._meta['last_sent_scheduler_heartbeat'] = 0

    def send_ack(self, socket, recipient, msgid):
        """
        Sends an ACK response

        Args:
            socket (socket): The socket to use for this ack
            recipient (str): The recipient id for the ack
            msgid: The unique id that we are acknowledging

        Returns:
            msgid: The ID of the ACK message
        """
        logger.info('Sending ACK to %s' % recipient)
        logger.info('Queue information %s' % self.queues)
        logger.info('Worker information %s' % self.workers)
        msgid = sendmsg(socket, recipient, 'ACK', msgid)

        return msgid

    def send_kbye(self, socket, recipient):
        logger.info('Sending {} to {}'.format(KBYE, recipient))
        msg_id = sendmsg(socket, recipient, KBYE)

        return msg_id

    def send_heartbeat(self, socket, recipient):
        """
        Custom send heartbeat method to take into account the recipient that
        is needed when building messages

        Args:
            socket (socket): the socket to send the heartbeat with
            recipient (str): Worker ID

        Returns:
            msgid: The ID of the HEARTBEAT message
        """
        msgid = sendmsg(socket, recipient, 'HEARTBEAT', str(timestamp()))

        return msgid

    def send_workers_heartbeats(self):
        """
        Send HEARTBEATs to all registered workers.
""" self._meta['last_sent_heartbeat'] = monotonic() for worker_id in self.workers: self.send_heartbeat(self.outgoing, worker_id) def send_schedulers_heartbeats(self): """ Send HEARTBEATs to all registered schedulers """ self._meta['last_sent_scheduler_heartbeat'] = monotonic() for scheduler_id in self.schedulers: self.send_heartbeat(self.incoming, scheduler_id) def on_heartbeat(self, sender, msgid, msg): """ a placeholder for a no-op command. The actual 'logic' for HEARTBEAT is in :meth:`self.process_worker_message` because any message from a worker counts as a HEARTBEAT """ def on_inform(self, sender, msgid, msg): """ Handles an INFORM message. This happens when new worker coming online and announces itself. """ queue_names = msg[0] client_type = msg[1] if not queue_names: # Ideally, this matches some workers queues = conf.QUEUES else: try: queues = list(map(tuplify, json.loads(queue_names))) except ValueError: # this was invalid json logger.error( 'Received invalid queue names in INFORM. names:{} from:{} ' 'type:{}'.format( queue_names, sender, client_type)) return logger.info('Received INFORM request from {} (type: {})'.format( sender, client_type)) if client_type == CLIENT_TYPE.worker: self.add_worker(sender, queues) self.send_ack(self.outgoing, sender, msgid) elif client_type == CLIENT_TYPE.scheduler: self.add_scheduler(sender) self.send_ack(self.incoming, sender, msgid) def on_reply(self, sender, msgid, msg): """ Handles an REPLY message. Replies are sent by the worker for latanecy measurements """ orig_msgid = msg[1] if conf.SUPER_DEBUG: logger.debug('Received REPLY from {} (msgid: {}, ACK msgid: {})'. format(sender, msgid, orig_msgid)) if orig_msgid in self.job_latencies: elapsed_secs = (monotonic() - self.job_latencies[orig_msgid][0]) * 1000.0 logger.info("Completed {queue} job with msgid: {msgid} in " "{time:.2f}ms".format( queue=self.job_latencies[orig_msgid][1], msgid=orig_msgid, time=elapsed_secs)) del self.job_latencies[orig_msgid] def on_disconnect(self, msgid, msg): """ Prepare router for disconnecting by removing schedulers, clearing worker queue (if needed), and removing workers. """ # Remove schedulers and send them a kbye logger.info("Router preparing to disconnect...") for scheduler in self.schedulers: self.send_kbye(self.incoming, scheduler) self.schedulers.clear() self.incoming.unbind(conf.FRONTEND_ADDR) if len(self.waiting_messages) > 0: logger.info("Router processing messages in queue.") for queue in self.waiting_messages.keys(): while not self.waiting_messages[queue].is_empty(): msg = self.waiting_messages[queue].popleft() self.process_worker_message(msg) for worker in self.workers.keys(): self.send_kbye(self.outgoing, worker) self.workers.clear() self.outgoing.unbind(conf.BACKEND_ADDR) # Loops event loops should check for this and break out self.received_disconnect = True def on_ready(self, sender, msgid, msg): """ A worker that we should already know about is ready for another job Args: sender (str): The id of the sender msgid (str): Unique identifier for this message msg: The actual message that was sent """ queue_names = self.workers[sender]['queues'] # if there are waiting messages for the queues this worker is a member # of, then reply back with the oldest waiting message, otherwise just # add the worker to the list of available workers. # Note: This is only taking into account the queue the worker is # returning from, and not other queue_names that might have had # messages waiting even longer. 
        # Assumes the highest priority queue comes first
        for queue in queue_names:
            queue_name = queue[1]
            if queue_name in self.waiting_messages.keys():
                logger.debug('Found waiting message in the %s waiting_messages'
                             ' queue' % queue_name)
                msg = self.waiting_messages[queue_name].peekleft()
                try:
                    fwdmsg(self.outgoing, sender, msg)
                    self.waiting_messages[queue_name].popleft()
                except exceptions.PeerGoneAwayError:
                    # Cleanup a worker that cannot be contacted, leaving the
                    # message in queue
                    self.workers[sender]['hb'] = 0
                    self.clean_up_dead_workers()

                # It is easier to check if a key exists rather than the len of
                # a key's value if it exists elsewhere, so if that was the last
                # message remove the queue
                if len(self.waiting_messages[queue_name]) == 0:
                    logger.debug('No more messages in waiting_messages queue '
                                 '%s. Removing from list...' % queue_name)
                    del self.waiting_messages[queue_name]

                # the message has been forwarded so short circuit that way the
                # manager isn't reslotted
                return

        self.requeue_worker(sender)

    def on_request(self, sender, msgid, msg, depth=1):
        """
        Process a client REQUEST frame

        Args:
            sender: The id of the sender
            msgid: Unique identifier for this message
            msg: The actual message that was sent
            depth (int): The recursion depth in retrying when
                PeerGoneAwayError is raised.
        """
        import psutil

        try:
            queue_name = msg[0]
        except IndexError:
            logger.exception("Queue name undefined. Sender {}; MsgID: {}; "
                             "Msg: {}".format(sender, msgid, msg))
            return

        # If we have no workers for the queue assign it to the default queue
        if queue_name not in self.queues:
            logger.warning("Received REQUEST with a queue I don't recognize: "
                           "%s. Sending to default queue." % (queue_name,))
            queue_name = conf.DEFAULT_QUEUE_NAME

        self.job_latencies[msgid] = (monotonic(), queue_name)

        try:
            worker_addr = self.get_available_worker(queue_name=queue_name)
        except (exceptions.NoAvailableWorkerSlotsError,
                exceptions.UnknownQueueError):
            logger.warning('No available workers for queue "%s". '
                           'Buffering message to send later.' % queue_name)
            if queue_name not in self.waiting_messages:
                # Since the default queue will pick up messages with invalid
                # queues, it will need to be larger than other queues
                if queue_name == conf.DEFAULT_QUEUE_NAME:
                    total_mem = psutil.virtual_memory().total
                    # Set queue limit to be 75% of total memory with ~100 byte
                    # messages
                    limit = int((total_mem / 100) * 0.75)
                    self.waiting_messages[queue_name] = EMQdeque(
                        full=limit, on_full=router_on_full)
                else:
                    self.waiting_messages[queue_name] = \
                        EMQdeque(full=conf.HWM, on_full=router_on_full)
            if self.waiting_messages[queue_name].append(
                    ['', constants.PROTOCOL_VERSION, 'REQUEST', msgid, ] +
                    msg):
                logger.debug('%d waiting messages in queue "%s"' %
                             (len(self.waiting_messages[queue_name]),
                              queue_name))
            else:
                logger.warning('High Watermark {} met for {}, notifying'.
                               format(conf.HWM, queue_name))
            return

        try:
            # Check if msg type is for executing function
            self.job_latencies[msgid] = (monotonic(), queue_name)
            # Rebuild the message to be sent to the worker. fwdmsg will
            # properly address the message.
            if queue_name not in self.processed_message_counts:
                self.processed_message_counts[queue_name] = 1
            else:
                self.processed_message_counts[queue_name] += 1

            if worker_addr not in self.processed_message_counts_by_worker:
                self.processed_message_counts_by_worker[worker_addr] = 1
            else:
                self.processed_message_counts_by_worker[worker_addr] += 1

            fwdmsg(self.outgoing, worker_addr,
                   ['', constants.PROTOCOL_VERSION, 'REQUEST', msgid, ] + msg)
            self.workers[worker_addr]['available_slots'] -= 1

            # Acknowledgment of the request being submitted to the client
            sendmsg(self.incoming, sender, 'REPLY', (msgid,))
        except exceptions.PeerGoneAwayError:
            logger.debug(
                "Worker {} has unexpectedly gone away. Removing this worker "
                "before trying another worker".format(worker_addr))

            # Remove this worker to prevent infinite loop
            self.workers[worker_addr]['hb'] = 0
            self.clean_up_dead_workers()

            # Recursively try again. TODO: are there better options?
            self.process_client_message(
                [sender, '', PROTOCOL_VERSION, 'REQUEST', msgid] + msg,
                depth=depth+1)

    def clean_up_dead_workers(self):
        """
        Loops through the worker queues and removes any workers who haven't
        responded in HEARTBEAT_TIMEOUT
        """
        now = monotonic()
        self._meta['last_worker_cleanup'] = now

        # Because workers and queues are removed from inside a loop, a copy is
        # needed to prevent the dict we are iterating over from changing.
        workers = copy(self.workers)
        queues = copy(self.queues)

        for worker_id in workers:
            last_hb_seconds = now - self.workers[worker_id]['hb']
            if last_hb_seconds >= conf.HEARTBEAT_TIMEOUT:
                logger.info("No messages from worker {} in {}. Removing from "
                            "the queue. TIMEOUT: {}".format(
                                worker_id, last_hb_seconds,
                                conf.HEARTBEAT_TIMEOUT))

                # Remove the worker from the actual queues
                for queue in self.workers[worker_id]['queues']:
                    try:
                        self.queues[queue[1]].remove((queue[0], worker_id))
                    except KeyError:
                        # This queue disappeared for some reason
                        continue

                del self.workers[worker_id]

        # Remove the empty queue
        for queue_name in queues:
            if len(self.queues[queue_name]) == 0:
                del self.queues[queue_name]

    def add_worker(self, worker_id, queues=None):
        """
        Adds a worker to worker queues

        Args:
            worker_id (str): unique id of the worker to add
            queues: queue or queues this worker should be a member of
        """
        if queues and not isinstance(queues, (list, tuple)):
            raise TypeError('type of `queue` parameter not one of (list, '
                            'tuple). got {}'.format(type(queues)))

        if worker_id in self.workers:
            logger.warning('Worker id already found in `workers`. Overwriting '
                           'data')

        # Add the worker to our worker dict
        self.workers[worker_id] = {}
        self.workers[worker_id]['queues'] = tuple(queues)
        self.workers[worker_id]['hb'] = monotonic()
        self.workers[worker_id]['available_slots'] = 0

        # Define priorities. First element is the highest priority
        for q in queues:
            if q[1] not in self.queues:
                self.queues[q[1]] = list()
            self.queues[q[1]].append((q[0], worker_id))
            self.queues[q[1]] = self.prioritize_queue_list(self.queues[q[1]])

        logger.debug('Added worker {} to the queues {}'.format(
            worker_id, queues))

    def get_available_worker(self, queue_name=conf.DEFAULT_QUEUE_NAME):
        """
        Gets the job manager with the next available worker for the provided
        queue.

        Args:
            queue_name (str): Name of the queue

        Raises:
            NoAvailableWorkerSlotsError: Raised when there are no available
                slots in any of the job managers.
            UnknownQueueError: Raised when ``queue_name`` is not found in
                self.queues

        Returns:
            (str): uuid of the job manager with an available worker slot
        """
        if queue_name not in self.queues:
            logger.warning("unknown queue name: {} - Discarding message.".
                           format(queue_name))
            raise exceptions.UnknownQueueError('Unknown queue name {}'.format(
                queue_name
            ))

        popped_workers = []
        worker_addr = None

        while not worker_addr and len(self.queues[queue_name]) > 0:
            try:
                # pop the next job manager id & check if it has a worker slot
                # if it doesn't add it to popped_workers to be added back to
                # self.queues after the loop
                worker = self.queues[queue_name].pop(0)
                # LRU when sorted later by appending
                popped_workers.append(worker)

                if self.workers[worker[1]]['available_slots'] > 0:
                    worker_addr = worker[1]
                    break
            except KeyError:
                # This should only happen if worker[1] is missing from
                # self.workers because:
                #  - available slots are initialized to 0 in self.add_worker()
                #  - we already checked that self.queues[queue_name] exists
                logger.error("Worker {} not found for queue {}".format(
                    worker, queue_name))
                logger.debug("Tried worker {} in self.workers for queue {} "
                             "but it wasn't found in self.workers".format(
                                 worker, queue_name
                             ))
                continue
            except IndexError:
                # worker[1] should exist if it follows the (priority, id) fmt
                logger.error("Invalid priority/worker format in self.queues "
                             "{}".format(worker))
                continue
        else:
            # No more queues to try
            pass

        if popped_workers:
            self.queues[queue_name].extend(popped_workers)
            self.queues[queue_name] = self.prioritize_queue_list(
                self.queues[queue_name])

        if worker_addr:
            return worker_addr
        else:
            raise exceptions.NoAvailableWorkerSlotsError(
                "There are no available workers for queue {}. Try again "
                "later".format(queue_name))

    def clean_up_dead_schedulers(self):
        """
        Loops through the list of schedulers and removes any schedulers from
        whom the router hasn't received a heartbeat in HEARTBEAT_TIMEOUT
        """
        now = monotonic()
        self._meta['last_scheduler_cleanup'] = now
        schedulers = copy(self.scheduler_queue)

        for scheduler_id in schedulers:
            last_hb_seconds = now - self.schedulers[scheduler_id]['hb']
            if last_hb_seconds >= conf.HEARTBEAT_TIMEOUT:
                logger.critical("No HEARTBEAT from scheduler {} in {} Removing"
                                " from the queue".format(scheduler_id,
                                                         last_hb_seconds))
                del self.schedulers[scheduler_id]
                self.scheduler_queue.remove(scheduler_id)

    def add_scheduler(self, scheduler_id):
        """
        Adds a scheduler to the queue to receive SCHEDULE commands

        Args:
            scheduler_id (str): unique id of the scheduler to add
        """
        self.scheduler_queue.append(scheduler_id)
        self.schedulers[scheduler_id] = {}
        self.schedulers[scheduler_id]['hb'] = monotonic()

        logger.debug('Adding {} to self.schedulers'.format(scheduler_id))

    def requeue_worker(self, worker_id):
        """
        Add a worker back to the pools of which it is a member.
        """
        self.workers[worker_id]['available_slots'] += 1

    def handle_wal_log(self, original_msg):
        try:
            message = parse_router_message(original_msg)
        except exceptions.InvalidMessageError:
            logger.exception('Invalid message from clients: {}'.format(
                str(original_msg)))
            return

        command = message[1]

        if conf.WAL_ENABLED and \
                command in ("REQUEST", "SCHEDULE", "UNSCHEDULE"):
            wal_logger.info(original_msg)

    def process_client_message(self, original_msg, depth=0):
        """
        Args:
            msg: The untouched message from zmq
            depth: The number of times this method has been recursively
                called. This is used to short circuit message retry attempts.
        Raises:
            InvalidMessageError: Unable to parse the message
        """
        # Limit recursive depth (timeout on PeerGoneAwayError)
        if (depth > 100):
            logger.error('Recursion Error: process_client_message called too '
                         'many times with message: {}'.format(original_msg))
            return

        try:
            message = parse_router_message(original_msg)
        except exceptions.InvalidMessageError:
            logger.exception('Invalid message from clients: {}'.format(
                str(original_msg)))
            return

        sender = message[0]
        command = message[1]
        msgid = message[2]
        msg = message[3]

        # Count this message as a heartbeat if it came from a scheduler that
        # the router is aware of.
        if sender in self.schedulers and command == KBYE:
            self._remove_scheduler(sender)
            return

        if sender in self.schedulers and sender in self.scheduler_queue:
            self.schedulers[sender]['hb'] = monotonic()

        # If it is a heartbeat then there is nothing left to do
        if command == "HEARTBEAT":
            return

        # REQUEST is the most common message so it goes at the top
        if command == "REQUEST":
            self.on_request(sender, msgid, msg, depth=depth)

        elif command == "INFORM":
            # This is a worker or scheduler trying to join
            self.on_inform(sender, msgid, msg)

        elif command == "SCHEDULE":
            # Forward the schedule message to the schedulers
            try:
                scheduler_addr = self.scheduler_queue.pop()
            except IndexError:
                logger.error("Received a SCHEDULE command with no schedulers. "
                             "Discarding.")
                return

            self.scheduler_queue.append(scheduler_addr)
            self.schedulers[scheduler_addr] = {
                'hb': monotonic(),
            }
            try:
                # Strips off the client id before forwarding because the
                # scheduler isn't expecting it.
                fwdmsg(self.incoming, scheduler_addr, original_msg[1:])
            except exceptions.PeerGoneAwayError:
                logger.debug("Scheduler {} has unexpectedly gone away. Trying "
                             "another scheduler.".format(scheduler_addr))
                self.process_client_message(original_msg[1:], depth+1)

        elif command == "UNSCHEDULE":
            # Forward the unschedule message to all schedulers
            for scheduler_addr, scheduler in self.schedulers.items():
                self.schedulers[scheduler_addr] = {
                    'hb': monotonic(),
                }

                try:
                    # Strips off the client id before forwarding because the
                    # scheduler isn't expecting it.
                    fwdmsg(self.incoming, scheduler_addr, original_msg[1:])
                except exceptions.PeerGoneAwayError:
                    logger.debug("Scheduler {} has unexpectedly gone away."
                                 " Schedule may still exist.".
                                 format(scheduler_addr))
                    self.process_client_message(original_msg[1:], depth+1)

        elif command == DISCONNECT:
            self.on_disconnect(msgid, msg)

    def process_worker_message(self, msg):
        """
        This method is called when a message comes in from the worker socket.
        It then calls `on_COMMAND.lower()`. If `on_command` isn't found, then
        a warning is created.

        Args:
            msg: The untouched message from zmq
        """
        try:
            message = parse_router_message(msg)
        except exceptions.InvalidMessageError:
            logger.exception('Invalid message from workers: %s' % str(msg))
            return

        sender = message[0]
        command = message[1]
        msgid = message[2]
        message = message[3]

        if sender in self.workers:
            if command.upper() == KBYE:
                self._remove_worker(sender)
            # Treat any other message like a HEARTBEAT.
            else:
                self.workers[sender]['hb'] = monotonic()
        elif command.lower() != 'inform':
            logger.critical('Unknown worker %s attempting to run %s command: '
                            '%s' % (sender, command, str(msg)))
            return

        if hasattr(self, "on_%s" % command.lower()):
            func = getattr(self, "on_%s" % command.lower())
            func(sender, msgid, message)

    def _remove_worker(self, worker_id):
        """
        Remove worker with given id from any queues it belongs to.
        Args:
            worker_id: (str) ID of worker to remove
        """
        worker = self.workers.pop(worker_id)

        for queue in worker['queues']:
            name = queue[1]
            workers = self.queues[name]
            revised_list = filter(lambda x: x[1] != worker_id, workers)
            self.queues[name] = revised_list

            logger.debug('Removed worker - {} from {}'.format(worker_id,
                                                              name))

    def _remove_scheduler(self, scheduler_id):
        """
        Remove scheduler with given id from registered schedulers.

        Args:
            scheduler_id: (str) ID of scheduler to remove
        """
        self.schedulers.pop(scheduler_id)

        schedulers_to_remove = self.scheduler_queue
        self.scheduler_queue = filter(lambda x: x != scheduler_id,
                                      schedulers_to_remove)

        logger.debug('Removed scheduler - {} from known schedulers'.format(
            scheduler_id))

    @classmethod
    def prioritize_queue_list(cls, unprioritized_iterable):
        """
        Prioritize a given iterable in the format: ((PRIORITY, OBJ),..)

        Args:
            unprioritized_iterable (iter): Any list, tuple, etc where the
                0-index key is an integer to use as priority. Largest numbers
                come first.

        Raises:
            IndexError - There was no 0-index element.

        Returns:
            descending order list. E.g. ((20, 'a'), (14, 'b'), (12, 'c'))
        """
        return sorted(unprioritized_iterable, key=lambda x: x[0], reverse=True)

    def get_status(self):
        """
        Return (str) Serialized information about the current state of the
        router.
        """
        return json.dumps({
            'job_latencies_count': len(self.job_latencies),
            'processed_messages': self.processed_message_counts,
            'processed_messages_by_worker':
                self.processed_message_counts_by_worker,
            'waiting_message_counts': [
                '{}: {}'.format(q, len(self.waiting_messages[q]))
                for q in self.waiting_messages]
        })

    def get_workers_status(self):
        return json.dumps({
            'connected_workers': self.workers,
            'connected_queues': self.queues
        })

    def get_schedulers_status(self):
        return json.dumps({
            'connected_schedulers': self.schedulers
        })

    def sighup_handler(self, signum, frame):
        """
        Reloads the configuration and rebinds the ports. Executed when the
        process receives a SIGHUP from the system.
        """
        logger.info('Caught signal %s' % signum)
        self.incoming.unbind(conf.FRONTEND_ADDR)
        self.outgoing.unbind(conf.BACKEND_ADDR)
        import_settings()
        self.start(frontend_addr=conf.FRONTEND_ADDR,
                   backend_addr=conf.BACKEND_ADDR,
                   administrative_addr=conf.ADMINISTRATIVE_ADDR)

    def router_main(self):
        """
        Kick off router with logging and settings import
        """
        import_settings()
        setup_wal_logger('eventmq-wal', conf.WAL)
        self.start(frontend_addr=conf.FRONTEND_ADDR,
                   backend_addr=conf.BACKEND_ADDR,
                   administrative_addr=conf.ADMINISTRATIVE_ADDR)


def router_on_full():
    logger.critical('High watermark hit in router')


# Entry point for pip console scripts
def router_main():
    Router()
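# --- Illustrative sketch (not part of eventmq) ---
# prioritize_queue_list() sorts (priority, worker_id) pairs highest-first:
#
#     Router.prioritize_queue_list([(10, 'w1'), (20, 'w2'), (15, 'w3')])
#     # -> [(20, 'w2'), (15, 'w3'), (10, 'w1')]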
com4/eventmq
eventmq/router.py
Python
lgpl-2.1
37,314
import enum import sys import unittest from vendor.enum import Enum, IntEnum, unique, EnumMeta from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL pyver = float('%s.%s' % sys.version_info[:2]) try: any except NameError: def any(iterable): for element in iterable: if element: return True return False try: unicode except NameError: unicode = str try: from collections import OrderedDict except ImportError: OrderedDict = None # for pickle tests try: class Stooges(Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception: Stooges = sys.exc_info()[1] try: class IntStooges(int, Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception: IntStooges = sys.exc_info()[1] try: class FloatStooges(float, Enum): LARRY = 1.39 CURLY = 2.72 MOE = 3.142596 except Exception: FloatStooges = sys.exc_info()[1] # for pickle test and subclass tests try: class StrEnum(str, Enum): 'accepts only string values' class Name(StrEnum): BDFL = 'Guido van Rossum' FLUFL = 'Barry Warsaw' except Exception: Name = sys.exc_info()[1] try: Question = Enum('Question', 'who what when where why', module=__name__) except Exception: Question = sys.exc_info()[1] try: Answer = Enum('Answer', 'him this then there because') except Exception: Answer = sys.exc_info()[1] try: Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition') except Exception: Theory = sys.exc_info()[1] # for doctests try: class Fruit(Enum): tomato = 1 banana = 2 cherry = 3 except Exception: pass def test_pickle_dump_load(assertion, source, target=None, protocol=(0, HIGHEST_PROTOCOL)): start, stop = protocol failures = [] for protocol in range(start, stop+1): try: if target is None: assertion(loads(dumps(source, protocol=protocol)) is source) else: assertion(loads(dumps(source, protocol=protocol)), target) except Exception: exc, tb = sys.exc_info()[1:] failures.append('%2d: %s' %(protocol, exc)) if failures: raise ValueError('Failed with protocols: %s' % ', '.join(failures)) def test_pickle_exception(assertion, exception, obj, protocol=(0, HIGHEST_PROTOCOL)): start, stop = protocol failures = [] for protocol in range(start, stop+1): try: assertion(exception, dumps, obj, protocol=protocol) except Exception: exc = sys.exc_info()[1] failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc)) if failures: raise ValueError('Failed with protocols: %s' % ', '.join(failures)) class TestHelpers(unittest.TestCase): # _is_descriptor, _is_sunder, _is_dunder def test_is_descriptor(self): class foo: pass for attr in ('__get__','__set__','__delete__'): obj = foo() self.assertFalse(enum._is_descriptor(obj)) setattr(obj, attr, 1) self.assertTrue(enum._is_descriptor(obj)) def test_is_sunder(self): for s in ('_a_', '_aa_'): self.assertTrue(enum._is_sunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_sunder(s)) def test_is_dunder(self): for s in ('__a__', '__aa__'): self.assertTrue(enum._is_dunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_dunder(s)) class TestEnum(unittest.TestCase): def setUp(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 self.Season = Season class Konstants(float, Enum): E = 2.7182818 PI = 3.1415926 TAU = 2 * PI self.Konstants = Konstants class Grades(IntEnum): A = 5 B = 4 C = 3 D = 2 F = 0 self.Grades = Grades class Directional(str, Enum): EAST = 'east' WEST = 'west' NORTH = 'north' SOUTH = 'south' self.Directional = Directional from 
datetime import date class Holiday(date, Enum): NEW_YEAR = 2013, 1, 1 IDES_OF_MARCH = 2013, 3, 15 self.Holiday = Holiday if pyver >= 2.6: # cannot specify custom `dir` on previous versions def test_dir_on_class(self): Season = self.Season self.assertEqual( set(dir(Season)), set(['__class__', '__doc__', '__members__', '__module__', 'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']), ) def test_dir_on_item(self): Season = self.Season self.assertEqual( set(dir(Season.WINTER)), set(['__class__', '__doc__', '__module__', 'name', 'value']), ) def test_dir_on_sub_with_behavior_on_super(self): # see issue22506 class SuperEnum(Enum): def invisible(self): return "did you see me?" class SubEnum(SuperEnum): sample = 5 self.assertEqual( set(dir(SubEnum.sample)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']), ) if pyver >= 2.7: # OrderedDict first available here def test_members_is_ordereddict_if_ordered(self): class Ordered(Enum): __order__ = 'first second third' first = 'bippity' second = 'boppity' third = 'boo' self.assertTrue(type(Ordered.__members__) is OrderedDict) def test_members_is_ordereddict_if_not_ordered(self): class Unordered(Enum): this = 'that' these = 'those' self.assertTrue(type(Unordered.__members__) is OrderedDict) if pyver >= 3.0: # all objects are ordered in Python 2.x def test_members_is_always_ordered(self): class AlwaysOrdered(Enum): first = 1 second = 2 third = 3 self.assertTrue(type(AlwaysOrdered.__members__) is OrderedDict) def test_comparisons(self): def bad_compare(): Season.SPRING > 4 Season = self.Season self.assertNotEqual(Season.SPRING, 1) self.assertRaises(TypeError, bad_compare) class Part(Enum): SPRING = 1 CLIP = 2 BARREL = 3 self.assertNotEqual(Season.SPRING, Part.SPRING) def bad_compare(): Season.SPRING < Part.CLIP self.assertRaises(TypeError, bad_compare) def test_enum_in_enum_out(self): Season = self.Season self.assertTrue(Season(Season.WINTER) is Season.WINTER) def test_enum_value(self): Season = self.Season self.assertEqual(Season.SPRING.value, 1) def test_intenum_value(self): self.assertEqual(IntStooges.CURLY.value, 2) def test_enum(self): Season = self.Season lst = list(Season) self.assertEqual(len(lst), len(Season)) self.assertEqual(len(Season), 4, Season) self.assertEqual( [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst) for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()): i += 1 e = Season(i) self.assertEqual(e, getattr(Season, season)) self.assertEqual(e.value, i) self.assertNotEqual(e, i) self.assertEqual(e.name, season) self.assertTrue(e in Season) self.assertTrue(type(e) is Season) self.assertTrue(isinstance(e, Season)) self.assertEqual(str(e), 'Season.' 
+ season) self.assertEqual( repr(e), '<Season.%s: %s>' % (season, i), ) def test_value_name(self): Season = self.Season self.assertEqual(Season.SPRING.name, 'SPRING') self.assertEqual(Season.SPRING.value, 1) def set_name(obj, new_value): obj.name = new_value def set_value(obj, new_value): obj.value = new_value self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', ) self.assertRaises(AttributeError, set_value, Season.SPRING, 2) def test_attribute_deletion(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 def spam(cls): pass self.assertTrue(hasattr(Season, 'spam')) del Season.spam self.assertFalse(hasattr(Season, 'spam')) self.assertRaises(AttributeError, delattr, Season, 'SPRING') self.assertRaises(AttributeError, delattr, Season, 'DRY') self.assertRaises(AttributeError, delattr, Season.SPRING, 'name') def test_invalid_names(self): def create_bad_class_1(): class Wrong(Enum): mro = 9 def create_bad_class_2(): class Wrong(Enum): _reserved_ = 3 self.assertRaises(ValueError, create_bad_class_1) self.assertRaises(ValueError, create_bad_class_2) def test_contains(self): Season = self.Season self.assertTrue(Season.AUTUMN in Season) self.assertTrue(3 not in Season) val = Season(3) self.assertTrue(val in Season) class OtherEnum(Enum): one = 1; two = 2 self.assertTrue(OtherEnum.two not in Season) if pyver >= 2.6: # when `format` came into being def test_format_enum(self): Season = self.Season self.assertEqual('{0}'.format(Season.SPRING), '{0}'.format(str(Season.SPRING))) self.assertEqual( '{0:}'.format(Season.SPRING), '{0:}'.format(str(Season.SPRING))) self.assertEqual('{0:20}'.format(Season.SPRING), '{0:20}'.format(str(Season.SPRING))) self.assertEqual('{0:^20}'.format(Season.SPRING), '{0:^20}'.format(str(Season.SPRING))) self.assertEqual('{0:>20}'.format(Season.SPRING), '{0:>20}'.format(str(Season.SPRING))) self.assertEqual('{0:<20}'.format(Season.SPRING), '{0:<20}'.format(str(Season.SPRING))) def test_format_enum_custom(self): class TestFloat(float, Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'TestFloat success!' 
self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!') def assertFormatIsValue(self, spec, member): self.assertEqual(spec.format(member), spec.format(member.value)) def test_format_enum_date(self): Holiday = self.Holiday self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH) def test_format_enum_float(self): Konstants = self.Konstants self.assertFormatIsValue('{0}', Konstants.TAU) self.assertFormatIsValue('{0:}', Konstants.TAU) self.assertFormatIsValue('{0:20}', Konstants.TAU) self.assertFormatIsValue('{0:^20}', Konstants.TAU) self.assertFormatIsValue('{0:>20}', Konstants.TAU) self.assertFormatIsValue('{0:<20}', Konstants.TAU) self.assertFormatIsValue('{0:n}', Konstants.TAU) self.assertFormatIsValue('{0:5.2}', Konstants.TAU) self.assertFormatIsValue('{0:f}', Konstants.TAU) def test_format_enum_int(self): Grades = self.Grades self.assertFormatIsValue('{0}', Grades.C) self.assertFormatIsValue('{0:}', Grades.C) self.assertFormatIsValue('{0:20}', Grades.C) self.assertFormatIsValue('{0:^20}', Grades.C) self.assertFormatIsValue('{0:>20}', Grades.C) self.assertFormatIsValue('{0:<20}', Grades.C) self.assertFormatIsValue('{0:+}', Grades.C) self.assertFormatIsValue('{0:08X}', Grades.C) self.assertFormatIsValue('{0:b}', Grades.C) def test_format_enum_str(self): Directional = self.Directional self.assertFormatIsValue('{0}', Directional.WEST) self.assertFormatIsValue('{0:}', Directional.WEST) self.assertFormatIsValue('{0:20}', Directional.WEST) self.assertFormatIsValue('{0:^20}', Directional.WEST) self.assertFormatIsValue('{0:>20}', Directional.WEST) self.assertFormatIsValue('{0:<20}', Directional.WEST) def test_hash(self): Season = self.Season dates = {} dates[Season.WINTER] = '1225' dates[Season.SPRING] = '0315' dates[Season.SUMMER] = '0704' dates[Season.AUTUMN] = '1031' self.assertEqual(dates[Season.AUTUMN], '1031') def test_enum_duplicates(self): __order__ = "SPRING SUMMER AUTUMN WINTER" class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = FALL = 3 WINTER = 4 ANOTHER_SPRING = 1 lst = list(Season) self.assertEqual( lst, [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER, ]) self.assertTrue(Season.FALL is Season.AUTUMN) self.assertEqual(Season.FALL.value, 3) self.assertEqual(Season.AUTUMN.value, 3) self.assertTrue(Season(3) is Season.AUTUMN) self.assertTrue(Season(1) is Season.SPRING) self.assertEqual(Season.FALL.name, 'AUTUMN') self.assertEqual( set([k for k,v in Season.__members__.items() if v.name != k]), set(['FALL', 'ANOTHER_SPRING']), ) if pyver >= 3.0: cls = vars() result = {'Enum':Enum} exec("""def test_duplicate_name(self): with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 red = 4 with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 def red(self): return 'red' with self.assertRaises(TypeError): class Color(Enum): @property def red(self): return 'redder' red = 1 green = 2 blue = 3""", result) cls['test_duplicate_name'] = result['test_duplicate_name'] def test_enum_with_value_name(self): class Huh(Enum): name = 1 value = 2 self.assertEqual( list(Huh), [Huh.name, Huh.value], ) 
self.assertTrue(type(Huh.name) is Huh) self.assertEqual(Huh.name.name, 'name') self.assertEqual(Huh.name.value, 1) def test_intenum_from_scratch(self): class phy(int, Enum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_intenum_inherited(self): class IntEnum(int, Enum): pass class phy(IntEnum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_from_scratch(self): class phy(float, Enum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_inherited(self): class FloatEnum(float, Enum): pass class phy(FloatEnum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_strenum_from_scratch(self): class phy(str, Enum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_strenum_inherited(self): class StrEnum(str, Enum): pass class phy(StrEnum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_intenum(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c') self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2]) lst = list(WeekDay) self.assertEqual(len(lst), len(WeekDay)) self.assertEqual(len(WeekDay), 7) target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY' target = target.split() for i, weekday in enumerate(target): i += 1 e = WeekDay(i) self.assertEqual(e, i) self.assertEqual(int(e), i) self.assertEqual(e.name, weekday) self.assertTrue(e in WeekDay) self.assertEqual(lst.index(e)+1, i) self.assertTrue(0 < e < 8) self.assertTrue(type(e) is WeekDay) self.assertTrue(isinstance(e, int)) self.assertTrue(isinstance(e, Enum)) def test_intenum_duplicates(self): class WeekDay(IntEnum): __order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY' SUNDAY = 1 MONDAY = 2 TUESDAY = TEUSDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY) self.assertEqual(WeekDay(3).name, 'TUESDAY') self.assertEqual([k for k,v in WeekDay.__members__.items() if v.name != k], ['TEUSDAY', ]) def test_pickle_enum(self): if isinstance(Stooges, Exception): raise Stooges test_pickle_dump_load(self.assertTrue, Stooges.CURLY) test_pickle_dump_load(self.assertTrue, Stooges) def test_pickle_int(self): if isinstance(IntStooges, Exception): raise IntStooges test_pickle_dump_load(self.assertTrue, IntStooges.CURLY) test_pickle_dump_load(self.assertTrue, IntStooges) def test_pickle_float(self): if isinstance(FloatStooges, Exception): raise FloatStooges test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY) test_pickle_dump_load(self.assertTrue, FloatStooges) def test_pickle_enum_function(self): if isinstance(Answer, Exception): raise Answer test_pickle_dump_load(self.assertTrue, Answer.him) test_pickle_dump_load(self.assertTrue, Answer) def test_pickle_enum_function_with_module(self): if isinstance(Question, Exception): raise Question test_pickle_dump_load(self.assertTrue, Question.who) test_pickle_dump_load(self.assertTrue, Question) if pyver >= 3.4: def test_class_nested_enum_and_pickle_protocol_four(self): # would normally just have this directly in the class namespace class NestedEnum(Enum): twigs = 'common' shiny = 'rare' self.__class__.NestedEnum = NestedEnum self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__ test_pickle_exception( self.assertRaises, PicklingError, self.NestedEnum.twigs, protocol=(0, 3)) test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs, protocol=(4, HIGHEST_PROTOCOL)) 
def test_exploding_pickle(self): BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter') enum._make_class_unpicklable(BadPickle) globals()['BadPickle'] = BadPickle test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill) test_pickle_exception(self.assertRaises, PicklingError, BadPickle) def test_string_enum(self): class SkillLevel(str, Enum): master = 'what is the sound of one hand clapping?' journeyman = 'why did the chicken cross the road?' apprentice = 'knock, knock!' self.assertEqual(SkillLevel.apprentice, 'knock, knock!') def test_getattr_getitem(self): class Period(Enum): morning = 1 noon = 2 evening = 3 night = 4 self.assertTrue(Period(2) is Period.noon) self.assertTrue(getattr(Period, 'night') is Period.night) self.assertTrue(Period['morning'] is Period.morning) def test_getattr_dunder(self): Season = self.Season self.assertTrue(getattr(Season, '__hash__')) def test_iteration_order(self): class Season(Enum): __order__ = 'SUMMER WINTER AUTUMN SPRING' SUMMER = 2 WINTER = 4 AUTUMN = 3 SPRING = 1 self.assertEqual( list(Season), [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING], ) def test_iteration_order_with_unorderable_values(self): class Complex(Enum): a = complex(7, 9) b = complex(3.14, 2) c = complex(1, -1) d = complex(-77, 32) self.assertEqual( list(Complex), [Complex.a, Complex.b, Complex.c, Complex.d], ) def test_programatic_function_string(self): SummerMonth = Enum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_string_list(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august']) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_iterable(self): SummerMonth = Enum( 'SummerMonth', (('june', 1), ('july', 2), ('august', 3)) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_from_dict(self): SummerMonth = Enum( 'SummerMonth', dict((('june', 1), ('july', 2), ('august', 3))) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) if pyver < 3.0: self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) 
self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_type(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split()): i += 1 e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_unicode(self): SummerMonth = Enum('SummerMonth', unicode('june july august')) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_unicode_list(self): SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')]) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_unicode_iterable(self): SummerMonth = Enum( 'SummerMonth', ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_from_unicode_dict(self): SummerMonth = Enum( 'SummerMonth', dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) if pyver < 3.0: self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) 
self.assertTrue(type(e) is SummerMonth) def test_programatic_function_unicode_type(self): SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programatic_function_unicode_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', unicode('june july august')) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_programmatic_function_unicode_class(self): if pyver < 3.0: class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1') else: class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth' for i, class_name in enumerate(class_names): if pyver < 3.0 and i == 1: self.assertRaises(TypeError, Enum, class_name, unicode('june july august')) else: SummerMonth = Enum(class_name, unicode('june july august')) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate(unicode('june july august').split()): i += 1 e = SummerMonth(i) self.assertEqual(e.value, i) self.assertEqual(e.name, month) self.assertTrue(e in SummerMonth) self.assertTrue(type(e) is SummerMonth) def test_subclassing(self): if isinstance(Name, Exception): raise Name self.assertEqual(Name.BDFL, 'Guido van Rossum') self.assertTrue(Name.BDFL, Name('Guido van Rossum')) self.assertTrue(Name.BDFL is getattr(Name, 'BDFL')) test_pickle_dump_load(self.assertTrue, Name.BDFL) def test_extending(self): def bad_extension(): class Color(Enum): red = 1 green = 2 blue = 3 class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 self.assertRaises(TypeError, bad_extension) def test_exclude_methods(self): class whatever(Enum): this = 'that' these = 'those' def really(self): return 'no, not %s' % self.value self.assertFalse(type(whatever.really) is whatever) self.assertEqual(whatever.this.really(), 'no, not that') def test_wrong_inheritance_order(self): def wrong_inherit(): class Wrong(Enum, str): NotHere = 'error before this point' self.assertRaises(TypeError, wrong_inherit) def test_intenum_transitivity(self): class number(IntEnum): one = 1 two = 2 three = 3 class numero(IntEnum): uno = 1 dos = 2 tres = 3 self.assertEqual(number.one, numero.uno) self.assertEqual(number.two, numero.dos) self.assertEqual(number.three, numero.tres) def test_introspection(self): class Number(IntEnum): one = 100 two = 200 self.assertTrue(Number.one._member_type_ is int) self.assertTrue(Number._member_type_ is int) class String(str, Enum): yarn = 'soft' rope = 'rough' wire = 'hard' self.assertTrue(String.yarn._member_type_ is str) self.assertTrue(String._member_type_ is str) class Plain(Enum): vanilla = 'white' one = 1 self.assertTrue(Plain.vanilla._member_type_ is object) 
self.assertTrue(Plain._member_type_ is object) def test_wrong_enum_in_call(self): class Monochrome(Enum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_wrong_enum_in_mixed_call(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_mixed_enum_in_call_1(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertTrue(Monochrome(Gender.female) is Monochrome.white) def test_mixed_enum_in_call_2(self): class Monochrome(Enum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertTrue(Monochrome(Gender.male) is Monochrome.black) def test_flufl_enum(self): class Fluflnum(Enum): def __int__(self): return int(self.value) class MailManOptions(Fluflnum): option1 = 1 option2 = 2 option3 = 3 self.assertEqual(int(MailManOptions.option1), 1) def test_no_such_enum_member(self): class Color(Enum): red = 1 green = 2 blue = 3 self.assertRaises(ValueError, Color, 4) self.assertRaises(KeyError, Color.__getitem__, 'chartreuse') def test_new_repr(self): class Color(Enum): red = 1 green = 2 blue = 3 def __repr__(self): return "don't you just love shades of %s?" % self.name self.assertEqual( repr(Color.blue), "don't you just love shades of blue?", ) def test_inherited_repr(self): class MyEnum(Enum): def __repr__(self): return "My name is %s." % self.name class MyIntEnum(int, MyEnum): this = 1 that = 2 theother = 3 self.assertEqual(repr(MyIntEnum.that), "My name is that.") def test_multiple_mixin_mro(self): class auto_enum(EnumMeta): def __new__(metacls, cls, bases, classdict): original_dict = classdict classdict = enum._EnumDict() for k, v in original_dict.items(): classdict[k] = v temp = type(classdict)() names = set(classdict._member_names) i = 0 for k in classdict._member_names: v = classdict[k] if v == (): v = i else: i = v i += 1 temp[k] = v for k, v in classdict.items(): if k not in names: temp[k] = v return super(auto_enum, metacls).__new__( metacls, cls, bases, temp) AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {}) AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {}) class TestAutoNumber(AutoNumberedEnum): a = () b = 3 c = () class TestAutoInt(AutoIntEnum): a = () b = 3 c = () def test_subclasses_with_getnewargs(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args if len(args) < 1: raise TypeError("name and value must be specified") name, args = args[0], args[1:] self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs__(self): return self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "%s(%r, %s)" % (type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '(%s + %s)' % (self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) 
self.assertTrue(NEI.__new__ is Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertTrue, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertTrue, NEI.y) if pyver >= 3.4: def test_subclasses_with_getnewargs_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args if len(args) < 2: raise TypeError("name and value must be specified") name, args = args[0], args[1:] self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs_ex__(self): return self._args, {} @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format(type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL)) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL)) def test_subclasses_with_reduce(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args if len(args) < 1: raise TypeError("name and value must be specified") name, args = args[0], args[1:] self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce__(self): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "%s(%r, %s)" % (type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '(%s + %s)' % (self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertTrue(NEI.__new__ is Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertTrue, NEI.y) def 
test_subclasses_with_reduce_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args if len(args) < 1: raise TypeError("name and value must be specified") name, args = args[0], args[1:] self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce_ex__(self, proto): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "%s(%r, %s)" % (type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '(%s + %s)' % (self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertTrue(NEI.__new__ is Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertTrue, NEI.y) def test_subclasses_without_direct_pickle_support(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, args = args[0], args[1:] if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "%s(%r, %s)" % (type(self).__name__, self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '(%s + %s)' % (self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = ('the-x', 1) y = ('the-y', 2) self.assertTrue(NEI.__new__ is Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_exception(self.assertRaises, TypeError, NEI.x) test_pickle_exception(self.assertRaises, PicklingError, NEI) def test_subclasses_without_direct_pickle_support_using_name(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, args = args[0], args[1:] if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "%s(%r, %s)" % (type(self).__name__, 
self.__name__, int.__repr__(self)) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '(%s + %s)' % (self.__name__, other.__name__), temp ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = ('the-x', 1) y = ('the-y', 2) def __reduce_ex__(self, proto): return getattr, (self.__class__, self._name_) self.assertTrue(NEI.__new__ is Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertTrue, NEI.y) test_pickle_dump_load(self.assertTrue, NEI) def test_tuple_subclass(self): class SomeTuple(tuple, Enum): __qualname__ = 'SomeTuple' first = (1, 'for the money') second = (2, 'for the show') third = (3, 'for the music') self.assertTrue(type(SomeTuple.first) is SomeTuple) self.assertTrue(isinstance(SomeTuple.second, tuple)) self.assertEqual(SomeTuple.third, (3, 'for the music')) globals()['SomeTuple'] = SomeTuple test_pickle_dump_load(self.assertTrue, SomeTuple.first) def test_duplicate_values_give_unique_enum_items(self): class AutoNumber(Enum): __order__ = 'enum_m enum_d enum_y' enum_m = () enum_d = () enum_y = () def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return int(self._value_) self.assertEqual(int(AutoNumber.enum_d), 2) self.assertEqual(AutoNumber.enum_y.value, 3) self.assertTrue(AutoNumber(1) is AutoNumber.enum_m) self.assertEqual( list(AutoNumber), [AutoNumber.enum_m, AutoNumber.enum_d, AutoNumber.enum_y], ) def test_inherited_new_from_enhanced_enum(self): class AutoNumber2(Enum): def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return int(self._value_) class Color(AutoNumber2): __order__ = 'red green blue' red = () green = () blue = () self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3)) self.assertEqual(list(Color), [Color.red, Color.green, Color.blue]) if pyver >= 3.0: self.assertEqual(list(map(int, Color)), [1, 2, 3]) def test_inherited_new_from_mixed_enum(self): class AutoNumber3(IntEnum): def __new__(cls): value = len(cls.__members__) + 1 obj = int.__new__(cls, value) obj._value_ = value return obj class Color(AutoNumber3): red = () green = () blue = () self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3)) Color.red Color.green Color.blue def test_ordered_mixin(self): class OrderedEnum(Enum): def __ge__(self, other): if self.__class__ is other.__class__: return self._value_ >= other._value_ return NotImplemented def __gt__(self, other): if self.__class__ is other.__class__: return self._value_ > other._value_ return NotImplemented def __le__(self, other): if self.__class__ is other.__class__: return self._value_ <= other._value_ return NotImplemented def __lt__(self, other): if self.__class__ is other.__class__: return self._value_ < other._value_ return NotImplemented class Grade(OrderedEnum): __order__ = 'A B C D F' A = 5 B = 4 C = 3 D = 2 F = 1 
self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F]) self.assertTrue(Grade.A > Grade.B) self.assertTrue(Grade.F <= Grade.C) self.assertTrue(Grade.D < Grade.A) self.assertTrue(Grade.B >= Grade.B) def test_extending2(self): def bad_extension(): class Shade(Enum): def shade(self): print(self.name) class Color(Shade): red = 1 green = 2 blue = 3 class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 self.assertRaises(TypeError, bad_extension) def test_extending3(self): class Shade(Enum): def shade(self): return self.name class Color(Shade): def hex(self): return '%s hexlified!' % self.value class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!') def test_no_duplicates(self): def bad_duplicates(): class UniqueEnum(Enum): def __init__(self, *args): cls = self.__class__ if any(self.value == e.value for e in cls): a = self.name e = cls(self.value).name raise ValueError( "aliases not allowed in UniqueEnum: %r --> %r" % (a, e) ) class Color(UniqueEnum): red = 1 green = 2 blue = 3 class Color(UniqueEnum): red = 1 green = 2 blue = 3 grene = 2 self.assertRaises(ValueError, bad_duplicates) def test_reversed(self): self.assertEqual( list(reversed(self.Season)), [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER, self.Season.SPRING] ) def test_init(self): class Planet(Enum): MERCURY = (3.303e+23, 2.4397e6) VENUS = (4.869e+24, 6.0518e6) EARTH = (5.976e+24, 6.37814e6) MARS = (6.421e+23, 3.3972e6) JUPITER = (1.9e+27, 7.1492e7) SATURN = (5.688e+26, 6.0268e7) URANUS = (8.686e+25, 2.5559e7) NEPTUNE = (1.024e+26, 2.4746e7) def __init__(self, mass, radius): self.mass = mass # in kilograms self.radius = radius # in meters @property def surface_gravity(self): # universal gravitational constant (m3 kg-1 s-2) G = 6.67300E-11 return G * self.mass / (self.radius * self.radius) self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80) self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6)) def test_nonhash_value(self): class AutoNumberInAList(Enum): def __new__(cls): value = [len(cls.__members__) + 1] obj = object.__new__(cls) obj._value_ = value return obj class ColorInAList(AutoNumberInAList): __order__ = 'red green blue' red = () green = () blue = () self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue]) self.assertEqual(ColorInAList.red.value, [1]) self.assertEqual(ColorInAList([1]), ColorInAList.red) def test_conflicting_types_resolved_in_new(self): class LabelledIntEnum(int, Enum): def __new__(cls, *args): value, label = args obj = int.__new__(cls, value) obj.label = label obj._value_ = value return obj class LabelledList(LabelledIntEnum): unprocessed = (1, "Unprocessed") payment_complete = (2, "Payment Complete") self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete]) self.assertEqual(LabelledList.unprocessed, 1) self.assertEqual(LabelledList(1), LabelledList.unprocessed) class TestUnique(unittest.TestCase): """2.4 doesn't allow class decorators, use function syntax.""" def test_unique_clean(self): class Clean(Enum): one = 1 two = 'dos' tres = 4.0 unique(Clean) class Cleaner(IntEnum): single = 1 double = 2 triple = 3 unique(Cleaner) def test_unique_dirty(self): try: class Dirty(Enum): __order__ = 'one two tres' one = 1 two = 'dos' tres = 1 unique(Dirty) except ValueError: exc = sys.exc_info()[1] message = exc.args[0] self.assertTrue('tres -> one' in message) try: class Dirtier(IntEnum): __order__ = 'single double triple turkey' single 
= 1 double = 1 triple = 3 turkey = 3 unique(Dirtier) except ValueError: exc = sys.exc_info()[1] message = exc.args[0] self.assertTrue('double -> single' in message) self.assertTrue('turkey -> triple' in message) class TestMe(unittest.TestCase): pass if __name__ == '__main__': unittest.main()
bernardorufino/pick
src/vendor/enum/test_enum.py
Python
mit
62290
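The test file above exercises enum34's ordering, aliasing, and uniqueness machinery. A minimal sketch of those three behaviors in isolation follows; the class and member names are illustrative only, not taken from the test file.

# Hedged sketch of the enum34 behaviors the tests above exercise;
# names here are illustrative, not from the test file.
from enum import Enum, IntEnum, unique

class WeekDay(IntEnum):
    __order__ = 'MONDAY TUESDAY WEDNESDAY'  # fixes iteration order on Python 2
    MONDAY = 1
    TUESDAY = TEUSDAY = 2                   # repeated value makes TEUSDAY an alias
    WEDNESDAY = 3

assert WeekDay.TEUSDAY is WeekDay.TUESDAY          # aliases resolve to one member
assert list(WeekDay) == [WeekDay.MONDAY, WeekDay.TUESDAY, WeekDay.WEDNESDAY]
assert WeekDay.MONDAY < WeekDay.WEDNESDAY          # IntEnum members compare as ints

def build_dirty():
    class Dirty(Enum):
        one = 1
        uno = 1                             # silently becomes an alias of `one`...
    unique(Dirty)                           # ...until unique() rejects the class

try:
    build_dirty()
except ValueError as exc:
    print(exc)  # message names the alias pair, e.g. 'uno -> one'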
# -*- coding: utf-8 -*- ############################################################################### # # CSW Client # --------------------------------------------------------- # QGIS Catalog Service client. # # Copyright (C) 2010 NextGIS (http://nextgis.org), # Alexander Bruy (alexander.bruy@gmail.com), # Maxim Dubinin (sim@gis-lab.info) # # Copyright (C) 2017 Tom Kralidis (tomkralidis@gmail.com) # # This source is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # This code is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # ############################################################################### import json import os.path from urllib.request import build_opener, install_opener, ProxyHandler from qgis.PyQt.QtCore import Qt from qgis.PyQt.QtWidgets import (QApplication, QDialog, QComboBox, QDialogButtonBox, QMessageBox, QTreeWidgetItem, QWidget) from qgis.PyQt.QtGui import QColor, QCursor from qgis.core import (QgsApplication, QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsGeometry, QgsPointXY, QgsProviderRegistry, QgsSettings, QgsProject) from qgis.gui import QgsRubberBand from qgis.utils import OverrideCursor from owslib.csw import CatalogueServiceWeb # spellok from owslib.fes import BBox, PropertyIsLike from owslib.ows import ExceptionReport from MetaSearch import link_types from MetaSearch.dialogs.manageconnectionsdialog import ManageConnectionsDialog from MetaSearch.dialogs.newconnectiondialog import NewConnectionDialog from MetaSearch.dialogs.recorddialog import RecordDialog from MetaSearch.dialogs.xmldialog import XMLDialog from MetaSearch.util import (clean_ows_url, get_connections_from_file, get_ui_class, get_help_url, highlight_xml, normalize_text, open_url, render_template, serialize_string, StaticContext) BASE_CLASS = get_ui_class('maindialog.ui') class MetaSearchDialog(QDialog, BASE_CLASS): """main dialogue""" def __init__(self, iface): """init window""" QDialog.__init__(self) self.setupUi(self) self.iface = iface self.map = iface.mapCanvas() self.settings = QgsSettings() self.catalog = None self.catalog_url = None self.catalog_username = None self.catalog_password = None self.context = StaticContext() version = self.context.metadata.get('general', 'version') self.setWindowTitle(self.tr('MetaSearch {0}').format(version)) self.rubber_band = QgsRubberBand(self.map, True) # True = a polygon self.rubber_band.setColor(QColor(255, 0, 0, 75)) self.rubber_band.setWidth(5) # form inputs self.startfrom = 0 self.maxrecords = 10 self.timeout = 10 self.constraints = [] # Servers tab self.cmbConnectionsServices.activated.connect(self.save_connection) self.cmbConnectionsSearch.activated.connect(self.save_connection) self.btnServerInfo.clicked.connect(self.connection_info) self.btnAddDefault.clicked.connect(self.add_default_connections) self.btnCapabilities.clicked.connect(self.show_xml) self.tabWidget.currentChanged.connect(self.populate_connection_list) # server management buttons 
self.btnNew.clicked.connect(self.add_connection) self.btnEdit.clicked.connect(self.edit_connection) self.btnDelete.clicked.connect(self.delete_connection) self.btnLoad.clicked.connect(self.load_connections) self.btnSave.clicked.connect(save_connections) # Search tab self.treeRecords.itemSelectionChanged.connect(self.record_clicked) self.treeRecords.itemDoubleClicked.connect(self.show_metadata) self.btnSearch.clicked.connect(self.search) self.leKeywords.returnPressed.connect(self.search) # prevent dialog from closing upon pressing enter self.buttonBox.button(QDialogButtonBox.Close).setAutoDefault(False) # launch help from button self.buttonBox.helpRequested.connect(self.help) self.btnCanvasBbox.setAutoDefault(False) self.btnCanvasBbox.clicked.connect(self.set_bbox_from_map) self.btnGlobalBbox.clicked.connect(self.set_bbox_global) # navigation buttons self.btnFirst.clicked.connect(self.navigate) self.btnPrev.clicked.connect(self.navigate) self.btnNext.clicked.connect(self.navigate) self.btnLast.clicked.connect(self.navigate) self.mActionAddWms.triggered.connect(self.add_to_ows) self.mActionAddWfs.triggered.connect(self.add_to_ows) self.mActionAddWcs.triggered.connect(self.add_to_ows) self.mActionAddAms.triggered.connect(self.add_to_ows) self.mActionAddAfs.triggered.connect(self.add_to_ows) self.btnShowXml.clicked.connect(self.show_xml) # settings self.radioTitleAsk.clicked.connect(self.set_ows_save_title_ask) self.radioTitleNoAsk.clicked.connect(self.set_ows_save_title_no_ask) self.radioTempName.clicked.connect(self.set_ows_save_temp_name) self.manageGui() def manageGui(self): """open window""" self.tabWidget.setCurrentIndex(0) self.populate_connection_list() self.btnCapabilities.setEnabled(False) self.spnRecords.setValue( int(self.settings.value('/MetaSearch/returnRecords', 10))) key = '/MetaSearch/%s' % self.cmbConnectionsSearch.currentText() self.catalog_url = self.settings.value('%s/url' % key) self.catalog_username = self.settings.value('%s/username' % key) self.catalog_password = self.settings.value('%s/password' % key) self.set_bbox_global() self.reset_buttons() # get preferred connection save strategy from settings and set it save_strategy = self.settings.value('/MetaSearch/ows_save_strategy', 'title_ask') if save_strategy == 'temp_name': self.radioTempName.setChecked(True) elif save_strategy == 'title_no_ask': self.radioTitleNoAsk.setChecked(True) else: self.radioTitleAsk.setChecked(True) # install proxy handler if specified in QGIS settings self.install_proxy() # Servers tab def populate_connection_list(self): """populate select box with connections""" self.settings.beginGroup('/MetaSearch/') self.cmbConnectionsServices.clear() self.cmbConnectionsServices.addItems(self.settings.childGroups()) self.cmbConnectionsSearch.clear() self.cmbConnectionsSearch.addItems(self.settings.childGroups()) self.settings.endGroup() self.set_connection_list_position() if self.cmbConnectionsServices.count() == 0: # no connections - disable various buttons state_disabled = False self.btnSave.setEnabled(state_disabled) # and start with connection tab open self.tabWidget.setCurrentIndex(1) # tell the user to add services msg = self.tr('No services/connections defined. 
To get ' 'started with MetaSearch, create a new ' 'connection by clicking \'New\' or click ' '\'Add default services\'.') self.textMetadata.setHtml('<p><h3>%s</h3></p>' % msg) else: # connections - enable various buttons state_disabled = True self.btnServerInfo.setEnabled(state_disabled) self.btnEdit.setEnabled(state_disabled) self.btnDelete.setEnabled(state_disabled) def set_connection_list_position(self): """set the current index to the selected connection""" to_select = self.settings.value('/MetaSearch/selected') conn_count = self.cmbConnectionsServices.count() if conn_count == 0: self.btnDelete.setEnabled(False) self.btnServerInfo.setEnabled(False) self.btnEdit.setEnabled(False) # does to_select exist in cmbConnectionsServices? exists = False for i in range(conn_count): if self.cmbConnectionsServices.itemText(i) == to_select: self.cmbConnectionsServices.setCurrentIndex(i) self.cmbConnectionsSearch.setCurrentIndex(i) exists = True break # If we couldn't find the stored item, but there are some, default # to the last item (this makes some sense when deleting items as it # allows the user to repeatedly click on delete to remove a whole # lot of items) if not exists and conn_count > 0: # If to_select is null, then the selected connection wasn't found # by QgsSettings, which probably means that this is the first time # the user has used CSWClient, so default to the first in the list # of connections. Otherwise default to the last. if not to_select: current_index = 0 else: current_index = conn_count - 1 self.cmbConnectionsServices.setCurrentIndex(current_index) self.cmbConnectionsSearch.setCurrentIndex(current_index) def save_connection(self): """save connection""" caller = self.sender().objectName() if caller == 'cmbConnectionsServices': # servers tab current_text = self.cmbConnectionsServices.currentText() elif caller == 'cmbConnectionsSearch': # search tab current_text = self.cmbConnectionsSearch.currentText() self.settings.setValue('/MetaSearch/selected', current_text) key = '/MetaSearch/%s' % current_text if caller == 'cmbConnectionsSearch': # bind to service in search tab self.catalog_url = self.settings.value('%s/url' % key) self.catalog_username = self.settings.value('%s/username' % key) self.catalog_password = self.settings.value('%s/password' % key) if caller == 'cmbConnectionsServices': # clear server metadata self.textMetadata.clear() self.btnCapabilities.setEnabled(False) def connection_info(self): """show connection info""" current_text = self.cmbConnectionsServices.currentText() key = '/MetaSearch/%s' % current_text self.catalog_url = self.settings.value('%s/url' % key) self.catalog_username = self.settings.value('%s/username' % key) self.catalog_password = self.settings.value('%s/password' % key) # connect to the server if not self._get_csw(): return if self.catalog: # display service metadata self.btnCapabilities.setEnabled(True) metadata = render_template('en', self.context, self.catalog, 'service_metadata.html') style = QgsApplication.reportStyleSheet() self.textMetadata.clear() self.textMetadata.document().setDefaultStyleSheet(style) self.textMetadata.setHtml(metadata) def add_connection(self): """add new service""" conn_new = NewConnectionDialog() conn_new.setWindowTitle(self.tr('New Catalog service')) if conn_new.exec_() == QDialog.Accepted: # add to service list self.populate_connection_list() self.textMetadata.clear() def edit_connection(self): """modify existing connection""" current_text = self.cmbConnectionsServices.currentText() url =
self.settings.value('/MetaSearch/%s/url' % current_text) conn_edit = NewConnectionDialog(current_text) conn_edit.setWindowTitle(self.tr('Edit Catalog service')) conn_edit.leName.setText(current_text) conn_edit.leURL.setText(url) conn_edit.leUsername.setText(self.settings.value('/MetaSearch/%s/username' % current_text)) conn_edit.lePassword.setText(self.settings.value('/MetaSearch/%s/password' % current_text)) if conn_edit.exec_() == QDialog.Accepted: # update service list self.populate_connection_list() def delete_connection(self): """delete connection""" current_text = self.cmbConnectionsServices.currentText() key = '/MetaSearch/%s' % current_text msg = self.tr('Remove service {0}?').format(current_text) result = QMessageBox.information(self, self.tr('Confirm delete'), msg, QMessageBox.Ok | QMessageBox.Cancel) if result == QMessageBox.Ok: # remove service from list self.settings.remove(key) index_to_delete = self.cmbConnectionsServices.currentIndex() self.cmbConnectionsServices.removeItem(index_to_delete) self.cmbConnectionsSearch.removeItem(index_to_delete) self.set_connection_list_position() def load_connections(self): """load services from list""" ManageConnectionsDialog(1).exec_() self.populate_connection_list() def add_default_connections(self): """add default connections""" filename = os.path.join(self.context.ppath, 'resources', 'connections-default.xml') doc = get_connections_from_file(self, filename) if doc is None: return self.settings.beginGroup('/MetaSearch/') keys = self.settings.childGroups() self.settings.endGroup() for server in doc.findall('csw'): name = server.attrib.get('name') # check for duplicates if name in keys: msg = self.tr('{0} exists. Overwrite?').format(name) res = QMessageBox.warning(self, self.tr('Loading connections'), msg, QMessageBox.Yes | QMessageBox.No) if res != QMessageBox.Yes: continue # no dups detected or overwrite is allowed key = '/MetaSearch/%s' % name self.settings.setValue('%s/url' % key, server.attrib.get('url')) self.populate_connection_list() # Settings tab def set_ows_save_title_ask(self): """save ows save strategy as save ows title, ask if duplicate""" self.settings.setValue('/MetaSearch/ows_save_strategy', 'title_ask') def set_ows_save_title_no_ask(self): """save ows save strategy as save ows title, do NOT ask if duplicate""" self.settings.setValue('/MetaSearch/ows_save_strategy', 'title_no_ask') def set_ows_save_temp_name(self): """save ows save strategy as save with a temporary name""" self.settings.setValue('/MetaSearch/ows_save_strategy', 'temp_name') # Search tab def set_bbox_from_map(self): """set bounding box from map extent""" crs = self.map.mapSettings().destinationCrs() try: crsid = int(crs.authid().split(':')[1]) except IndexError: # no projection crsid = 4326 extent = self.map.extent() if crsid != 4326: # reproject to EPSG:4326 src = QgsCoordinateReferenceSystem(crsid) dest = QgsCoordinateReferenceSystem(4326) xform = QgsCoordinateTransform(src, dest, QgsProject.instance()) minxy = xform.transform(QgsPointXY(extent.xMinimum(), extent.yMinimum())) maxxy = xform.transform(QgsPointXY(extent.xMaximum(), extent.yMaximum())) minx, miny = minxy maxx, maxy = maxxy else: # 4326 minx = extent.xMinimum() miny = extent.yMinimum() maxx = extent.xMaximum() maxy = extent.yMaximum() self.leNorth.setText(str(maxy)[0:9]) self.leSouth.setText(str(miny)[0:9]) self.leWest.setText(str(minx)[0:9]) self.leEast.setText(str(maxx)[0:9]) def set_bbox_global(self): """set global bounding box""" self.leNorth.setText('90') self.leSouth.setText('-90') 
self.leWest.setText('-180') self.leEast.setText('180') def search(self): """execute search""" self.catalog = None self.constraints = [] # clear all fields and disable buttons self.lblResults.clear() self.treeRecords.clear() self.reset_buttons() # save some settings self.settings.setValue('/MetaSearch/returnRecords', self.spnRecords.cleanText()) # set current catalog current_text = self.cmbConnectionsSearch.currentText() key = '/MetaSearch/%s' % current_text self.catalog_url = self.settings.value('%s/url' % key) self.catalog_username = self.settings.value('%s/username' % key) self.catalog_password = self.settings.value('%s/password' % key) # start position and number of records to return self.startfrom = 0 self.maxrecords = self.spnRecords.value() # set timeout self.timeout = self.spnTimeout.value() # bbox # CRS is WGS84 with axis order longitude, latitude # defined by 'urn:ogc:def:crs:OGC:1.3:CRS84' minx = self.leWest.text() miny = self.leSouth.text() maxx = self.leEast.text() maxy = self.leNorth.text() bbox = [minx, miny, maxx, maxy] # only apply spatial filter if bbox is not global # even for a global bbox, if a spatial filter is applied, then # the CSW server will skip records without a bbox if bbox != ['-180', '-90', '180', '90']: self.constraints.append(BBox(bbox, crs='urn:ogc:def:crs:OGC:1.3:CRS84')) # keywords if self.leKeywords.text(): # TODO: handle multiple word searches keywords = self.leKeywords.text() self.constraints.append(PropertyIsLike('csw:AnyText', keywords)) if len(self.constraints) > 1: # exclusive search (a && b) self.constraints = [self.constraints] # build request if not self._get_csw(): return # TODO: allow users to select resources types # to find ('service', 'dataset', etc.) try: with OverrideCursor(Qt.WaitCursor): self.catalog.getrecords2(constraints=self.constraints, maxrecords=self.maxrecords, esn='full') except ExceptionReport as err: QMessageBox.warning(self, self.tr('Search error'), self.tr('Search error: {0}').format(err)) return except Exception as err: QMessageBox.warning(self, self.tr('Connection error'), self.tr('Connection error: {0}').format(err)) return if self.catalog.results['matches'] == 0: self.lblResults.setText(self.tr('0 results')) return self.display_results() def display_results(self): """display search results""" self.treeRecords.clear() position = self.catalog.results['returned'] + self.startfrom msg = self.tr('Showing {0} - {1} of %n result(s)', 'number of results', self.catalog.results['matches']).format(self.startfrom + 1, position) self.lblResults.setText(msg) for rec in self.catalog.records: item = QTreeWidgetItem(self.treeRecords) if self.catalog.records[rec].type: item.setText(0, normalize_text(self.catalog.records[rec].type)) else: item.setText(0, 'unknown') if self.catalog.records[rec].title: item.setText(1, normalize_text(self.catalog.records[rec].title)) if self.catalog.records[rec].identifier: set_item_data(item, 'identifier', self.catalog.records[rec].identifier) self.btnShowXml.setEnabled(True) if self.catalog.results["matches"] < self.maxrecords: disabled = False else: disabled = True self.btnFirst.setEnabled(disabled) self.btnPrev.setEnabled(disabled) self.btnNext.setEnabled(disabled) self.btnLast.setEnabled(disabled) def record_clicked(self): """record clicked signal""" # disable only service buttons self.reset_buttons(True, False, False) if not self.treeRecords.selectedItems(): return item = self.treeRecords.currentItem() if not item: return identifier = get_item_data(item, 'identifier') try: record = 
self.catalog.records[identifier] except KeyError as err: QMessageBox.warning(self, self.tr('Record parsing error'), 'Unable to locate record identifier') return # if the record has a bbox, show a footprint on the map if record.bbox is not None: points = bbox_to_polygon(record.bbox) if points is not None: src = QgsCoordinateReferenceSystem(4326) dst = self.map.mapSettings().destinationCrs() geom = QgsGeometry.fromWkt(points) if src.postgisSrid() != dst.postgisSrid(): ctr = QgsCoordinateTransform(src, dst, QgsProject.instance()) try: geom.transform(ctr) except Exception as err: QMessageBox.warning( self, self.tr('Coordinate Transformation Error'), str(err)) self.rubber_band.setToGeometry(geom, None) # figure out if the data is interactive and can be operated on self.find_services(record, item) def find_services(self, record, item): """scan record for WMS/WMTS|WFS|WCS endpoints""" links = record.uris + record.references services = {} for link in links: if 'scheme' in link: link_type = link['scheme'] elif 'protocol' in link: link_type = link['protocol'] else: link_type = None if link_type is not None: link_type = link_type.upper() wmswmst_link_types = list(map(str.upper, link_types.WMSWMST_LINK_TYPES)) wfs_link_types = list(map(str.upper, link_types.WFS_LINK_TYPES)) wcs_link_types = list(map(str.upper, link_types.WCS_LINK_TYPES)) ams_link_types = list(map(str.upper, link_types.AMS_LINK_TYPES)) afs_link_types = list(map(str.upper, link_types.AFS_LINK_TYPES)) # if the link type exists, and it is one of the acceptable # interactive link types, then set if all([link_type is not None, link_type in wmswmst_link_types + wfs_link_types + wcs_link_types + ams_link_types + afs_link_types]): if link_type in wmswmst_link_types: services['wms'] = link['url'] self.mActionAddWms.setEnabled(True) if link_type in wfs_link_types: services['wfs'] = link['url'] self.mActionAddWfs.setEnabled(True) if link_type in wcs_link_types: services['wcs'] = link['url'] self.mActionAddWcs.setEnabled(True) if link_type in ams_link_types: services['ams'] = link['url'] self.mActionAddAms.setEnabled(True) if link_type in afs_link_types: services['afs'] = link['url'] self.mActionAddAfs.setEnabled(True) self.tbAddData.setEnabled(True) set_item_data(item, 'link', json.dumps(services)) def navigate(self): """manage navigation / paging""" caller = self.sender().objectName() if caller == 'btnFirst': self.startfrom = 0 elif caller == 'btnLast': self.startfrom = self.catalog.results['matches'] - self.maxrecords elif caller == 'btnNext': self.startfrom += self.maxrecords if self.startfrom >= self.catalog.results["matches"]: msg = self.tr('End of results. Go to start?') res = QMessageBox.information(self, self.tr('Navigation'), msg, (QMessageBox.Ok | QMessageBox.Cancel)) if res == QMessageBox.Ok: self.startfrom = 0 else: return elif caller == "btnPrev": self.startfrom -= self.maxrecords if self.startfrom <= 0: msg = self.tr('Start of results. 
Go to end?') res = QMessageBox.information(self, self.tr('Navigation'), msg, (QMessageBox.Ok | QMessageBox.Cancel)) if res == QMessageBox.Ok: self.startfrom = (self.catalog.results['matches'] - self.maxrecords) else: return try: with OverrideCursor(Qt.WaitCursor): self.catalog.getrecords2(constraints=self.constraints, maxrecords=self.maxrecords, startposition=self.startfrom, esn='full') except ExceptionReport as err: QMessageBox.warning(self, self.tr('Search error'), self.tr('Search error: {0}').format(err)) return except Exception as err: QMessageBox.warning(self, self.tr('Connection error'), self.tr('Connection error: {0}').format(err)) return self.display_results() def add_to_ows(self): """add to OWS provider connection list""" conn_name_matches = [] item = self.treeRecords.currentItem() if not item: return item_data = json.loads(get_item_data(item, 'link')) caller = self.sender().objectName() # stype = human name,/qgis/connections-%s,providername if caller == 'mActionAddWms': stype = ['OGC:WMS/OGC:WMTS', 'wms', 'wms'] data_url = item_data['wms'] elif caller == 'mActionAddWfs': stype = ['OGC:WFS', 'wfs', 'WFS'] data_url = item_data['wfs'] elif caller == 'mActionAddWcs': stype = ['OGC:WCS', 'wcs', 'wcs'] data_url = item_data['wcs'] elif caller == 'mActionAddAms': stype = ['ESRI:ArcGIS:MapServer', 'ams', 'arcgismapserver'] data_url = item_data['ams'].split('MapServer')[0] + 'MapServer' elif caller == 'mActionAddAfs': stype = ['ESRI:ArcGIS:FeatureServer', 'afs', 'arcgisfeatureserver'] data_url = item_data['afs'].split('FeatureServer')[0] + 'FeatureServer' sname = '%s from MetaSearch' % stype[1] # store connection # check if there is a connection with same name if caller in ['mActionAddAms', 'mActionAddAfs']: self.settings.beginGroup('/qgis/connections-%s' % stype[2]) else: self.settings.beginGroup('/qgis/connections-%s' % stype[1]) keys = self.settings.childGroups() self.settings.endGroup() for key in keys: if key.startswith(sname): conn_name_matches.append(key) if conn_name_matches: sname = conn_name_matches[-1] # check for duplicates if sname in keys: # duplicate found if self.radioTitleAsk.isChecked(): # ask to overwrite msg = self.tr('Connection {0} exists. 
Overwrite?').format(sname) res = QMessageBox.warning(self, self.tr('Saving server'), msg, QMessageBox.Yes | QMessageBox.No) if res != QMessageBox.Yes: # assign new name with serial sname = serialize_string(sname) elif self.radioTitleNoAsk.isChecked(): # don't ask to overwrite pass elif self.radioTempName.isChecked(): # use temp name sname = serialize_string(sname) # no dups detected or overwrite is allowed if caller in ['mActionAddAms', 'mActionAddAfs']: self.settings.beginGroup('/qgis/connections-%s' % stype[2]) else: self.settings.beginGroup('/qgis/connections-%s' % stype[1]) self.settings.setValue('/%s/url' % sname, clean_ows_url(data_url)) self.settings.endGroup() # open provider window ows_provider = QgsProviderRegistry.instance().createSelectionWidget(stype[2], self) service_type = stype[0] # connect dialog signals to iface slots if service_type == 'OGC:WMS/OGC:WMTS': ows_provider.addRasterLayer.connect(self.iface.addRasterLayer) conn_cmb = ows_provider.findChild(QWidget, 'cmbConnections') connect = 'btnConnect_clicked' elif service_type == 'OGC:WFS': def addVectorLayer(path, name): self.iface.mainWindow().addVectorLayer(path, name, 'WFS') ows_provider.addVectorLayer.connect(addVectorLayer) conn_cmb = ows_provider.findChild(QWidget, 'cmbConnections') connect = 'connectToServer' elif service_type == 'OGC:WCS': ows_provider.addRasterLayer.connect(self.iface.addRasterLayer) conn_cmb = ows_provider.findChild(QWidget, 'mConnectionsComboBox') connect = 'mConnectButton_clicked' elif service_type == 'ESRI:ArcGIS:MapServer': ows_provider.addRasterLayer.connect(self.iface.addRasterLayer) conn_cmb = ows_provider.findChild(QComboBox) connect = 'connectToServer' elif service_type == 'ESRI:ArcGIS:FeatureServer': def addAfsLayer(path, name): self.iface.mainWindow().addVectorLayer(path, name, 'afs') ows_provider.addVectorLayer.connect(addAfsLayer) conn_cmb = ows_provider.findChild(QComboBox) connect = 'connectToServer' ows_provider.setModal(False) ows_provider.show() # open provider dialogue against added OWS index = conn_cmb.findText(sname) if index > -1: conn_cmb.setCurrentIndex(index) # only for wfs if service_type == 'OGC:WFS': ows_provider.cmbConnections_activated(index) elif service_type in ['ESRI:ArcGIS:MapServer', 'ESRI:ArcGIS:FeatureServer']: ows_provider.cmbConnections_activated(index) getattr(ows_provider, connect)() def show_metadata(self): """show record metadata""" if not self.treeRecords.selectedItems(): return item = self.treeRecords.currentItem() if not item: return identifier = get_item_data(item, 'identifier') try: with OverrideCursor(Qt.WaitCursor): cat = CatalogueServiceWeb(self.catalog_url, timeout=self.timeout, # spellok username=self.catalog_username, password=self.catalog_password) cat.getrecordbyid( [self.catalog.records[identifier].identifier]) except ExceptionReport as err: QMessageBox.warning(self, self.tr('GetRecords error'), self.tr('Error getting response: {0}').format(err)) return except KeyError as err: QMessageBox.warning(self, self.tr('Record parsing error'), self.tr('Unable to locate record identifier')) return record = cat.records[identifier] record.xml_url = cat.request crd = RecordDialog() metadata = render_template('en', self.context, record, 'record_metadata_dc.html') style = QgsApplication.reportStyleSheet() crd.textMetadata.document().setDefaultStyleSheet(style) crd.textMetadata.setHtml(metadata) crd.exec_() def show_xml(self): """show XML request / response""" crd = XMLDialog() request_html = highlight_xml(self.context, self.catalog.request) response_html 
= highlight_xml(self.context, self.catalog.response) style = QgsApplication.reportStyleSheet() crd.txtbrXMLRequest.clear() crd.txtbrXMLResponse.clear() crd.txtbrXMLRequest.document().setDefaultStyleSheet(style) crd.txtbrXMLResponse.document().setDefaultStyleSheet(style) crd.txtbrXMLRequest.setHtml(request_html) crd.txtbrXMLResponse.setHtml(response_html) crd.exec_() def reset_buttons(self, services=True, xml=True, navigation=True): """Convenience function to disable WMS/WMTS|WFS|WCS buttons""" if services: self.tbAddData.setEnabled(False) self.mActionAddWms.setEnabled(False) self.mActionAddWfs.setEnabled(False) self.mActionAddWcs.setEnabled(False) self.mActionAddAms.setEnabled(False) self.mActionAddAfs.setEnabled(False) if xml: self.btnShowXml.setEnabled(False) if navigation: self.btnFirst.setEnabled(False) self.btnPrev.setEnabled(False) self.btnNext.setEnabled(False) self.btnLast.setEnabled(False) def help(self): """launch help""" open_url(get_help_url()) def reject(self): """back out of dialogue""" QDialog.reject(self) self.rubber_band.reset() def _get_csw(self): """convenience function to init owslib.csw.CatalogueServiceWeb""" # spellok # connect to the server with OverrideCursor(Qt.WaitCursor): try: self.catalog = CatalogueServiceWeb(self.catalog_url, # spellok timeout=self.timeout, username=self.catalog_username, password=self.catalog_password) return True except ExceptionReport as err: msg = self.tr('Error connecting to service: {0}').format(err) except ValueError as err: msg = self.tr('Value Error: {0}').format(err) except Exception as err: msg = self.tr('Unknown Error: {0}').format(err) QMessageBox.warning(self, self.tr('CSW Connection error'), msg) return False def install_proxy(self): """set proxy if one is set in QGIS network settings""" # initially support HTTP for now if self.settings.value('/proxy/proxyEnabled') == 'true': if self.settings.value('/proxy/proxyType') == 'HttpProxy': ptype = 'http' else: return user = self.settings.value('/proxy/proxyUser') password = self.settings.value('/proxy/proxyPassword') host = self.settings.value('/proxy/proxyHost') port = self.settings.value('/proxy/proxyPort') proxy_up = '' proxy_port = '' if all([user != '', password != '']): proxy_up = '%s:%s@' % (user, password) if port != '': proxy_port = ':%s' % port conn = '%s://%s%s%s' % (ptype, proxy_up, host, proxy_port) install_opener(build_opener(ProxyHandler({ptype: conn}))) def save_connections(): """save servers to list""" ManageConnectionsDialog(0).exec_() def get_item_data(item, field): """return identifier for a QTreeWidgetItem""" return item.data(_get_field_value(field), 32) def set_item_data(item, field, value): """set identifier for a QTreeWidgetItem""" item.setData(_get_field_value(field), 32, value) def _get_field_value(field): """convenience function to return field value integer""" value = 0 if field == 'identifier': value = 0 if field == 'link': value = 1 return value def bbox_to_polygon(bbox): """converts OWSLib bbox object to list of QgsPointXY objects""" if all([bbox.minx is not None, bbox.maxx is not None, bbox.miny is not None, bbox.maxy is not None]): minx = float(bbox.minx) miny = float(bbox.miny) maxx = float(bbox.maxx) maxy = float(bbox.maxy) return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny) # noqa else: return None
CS-SI/QGIS
python/plugins/MetaSearch/dialogs/maindialog.py
Python
gpl-2.0
38568
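MetaSearchDialog.search() above is, at its core, a thin wrapper over owslib. A minimal stand-alone sketch of the same request flow follows; the endpoint URL is a placeholder assumption, not a service shipped with the plugin.

# Hedged sketch of the CSW search flow wrapped by MetaSearchDialog.search().
# The endpoint URL below is a placeholder assumption.
from owslib.csw import CatalogueServiceWeb
from owslib.fes import BBox, PropertyIsLike

csw = CatalogueServiceWeb('https://example.org/csw', timeout=10)

constraints = [
    BBox(['-10', '35', '30', '60'], crs='urn:ogc:def:crs:OGC:1.3:CRS84'),
    PropertyIsLike('csw:AnyText', 'elevation'),
]
# Mirroring the dialog: nesting the list one level deeper ANDs the
# constraints together; a flat list would OR them.
csw.getrecords2(constraints=[constraints], maxrecords=10, esn='full')

print('%s matches' % csw.results['matches'])
for identifier, record in csw.records.items():
    print(identifier, record.type, record.title)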
import pygame
import pygame.locals
from pygame.locals import *
import math
import time


class GameObject(object):
    height = 10
    width = 10
    backgroundColor = (0, 0, 0)
    pos = None
    rect = None
    surface = None
    classification = None
    uniqueid = None

    def __init__(self, gameworld, pos):
        self.pos = pos
        self.uniqueid = time.time()
        self.rect = pygame.Rect(pos[0]-self.width/2, pos[1]-self.height/2,
                                self.width, self.height)
        self.surface = pygame.Surface((self.width, self.height))
        self.surface.fill(self.backgroundColor)
        gameworld.addobj(self)

    def draw(self, gameworld):
        gameworld.windowSurface.blit(self.surface, self.rect)

    # A utility function we can use to find the distance between ourselves
    # and another object in the world.
    # We assume that the objects involved expose a pos attribute,
    # and that pos is an iterable of the form (x, y).
    def distancebetween(self, target):
        # Return the floating-point distance.
        return math.hypot(target.pos[0] - self.pos[0],
                          target.pos[1] - self.pos[1])
jeremyosborne/python
third_party/pygame/10_petri/gameobject.py
Python
mit
1280
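GameObject above only ever touches two members of the world it is given, addobj() and windowSurface, so a usage sketch needs just that much of a stub. The GameWorld class below is an assumption inferred from those calls, not part of the original project, and the import path is assumed.

# Hedged usage sketch; GameWorld is a stub inferred from the two members
# GameObject actually touches (addobj and windowSurface).
import pygame
from gameobject import GameObject  # assumes the module above is importable

class GameWorld(object):
    def __init__(self):
        pygame.init()
        self.windowSurface = pygame.display.set_mode((320, 240))
        self.objects = []

    def addobj(self, obj):
        self.objects.append(obj)

world = GameWorld()
a = GameObject(world, (50, 60))
b = GameObject(world, (110, 140))
print(a.distancebetween(b))  # hypot(60, 80) -> 100.0

for obj in world.objects:
    obj.draw(world)
pygame.display.flip()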
# encoding: UTF-8

import psutil

import uiBasicWidget
from PyQt4 import QtGui

uiBasicWidget.BASIC_FONT = QtGui.QFont(u'微软雅黑', 10)

from uiBasicWidget import *
# from ctaAlgo.uiCtaWidget import CtaEngineManager
from ctaAlgo.uiCtaWidget import CtaEngineManager2
# from dataRecorder.uiDrWidget import DrEngineManager
from riskManager.uiRmWidget import RmEngineManager


########################################################################
class MainWindow(QtGui.QMainWindow):
    """Main window"""

    signalStatusBar = QtCore.pyqtSignal(type(Event()))

    #----------------------------------------------------------------------
    def __init__(self, mainEngine, eventEngine):
        """Constructor"""
        super(MainWindow, self).__init__()

        self.mainEngine = mainEngine
        self.eventEngine = eventEngine

        self.widgetDict = {}    # dict used to keep references to child windows

        self.initUi()
        # self.loadWindowSettings()

    #----------------------------------------------------------------------
    def initUi(self):
        """Initialize the UI"""
        self.setWindowTitle('VnTrader')
        self.initCentral()
        self.initMenu()
        self.initStatusBar()

    #----------------------------------------------------------------------
    def initCentral(self):
        """Initialize the central dock area"""
        widgetCtaW, dockCtaW = self.createCtaDock(CtaEngineManager2, u'策略', QtCore.Qt.LeftDockWidgetArea)
        widgetMarketM, dockMarketM = self.createDock(MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
        widgetCtaL, dockCtaL = self.createCtaLogDock(widgetCtaW.ctaLogMonitor, u'策略日志', QtCore.Qt.BottomDockWidgetArea)
        widgetLogM, dockLogM = self.createDock(LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
        widgetErrorM, dockErrorM = self.createDock(ErrorMonitor, u'错误', QtCore.Qt.BottomDockWidgetArea)
        widgetTradeM, dockTradeM = self.createDock(TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
        widgetOrderM, dockOrderM = self.createDock(OrderMonitor, u'委托', QtCore.Qt.RightDockWidgetArea)
        widgetPositionM, dockPositionM = self.createDock(PositionMonitor, u'持仓', QtCore.Qt.BottomDockWidgetArea)
        widgetAccountM, dockAccountM = self.createDock(AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)

        widgetCtaW.setMinimumHeight(450)
        widgetCtaL.setMaximumWidth(400)

        self.tabifyDockWidget(dockTradeM, dockErrorM)
        self.tabifyDockWidget(dockTradeM, dockLogM)
        self.tabifyDockWidget(dockPositionM, dockAccountM)

        dockOrderM.raise_()
        dockTradeM.raise_()
        dockPositionM.raise_()

        # Connect signals between components
        # widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)

    #----------------------------------------------------------------------
    def initMenu(self):
        """Initialize the menu"""
        # Create actions
        connectCtpAction = QtGui.QAction(u'连接CTP', self)
        connectCtpAction.triggered.connect(self.connectCtp)

        connectLtsAction = QtGui.QAction(u'连接LTS', self)
        connectLtsAction.triggered.connect(self.connectLts)

        connectKsotpAction = QtGui.QAction(u'连接金仕达期权', self)
        connectKsotpAction.triggered.connect(self.connectKsotp)

        connectFemasAction = QtGui.QAction(u'连接飞马', self)
        connectFemasAction.triggered.connect(self.connectFemas)

        connectXspeedAction = QtGui.QAction(u'连接飞创', self)
        connectXspeedAction.triggered.connect(self.connectXspeed)

        connectKsgoldAction = QtGui.QAction(u'连接金仕达黄金', self)
        connectKsgoldAction.triggered.connect(self.connectKsgold)

        connectSgitAction = QtGui.QAction(u'连接飞鼠', self)
        connectSgitAction.triggered.connect(self.connectSgit)

        connectWindAction = QtGui.QAction(u'连接Wind', self)
        connectWindAction.triggered.connect(self.connectWind)

        connectIbAction = QtGui.QAction(u'连接IB', self)
        connectIbAction.triggered.connect(self.connectIb)

        connectOandaAction = QtGui.QAction(u'连接OANDA', self)
        connectOandaAction.triggered.connect(self.connectOanda)

        connectDbAction = QtGui.QAction(u'连接数据库', self)
        connectDbAction.triggered.connect(self.mainEngine.dbConnect)

        # testAction = QtGui.QAction(u'测试', self)
        # testAction.triggered.connect(self.test)

        exitAction = QtGui.QAction(u'退出', self)
        exitAction.triggered.connect(self.close)

        aboutAction = QtGui.QAction(u'关于', self)
        aboutAction.triggered.connect(self.openAbout)

        # contractAction = QtGui.QAction(u'查询合约', self)
        # contractAction.triggered.connect(self.openContract)

        # drAction = QtGui.QAction(u'行情数据记录', self)
        # drAction.triggered.connect(self.openDr)

        # ctaAction = QtGui.QAction(u'CTA策略', self)
        # ctaAction.triggered.connect(self.openCta)

        rmAction = QtGui.QAction(u'风险管理', self)
        rmAction.triggered.connect(self.openRm)

        # Create the menu bar
        menubar = self.menuBar()

        # Only show the gateways that actually exist
        sysMenu = menubar.addMenu(u'系统')
        if 'CTP' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectCtpAction)
        if 'LTS' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectLtsAction)
        if 'FEMAS' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectFemasAction)
        if 'XSPEED' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectXspeedAction)
        if 'KSOTP' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectKsotpAction)
        if 'KSGOLD' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectKsgoldAction)
        if 'SGIT' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectSgitAction)
        sysMenu.addSeparator()
        if 'IB' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectIbAction)
        if 'OANDA' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectOandaAction)
        sysMenu.addSeparator()
        if 'Wind' in self.mainEngine.gatewayDict:
            sysMenu.addAction(connectWindAction)
        sysMenu.addSeparator()
        sysMenu.addAction(connectDbAction)
        sysMenu.addSeparator()
        sysMenu.addAction(exitAction)

        functionMenu = menubar.addMenu(u'功能')
        # functionMenu.addAction(contractAction)
        # functionMenu.addAction(drAction)
        functionMenu.addAction(rmAction)

        # Algorithm related
        # algoMenu = menubar.addMenu(u'算法')
        # algoMenu.addAction(ctaAction)

        # Help
        helpMenu = menubar.addMenu(u'帮助')
        helpMenu.addAction(aboutAction)
        # helpMenu.addAction(testAction)

    #----------------------------------------------------------------------
    def initStatusBar(self):
        """Initialize the status bar"""
        self.statusLabel = QtGui.QLabel()
        self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)

        self.statusBar().addPermanentWidget(self.statusLabel)
        self.statusLabel.setText(self.getCpuMemory())

        self.sbCount = 0
        self.sbTrigger = 10     # refresh every 10 seconds
        self.signalStatusBar.connect(self.updateStatusBar)
        self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)

    #----------------------------------------------------------------------
    def updateStatusBar(self, event):
        """Update CPU and memory info in the status bar"""
        self.sbCount += 1

        if self.sbCount == self.sbTrigger:
            self.sbCount = 0
            self.statusLabel.setText(self.getCpuMemory())

    #----------------------------------------------------------------------
    def getCpuMemory(self):
        """Get CPU and memory usage"""
        cpuPercent = psutil.cpu_percent()
        memoryPercent = psutil.virtual_memory().percent
        return u'CPU使用率:%d%% 内存使用率:%d%%' % (cpuPercent, memoryPercent)

    #----------------------------------------------------------------------
    def connectCtp(self):
        """Connect to the CTP gateway"""
        self.mainEngine.connect('CTP')

    #----------------------------------------------------------------------
    def connectLts(self):
        """Connect to the LTS gateway"""
        self.mainEngine.connect('LTS')

    #----------------------------------------------------------------------
    def connectKsotp(self):
        """Connect to the KSOTP (options) gateway"""
        self.mainEngine.connect('KSOTP')

    #----------------------------------------------------------------------
    def connectFemas(self):
        """Connect to the FEMAS gateway"""
        self.mainEngine.connect('FEMAS')

    #----------------------------------------------------------------------
    def connectXspeed(self):
        """Connect to the XSPEED gateway"""
        self.mainEngine.connect('XSPEED')

    #----------------------------------------------------------------------
    def connectKsgold(self):
        """Connect to the KSGOLD (gold) gateway"""
        self.mainEngine.connect('KSGOLD')

    #----------------------------------------------------------------------
    def connectSgit(self):
        """Connect to the SGIT gateway"""
        self.mainEngine.connect('SGIT')

    #----------------------------------------------------------------------
    def connectWind(self):
        """Connect to the Wind gateway"""
        self.mainEngine.connect('Wind')

    #----------------------------------------------------------------------
    def connectIb(self):
        """Connect to IB"""
        self.mainEngine.connect('IB')

    #----------------------------------------------------------------------
    def connectOanda(self):
        """Connect to OANDA"""
        self.mainEngine.connect('OANDA')

    #----------------------------------------------------------------------
    def test(self):
        """Hook for the test menu action"""
        # api = self.mainEngine.gatewayDict['CTP'].tdApi
        # api.reqID += 1
        # api.reqQryOrder({}, api.reqID)
        # #api.reqQryTrade({}, api.reqID)
        # Manually triggered test code can be put here
        pass

    #----------------------------------------------------------------------
    def openAbout(self):
        """Open the About dialog"""
        try:
            self.widgetDict['aboutW'].show()
        except KeyError:
            self.widgetDict['aboutW'] = AboutWidget(self)
            self.widgetDict['aboutW'].show()

    #----------------------------------------------------------------------
    # def openContract(self):
    #     """Open the contract query window"""
    #     try:
    #         self.widgetDict['contractM'].show()
    #     except KeyError:
    #         self.widgetDict['contractM'] = ContractMonitor(self.mainEngine)
    #         self.widgetDict['contractM'].show()

    #----------------------------------------------------------------------
    # def openCta(self):
    #     """Open the CTA component"""
    #     try:
    #         self.widgetDict['ctaM'].showMaximized()
    #     except KeyError:
    #         self.widgetDict['ctaM'] = CtaEngineManager(self.mainEngine.ctaEngine, self.eventEngine)
    #         self.widgetDict['ctaM'].showMaximized()

    #----------------------------------------------------------------------
    # def openDr(self):
    #     """Open the market data recorder component"""
    #     try:
    #         self.widgetDict['drM'].showMaximized()
    #     except KeyError:
    #         self.widgetDict['drM'] = DrEngineManager(self.mainEngine.drEngine, self.eventEngine)
    #         self.widgetDict['drM'].showMaximized()

    #----------------------------------------------------------------------
    def openRm(self):
        """Open the risk manager component"""
        try:
            self.widgetDict['rmM'].show()
        except KeyError:
            self.widgetDict['rmM'] = RmEngineManager(self.mainEngine.rmEngine, self.eventEngine)
            self.widgetDict['rmM'].show()

    #----------------------------------------------------------------------
    def closeEvent(self, event):
        """Close event handler"""
        reply = QtGui.QMessageBox.question(self, u'退出',
                                           u'确认退出?',
                                           QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
                                           QtGui.QMessageBox.No)

        if reply == QtGui.QMessageBox.Yes:
            for widget in self.widgetDict.values():
                widget.close()
            self.saveWindowSettings()
            self.mainEngine.exit()
            event.accept()
        else:
            event.ignore()

    #----------------------------------------------------------------------
    def createDock(self, widgetClass, widgetName, widgetArea):
        """Create a dock widget"""
        widget = widgetClass(self.mainEngine, self.eventEngine)
        dock = QtGui.QDockWidget(widgetName)
        dock.setWidget(widget)
        dock.setObjectName(widgetName)
        dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
        self.addDockWidget(widgetArea, dock)
        return widget, dock

    #----------------------------------------------------------------------
    def createCtaDock(self, widgetClass, widgetName, widgetArea):
        """Create the CTA dock widget"""
        widget = widgetClass(self.mainEngine.ctaEngine, self.eventEngine)
        dock = QtGui.QDockWidget(widgetName)
        dock.setWidget(widget)
        dock.setObjectName(widgetName)
        dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
        self.addDockWidget(widgetArea, dock)
        return widget, dock

    #----------------------------------------------------------------------
    def createCtaLogDock(self, widgetClass, widgetName, widgetArea):
        """Create the CTA log dock widget (widgetClass is already an instance here)"""
        widget = widgetClass
        dock = QtGui.QDockWidget(widgetName)
        dock.setWidget(widget)
        dock.setObjectName(widgetName)
        dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
        self.addDockWidget(widgetArea, dock)
        return widget, dock

    #----------------------------------------------------------------------
    def saveWindowSettings(self):
        """Save window settings"""
        settings = QtCore.QSettings('vn.py', 'vn.trader')
        settings.setValue('state', self.saveState())
        settings.setValue('geometry', self.saveGeometry())

    #----------------------------------------------------------------------
    def loadWindowSettings(self):
        """Load window settings"""
        settings = QtCore.QSettings('vn.py', 'vn.trader')
        # Depending on the PyQt4 build, settings.value('state') may return:
        # 1. None (first run: nothing in the registry yet, so it is empty)
        # 2. QByteArray (newer PyQt4)
        # 3. QVariant (what the code below needs to run correctly)
        # For compatibility a try...except is used here: cases 1 and 2 simply
        # pass, which can mean the main window settings are not loaded (the
        # save on every exit does succeed).
        try:
            self.restoreState(settings.value('state').toByteArray())
            self.restoreGeometry(settings.value('geometry').toByteArray())
        except AttributeError:
            pass


########################################################################
class AboutWidget(QtGui.QDialog):
    """Display the About information"""

    #----------------------------------------------------------------------
    def __init__(self, parent=None):
        """Constructor"""
        super(AboutWidget, self).__init__(parent)

        self.initUi()

    #----------------------------------------------------------------------
    def initUi(self):
        """"""
        self.setWindowTitle(u'关于VnTrader')

        text = u"""
            Developed by traders, for traders.

            License:MIT
            Website:www.vnpy.org
            Github:www.github.com/vnpy/vnpy
            """

        label = QtGui.QLabel()
        label.setText(text)
        label.setMinimumWidth(500)

        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(label)

        self.setLayout(vbox)
freeitaly/Trading-System
vn.trader/ctaAlgo/uiStrategyWindow.py
Python
mit
16,851
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


art.process_dataset(N=32, FREQ='D', seed=0, trendtype="ConstantTrend",
                    cycle_length=5, transform="Integration", sigma=0.0,
                    exog_count=20, ar_order=12)
antoinecarme/pyaf
tests/artificial/transf_Integration/trend_ConstantTrend/cycle_5/ar_12/test_artificial_32_Integration_ConstantTrend_5_12_20.py
Python
bsd-3-clause
270
#!/usr/bin/python
# This fabric script must be used with RHEL/CentOS/Fedora systems

from fabric.api import sudo, run, env, cd
import sys
import os
import configuration as config
import instances

if os.stat('hosts_file').st_size == 0:
    print "hosts_file is empty\n"
    sys.exit(1)

env.hosts = open('hosts_file', 'r').readlines()
env.user = config.USERNAME
env.port = '22'
env.key_filename = config.KEY_FILE
env.warn_only = True


# Run this for the first time on a box to set it up
def provision_box():
    update_system()
    install_epel()
    install_base()
    install_python()    # fixed: was install_python27(), which is not defined anywhere
    clean_up()


# Update system via yum
def update_system():
    sudo('yum --nogpgcheck --skip-broken -y update')    # fixed: 'update' was duplicated in the command


# Install EPEL repo file
def install_epel():
    sudo('yum install --nogpgcheck -y http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm')


# Install packages defined below
def yum_install(*packages):
    sudo('yum install --nogpgcheck --skip-broken -y %s' % ' '.join(packages), shell=False)


# Remove packages defined below
def yum_remove(*packages):
    sudo('yum remove -y --skip-broken %s' % ' '.join(packages), shell=False)


# Install dependencies on system via yum
def install_base():
    yum_install('dos2unix glances screen gcc make python-devel python-setuptools python-pip git rubygems rpmbuild ruby-devel')


# Install Docker LXC Engine
def install_docker():
    yum_install('docker-io')
    sudo('/sbin/chkconfig docker on')
    sudo('/sbin/service docker start')


# Install Python pip packages
def pip_install(*pip):
    sudo('pip install %s' % ' '.join(pip), shell=False)


# Upgrade Python pip packages
def pip_upgrade(*pip):
    sudo('pip install --upgrade %s' % ' '.join(pip), shell=False)


# Install Ruby gems
def gem_install(*gem):
    sudo('gem install %s' % ' '.join(gem), shell=False)


# Update Ruby gem
def gem_update(*gem):
    sudo('gem update %s' % ' '.join(gem), shell=False)


# Install Python 2.7.8
def install_python():
    run('curl -O https://www.python.org/ftp/python/2.7.8/Python-2.7.8.tar.xz')
    run('tar xfv Python-2.7.8.tar.xz')
    with cd('Python-2.7.8'):
        sudo('./configure --prefix=/usr/local --enable-unicode=ucs4 --enable-shared LDFLAGS="-Wl,-rpath /usr/local/lib"')
        sudo('make && make altinstall')


# Clean up yum/rpm & post-install of Python 2.7
def clean_up():
    yum_remove('gcc')
    sudo('rm -f /var/lib/rpm/__db*; rpm --rebuilddb')
    sudo('yum history new; yum clean all')
    sudo('rm -rf Python-*')
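# ---- Editor's note (not part of the original fabfile) ----
# These tasks are meant to be driven by the Fabric 1.x CLI, with hosts and
# credentials loaded from hosts_file and configuration.py at import time, e.g.:
#   fab provision_box
#   fab yum_install:htop,tmux
# A composite task in the same style (editor's sketch; the task name and the
# 'docker-py' package choice are assumptions, not from the original):
def provision_docker_box():
    provision_box()
    install_docker()
    pip_install('docker-py')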
marshyski/aws-fabric
fabfile.py
Python
apache-2.0
2,489
#!/usr/bin/env python

import sys


def read_params(args):
    import argparse as ap
    import textwrap

    p = ap.ArgumentParser(description="TBA")

    p.add_argument('--in', metavar='INPUT_FILE', type=str, nargs='?',
                   default=sys.stdin,
                   help="the Qiime OTU table file "
                        "[ stdin if not present ]")
    p.add_argument('--md', metavar='METADATA_FILE', type=str, nargs='?',
                   default=None,
                   help="the Qiime OTU table file "
                        "[ only OTU table without metadata if not present ]")
    p.add_argument('--out', metavar='OUTPUT_FILE', type=str, nargs='?',
                   default=sys.stdout,
                   help="the output file "
                        "[stdout if not present]")
    p.add_argument('-c', metavar="class attribute", type=str,
                   help="the attribute to use as class")
    p.add_argument('-s', metavar="subclass attribute", type=str,
                   help="the attribute to use as subclass")
    p.add_argument('-u', metavar="subject attribute", type=str,
                   help="the attribute to use as subject")

    return vars(p.parse_args())


def qiime2lefse(fin, fmd, fout, all_md, sel_md):
    with (fin if fin == sys.stdin else open(fin)) as inpf:
        lines = [list(ll) for ll in
                 (zip(*[l.strip().split('\t') for l in inpf.readlines()[1:]]))]

    for i, (l1, l2) in enumerate(zip(lines[0], lines[-1])):
        if not l2 == 'Consensus Lineage':
            lines[-1][i] = l2 + "|" + l1

    data = dict([(l[0], l[1:]) for l in lines[1:]])

    md = {}
    if fmd:
        with open(fmd) as inpf:
            mdlines = [l.strip().split('\t') for l in inpf.readlines()]

        mdf = mdlines[0][1:]

        for l in mdlines:
            mdd = dict(zip(mdf, l[1:]))
            md[l[0]] = mdd

    selected_md = md.values()[0].keys() if md else []
    if not all_md:
        selected_md = [s for s in sel_md if s]

    out_m = [selected_md +
             list([d.replace(";", "|").replace("\"", "")
                   for d in data['Consensus Lineage']])]
    for k, v in data.items():
        if k == 'Consensus Lineage':
            continue
        out_m.append([md[k][kmd] for kmd in selected_md] + list(v))

    with (fout if fout == sys.stdout else open(fout, "w")) as outf:
        for l in zip(*out_m):
            outf.write("\t".join(l) + "\n")


if __name__ == '__main__':
    pars = read_params(sys.argv)

    qiime2lefse(fin=pars['in'],
                fmd=pars['md'],
                fout=pars['out'],
                all_md=not pars['c'] and not pars['s'] and not pars['u'],
                sel_md=[pars['c'], pars['s'], pars['u']])
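# ---- Editor's note (not part of the original script) ----
# Typical invocation, per the argparse options above (file and column names
# are hypothetical):
#   python qiime2lefse.py --in otu_table.txt --md sample_metadata.txt \
#       --out lefse_input.txt -c Treatment -s BodySite -u SubjectID
# With none of -c/-s/-u given, all_md is True and every metadata column is kept.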
geoffrosen/vaginal-microbiome
bin/custom_lefse/qiime2lefse.py
Python
mit
2,945
# Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.api.compute import base
from tempest import test


class VersionV3Test(base.BaseV3ComputeTest):
    _interface = 'json'

    @test.attr(type='gate')
    def test_version(self):
        # Get version information
        resp, version = self.version_client.get_version()
        self.assertEqual(200, resp.status)
        self.assertIn("id", version)
        self.assertEqual("v3.0", version["id"])
ntymtsiv/tempest
tempest/api/compute/v3/test_version.py
Python
apache-2.0
1,043
""" Test if Matplotlib is using PyQt4 as backend """ def test_matplotlib_qt_backend(): print ("testing matplotlib qt backend ...") try: # Matplot changed its file structure several times in history, so we # must test all try: from matplotlib.backends.qt_compat import QtCore except: try: from matplotlib.backends.qt4_compat import QtCore except: from matplotlib.backends.qt import QtCore from PyQt4 import QtCore as QtCore4 if QtCore is QtCore4: print ("... test passed") return True else: using = QtCore.__name__.split('.')[0] expect = QtCore4.__name__.split('.')[0] print ("... test FAIL\n" + " Matplotlib is using %s\n" % using + " It must use %s\n" % expect + " Possible reasons for that are that " + "%s is not installed " % expect + "or the envoriment variable QT_API is overwriting it.") return False except: import traceback print(traceback.format_exc()) print ("... Test fail is an expected way! We would like to know why, " "please report in 'https://github.com/nguy/artview/issues'") return None if __name__ == '__main__': test_matplotlib_qt_backend()
jjhelmus/artview
tests/qt.py
Python
bsd-3-clause
1,431
import pycurl
import json
import csv
import certifi
import io
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font

### Setup Variables ###
URL = 'https://{id}.live.dynatrace.com/api/v1/'
APITOKEN = 'XXXXXXXXXXXXXXXXXXXXX'
DEST_FILENAME = 'dt-export.xlsx'


### function to go get the data
def dtApiQuery(endpoint):
    buffer = io.BytesIO()
    c = pycurl.Curl()
    c.setopt(c.URL, URL + endpoint)
    c.setopt(pycurl.CAINFO, certifi.where())
    c.setopt(c.HTTPHEADER, ['Authorization: Api-Token ' + APITOKEN])
    c.setopt(pycurl.WRITEFUNCTION, buffer.write)
    c.perform()
    print('Status: %d' % c.getinfo(c.RESPONSE_CODE))
    c.close()
    return (buffer.getvalue().decode('UTF-8'))


### Setup workbook
wb = Workbook()
wsHosts = wb.create_sheet("hosts")
wsHostHost = wb.create_sheet("host-host")
wsProcess = wb.create_sheet("processes")
wsProcessProcess = wb.create_sheet("process-process")
wsProcessHost = wb.create_sheet("process-host")
wb.remove(wb.active)

### Get & Process hosts data
hostsIO = dtApiQuery('entity/infrastructure/hosts')
hosts = json.loads(hostsIO)

wsHosts.append(['hostId', 'displayName', 'osType', 'osVersion', 'hypervisorType',
                'ipAddress1', 'ipAddress2', 'ipAddress3'])
for host in hosts:
    wsHosts.append([
        host['entityId'],
        host['displayName'],
        host['osType'],
        host['osVersion'],
        host['hypervisorType'] if 'hypervisorType' in host else '',
        host['ipAddresses'][0] if 'ipAddresses' in host else '',
        host['ipAddresses'][1] if 'ipAddresses' in host and len(host['ipAddresses']) > 1 else '',
        host['ipAddresses'][2] if 'ipAddresses' in host and len(host['ipAddresses']) > 2 else ''
    ])

wsHostHost.append(['fromHostId', 'toHostId'])
for fromHost in hosts:
    if 'toRelationships' in fromHost and 'isNetworkClientOfHost' in fromHost['toRelationships']:
        for toHost in fromHost['toRelationships']['isNetworkClientOfHost']:
            wsHostHost.append([
                fromHost['entityId'],
                toHost,
            ])

### Get & Process processes data
processesIO = dtApiQuery('entity/infrastructure/processes')
processes = json.loads(processesIO)

wsProcess.append(['processId', 'displayName', 'softwareType', 'softwareVersion',
                  'port1', 'port2', 'port3', 'port4', 'port5'])
for process in processes:
    wsProcess.append([
        process['entityId'],
        process['displayName'],
        process['softwareTechnologies'][0]['type'] if 'softwareTechnologies' in process else '',
        process['softwareTechnologies'][0]['version'] if 'softwareTechnologies' in process else '',
        process['listenPorts'][0] if 'listenPorts' in process else '',
        process['listenPorts'][1] if 'listenPorts' in process and len(process['listenPorts']) > 1 else '',
        process['listenPorts'][2] if 'listenPorts' in process and len(process['listenPorts']) > 2 else '',
        process['listenPorts'][3] if 'listenPorts' in process and len(process['listenPorts']) > 3 else '',
        process['listenPorts'][4] if 'listenPorts' in process and len(process['listenPorts']) > 4 else ''
    ])

wsProcessProcess.append(['fromProcessId', 'toProcessId'])
for fromProcess in processes:
    if 'toRelationships' in fromProcess and 'isNetworkClientOf' in fromProcess['toRelationships']:
        for toProcess in fromProcess['toRelationships']['isNetworkClientOf']:
            wsProcessProcess.append([
                fromProcess['entityId'],
                toProcess,
            ])

wsProcessHost.append(['processId', 'hostId'])
for process in processes:
    if 'fromRelationships' in process and 'isProcessOf' in process['fromRelationships']:
        for host in process['fromRelationships']['isProcessOf']:
            wsProcessHost.append([
                process['entityId'],
                host,
            ])

### set column widths
for ws in wb.worksheets:
    for column_cells in ws.columns:
        length = max(len(str(cell.value)) for cell in column_cells)
        ws.column_dimensions[column_cells[0].column].width = length + 1

### Set header format
for ws in wb.worksheets:
    for cell in ws["1:1"]:
        cell.style = 'Headline 3'

### Generate FW Rule Sheet
wsFWRules = wb.create_sheet("FWRules", 0)
wsFWRules.append([
    'Linking Pointers', '', '', '', '',
    'Firewall Rule', '', '', '', '',
    'Source Extended Info', '', '', '',
    'Destination Extended Info', '', '', '',
    'Filtering'
])
wsFWRules.merge_cells('A1:E1')
wsFWRules.merge_cells('F1:J1')
wsFWRules.merge_cells('K1:N1')
wsFWRules.merge_cells('O1:R1')
wsFWRules.append([
    'fromProcessId', 'toProcessId', 'fromHostId', 'toHostId', '',
    'srcIP', 'dstIP', 'proto', 'port', '',
    'srcHostName', 'srcProcessName', 'srcProcessType', '',
    'dstHostname', 'dstProcessName', 'dstProcessType', '',
    'isIntraHost?'
])
for col in ['A', 'B', 'C', 'D', 'E']:
    wsFWRules.column_dimensions[col].hidden = True
wsFWRules["A1"].style = "Accent3"
wsFWRules["F1"].style = "Accent1"
wsFWRules["K1"].style = "Accent4"
wsFWRules["O1"].style = "Accent5"
wsFWRules["S1"].style = "Accent2"
for cell in wsFWRules["1:1"]:
    cell.font = Font(bold=True, color='FFFFFF')
    cell.alignment = Alignment(horizontal='center')
for cell in wsFWRules["2:2"]:
    cell.style = 'Headline 3'
wsFWRules.sheet_properties.tabColor = '0066FF'

i = 3
for row in wsProcessProcess.rows:
    wsFWRules.append([
        "='process-process'!A%i" % i,
        "='process-process'!B%i" % i,
        "=VLOOKUP(A%i,'process-host'!$A:$B,2,FALSE)" % i,
        "=VLOOKUP(B%i,'process-host'!$A:$B,2,FALSE)" % i,
        "",
        "=VLOOKUP(C%i,'hosts'!$A:$F,6,FALSE)" % i,
        "=VLOOKUP(D%i,'hosts'!$A:$F,6,FALSE)" % i,
        "TCP",
        "=IF(LEN(VLOOKUP(B%i,'processes'!$A:$E,5,FALSE))=0,\"\",VLOOKUP(B%i,'processes'!$A:$E,5,FALSE))" % (i, i),
        "",
        "=VLOOKUP(C%i,'hosts'!$A:$B,2,FALSE)" % i,
        "=VLOOKUP(A%i,'processes'!$A:$B,2,FALSE)" % i,
        "=VLOOKUP(A%i,'processes'!$A:$C,3,FALSE)" % i,
        "",
        "=VLOOKUP(D%i,'hosts'!$A:$B,2,FALSE)" % i,
        "=VLOOKUP(B%i,'processes'!$A:$B,2,FALSE)" % i,
        "=VLOOKUP(B%i,'processes'!$A:$C,3,FALSE)" % i,
        "",
        "=IF(C%i=D%i,TRUE,FALSE)" % (i, i)
    ])
    i += 1

wsFWRules.column_dimensions['F'].width = wsHosts.column_dimensions['F'].width
wsFWRules.column_dimensions['G'].width = wsHosts.column_dimensions['F'].width
wsFWRules.column_dimensions['H'].width = 8
wsFWRules.column_dimensions['I'].width = wsProcess.column_dimensions['E'].width
wsFWRules.column_dimensions['J'].width = 5
wsFWRules.column_dimensions['K'].width = wsHosts.column_dimensions['B'].width
wsFWRules.column_dimensions['L'].width = wsProcess.column_dimensions['B'].width
wsFWRules.column_dimensions['M'].width = wsProcess.column_dimensions['C'].width
wsFWRules.column_dimensions['N'].width = 5
wsFWRules.column_dimensions['O'].width = wsHosts.column_dimensions['B'].width
wsFWRules.column_dimensions['P'].width = wsProcess.column_dimensions['B'].width
wsFWRules.column_dimensions['Q'].width = wsProcess.column_dimensions['C'].width
wsFWRules.column_dimensions['R'].width = 5
wsFWRules.column_dimensions['S'].width = 8
wsFWRules.auto_filter.ref = "A2:S2"

### Output file
wb.save(filename=DEST_FILENAME)
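# ---- Editor's sketch (not part of the original script) ----
# The pycurl call in dtApiQuery is equivalent to a plain GET with an
# Api-Token header; the same query using `requests` (assumed available)
# would look like this:
def dt_api_query_requests(endpoint):
    import requests
    resp = requests.get(URL + endpoint,
                        headers={'Authorization': 'Api-Token ' + APITOKEN})
    print('Status: %d' % resp.status_code)
    return resp.text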
ruxit/data-export-api
ExcelExport/dt-excel.py
Python
bsd-3-clause
6,726
# 1.1.3 Write a program that takes three integer command-line arguments and
# prints equal if all three are equal, and not equal otherwise.

# Needed to get command line arguments
import sys


def main(argv=None):
    '''
    Function called to run main script including unit tests

    INPUT: List of arguments from the command line
    RETURNS: Exit code to be passed to sys.exit():
             -1: Invalid input
              0: Script completed successfully
    '''
    if argv is None:
        argv = sys.argv
    options = argv[1:]
    return check_and_compare_opts(options)


def check_and_compare_opts(options):
    '''
    Checks options are integers, and compares them if so.

    INPUT: List of options
    RETURNS: -1 if options aren't 3 integers; 0 otherwise, after printing
             the comparison result
    '''
    int_options = convert_args(options)
    if int_options is None or len(int_options) != 3:
        print('Error - need 3 integer arguments, got {}'.format(options))
        return -1

    if check_equal(int_options):
        print('equal')
        return 0
    else:
        print('not equal')
        return 0


def list_contains_ints(vals):
    '''
    Checks if all elements in a list are ints

    INPUT: list of arbitrary length
    RETURNS: bool showing whether all values are ints
    '''
    for val in vals:
        if type(val) is not int:
            return False
    return True


def convert_args(args):
    '''
    Convert all entries in list to integer

    INPUT: List of arguments
    RETURN: List of integers (if they can be converted), else None
    '''
    try:
        arg_ints = [int(arg) for arg in args]
    except:
        # print('Error converting {} to integer'.format(args))
        return None
    return arg_ints


def check_equal(vals):
    '''
    Checks if all the values are equal (assumes list of ints)

    INPUT: List of integer values
    RETURNS: Boolean showing whether all values are equal
    '''
    first_val = vals[0]
    for val in vals:
        if first_val != val:
            return False
    return True


def check_equal_recurse(vals):
    '''
    Checks if all the values are equal (assumes list of ints)

    INPUT: List of integer values
    RETURNS: Boolean showing whether all values are equal
    '''
    # Fixed: the original recursed with no arguments and dropped the
    # comparison result; recurse on the tail of the list instead.
    if len(vals) == 2:
        return vals[0] == vals[1]
    else:
        return vals[0] == vals[1] and check_equal_recurse(vals[1:])


if __name__ == '__main__':
    sys.exit(main())
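# ---- Editor's note (not part of the original exercise) ----
# Example runs (the printed word is the answer; main() returns the exit code):
#   $ python ex1.1.3.py 7 7 7       -> prints 'equal', exits 0
#   $ python ex1.1.3.py 7 7 8       -> prints 'not equal', exits 0
#   $ python ex1.1.3.py 7 seven 7   -> prints the error message, nonzero exit status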
timgasser/algorithms_4ed
ch1_fundamentals/ex1.1.3.py
Python
mit
2,468
""" Render to gtk from agg """ import os import matplotlib from matplotlib.figure import Figure from matplotlib.backends.backend_agg import FigureCanvasAgg from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\ show, draw_if_interactive,\ error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \ NavigationToolbar2GTK from matplotlib.backends._gtkagg import agg_to_gtk_drawable DEBUG = False class NavigationToolbar2GTKAgg(NavigationToolbar2GTK): def _get_canvas(self, fig): return FigureCanvasGTKAgg(fig) class FigureManagerGTKAgg(FigureManagerGTK): def _get_toolbar(self, canvas): # must be inited after the window, drawingArea and figure # attrs are set if matplotlib.rcParams['toolbar']=='classic': toolbar = NavigationToolbar (canvas, self.window) elif matplotlib.rcParams['toolbar']=='toolbar2': toolbar = NavigationToolbar2GTKAgg (canvas, self.window) else: toolbar = None return toolbar def new_figure_manager(num, *args, **kwargs): """ Create a new figure manager instance """ if DEBUG: print('backend_gtkagg.new_figure_manager') FigureClass = kwargs.pop('FigureClass', Figure) thisFig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, thisFig) def new_figure_manager_given_figure(num, figure): """ Create a new figure manager instance for the given figure. """ canvas = FigureCanvasGTKAgg(figure) return FigureManagerGTKAgg(canvas, num) if DEBUG: print('backend_gtkagg.new_figure_manager done') class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg): filetypes = FigureCanvasGTK.filetypes.copy() filetypes.update(FigureCanvasAgg.filetypes) def configure_event(self, widget, event=None): if DEBUG: print('FigureCanvasGTKAgg.configure_event') if widget.window is None: return try: del self.renderer except AttributeError: pass w,h = widget.window.get_size() if w==1 or h==1: return # empty fig # compute desired figure size in inches dpival = self.figure.dpi winch = w/dpival hinch = h/dpival self.figure.set_size_inches(winch, hinch) self._need_redraw = True self.resize_event() if DEBUG: print('FigureCanvasGTKAgg.configure_event end') return True def _render_figure(self, pixmap, width, height): if DEBUG: print('FigureCanvasGTKAgg.render_figure') FigureCanvasAgg.draw(self) if DEBUG: print('FigureCanvasGTKAgg.render_figure pixmap', pixmap) #agg_to_gtk_drawable(pixmap, self.renderer._renderer, None) buf = self.buffer_rgba() ren = self.get_renderer() w = int(ren.width) h = int(ren.height) pixbuf = gtk.gdk.pixbuf_new_from_data( buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4) pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h, gtk.gdk.RGB_DITHER_NONE, 0, 0) if DEBUG: print('FigureCanvasGTKAgg.render_figure done') def blit(self, bbox=None): if DEBUG: print('FigureCanvasGTKAgg.blit', self._pixmap) agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox) x, y, w, h = self.allocation self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap, 0, 0, 0, 0, w, h) if DEBUG: print('FigureCanvasGTKAgg.done') def print_png(self, filename, *args, **kwargs): # Do this so we can save the resolution of figure in the PNG file agg = self.switch_backends(FigureCanvasAgg) return agg.print_png(filename, *args, **kwargs) """\ Traceback (most recent call last): File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event self._render_figure(self._pixmap, w, h) File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure 
pixbuf = gtk.gdk.pixbuf_new_from_data( ValueError: data length (3156672) is less then required by the other parameters (3160608) """
alephu5/Soundbyte
environment/lib/python3.3/site-packages/matplotlib/backends/backend_gtkagg.py
Python
gpl-3.0
4,299
#!c:\users\adebayo\myedge\myvenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#

try:
    from tkinter import Tk, Canvas, NW
except ImportError:
    from Tkinter import Tk, Canvas, NW

from PIL import Image, ImageTk
import sys


#
# painter widget

class PaintCanvas(Canvas):
    def __init__(self, master, image):
        Canvas.__init__(self, master,
                        width=image.size[0], height=image.size[1])

        # fill the canvas
        self.tile = {}
        self.tilesize = tilesize = 32
        xsize, ysize = image.size
        for x in range(0, xsize, tilesize):
            for y in range(0, ysize, tilesize):
                box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
                tile = ImageTk.PhotoImage(image.crop(box))
                self.create_image(x, y, image=tile, anchor=NW)
                self.tile[(x, y)] = box, tile

        self.image = image

        self.bind("<B1-Motion>", self.paint)

    def paint(self, event):
        xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
        im = self.image.crop(xy)

        # process the image in some fashion
        im = im.convert("L")

        self.image.paste(im, xy)
        self.repair(xy)

    def repair(self, box):
        # update canvas
        dx = box[0] % self.tilesize
        dy = box[1] % self.tilesize
        for x in range(box[0]-dx, box[2]+1, self.tilesize):
            for y in range(box[1]-dy, box[3]+1, self.tilesize):
                try:
                    xy, tile = self.tile[(x, y)]
                    tile.paste(self.image.crop(xy))
                except KeyError:
                    pass  # outside the image
        self.update_idletasks()


#
# main

if len(sys.argv) != 2:
    print("Usage: painter file")
    sys.exit(1)

root = Tk()

im = Image.open(sys.argv[1])

if im.mode != "RGB":
    im = im.convert("RGB")

PaintCanvas(root, im).pack()

root.mainloop()
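# ---- Editor's note (not part of the original demo) ----
# Design note on the 32x32 tiling: Tk repaints an entire PhotoImage whenever
# it is pasted into, so repair() pastes only into the tiles overlapping the
# 20x20 brush box -- each stroke touches at most four tiles instead of
# forcing a full-canvas redraw.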
tadebayo/myedge
myvenv/Scripts/painter.py
Python
mit
2,141
import re
import sys

import whoisSrvDict
import whoispy_sock
import parser_branch

OK = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'


def query(domainName):
    rawMsg = ""
    tldName = ""
    whoisSrvAddr = ""

    regex = re.compile('.+\..+')
    match = regex.search(domainName)
    if not match:
        # Invalid domain
        _display_fail("Invalid domain format")
        return None

    # Divide off the TLD
    regex = re.compile('\..+')
    match = regex.search(domainName)
    if match:
        tldName = match.group()
    else:
        _display_fail("Can not parse TLD")
        return None

    # Get TLD List
    if not (tldName in whoisSrvDict.get_whoisSrvDict()):
        _display_fail("Not Found TLD whois server")
        return None

    whoisSrvAddr = whoisSrvDict.get_whoisSrvDict().get(tldName)
    rawMsg = whoispy_sock.get_rawMsg(whoisSrvAddr, domainName, 43)

    return parser_branch.get_parser(rawMsg, whoisSrvAddr)


# Display methods
def _display_fail(msg):
    sys.stdout.write(FAIL)
    sys.stdout.write("%s\n" % msg)
    sys.stdout.write(ENDC)


def _display_safe(msg):
    sys.stdout.write(OK)
    sys.stdout.write("%s\n" % msg)
    sys.stdout.write(ENDC)
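# ---- Editor's sketch (not part of the original module) ----
# Assumed client-side usage; the return shape is whatever parser_branch
# produces, and None signals bad input or an unknown TLD:
#
#   from whoispy import whoispy
#   result = whoispy.query('example.com')
#   if result is not None:
#       print(result)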
nemumu/whoispy
whoispy/whoispy.py
Python
gpl-3.0
1,198
import os
import json

LATEST_SCHEMA_VERSION = 2


def _id_to_name(id):
    return ' '.join(id.split('_'))


def _name_to_id(name):
    return '_'.join(name.split(' '))


def ensure_schema_structure(schema):
    schema['pages'] = schema.get('pages', [])
    schema['title'] = schema['name']
    schema['version'] = schema.get('version', 1)
    return schema


here = os.path.split(os.path.abspath(__file__))[0]


def from_json(fname):
    with open(os.path.join(here, fname)) as f:
        return json.load(f)


OSF_META_SCHEMAS = [
    ensure_schema_structure(from_json('osf-open-ended-1.json')),
    ensure_schema_structure(from_json('osf-open-ended-2.json')),
    ensure_schema_structure(from_json('osf-standard-1.json')),
    ensure_schema_structure(from_json('osf-standard-2.json')),
    ensure_schema_structure(from_json('brandt-prereg-1.json')),
    ensure_schema_structure(from_json('brandt-prereg-2.json')),
    ensure_schema_structure(from_json('brandt-postcomp-1.json')),
    ensure_schema_structure(from_json('brandt-postcomp-2.json')),
    ensure_schema_structure(from_json('prereg-prize.json')),
    ensure_schema_structure(from_json('confirmatory-general-2.json')),
    ensure_schema_structure(from_json('egap-project-2.json')),
    ensure_schema_structure(from_json('veer-1.json')),
]

ACTIVE_META_SCHEMAS = (
    'Prereg Challenge',
    'Open-Ended Registration',
    'OSF-Standard Pre-Data Collection Registration',
    'Replication Recipe (Brandt et al., 2013): Pre-Registration',
    'Replication Recipe (Brandt et al., 2013): Post-Completion',
    "Pre-Registration in Social Psychology (van 't Veer & Giner-Sorolla, 2016): Pre-Registration",
)
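# ---- Editor's sketch (not part of the original module) ----
# ensure_schema_structure fills in defaults without overwriting existing
# keys; 'name' must already be present:
#
#   ensure_schema_structure({'name': 'Open-Ended Registration'})
#   # -> {'name': 'Open-Ended Registration', 'pages': [],
#   #     'title': 'Open-Ended Registration', 'version': 1}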
jnayak1/osf.io
website/project/metadata/schemas.py
Python
apache-2.0
1,660
# -*- coding: utf-8 -*-

"""
xccdf.models.notice includes the class Notice
to create or import a <xccdf:notice> element.

This module is part of the xccdf library.

Author: Rodrigo Núñez <rnunezmujica@icloud.com>
"""

# XCCDF
from xccdf.models.html_element import HTMLElement
from xccdf.exceptions import RequiredAttributeException


class Notice(HTMLElement):
    """
    Class to implement <xccdf:notice> element.
    """

    def __init__(self, xml_element=None, id=None):
        """
        Initializes the attrs attribute to serialize the attributes.

        :param lxml.etree._Element xml_element: XML element to load.
        :param str id: Id attribute.
        :raises ValueError: If no parameter is given.
        :raises RequiredAttributeException: If after importing the xml_element
                                            the id attribute is missing.
        """
        if xml_element is None and id is None:
            raise ValueError('either xml_element or id are required')

        tag_name = 'notice' if xml_element is None else None
        self.id = id

        super(Notice, self).__init__(xml_element, tag_name)

        if not hasattr(self, 'id') or self.id == '' or self.id is None:
            raise RequiredAttributeException('id attribute required')

    def __str__(self):
        """
        String representation of Notice object.

        :returns: Notice object as a string.
        :rtype: str
        """
        string_value = 'notice {id}'.format(id=self.id)

        if hasattr(self, 'lang'):
            string_value += ' ({lang})'.format(lang=self.lang)

        return string_value
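# ---- Editor's sketch (not part of the original module) ----
# Constructing a Notice directly by id, per the constructor contract above:
#
#   notice = Notice(id='terms_of_use')
#   str(notice)   # -> 'notice terms_of_use'
#   Notice()      # raises ValueError('either xml_element or id are required')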
Dalveen84/xccdf
src/xccdf/models/notice.py
Python
lgpl-3.0
1,634
from Crypto.Cipher import AES
import base64

mode = AES.MODE_OFB
iv = "\x00" * 16
PYEXFIL_DEFAULT_PASSWORD = base64.b64decode('VEhBVElTQURFQURQQVJST1Qh')

"""
START Symmetric stream mode for AES
"""


def AESEncryptOFB(key, text):
    if type(key) == str:
        key = bytes(key, 'ascii')

    pad_len = (-len(text)) % 16
    padded_text = text + b'\x00' * pad_len
    padded_key = key + b'\x00' * (32 - len(key))

    encs = AES.new(padded_key, mode, iv.encode("utf8"))
    plain = encs.encrypt(padded_text)
    return plain


def AESDecryptOFB(key, text, unpad=True):
    if type(key) == str:
        key = bytes(key, 'ascii')

    padded_key = key + b'\x00' * (32 - len(key))
    decryptor = AES.new(padded_key, mode, iv.encode("utf8"))
    plain = decryptor.decrypt(text)
    if unpad:
        plain = plain.replace(b'\x00', b'')
    return plain


"""
END Symmetric stream mode for AES
"""

"""
RC4 START
"""


# https://github.com/bozhu/RC4-Python/blob/master/rc4.py
def KSA(key):
    keylength = len(key)

    S = list(range(256))    # fixed: range() is not index-assignable in Python 3

    j = 0
    for i in range(256):
        j = (j + S[i] + key[i % keylength]) % 256
        S[i], S[j] = S[j], S[i]  # swap

    return S


def PRGA(S):
    i = 0
    j = 0
    while True:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]  # swap

        K = S[(S[i] + S[j]) % 256]
        yield K


def RC4_unwrapped(key):
    S = KSA(key)
    return PRGA(S)


def RC4(key, plaintext):
    def convert_key(s):
        return [ord(c) for c in s]
    key = convert_key(key)

    keystream = RC4_unwrapped(key)
    data = ""
    for c in plaintext:
        data += "%02X" % (ord(c) ^ next(keystream))  # fixed: generator.next() is Python 2 only
    return data  # fixed: the original built `data` but never returned it


"""
RC4 Ends
"""
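# ---- Editor's sketch (not part of the original module) ----
# Round-tripping the AES-OFB helpers above (key and message are made up;
# requires PyCrypto/PyCryptodome):
def _demo_aes_roundtrip():
    key = b'sixteen byte key'
    ciphertext = AESEncryptOFB(key, b'attack at dawn')
    assert AESDecryptOFB(key, ciphertext) == b'attack at dawn'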
ytisf/PyExfil
pyexfil/includes/encryption_wrappers.py
Python
mit
1,543