code | repo_name | path | language | license | size
---|---|---|---|---|---|
from scrapy.commands.crawl import Command
from scrapy.exceptions import UsageError
from typing import List, Any
class StatusCommand(Command):
def run(self, args, opts):
# type: (List[str], Any) -> None
if len(args) < 1:
raise UsageError()
elif len(args) > 1:
raise UsageError(
"running 'scrapy crawl' with more than one spider is no longer supported")
spname = args[0]
crawler = self.crawler_process.create_crawler(spname)
self.crawler_process.crawl(crawler)
self.crawler_process.start()
        # Check whether the spider recorded any errors in its stats
        if crawler.spider.has_error:
            # Return a non-zero exit code if any errors occurred
self.exitcode = 1
| sonali0901/zulip | tools/documentation_crawler/documentation_crawler/commands/crawl_with_status.py | Python | apache-2.0 | 784 |
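# A minimal usage sketch, not from the repository above: the command exits
# non-zero when the spider records errors, so a wrapper script can gate CI on
# the return code. The spider name "documentation_crawler" is an assumption.
import subprocess

result = subprocess.run(["scrapy", "crawl_with_status", "documentation_crawler"])
if result.returncode != 0:
    raise SystemExit("documentation crawl reported errors")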
from __future__ import print_function
import os
import networking
import tts.tts as tts
import schedule
import robot_util
# TODO
# If I pull the send_video stuff into controller, the ability to restart the ffmpeg process would
# be useful
# /stationary only able to move left and right.
#/mod username
#/unmod username
#/stop username
#/unstop username
#/tts volume (int)
#/mic mute
#/mic unmute
#/xcontrol username
#/xcontrol username (int time)
#/xcontrol off
#/speed (int)
# Can't Do
# Can't do anything that requires server side stuff. Ban and timeout are do-able,
# but only at the bot level.
#(blocking a user also bans them from your channel)
#/block username
#/unblock username
# Done
#/devmode on
#/devmode off
#/anon control on
#/anon control off
#/anon tts on
#/anon tts off
#/anon off
#/anon on
#/tts mute
#/tts unmute
#/brightness (int)
#/contrast (int)
#/saturation (int)
#/ban username
#/unban username
#/timeout username
#/untimeout username
move_handler = None
dev_mode = None
dev_mode_mods = False
anon_control = True
owner = None
robot_id = None
api_key = None
stationary = None
banned=[]
mods=[]
def setup(robot_config):
    global owner
    global robot_id
    global api_key
    global buttons_json
    global mods  # needed so the module-level moderator list is updated below
owner = robot_config.get('robot', 'owner')
robot_id = robot_config.get('robot', 'robot_id')
if robot_config.has_option('robot', 'api_key'):
api_key = robot_config.get('robot', 'api_key')
if api_key == "":
api_key = None
mods = networking.getOwnerDetails(owner)['moderators']
# mods = networking.getOwnerDetails(owner)['robocaster']['moderators']
print("Moderators :", mods)
# check if the user is the owner or moderator, 0 for not, 1 for moderator, 2 for owner
def is_authed(user):
if user == owner:
return(2)
elif user in mods:
return(1)
else:
return(0)
# add a new command handler, this will also allow for overriding existing ones.
def add_command(command, function):
global commands
commands[command] = function
def anon_handler(command, args):
global anon_control
if len(command) > 1:
if is_authed(args['name']): # Moderator
if command[1] == 'on':
anon_control = True
tts.unmute_anon_tts()
robot_util.setAnonControl(True, robot_id, api_key)
elif command[1] == 'off':
anon_control = False
tts.mute_anon_tts()
robot_util.setAnonControl(False, robot_id, api_key)
            elif len(command) > 2:
if command[1] == 'control':
if command[2] == 'on':
anon_control = True
robot_util.setAnonControl(True, robot_id, api_key)
elif command[2] == 'off':
anon_control = False
robot_util.setAnonControl(False, robot_id, api_key)
elif command[1] == 'tts':
if command[2] == 'on':
tts.unmute_anon_tts()
elif command[2] == 'off':
tts.mute_anon_tts()
print("anon_control : " + str(anon_control))
def ban_handler(command, args):
global banned
if len(command) > 1:
user = command[1]
if is_authed(args['name']): # Moderator
banned.append(user)
print(user + " added to ban list")
tts.mute_user_tts(user)
def unban_handler(command, args):
global banned
if len(command) > 1:
user = command[1]
if is_authed(args['name']): # Moderator
if user in banned:
banned.remove(user)
print(user + " removed from ban list")
tts.unmute_user_tts(user)
def timeout_handler(command, args):
global banned
if len(command) > 1:
user = command[1]
if is_authed(args['name']): # Moderator
banned.append(user)
schedule.single_task(5, untimeout_user, user)
print(user + " added to timeout list")
tts.mute_user_tts(user)
def untimeout_user(user):
global banned
if user in banned:
banned.remove(user)
print(user + " timeout expired")
tts.unmute_user_tts(user)
def untimeout_handler(command, args):
global banned
if len(command) > 1:
user = command[1]
if is_authed(args['name']): # Moderator
if user in banned:
banned.remove(user)
                print(user + " removed from timeout list")
tts.unmute_user_tts(user)
def public_mode_handler(command, args):
if len(command) > 1:
if api_key != None:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'on':
robot_util.setAllowed('roboempress', robot_id, api_key)
robot_util.setPrivateMode(True, robot_id, api_key)
elif command[1] == 'off':
robot_util.setPrivateMode(False, robot_id, api_key)
def devmode_handler(command, args):
global dev_mode
global dev_mode_mods
if len(command) > 1:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'on':
dev_mode = True
dev_mode_mods = False
if api_key != None:
robot_util.setDevMode(True, robot_id, api_key)
elif command[1] == 'off':
dev_mode = False
if api_key != None:
robot_util.setDevMode(False, robot_id, api_key)
elif command[1] == 'mods':
dev_mode = True
dev_mode_mods = True
print("dev_mode : " + str(dev_mode))
print("dev_mode_mods : " + str(dev_mode_mods))
def mic_handler(command, args):
    if is_authed(args['name']) == 2: # Owner
if len(command) > 1:
if command[1] == 'mute':
if api_key != None:
robot_util.setMicEnabled(True, robot_id, api_key)
# Mic Mute
return
elif command[1] == 'unmute':
if api_key != None:
robot_util.setMicEnabled(False, robot_id, api_key)
# Mic Unmute
return
def tts_handler(command, args):
print("tts :", tts)
if len(command) > 1:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'mute':
print("mute")
tts.mute_tts()
return
elif command[1] == 'unmute':
tts.unmute_tts()
return
elif command[1] == 'vol':
# TTS int volume command
return
def stationary_handler(command, args):
global stationary
if is_authed(args['name']) == 2: # Owner
stationary = not stationary
print ("stationary is ", stationary)
def global_chat_handler(command, args):
if len(command) > 1:
if api_key != None:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'on':
robot_util.setGlobalChat(False, robot_id, api_key)
return
elif command[1] == 'off':
robot_util.setGlobalChat(True, robot_id, api_key)
return
def word_filter_handler(command, args):
if len(command) > 1:
if api_key != None:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'on':
robot_util.setWordFilter(True, robot_id, api_key)
return
elif command[1] == 'off':
robot_util.setWordFilter(False, robot_id, api_key)
return
def show_exclusive_handler(command, args):
if len(command) > 1:
if api_key != None:
if is_authed(args['name']) == 2: # Owner
if command[1] == 'on':
robot_util.setShowExclusive(False, robot_id, api_key)
return
elif command[1] == 'off':
robot_util.setShowExclusive(True, robot_id, api_key)
return
# This is a dictionary of commands and their handler functions
commands={ '.anon' : anon_handler,
'.ban' : ban_handler,
'.unban' : unban_handler,
'.timeout' : timeout_handler,
           '.untimeout' : untimeout_handler,
'.devmode' : devmode_handler,
'.mic' : mic_handler,
'.tts' : tts_handler,
'.global_chat': global_chat_handler,
'.public' : public_mode_handler,
'.show_exclusive': show_exclusive_handler,
'.word_filter': word_filter_handler,
'.stationary' : stationary_handler
}
def handler(args):
command = args['message']
# TODO : This will not work with robot names with spaces, update it to split on ']'
# [1:]
try:
command = command.split(']')[1:][0].split(' ')[1:]
print(command)
except IndexError: # catch empty messages
return
    if command:
if command[0] in commands:
commands[command[0]](command, args)
# This function checks the user sending the command, and if authorized
# call the move handler.
def move_auth(args):
user = args['user']
anon = args['anonymous']
if stationary:
direction = args['command']
if direction == 'F' or direction == 'B':
print("No forward for you.....")
return
if anon_control == False and anon:
return
elif dev_mode_mods:
if is_authed(user):
move_handler(args)
else:
return
elif dev_mode:
if is_authed(user) == 2: # owner
move_handler(args)
else:
return
elif user not in banned: # Check for banned and timed out users
move_handler(args)
return
| Nocturnal42/runmyrobot | extended_command.py | Python | apache-2.0 | 10,228 |
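# A hedged usage sketch, not part of the module above: add_command() is the
# extension point for new chat commands; the ".hello" command is made up.
import extended_command

def hello_handler(command, args):
    # mirror the auth check used by the built-in handlers
    if extended_command.is_authed(args['name']):
        print("hello, " + args['name'])

extended_command.add_command('.hello', hello_handler)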
from flask_smorest import Page
class CursorPage(Page):
@property
def item_count(self):
return self.collection.count()
| ev-agelos/Python-bookmarks | bookmarks/api/pagination.py | Python | mit | 136 |
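# A hedged usage sketch: flask_smorest's paginate() accepts a custom Page
# class; overriding item_count lets it size a lazy SQLAlchemy query via
# .count() instead of len(). The blueprint and Bookmark model are assumptions.
from flask_smorest import Blueprint

blp = Blueprint('bookmarks', __name__, url_prefix='/bookmarks')

@blp.route('/')
@blp.response(200)
@blp.paginate(CursorPage)
def get_bookmarks():
    return Bookmark.query  # a lazy query; CursorPage slices and counts it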
#!/usr/bin/env python3
#
# Copyright (C) 2020 FreeIPA Contributors see COPYING for license
#
"""Configure lite-server environment.
See README.md for more details.
"""
import argparse
import os
import socket
from urllib.request import urlopen
DEFAULT_CONF = """\
[global]
host = {args.hostname}
server = {args.servername}
basedn = {args.basedn}
realm = {args.realm}
domain = {args.domain}
xmlrpc_uri = {args.xmlrpc_uri}
ldap_uri = ldap://{args.servername}
debug = {args.debug}
enable_ra = False
ra_plugin = dogtag
dogtag_version = 10
"""
KRB5_CONF = """\
[libdefaults]
default_realm = {args.realm}
dns_lookup_realm = false
dns_lookup_kdc = false
rdns = false
ticket_lifetime = 24h
forwardable = true
udp_preference_limit = 0
default_ccache_name = FILE:{args.ccache}
[realms]
{args.realm} = {{
kdc = {args.kdc}
master_kdc = {args.kdc}
admin_server = {args.kadmin}
default_domain = ipa.example
pkinit_anchors = FILE:{args.ca_crt}
pkinit_pool = FILE:{args.ca_crt}
http_anchors = FILE:{args.ca_crt}
}}
[domain_realm]
.ipa.example = {args.realm}
ipa.example = {args.realm}
{args.servername} = {args.realm}
"""
LDAP_CONF = """\
URI ldaps://{args.servername}
BASE {args.basedn}
TLS_CACERT {args.ca_crt}
SASL_MECH GSSAPI
SASL_NOCANON on
"""
IPA_BIN = """\
#!/bin/sh
exec python3 -m ipaclient $*
"""
ACTIVATE = """\
deactivate_ipaenv () {{
export PS1="${{_OLD_IPAENV_PS1}}"
export PATH="${{_OLD_IPAENV_PATH}}"
unset _OLD_IPAENV_PS1
unset _OLD_IPAENV_PATH
unset KRB5_CONFIG
unset KRB5CCNAME
unset LDAPCONF
unset IPA_CONFDIR
unset PYTHONPATH
unset -f deactivate_ipaenv
}}
export _OLD_IPAENV_PS1="${{PS1:-}}"
export _OLD_IPAENV_PATH="${{PATH:-}}"
export PS1="(ipaenv) ${{PS1:-}}"
export PATH="{args.dot_ipa}:${{PATH:-}}"
export KRB5_CONFIG="{args.krb5_conf}"
export KRB5CCNAME="{args.ccache}"
{args.tracecomment}export KRB5_TRACE=/dev/stderr
export LDAPCONF="{args.ldap_conf}"
export IPA_CONFDIR="{args.dot_ipa}"
export PYTHONPATH="{args.basedir}"
"""
MSG = """\
Configured for server '{args.servername}' and realm '{args.realm}'.
To activate the IPA test env:
source {args.activate}
kinit
make lite-server
To deactivate the IPA test env and to unset the env vars:
deactivate_ipaenv
The source file configures the env vars:
export KRB5_CONFIG="{args.krb5_conf}"
export KRB5CCNAME="{args.ccache}"
export LDAPCONF="{args.ldap_conf}"
export IPA_CONFDIR="{args.dot_ipa}"
export PYTHONPATH="{args.basedir}"
"""
parser = argparse.ArgumentParser()
parser.add_argument("servername", help="IPA server name")
parser.add_argument("domain", default=None, nargs="?")
parser.add_argument(
"--kdcproxy", action="store_true", help="Use KRB5 over HTTPS (KDC-Proxy)"
)
parser.add_argument(
"--debug",
action="store_true",
help="Enable debug mode for lite-server and KRB5",
)
parser.add_argument(
"--remote-server",
action="store_true",
help="Configure client to use a remote server instead of lite-server",
)
def main():
args = parser.parse_args()
if args.domain is None:
args.domain = args.servername.lower().split(".", 1)[1]
else:
args.domain = args.domain.lower().rstrip(".")
args.realm = args.domain.upper()
args.hostname = socket.gethostname()
args.basedn = ",".join(f"dc={part}" for part in args.domain.split("."))
args.tracecomment = "" if args.debug else "#"
if args.kdcproxy:
args.kdc = f"https://{args.servername}/KdcProxy"
args.kadmin = f"https://{args.servername}/KdcProxy"
else:
args.kdc = f"{args.servername}:88"
args.kadmin = f"{args.servername}:749"
if args.remote_server:
args.xmlrpc_uri = f"https://{args.servername}/ipa/xml"
else:
args.xmlrpc_uri = f"http://localhost:8888/ipa/xml"
args.basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
args.dot_ipa = os.path.expanduser("~/.ipa")
args.default_conf = os.path.join(args.dot_ipa, "default.conf")
args.ca_crt = os.path.join(args.dot_ipa, "ca.crt")
args.krb5_conf = os.path.join(args.dot_ipa, "krb5.conf")
args.ldap_conf = os.path.join(args.dot_ipa, "ldap.conf")
args.ccache = os.path.join(args.dot_ipa, "ccache")
args.ipa_bin = os.path.join(args.dot_ipa, "ipa")
args.activate = os.path.join(args.dot_ipa, "activate.sh")
if not os.path.isdir(args.dot_ipa):
os.makedirs(args.dot_ipa, mode=0o750)
with urlopen(f"http://{args.servername}/ipa/config/ca.crt") as req:
ca_data = req.read()
with open(args.ca_crt, "wb") as f:
f.write(ca_data)
with open(args.default_conf, "w") as f:
f.write(DEFAULT_CONF.format(args=args))
with open(args.krb5_conf, "w") as f:
f.write(KRB5_CONF.format(args=args))
with open(args.ldap_conf, "w") as f:
f.write(LDAP_CONF.format(args=args))
with open(args.ipa_bin, "w") as f:
f.write(IPA_BIN.format(args=args))
os.fchmod(f.fileno(), 0o755)
with open(args.activate, "w") as f:
f.write(ACTIVATE.format(args=args))
print(MSG.format(args=args))
if __name__ == "__main__":
main()
| encukou/freeipa | contrib/lite-setup.py | Python | gpl-3.0 | 5,165 |
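# A small illustration of the mechanism used above: the *_CONF templates are
# expanded with plain str.format() using attribute access on the parsed
# argparse namespace. The realm and kdc values here are made up.
import argparse

_args = argparse.Namespace(realm="IPA.EXAMPLE", kdc="master.ipa.example:88")
print("kdc = {args.kdc}".format(args=_args))  # -> kdc = master.ipa.example:88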
# -*- coding: utf-8 -*-
"""
garage.help_text
Helper function to retrieve help text for backend admin form views.
* created: 2011-03-18 Kevin Chan <kefin@makedostudio.com>
* updated: 2014-11-21 kchan
"""
from __future__ import (absolute_import, unicode_literals)
import warnings
# issue deprecation warning
# * This module is here for compatibility with old imports but will be
# phased out in the next minor version.
warnings.warn('The help_text module will be deprecated in version 0.2.1. ',
DeprecationWarning)
# maintain a help text registry for django models
HELP_TEXT_REGISTRY = {}
def register_help_text_dictionary(module, dictionary):
HELP_TEXT_REGISTRY[module] = dictionary
def unregister_help_text_dictionary(module):
try:
d = HELP_TEXT_REGISTRY.get(module)
del HELP_TEXT_REGISTRY[module]
return d
except (AttributeError, KeyError):
return None
def get_help_text_registry(module=None):
if module:
return HELP_TEXT_REGISTRY.get(module)
return HELP_TEXT_REGISTRY
def get_help_text(module, model, field, default_dict={}):
"""
    Legacy function for compatibility with old projects using the
`help_text` module.
Get help text for model and field in module help registry.
"""
for d in [get_help_text_registry(module), default_dict]:
try:
txt = d[model].get(field)
if txt:
return txt
except (TypeError, KeyError):
pass
return ''
| kefin/django-garage | garage/help_text.py | Python | bsd-3-clause | 1,517 |
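# A hedged usage sketch of the registry above; the module/model/field names
# are made up for illustration.
from garage.help_text import (register_help_text_dictionary, get_help_text,
                              unregister_help_text_dictionary)

register_help_text_dictionary('myapp', {
    'Article': {'title': 'Headline shown on the article page.'},
})
assert get_help_text('myapp', 'Article', 'title') == 'Headline shown on the article page.'
assert get_help_text('myapp', 'Article', 'missing') == ''
unregister_help_text_dictionary('myapp')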
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder.i18n import _
from cinder import quota
from cinder import utils
QUOTAS = quota.QUOTAS
NON_QUOTA_KEYS = ['tenant_id', 'id']
authorize_update = extensions.extension_authorizer('volume', 'quotas:update')
authorize_show = extensions.extension_authorizer('volume', 'quotas:show')
authorize_delete = extensions.extension_authorizer('volume', 'quotas:delete')
class QuotaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')
for resource in QUOTAS.resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(wsgi.Controller):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict."""
quota_set['id'] = str(project_id)
return dict(quota_set=quota_set)
def _validate_existing_resource(self, key, value, quota_values):
if key == 'per_volume_gigabytes':
return
v = quota_values.get(key, {})
if value < (v.get('in_use', 0) + v.get('reserved', 0)):
msg = _("Quota %s limit must be equal or greater than existing "
"resources.") % key
raise webob.exc.HTTPBadRequest(explanation=msg)
def _get_quotas(self, context, id, usages=False, parent_project_id=None):
values = QUOTAS.get_project_quotas(context, id, usages=usages,
parent_project_id=parent_project_id)
if usages:
return values
else:
return {k: v['limit'] for k, v in values.items()}
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['cinder.context']
authorize_show(context)
params = req.params
if not hasattr(params, '__call__') and 'usage' in params:
usage = strutils.bool_from_string(params['usage'])
else:
usage = False
try:
sqlalchemy_api.authorize_project_context(context, id)
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
return self._format_quota_set(id, self._get_quotas(context, id, usage))
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['cinder.context']
authorize_update(context)
self.validate_string_length(id, 'quota_set_name',
min_length=1, max_length=255)
project_id = id
self.assert_valid_body(body, 'quota_set')
# Get the optional argument 'skip_validation' from body,
# if skip_validation is False, then validate existing resource.
skip_flag = body.get('skip_validation', True)
if not utils.is_valid_boolstr(skip_flag):
msg = _("Invalid value '%s' for skip_validation.") % skip_flag
raise exception.InvalidParameterValue(err=msg)
skip_flag = strutils.bool_from_string(skip_flag)
bad_keys = []
# NOTE(ankit): Pass #1 - In this loop for body['quota_set'].items(),
# we figure out if we have any bad keys.
for key, value in body['quota_set'].items():
if (key not in QUOTAS and key not in NON_QUOTA_KEYS):
bad_keys.append(key)
continue
if len(bad_keys) > 0:
msg = _("Bad key(s) in quota set: %s") % ",".join(bad_keys)
raise webob.exc.HTTPBadRequest(explanation=msg)
# NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(),
# we validate the quota limits to ensure that we can bail out if
# any of the items in the set is bad. Meanwhile we validate value
# to ensure that the value can't be lower than number of existing
# resources.
quota_values = QUOTAS.get_project_quotas(context, project_id)
valid_quotas = {}
for key in body['quota_set'].keys():
if key in NON_QUOTA_KEYS:
continue
valid_quotas[key] = self.validate_integer(
body['quota_set'][key], key, min_value=-1,
max_value=db.MAX_INT)
if not skip_flag:
self._validate_existing_resource(key, value, quota_values)
# NOTE(ankit): Pass #3 - At this point we know that all the keys and
# values are valid and we can iterate and update them all in one shot
# without having to worry about rolling back etc as we have done
# the validation up front in the 2 loops above.
for key, value in valid_quotas.items():
try:
db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': self._get_quotas(context, id)}
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
context = req.environ['cinder.context']
authorize_show(context)
        return self._format_quota_set(
            id, QUOTAS.get_defaults(context, parent_project_id=None))
@wsgi.serializers(xml=QuotaTemplate)
def delete(self, req, id):
context = req.environ['cinder.context']
authorize_delete(context)
try:
db.quota_destroy_by_project(context, id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
class Quotas(extensions.ExtensionDescriptor):
"""Quota management support."""
name = "Quotas"
alias = "os-quota-sets"
namespace = "http://docs.openstack.org/volume/ext/quotas-sets/api/v1.1"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
| potsmaster/cinder | cinder/api/contrib/quotas.py | Python | apache-2.0 | 7,248 |
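# A hedged illustration of the request body shape handled by
# QuotaSetsController.update() above; the tenant values are made up. Note that
# 'skip_validation' is read from the top level of the body, not from inside
# 'quota_set'.
example_body = {
    "quota_set": {
        "volumes": 10,
        "gigabytes": 1000,
    },
    "skip_validation": False,  # forces the existing-resource check per key
}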
# -*- Mode: Python; -*-
# Package : omniORBpy
# PortableServer__POA.py Created on: 2000/02/24
# Author : Duncan Grisby (dpg1)
#
# Copyright (C) 2002-2013 Apasphere Ltd
# Copyright (C) 2000 AT&T Laboratories Cambridge
#
# This file is part of the omniORBpy library
#
# The omniORBpy library is free software; you can redistribute it
# and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation;
# either version 2.1 of the License, or (at your option) any later
# version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA
#
#
# Description:
# PortableServer skeletons
import omniORB
from omniORB import CORBA, PortableServer
import _omnipy
# ServantManager skeleton
class ServantManager (PortableServer.Servant):
_NP_RepositoryId = PortableServer.ServantManager._NP_RepositoryId
_omni_op_d = {}
_omni_special = 1
ServantManager._omni_skeleton = ServantManager
# ServantActivator skeleton
class ServantActivator (ServantManager):
_NP_RepositoryId = PortableServer.ServantActivator._NP_RepositoryId
_omni_op_d = {"incarnate": PortableServer.ServantActivator._d_incarnate,
"etherealize":PortableServer.ServantActivator._d_etherealize}
_omni_op_d.update(ServantManager._omni_op_d)
_omni_special = 1
ServantActivator._omni_skeleton = ServantActivator
# ServantLocator skeleton
class ServantLocator (ServantManager):
_NP_RepositoryId = PortableServer.ServantLocator._NP_RepositoryId
_omni_op_d = {"preinvoke": PortableServer.ServantLocator._d_preinvoke,
"postinvoke": PortableServer.ServantLocator._d_postinvoke}
_omni_op_d.update(ServantManager._omni_op_d)
_omni_special = 1
ServantLocator._omni_skeleton = ServantLocator
# AdapterActivator skeleton
class AdapterActivator (PortableServer.Servant):
_NP_RepositoryId = PortableServer.AdapterActivator._NP_RepositoryId
_omni_op_d = {"unknown_adapter":
PortableServer.AdapterActivator._d_unknown_adapter}
_omni_special = 1
AdapterActivator._omni_skeleton = AdapterActivator
| amonmoce/corba_examples | omniORBpy-4.2.1/python3/omniORB/PortableServer__POA.py | Python | mit | 2,639 |
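# A hedged sketch of a user-defined activator built on the skeleton above;
# MyServant stands in for an application servant class and is an assumption.
from omniORB import PortableServer__POA

class MyActivator(PortableServer__POA.ServantActivator):
    def incarnate(self, oid, poa):
        return MyServant(oid)  # create a servant on demand
    def etherealize(self, oid, poa, serv, cleanup_in_progress, remaining_activations):
        pass  # release any resources held by serv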
import json
import os
import tarfile
import unittest
from hashlib import md5
from time import sleep
from netjsonconfig import OpenWrt
from netjsonconfig.exceptions import ValidationError
from netjsonconfig.utils import _TabsMixin
class TestBackend(unittest.TestCase, _TabsMixin):
maxDiff = None
def test_config_copy(self):
config = {'interfaces': []}
o = OpenWrt(config)
o.validate()
self.assertDictEqual(config, {'interfaces': []})
def test_json_method(self):
config = {
"type": "DeviceConfiguration",
"interfaces": [
{
"name": "lo",
"type": "loopback",
"addresses": [
{
"address": "127.0.0.1",
"mask": 8,
"proto": "static",
"family": "ipv4",
}
],
}
],
}
o = OpenWrt(config)
self.assertEqual(json.loads(o.json()), config)
def test_string_argument(self):
OpenWrt('{}')
def test_validate(self):
o = OpenWrt({'interfaces': 'WRONG'})
with self.assertRaises(ValidationError):
o.validate()
o = OpenWrt({'interfaces': []})
o.validate()
o.config['interfaces'] = 'CHANGED'
try:
o.validate()
except ValidationError as e:
self.assertEqual(e.details.instance, 'CHANGED')
self.assertIn("ValidationError 'CHANGED' is not of type 'array'", str(e))
else:
self.fail('ValidationError not raised')
def test_find_bridge_skip_error(self):
o = OpenWrt({'interfaces': ['WRONG']})
with self.assertRaises(ValidationError):
o.validate()
def test_type_error(self):
with self.assertRaises(TypeError):
OpenWrt([])
with self.assertRaises(TypeError):
OpenWrt('NOTJSON[]\{\}')
def test_system_invalid_timezone(self):
o = OpenWrt({"general": {"hostname": "test_system", "timezone": "WRONG"}})
with self.assertRaises(ValidationError):
o.validate()
def test_schema_radio_wrong_driver(self):
o = OpenWrt(
{
"radios": [
{
"name": "radio0",
"phy": "phy0",
"driver": "iamwrong",
"protocol": "802.11ac",
"channel": 132,
"channel_width": 80,
"tx_power": 8,
}
]
}
)
with self.assertRaises(ValidationError):
o.validate()
def test_schema_radio_wrong_protocol(self):
o = OpenWrt(
{
"radios": [
{
"name": "radio0",
"phy": "phy0",
"driver": "mac80211",
"protocol": "802.11ad", # ad is not supported by OpenWRT yet
"channel": 132,
"channel_width": 80,
"tx_power": 8,
}
]
}
)
with self.assertRaises(ValidationError):
o.validate()
_config1 = {
"interfaces": [
{
"name": "wlan0",
"type": "wireless",
"addresses": [
{
"address": "192.168.1.1",
"mask": 24,
"proto": "static",
"family": "ipv4",
}
],
"wireless": {
"radio": "radio0",
"mode": "access_point",
"ssid": "MyWifiAP",
"hidden": True,
},
}
],
"radios": [
{
"name": "radio0",
"phy": "phy0",
"driver": "mac80211",
"protocol": "802.11n",
"channel": 3,
"channel_width": 20,
"tx_power": 3,
}
],
}
def test_generate(self):
o = OpenWrt(self._config1)
tar = tarfile.open(fileobj=o.generate(), mode='r')
self.assertEqual(len(tar.getmembers()), 2)
# network
network = tar.getmember('etc/config/network')
contents = tar.extractfile(network).read().decode()
expected = self._tabs(
"""config interface 'wlan0'
option ifname 'wlan0'
option ipaddr '192.168.1.1'
option netmask '255.255.255.0'
option proto 'static'
"""
)
self.assertEqual(contents, expected)
# wireless
wireless = tar.getmember('etc/config/wireless')
contents = tar.extractfile(wireless).read().decode()
expected = self._tabs(
"""config wifi-device 'radio0'
option channel '3'
option htmode 'HT20'
option hwmode '11g'
option phy 'phy0'
option txpower '3'
option type 'mac80211'
config wifi-iface 'wifi_wlan0'
option device 'radio0'
option hidden '1'
option ifname 'wlan0'
option mode 'ap'
option network 'wlan0'
option ssid 'MyWifiAP'
"""
)
self.assertEqual(contents, expected)
tar.close()
def test_double_rendering(self):
o = OpenWrt(self._config1)
self.assertEqual(o.render(), o.render())
def test_write(self):
o = OpenWrt({"general": {"hostname": "test"}})
o.write(name='test', path='/tmp')
tar = tarfile.open('/tmp/test.tar.gz', mode='r')
self.assertEqual(len(tar.getmembers()), 1)
tar.close()
os.remove('/tmp/test.tar.gz')
def test_templates_type_error(self):
config = {"general": {"hostname": "test_templates"}}
with self.assertRaises(TypeError):
OpenWrt(config, templates={'a': 'a'})
def test_templates_config_error(self):
config = {"general": {"hostname": "test_templates"}}
with self.assertRaises(TypeError):
OpenWrt(config, templates=['O{]O'])
def test_templates(self):
loopback_template = {
"interfaces": [
{
"name": "lo",
"type": "loopback",
"addresses": [
{
"address": "127.0.0.1",
"mask": 8,
"proto": "static",
"family": "ipv4",
}
],
}
]
}
radio_template = {
"interfaces": [
{
"name": "wlan0",
"type": "wireless",
"addresses": [
{
"address": "192.168.1.1",
"mask": 24,
"proto": "static",
"family": "ipv4",
}
],
"wireless": {
"radio": "radio0",
"mode": "access_point",
"ssid": "MyWifiAP",
"hidden": True,
},
}
],
"radios": [
{
"name": "radio0",
"phy": "phy0",
"driver": "mac80211",
"protocol": "802.11n",
"channel": 3,
"channel_width": 20,
"tx_power": 3,
}
],
}
config = {"general": {"hostname": "test_templates"}}
o = OpenWrt(config, templates=[loopback_template, radio_template])
self.assertEqual(o.config['general']['hostname'], 'test_templates')
self.assertIn('radios', o.config)
self.assertEqual(len(o.config['radios']), 1)
self.assertEqual(o.config['radios'][0]['name'], 'radio0')
self.assertIn('interfaces', o.config)
self.assertEqual(len(o.config['interfaces']), 2)
self.assertEqual(o.config['interfaces'][0]['name'], 'lo')
self.assertEqual(o.config['interfaces'][1]['name'], 'wlan0')
def test_file_inclusion(self):
o = OpenWrt(
{
"files": [
{
"path": "/etc/crontabs/root",
"mode": "0644",
"contents": '* * * * * echo "test" > /etc/testfile\n'
'* * * * * echo "test2" > /etc/testfile2',
},
{"path": "/etc/dummy.conf", "mode": "0644", "contents": "testing!"},
]
}
)
output = o.render()
self.assertNotIn('package files', output)
self.assertIn('* * * * * echo', output)
# ensure the additional files are there present in the tar.gz archive
tar = tarfile.open(fileobj=o.generate(), mode='r')
self.assertEqual(len(tar.getmembers()), 2)
# first file
crontab = tar.getmember('etc/crontabs/root')
contents = tar.extractfile(crontab).read().decode()
self.assertEqual(contents, o.config['files'][0]['contents'])
self.assertEqual(crontab.mtime, 0)
self.assertEqual(crontab.mode, 420)
# second file
dummy = tar.getmember('etc/dummy.conf')
contents = tar.extractfile(dummy).read().decode()
self.assertEqual(contents, o.config['files'][1]['contents'])
self.assertEqual(dummy.mode, 420)
tar.close()
def test_file_permissions(self):
o = OpenWrt(
{
"files": [
{
"path": "/tmp/hello.sh",
"mode": "0755",
"contents": "echo 'hello world'",
}
]
}
)
tar = tarfile.open(fileobj=o.generate(), mode='r')
script = tar.getmember('tmp/hello.sh')
# check permissions
self.assertEqual(script.mode, 493)
tar.close()
def test_file_schema(self):
c = {
"files": [
{
"path": "/tmp/hello.sh",
"mode": "0644",
"contents": "echo 'hello world'",
}
]
}
# valid
c['files'][0]['mode'] = '3555'
o = OpenWrt(c)
o.validate()
# valid
c['files'][0]['mode'] = '755'
o = OpenWrt(c)
o.validate()
# too long
c['files'][0]['mode'] = '00777'
o = OpenWrt(c)
with self.assertRaises(ValidationError):
o.validate()
# too short
c['files'][0]['mode'] = '75'
o = OpenWrt(c)
with self.assertRaises(ValidationError):
o.validate()
# invalid
c['files'][0]['mode'] = '0855'
o = OpenWrt(c)
with self.assertRaises(ValidationError):
o.validate()
def test_checksum(self):
"""ensures checksum of same config doesn't change"""
o = OpenWrt({"general": {"hostname": "test"}})
# md5 is good enough and won't slow down test execution too much
checksum1 = md5(o.generate().getvalue()).hexdigest()
sleep(1)
checksum2 = md5(o.generate().getvalue()).hexdigest()
self.assertEqual(checksum1, checksum2)
def test_override(self):
config = {
"interfaces": [{"name": "eth0", "type": "ethernet", "disabled": False}]
}
template = {
"interfaces": [{"name": "eth0", "type": "ethernet", "disabled": True}]
}
o = OpenWrt(config, templates=[template])
self.assertFalse(o.config['interfaces'][0]['disabled'])
def test_value_error(self):
with self.assertRaises(ValueError):
OpenWrt()
with self.assertRaises(ValueError):
OpenWrt(templates=[])
with self.assertRaises(ValueError):
OpenWrt(context=[])
def test_override_file(self):
o = OpenWrt(
{
"files": [
{
"path": "/etc/crontabs/root",
"mode": "0644",
"contents": "*/5 * * * * /command1\n*/5 * * * * /command2",
}
]
},
templates=[
{
"files": [
{
"path": "/etc/crontabs/root",
"mode": "0644",
"contents": "*/5 * * * * /command1",
}
]
}
],
)
expected = """
# ---------- files ---------- #
# path: /etc/crontabs/root
# mode: 0644
*/5 * * * * /command1
*/5 * * * * /command2
"""
self.assertEqual(o.render(), expected)
# ensure the additional files are there present in the tar.gz archive
tar = tarfile.open(fileobj=o.generate(), mode='r')
self.assertEqual(len(tar.getmembers()), 1)
def _get_wireguard_empty_configuration(self):
return {
'interfaces': [
{
'addresses': [],
'fwmark': '',
'ip6prefix': [],
'mtu': 1420,
'name': '',
'network': '',
'nohostroute': False,
'port': 51820,
'private_key': '{{private_key}}',
'type': 'wireguard',
}
],
'wireguard_peers': [
{
'allowed_ips': [''],
'endpoint_host': '',
'endpoint_port': 51820,
'interface': '',
'persistent_keepalive': 60,
'preshared_key': '',
'public_key': '',
'route_allowed_ips': True,
}
],
}
def _get_vxlan_wireguard_empty_configuration(self):
wireguard_config = self._get_wireguard_empty_configuration()
vxlan_config = {
'disabled': False,
'mac': '',
'mtu': 1280,
'name': 'vxlan',
'network': '',
'port': 4789,
'rxcsum': True,
'ttl': 64,
'tunlink': '',
'txcsum': True,
'type': 'vxlan',
'vni': 0,
'vtep': '',
}
wireguard_config['interfaces'].append(vxlan_config)
return wireguard_config
def test_wireguard_auto_client(self):
with self.subTest('No arguments provided'):
expected = self._get_wireguard_empty_configuration()
self.assertDictEqual(OpenWrt.wireguard_auto_client(), expected)
with self.subTest('Required arguments provided'):
expected = self._get_wireguard_empty_configuration()
expected['interfaces'][0].update(
{
'name': 'wg',
'private_key': '{{private_key}}',
'addresses': [
{
'address': '10.0.0.2',
'family': 'ipv4',
'mask': 32,
'proto': 'static',
},
],
}
)
expected['wireguard_peers'][0].update(
{
'allowed_ips': ['10.0.0.1/24'],
'endpoint_host': '0.0.0.0',
'public_key': 'server_public_key',
'interface': 'wg',
}
)
self.assertDictEqual(
OpenWrt.wireguard_auto_client(
host='0.0.0.0',
public_key='server_public_key',
server={'name': 'wg', 'port': 51820},
server_ip_network='10.0.0.1/24',
ip_address='10.0.0.2',
),
expected,
)
def test_vxlan_wireguard_auto_client(self):
with self.subTest('No arguments provided'):
expected = self._get_vxlan_wireguard_empty_configuration()
self.assertDictEqual(OpenWrt.vxlan_wireguard_auto_client(), expected)
with self.subTest('Required arguments provided'):
expected = self._get_vxlan_wireguard_empty_configuration()
expected['interfaces'][0].update(
{'name': 'wg', 'private_key': '{{private_key}}'}
)
expected['wireguard_peers'][0].update(
{
'allowed_ips': ['10.0.0.1/24'],
'endpoint_host': '0.0.0.0',
'public_key': 'server_public_key',
'interface': 'wg',
}
)
expected['interfaces'][1].update(
{'tunlink': 'wg', 'vni': 1, 'vtep': '10.0.0.1'}
)
self.assertDictEqual(
OpenWrt.vxlan_wireguard_auto_client(
host='0.0.0.0',
public_key='server_public_key',
server={'name': 'wg', 'port': 51820},
server_ip_network='10.0.0.1/24',
vni=1,
server_ip_address='10.0.0.1',
),
expected,
)
| openwisp/netjsonconfig | tests/openwrt/test_backend.py | Python | gpl-3.0 | 17,902 |
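# A minimal illustration of the backend round-trip these tests exercise:
# render() returns the UCI text and generate() a tar.gz archive.
from netjsonconfig import OpenWrt

o = OpenWrt({"general": {"hostname": "example"}})
print(o.render())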
from urllib.parse import urljoin
from twisted.internet import reactor
from twisted.web import server, resource, static, util
class SiteTest(object):
def setUp(self):
super(SiteTest, self).setUp()
self.site = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
self.baseurl = "http://localhost:%d/" % self.site.getHost().port
def tearDown(self):
super(SiteTest, self).tearDown()
self.site.stopListening()
def url(self, path):
return urljoin(self.baseurl, path)
class NoMetaRefreshRedirect(util.Redirect):
def render(self, request):
content = util.Redirect.render(self, request)
return content.replace(b'http-equiv=\"refresh\"',
b'http-no-equiv=\"do-not-refresh-me\"')
def test_site():
r = resource.Resource()
r.putChild(b"text", static.Data(b"Works", "text/plain"))
r.putChild(b"html", static.Data(b"<body><p class='one'>Works</p><p class='two'>World</p></body>", "text/html"))
r.putChild(b"enc-gb18030", static.Data(b"<p>gb18030 encoding</p>", "text/html; charset=gb18030"))
r.putChild(b"redirect", util.Redirect(b"/redirected"))
r.putChild(b"redirect-no-meta-refresh", NoMetaRefreshRedirect(b"/redirected"))
r.putChild(b"redirected", static.Data(b"Redirected here", "text/plain"))
return server.Site(r)
if __name__ == '__main__':
port = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
print("http://localhost:%d/" % port.getHost().port)
reactor.run()
| eLRuLL/scrapy | scrapy/utils/testsite.py | Python | bsd-3-clause | 1,514 |
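# A hedged usage sketch: mixing SiteTest into a TestCase gives each test a
# throwaway local server plus the url() helper. The test name is made up.
import unittest

class ExampleSiteTest(SiteTest, unittest.TestCase):
    def test_url_helper(self):
        self.assertTrue(self.url("text").endswith("/text"))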
#!/usr/bin/env python
"""
You can precisely specify dashes with an on/off ink rect sequence in
points.
"""
from pylab import *
dashes = [5,2,10,5] # 5 points on, 2 off, 10 on, 5 off
l, = plot(arange(20), '--')
l.set_dashes(dashes)
savefig('dash_control')
show()
| sniemi/SamPy | sandbox/src1/examples/dash_control.py | Python | bsd-2-clause | 264 |
#
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
# See http://pciids.sourceforge.net/pci.ids.bz2
class Pci:
BASE_CLASS_STORAGE = 1
CLASS_STORAGE_SCSI = 0
CLASS_STORAGE_IDE = 1
CLASS_STORAGE_FLOPPY = 2
CLASS_STORAGE_IPI = 3
CLASS_STORAGE_RAID = 4
CLASS_STORAGE_OTHER = 80
BASE_CLASS_NETWORK = 2
CLASS_NETWORK_ETHERNET = 0
CLASS_NETWORK_TOKEN_RING = 1
CLASS_NETWORK_FDDI = 2
CLASS_NETWORK_ATM = 3
CLASS_NETWORK_OTHER = 80
CLASS_NETWORK_WIRELESS = 128
BASE_CLASS_DISPLAY = 3
CLASS_DISPLAY_VGA = 0
CLASS_DISPLAY_XGA = 1
CLASS_DISPLAY_3D = 2
CLASS_DISPLAY_OTHER = 80
BASE_CLASS_MULTIMEDIA = 4
CLASS_MULTIMEDIA_VIDEO = 0
CLASS_MULTIMEDIA_AUDIO = 1
CLASS_MULTIMEDIA_PHONE = 2
CLASS_MULTIMEDIA_AUDIO_DEVICE = 3
CLASS_MULTIMEDIA_OTHER = 80
BASE_CLASS_BRIDGE = 6
CLASS_BRIDGE_HOST = 0
CLASS_BRIDGE_ISA = 1
CLASS_BRIDGE_EISA = 2
CLASS_BRIDGE_MC = 3
CLASS_BRIDGE_PCI = 4
CLASS_BRIDGE_PCMCIA = 5
CLASS_BRIDGE_NUBUS = 6
CLASS_BRIDGE_CARDBUS = 7
CLASS_BRIDGE_RACEWAY = 8
CLASS_BRIDGE_OTHER = 80
BASE_CLASS_COMMUNICATION = 7
CLASS_COMMUNICATION_SERIAL = 0
CLASS_COMMUNICATION_PARALLEL = 1
CLASS_COMMUNICATION_MULTISERIAL = 2
CLASS_COMMUNICATION_MODEM = 3
CLASS_COMMUNICATION_OTHER = 80
BASE_CLASS_INPUT = 9
CLASS_INPUT_KEYBOARD = 0
CLASS_INPUT_PEN = 1
CLASS_INPUT_MOUSE = 2
CLASS_INPUT_SCANNER = 3
CLASS_INPUT_GAMEPORT = 4
CLASS_INPUT_OTHER = 80
    BASE_CLASS_SERIAL = 12
    CLASS_SERIAL_FIREWIRE = 0
    CLASS_SERIAL_ACCESS = 1
    CLASS_SERIAL_SSA = 2
    CLASS_SERIAL_USB = 3
    CLASS_SERIAL_FIBER = 4
    CLASS_SERIAL_SMBUS = 5
    BASE_CLASS_WIRELESS = 13
    CLASS_WIRELESS_BLUETOOTH = 17
| jds2001/ocp-checkbox | checkbox/lib/pci.py | Python | gpl-3.0 | 3,097 |
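# A hedged illustration of matching a parsed PCI class code against the
# constants above; the device dict shape is an assumption about the caller.
device = {"base_class": Pci.BASE_CLASS_NETWORK,
          "sub_class": Pci.CLASS_NETWORK_WIRELESS}
is_wireless = (device["base_class"] == Pci.BASE_CLASS_NETWORK
               and device["sub_class"] == Pci.CLASS_NETWORK_WIRELESS)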
"""Useful functions for the IHC component."""
import asyncio
from homeassistant.core import callback
async def async_pulse(hass, ihc_controller, ihc_id: int):
"""Send a short on/off pulse to an IHC controller resource."""
await async_set_bool(hass, ihc_controller, ihc_id, True)
await asyncio.sleep(0.1)
await async_set_bool(hass, ihc_controller, ihc_id, False)
@callback
def async_set_bool(hass, ihc_controller, ihc_id: int, value: bool):
"""Set a bool value on an IHC controller resource."""
return hass.async_add_executor_job(
ihc_controller.set_runtime_value_bool, ihc_id, value
)
@callback
def async_set_int(hass, ihc_controller, ihc_id: int, value: int):
"""Set a int value on an IHC controller resource."""
return hass.async_add_executor_job(
ihc_controller.set_runtime_value_int, ihc_id, value
)
| jawilson/home-assistant | homeassistant/components/ihc/util.py | Python | apache-2.0 | 868 |
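# A hedged usage sketch: pulsing an IHC output resource from some async
# Home Assistant context; the controller object and resource id are
# placeholders.
async def pulse_doorbell(hass, ihc_controller):
    await async_pulse(hass, ihc_controller, 12345)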
#coding=UTF-8
'''
Created on 2011-7-6
@author: Administrator
'''
from urlparse import urlparse
import cookielib
import urllib2
import urllib
import re
import time
import datetime
import mimetypes
import random
import threading
import simplejson as js
from pyquery.pyquery import PyQuery
from lxml import etree
from lxml.cssselect import CSSSelector
from BeautifulSoup import BeautifulSoup
from config import housetype, checkPath, makePath, fitment, toward, deposit
homepath="d:\\home\\spider\\"
class LinkCrawl(object):
def __init__(self,citycode="",kind=""):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.endtime=str(datetime.date.today() -datetime.timedelta(days=7))
self.clinks=[]
self.pn=[]
self.citycode=citycode
self.baseUrl="http://%s.ganji.com"%self.citycode
if kind=="1":
self.urlpath="/fang5/a1u2%s/"
else:
self.urlpath="/fang1/u2%s/"
def __getAllNeedLinks(self):
cond=True
idx=0
checkit="0"
while cond:
url=self.baseUrl+self.urlpath%("f"+str(idx*32))
print url
req=urllib2.Request(url, None, self.header)
p=self.br.open(req).read()
check=PyQuery(p)("ul.pageLink li a.c").text()
if check==checkit:
break
else:
checkit=check
links=PyQuery(p)("div.list dl a.list_title")
print len(links)
for link in links:
lk=self.baseUrl+PyQuery(link).attr("href")
if lk not in self.clinks:
self.clinks.append(lk)
idx=idx+1
print len(self.clinks)
def runme(self):
#self.__initPageNum()
self.__getAllNeedLinks()
print len(self.clinks)
return self.clinks
class ContentCrawl(object):
def __init__(self,links,citycode,kind):
cj = cookielib.MozillaCookieJar()
self.br=urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(cj),urllib2.HTTPRedirectHandler())
self.pdb={}
self.header={
"User-Agent":'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB6.6; .NET CLR 3.5.30729)',
}
self.urls=links
self.kind=kind
self.fd={}
self.citycode=citycode
if kind=="1":
self.folder="sell\\"
if kind=="2":
self.folder="rent\\"
if kind=="3":
self.folder="buy\\"
else:
self.folder="req\\"
#js resgx
self.agencyname_regex="agencyname:'(.*)',"
self.username_regex="username:'(.*?)',"
self.house_room_regex="(\d+)室"
self.house_hall_regex="(\d+)厅"
self.house_toilet_regex="(\d+)卫"
self.borough_name_regex="<li><i>小区:</i>(.*?)</li>"
self.borough_name1_regex="<li><i>小区:</i>(.*?)</a>"
self.house_addr_regex="<li><i>地段:</i>(.*?)</li>"
self.house_floor_regex="第(\d+)层"
self.house_topfloor_regex="共(\d+)层"
self.house_price_regex=""
self.belong_regex="<li><i>产权:</i>(.*)</li>"
self.house_age_regex="(\d+)年"
self.house_totalarea_regex="(\d+)㎡"
self.house_totalarea_req_regex="(\d+)-(\d+)㎡"
self.house_title_regex="<h1>(.*)</h1>"
def __addText(self,tag, no_tail=False):
text = []
if tag.text:
text.append(tag.text)
for child in tag.getchildren():
text.append(self.__addText(child))
if not no_tail and tag.tail:
text.append(tag.tail)
return "".join(text)
def getText(self,html):
text=[]
for tag in html:
text.append(self.__addText(tag, no_tail=True))
return ' '.join([t.strip() for t in text if t.strip()])
def ChuShou(self,url):
self.fd['house_flag'] = 1
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('ul',{'class':'info'})
detail_mer_str =str(detail_mer).replace(" ", "")
        # not an individual (owner-posted) listing: return
#print re.search(self.agencyname_regex, response).group(1)
if re.search(self.agencyname_regex, response):
agencyname=re.search(self.agencyname_regex, response).group(1)
if agencyname != '个人房源':return
else:
return
if re.search(self.username_regex, response):
username=re.search(self.username_regex, response).group(1)
self.fd['owner_name'] = username
else:
self.fd['owner_name'] = None
owner_phone = soup('img')
self.fd['owner_phone'] = ''
for phone in owner_phone:
if phone['src'].find('http://image.58.com/showphone.aspx') != -1:
self.fd['owner_phone'] = phone['src']
        # no contact info: return
if not self.fd['owner_phone']:return
if soup.find('div',{"class":'other'}):
posttime = soup.find('div',{"class":'other'}).contents[0]
posttime = re.sub('\n|\r| |\t','',posttime)
posttime = posttime.replace('发布时间:','').replace(' 浏览','')
else:
posttime = ''
if not posttime:
return
elif posttime.find('-') !=-1:
s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2]))
posttime = int(time.mktime(s.timetuple()))
elif posttime.find('分钟') !=-1:
n = int(posttime.replace('分钟前',''))*60
posttime = int(time.time() - n)
elif posttime.find('小时') !=-1:
n = int(posttime.replace('小时前',''))*60*60
posttime = int(time.time() - n)
self.fd['posttime'] = posttime
if (time.time() - self.fd['posttime']) > 3600*24*7:
return
print "++++++++++++++++"
print time.strftime('%Y %m %d', time.localtime(self.fd['posttime']))
if re.search(self.house_floor_regex, detail_mer_str):
house_floor=re.search(self.house_floor_regex, detail_mer_str).group(1)
self.fd['house_floor'] = house_floor
else:
self.fd['house_floor'] = None
if re.search(self.house_topfloor_regex, detail_mer_str):
house_topfloor=re.search(self.house_topfloor_regex, detail_mer_str).group(1)
self.fd['house_topfloor'] = house_topfloor
else:
self.fd['house_topfloor'] = None
if re.search(self.house_totalarea_regex, detail_mer_str):
house_totalarea=re.search(self.house_totalarea_regex, detail_mer_str).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        # property type
self.fd['house_type'] = housetype(detail_mer_str)
self.fd['house_price'] = detail_mer.em.string
if re.search(self.house_room_regex, detail_mer_str):
house_room=re.search(self.house_room_regex, detail_mer_str).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, detail_mer_str):
house_hall=re.search(self.house_hall_regex, detail_mer_str).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, detail_mer_str):
house_toilet=re.search(self.house_toilet_regex, detail_mer_str).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
if re.search(self.house_title_regex, response):
house_title=re.search(self.house_title_regex, response).group(1)
self.fd['house_title'] = house_title
else:
self.fd['house_title'] = ''
        # description
detail_box = soup.find('div',{'class':'maincon'})
if detail_box:
house_desc = str(detail_box)
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时,请说是在58同城上看到的,谢谢!","",house_desc)
else:
self.fd['house_desc'] = None
        # neighborhood name
if re.search(self.borough_name_regex, detail_mer_str):
borough_name=re.search(self.borough_name_regex, detail_mer_str).group(1)
self.fd['borough_name'] = re.sub("\(.*\)|<.*?>","",borough_name)
else:
self.fd['borough_name'] = ''
        # district / section
area_box = detail_mer.find(text="区域:").parent.parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
self.fd['house_toward'] = toward(detail_mer_str)
self.fd['house_fitment'] = fitment(detail_mer_str)
def QiuGou(self,url):
self.fd['city'] = ''
self.fd['house_flag'] = 3
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('ul',{'class':'info'})
detail_mer_str =str(detail_mer).replace(" ", "")
        # not an individual (owner-posted) listing: return
#print re.search(self.agencyname_regex, response).group(1)
if re.search(self.agencyname_regex, response):
agencyname=re.search(self.agencyname_regex, response).group(1)
if agencyname != '个人房源':return
else:
return
if re.search(self.username_regex, response):
username=re.search(self.username_regex, response).group(1)
self.fd['owner_name'] = username
else:
self.fd['owner_name'] = None
owner_phone = soup('img')
self.fd['owner_phone'] = ''
for phone in owner_phone:
if phone['src'].find('http://image.58.com/showphone.aspx') != -1:
self.fd['owner_phone'] = phone['src']
        # no contact info: return
if not self.fd['owner_phone']:return
if soup.find('div',{"class":'other'}):
posttime = soup.find('div',{"class":'other'}).contents[0]
posttime = re.sub('\n|\r| |\t','',posttime)
posttime = posttime.replace('发布时间:','').replace(' 浏览','')
else:
posttime = ''
if not posttime:
return
elif posttime.find('-') !=-1:
s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2]))
posttime = int(time.mktime(s.timetuple()))
elif posttime.find('分钟') !=-1:
n = int(posttime.replace('分钟前',''))*60
posttime = int(time.time() - n)
elif posttime.find('小时') !=-1:
n = int(posttime.replace('小时前',''))*60*60
posttime = int(time.time() - n)
self.fd['posttime'] = posttime
if (time.time() - self.fd['posttime']) > 3600*24*7:
return
print "++++++++++++++++"
print time.strftime('%Y %m %d', time.localtime(self.fd['posttime']))
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
if re.search(self.house_totalarea_req_regex, detail_mer_str):
house_totalarea_min=re.search(self.house_totalarea_req_regex, detail_mer_str).group(1)
house_totalarea_max=re.search(self.house_totalarea_req_regex, detail_mer_str).group(2)
self.fd['house_totalarea'] = house_totalarea_min
self.fd['house_totalarea_max'] = house_totalarea_max
self.fd['house_totalarea_min'] = house_totalarea_min
else:
if re.search(self.house_totalarea_regex, detail_mer_str):
house_totalarea=re.search(self.house_totalarea_regex, detail_mer_str).group(1)
self.fd['house_totalarea'] = house_totalarea
self.fd['house_totalarea_max'] = house_totalarea
self.fd['house_totalarea_min'] = house_totalarea
else:
self.fd['house_totalarea'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
        # property type
self.fd['house_type'] = housetype(detail_mer_str)
house_price = detail_mer.em.string
        if house_price.find('-') != -1:
            self.fd['house_price_min'] = int(house_price.split('-')[0])
            self.fd['house_price_max'] = int(house_price.split('-')[1])
            self.fd['house_price'] = int(house_price.split('-')[0])
        else:
            self.fd['house_price_min'] = int(house_price)
            self.fd['house_price_max'] = int(house_price)
            self.fd['house_price'] = int(house_price)
if re.search(self.house_room_regex, detail_mer_str):
house_room=re.search(self.house_room_regex, detail_mer_str).group(1)
self.fd['house_room'] = house_room
self.fd['house_room1'] = house_room
else:
self.fd['house_room'] = '0'
self.fd['house_room1'] = '0'
self.fd['house_hall'] = '0'
        self.fd['house_toilet'] = '0'
if re.search(self.house_title_regex, response):
house_title=re.search(self.house_title_regex, response).group(1)
self.fd['house_title'] = house_title
else:
self.fd['house_title'] = ''
        # description
detail_box = soup.find('div',{'class':'maincon'})
if detail_box:
house_desc = str(detail_box)
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时,请说是在58同城上看到的,谢谢!","",house_desc)
else:
self.fd['house_desc'] = None
        # address / neighborhood name
if re.search(self.house_addr_regex, detail_mer_str):
house_addr = re.search(self.house_addr_regex, detail_mer_str).group(1)
self.fd['house_addr'] = house_addr
self.fd['borough_name'] = house_addr
else:
self.fd['house_addr'] = ''
self.fd['borough_name'] = ''
        # district / section
#print detail_mer
area_box = detail_mer.find(text="地段:").parent.parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
self.fd['house_age'] = 0
        # orientation
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
def ChuZu(self,url):
self.fd['house_flag'] = 2
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('ul',{'class':'info'})
detail_mer_str =re.sub("\n|\t\r| ","",str(detail_mer))
#print detail_mer_str
        # not an individual (owner-posted) listing: return
#print re.search(self.agencyname_regex, response).group(1)
if re.search(self.agencyname_regex, response):
agencyname=re.search(self.agencyname_regex, response).group(1)
if agencyname != '个人房源':return
else:
return
if re.search(self.username_regex, response):
username=re.search(self.username_regex, response).group(1)
self.fd['owner_name'] = username
else:
self.fd['owner_name'] = None
owner_phone = soup('img')
self.fd['owner_phone'] = ''
for phone in owner_phone:
if phone['src'].find('http://image.58.com/showphone.aspx') != -1:
self.fd['owner_phone'] = phone['src']
        # no contact info: return
if not self.fd['owner_phone']:return
if soup.find('div',{"class":'other'}):
posttime = soup.find('div',{"class":'other'}).contents[0]
posttime = re.sub('\n|\r| |\t','',posttime)
posttime = posttime.replace('发布时间:','').replace(' 浏览','')
else:
posttime = ''
if not posttime:
return
elif posttime.find('-') !=-1:
s = datetime.datetime(int(posttime.split('-')[0]),int(posttime.split('-')[1],),int(posttime.split('-')[2]))
posttime = int(time.mktime(s.timetuple()))
elif posttime.find('分钟') !=-1:
n = int(posttime.replace('分钟前',''))*60
posttime = int(time.time() - n)
elif posttime.find('小时') !=-1:
n = int(posttime.replace('小时前',''))*60*60
posttime = int(time.time() - n)
self.fd['posttime'] = posttime
if (time.time() - self.fd['posttime']) > 3600*24*7:
return
print "++++++++++++++++"
print time.strftime('%Y %m %d', time.localtime(self.fd['posttime']))
if re.search(self.house_floor_regex, detail_mer_str):
house_floor=re.search(self.house_floor_regex, detail_mer_str).group(1)
self.fd['house_floor'] = house_floor
else:
self.fd['house_floor'] = None
if re.search(self.house_topfloor_regex, detail_mer_str):
house_topfloor=re.search(self.house_topfloor_regex, detail_mer_str).group(1)
self.fd['house_topfloor'] = house_topfloor
else:
self.fd['house_topfloor'] = None
if re.search(self.house_totalarea_regex, detail_mer_str):
house_totalarea=re.search(self.house_totalarea_regex, detail_mer_str).group(1)
self.fd['house_totalarea'] = house_totalarea
else:
self.fd['house_totalarea'] = None
        # property type
self.fd['house_type'] = housetype(detail_mer_str)
self.fd['house_price'] = detail_mer.em.string
if re.search(self.house_room_regex, detail_mer_str):
house_room=re.search(self.house_room_regex, detail_mer_str).group(1)
self.fd['house_room'] = house_room
else:
self.fd['house_room'] = '0'
if re.search(self.house_hall_regex, detail_mer_str):
house_hall=re.search(self.house_hall_regex, detail_mer_str).group(1)
self.fd['house_hall'] = house_hall
else:
self.fd['house_hall'] = '0'
if re.search(self.house_toilet_regex, detail_mer_str):
house_toilet=re.search(self.house_toilet_regex, detail_mer_str).group(1)
self.fd['house_toilet'] = house_toilet
else:
self.fd['house_toilet'] = '0'
if re.search(self.house_title_regex, response):
house_title=re.search(self.house_title_regex, response).group(1)
self.fd['house_title'] = house_title
else:
self.fd['house_title'] = ''
        # description
detail_box = soup.find('div',{'class':'maincon'})
if detail_box:
house_desc = str(detail_box)
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时,请说是在58同城上看到的,谢谢!","",house_desc)
else:
self.fd['house_desc'] = None
        # neighborhood name
if re.search(self.borough_name_regex, detail_mer_str):
borough_name=re.search(self.borough_name_regex, detail_mer_str).group(1)
self.fd['borough_name'] = re.sub("\(.*\)|<.*?>","",borough_name)
else:
self.fd['borough_name'] = ''
        # district / section
area_box = detail_mer.find(text="区域:").parent.parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
if re.search(self.house_age_regex, response):
house_age=re.search(self.house_age_regex, response).group(1)
self.fd['house_age'] = house_age
else:
self.fd['house_age'] = None
        # orientation
self.fd['house_toward'] = toward(detail_mer_str)
self.fd['house_fitment'] = fitment(detail_mer_str)
self.fd['house_deposit'] = deposit(detail_mer_str)
def QiuZu(self,url):
self.fd['house_flag'] = 3
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
self.fd['house_age'] = 0
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
self.fd['house_deposit'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
self.fd['house_totalarea'] = 0
request = urllib2.Request(url, None, self.header)
response = urllib2.urlopen(request).read()
tree = etree.HTML(response)
soup =BeautifulSoup(response)
detail_mer = soup.find('ul',{'class':'info'})
detail_mer_str =str(detail_mer).replace(" ", "")
        # not an individual listing: return
#print re.search(self.agencyname_regex, response).group(1)
if re.search(self.agencyname_regex, response):
agencyname=re.search(self.agencyname_regex, response).group(1)
if agencyname == '经纪人':return
else:
return
if re.search(self.username_regex, response):
username=re.search(self.username_regex, response).group(1)
self.fd['owner_name'] = username
else:
self.fd['owner_name'] = None
owner_phone = soup('img')
self.fd['owner_phone'] = ''
for phone in owner_phone:
if phone['src'].find('http://image.58.com/showphone.aspx') != -1:
self.fd['owner_phone'] = phone['src']
        # no contact info: return
if not self.fd['owner_phone']:return
if soup.find('div',{"class":'other'}):
posttime = soup.find('div',{"class":'other'}).contents[0]
posttime = re.sub('\n|\r| |\t','',posttime.replace(" ", " "))
posttime = posttime.replace('发布时间:','').replace(' 浏览','')
else:
posttime = ''
print posttime
if not posttime:
return
elif posttime.find('-') !=-1:
            s = datetime.datetime(int(posttime.split('-')[0]), int(posttime.split('-')[1]), int(posttime.split('-')[2]))
posttime = int(time.mktime(s.timetuple()))
elif posttime.find('分钟') !=-1:
n = int(posttime.replace('分钟前',''))*60
posttime = int(time.time() - n)
elif posttime.find('小时') !=-1:
n = int(posttime.replace('小时前',''))*60*60
posttime = int(time.time() - n)
self.fd['posttime'] = posttime
if (time.time() - self.fd['posttime']) > 3600*24*7:
return
print "++++++++++++++++"
print time.strftime('%Y %m %d', time.localtime(self.fd['posttime']))
self.fd['house_floor'] = 0
self.fd['house_topfloor'] = 0
if re.search(self.house_totalarea_req_regex, detail_mer_str):
house_totalarea_min=re.search(self.house_totalarea_req_regex, detail_mer_str).group(1)
house_totalarea_max=re.search(self.house_totalarea_req_regex, detail_mer_str).group(2)
self.fd['house_totalarea'] = house_totalarea_min
self.fd['house_totalarea_max'] = house_totalarea_max
self.fd['house_totalarea_min'] = house_totalarea_min
else:
if re.search(self.house_totalarea_regex, detail_mer_str):
house_totalarea=re.search(self.house_totalarea_regex, detail_mer_str).group(1)
self.fd['house_totalarea'] = house_totalarea
self.fd['house_totalarea_max'] = house_totalarea
self.fd['house_totalarea_min'] = house_totalarea
else:
self.fd['house_totalarea'] = 0
self.fd['house_totalarea_max'] = 0
self.fd['house_totalarea_min'] = 0
        # property type
self.fd['house_type'] = housetype(detail_mer_str)
house_price = detail_mer.em.string
if house_price:
house_price = house_price.replace('元','')
if house_price.find("以上") != -1:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = house_price.replace('以上','')
self.fd['house_price'] = house_price.replace('以上','')
elif house_price.find("以下") != -1:
self.fd['house_price_max'] = house_price.replace('以下','')
self.fd['house_price_min'] = 0
self.fd['house_price'] = house_price.replace('以下','')
elif house_price.find("-") != -1:
self.fd['house_price_max'] = house_price.split('-')[1]
self.fd['house_price_min'] = house_price.split('-')[0]
self.fd['house_price'] = house_price.split('-')[0]
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
else:
self.fd['house_price_max'] = 0
self.fd['house_price_min'] = 0
self.fd['house_price'] = 0
if re.search(self.house_room_regex, detail_mer_str):
house_room=re.search(self.house_room_regex, detail_mer_str).group(1)
self.fd['house_room'] = house_room
self.fd['house_room1'] = house_room
else:
self.fd['house_room'] = '0'
self.fd['house_room1'] = '0'
self.fd['house_hall'] = '0'
        self.fd['house_toilet'] = '0'
if re.search(self.house_title_regex, response):
house_title=re.search(self.house_title_regex, response).group(1)
self.fd['house_title'] = house_title
else:
self.fd['house_title'] = ''
        # description
detail_box = soup.find('div',{'class':'maincon'})
if detail_box:
house_desc = str(detail_box)
self.fd['house_desc'] = re.sub("<.*?>|\n|\r|\t|联系我时,请说是在58同城上看到的,谢谢!","",house_desc)
else:
self.fd['house_desc'] = None
        # neighborhood name
if re.search(self.house_addr_regex, detail_mer_str):
house_addr = re.search(self.house_addr_regex, detail_mer_str).group(1)
self.fd['house_addr'] = house_addr
self.fd['borough_name'] = house_addr
else:
self.fd['house_addr'] = ''
self.fd['borough_name'] = ''
        # district
#print detail_mer
area_box = detail_mer.find(text="地段:").parent.parent
area_a = area_box('a')
if area_a and len(area_a)>1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = area_a[1].string
elif area_a and len(area_a)==1:
self.fd['cityarea'] = area_a[0].string
self.fd['section'] = None
else:
self.fd['cityarea'] = None
self.fd['section'] = None
self.fd['house_age'] = 0
        # orientation
self.fd['house_toward'] = 0
self.fd['house_fitment'] = 0
def extractDict(self):
for url in self.urls:
if checkPath(homepath,self.folder,url):
pass
else:
self.fd["posttime"] = 0
if self.kind=="1":
self.ChuShou(url)
elif self.kind=="2":
self.ChuZu(url)
elif self.kind=="3":
self.QiuGou(url)
else:
self.QiuZu(url)
self.fd['city'] = urlparse(url)[1].replace('.58.com',"")
#makePath(homepath,self.folder,url)
                # skip listings older than seven days
                if (time.time() - self.fd["posttime"]) > 7*24*3600: return
self.fd["c"]="houseapi"
self.fd["a"]="savehouse"
self.fd["is_checked"] = 0
self.fd["web_flag"] = "58"
if not self.fd["is_checked"]:
for i in self.fd.items():
print i[0],i[1]
# req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(self.fd))
#p=self.br.open(req).read().strip()
#print p.decode('gbk')
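# Hypothetical refactor sketch (not part of the original script): the
# relative-timestamp parsing above is repeated across several methods; a
# shared helper could look like this. The helper name is illustrative only.
#
# def parse_posttime(raw):
#     if raw.find('-') != -1:
#         y, m, d = [int(v) for v in raw.split('-')]
#         return int(time.mktime(datetime.datetime(y, m, d).timetuple()))
#     if raw.find('分钟') != -1:   # "minutes ago"
#         return int(time.time() - int(raw.replace('分钟前', '')) * 60)
#     if raw.find('小时') != -1:   # "hours ago"
#         return int(time.time() - int(raw.replace('小时前', '')) * 3600)
#     return 0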
class fetchData(threading.Thread):
def __init__(self,d):
threading.Thread.__init__(self)
self.d=d
def run(self):
lc=LinkCrawl(self.d["citycode"],self.d["kind"])
clinks=lc.runme()
cc=ContentCrawl(clinks,self.d["citycode"],self.d["kind"])
cc.extractDict()
if __name__=="__main__":
#lc=LinkCrawl(citycode="gz",kind="1")
#lc.runme()
url1 = "http://su.58.com/ershoufang/6432469244037x.shtml"
url2 = "http://su.58.com/zufang/6433540736258x.shtml"
url3 = "http://su.58.com/ershoufang/6383611408516x.shtml"
url4 = "http://su.58.com/qiuzu/6268009935368x.shtml"
cc=ContentCrawl([url4],citycode="su",kind="4")
cc.extractDict() | ptphp/PyLib | src/webpy1/webpy/fetch/tongcheng58.py | Python | apache-2.0 | 32,102 |
"""
usage: __main__.py [-h] [--reset_prefs] [--no_prefs] [-image_out IMAGE_OUT]
[-plugin PLUGIN]
[tree]
Launch Kataja visualisation environment.
positional arguments:
tree bracket tree or source tree filename
optional arguments:
-h, --help show this help message and exit
--reset_prefs reset the current preferences file to default
--no_prefs don't use preferences file -- don't save it either
-image_out IMAGE_OUT draw tree into given file (name.pdf or name.png) and
exit
  -plugin PLUGIN       start with the given plugin (default: 'FreeDrawing')
or
import kataja
kataja.start(**kwargs)
Launch kataja with arguments
or
import kataja
kataja.draw(tree, image_out="kataja_tree.pdf", **kwargs])
Draw tree into file and exit kataja
"""
import argparse
import datetime
import os
import sys
from PyQt6 import QtWidgets, QtCore
import kataja
from kataja.singletons import running_environment, log
# QtPrintSupport is imported here only because py2app then knows to add it as a framework.
# libqcocoa.dynlib requires QtPrintSupport. <-- not needed anymore?
def load_version():
if running_environment.run_mode == 'source':
parent = os.path.dirname(os.path.dirname(__file__))
try:
with open(os.path.join(parent, 'VERSION')) as version_file:
version = version_file.read().strip()
except FileNotFoundError:
date = str(datetime.datetime.now())
build_number = 1
version_name = '0.1'
            version = ' | '.join([date, str(build_number), version_name])
else:
import pkg_resources
version = pkg_resources.get_distribution('kataja').version
return version
def bump_and_save_version(version_str):
old_date, build_number, version_name = version_str.split(' | ', 2)
build_number = int(build_number)
date = str(datetime.datetime.now())
build_number += 1
new_version_str = ' | '.join([date, str(build_number), version_name])
try:
parent = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(parent, 'VERSION'), 'w') as version_file:
version_file.write(new_version_str)
version_file.write('\n')
except IOError:
print('write failed')
return new_version_str
def launch_from_command_line():
parser = argparse.ArgumentParser(description='Launch Kataja visualisation environment.')
parser.add_argument('--reset_prefs', action='store_true', default=False,
help='reset the current preferences file to default')
parser.add_argument('--no_prefs', action='store_true', default=False,
help="don't use preferences file -- don't save it either")
parser.add_argument('-image_out', type=str,
help="draw tree into given file (name.pdf or name.png) and exit")
parser.add_argument('-plugin', type=str, default='',
help="start with the given plugin")
parser.add_argument('tree', type=str, nargs='?',
help='bracket tree or source tree filename')
kwargs = vars(parser.parse_args())
silent = True if kwargs['image_out'] else False
print(f"Launching Kataja {kataja.__version__} with Python {sys.version_info.major}.{sys.version_info.minor}")
app = prepare_app()
log.info('Starting Kataja...')
if not silent:
# splash_color = QtGui.QColor(238, 232, 213)
# splash_pix = QtGui.QPixmap(os.path.join(running_environment.resources_path, 'katajalogo.png'))
# splash = QtWidgets.QSplashScreen(splash_pix)
# splash.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.SplashScreen |
# QtCore.Qt.FramelessWindowHint | QtCore.Qt.NoDropShadowWindowHint)
# splash.showMessage(f'{kataja.__author__} | Fetching version...', QtCore.Qt.AlignBottom |
# QtCore.Qt.AlignmentFlag.AlignHCenter, splash_color)
# app.processEvents()
# splash.show()
# app.processEvents()
version_str = load_version()
if running_environment.run_mode == 'source':
version_str = bump_and_save_version(version_str)
# splash.showMessage(f'{kataja.__author__} | {version_str}',
# QtCore.Qt.AlignBottom | QtCore.Qt.AlignmentFlag.AlignHCenter, splash_color)
# splash.repaint()
app.processEvents()
# importing KatajaMain here because it is slow, and splash screen is now up
from kataja.KatajaMain import KatajaMain
window = KatajaMain(app, **kwargs)
if not silent:
# splash.finish(window)
app.setActiveWindow(window)
app.processEvents()
app.exec()
def start(**kwargs):
from kataja.KatajaMain import KatajaMain
app = prepare_app()
window = KatajaMain(app, **kwargs)
app.setActiveWindow(window)
app.processEvents()
    app.exec()
def draw(tree, image_out='kataja_tree.pdf', **kwargs):
from kataja.KatajaMain import KatajaMain
app = prepare_app()
KatajaMain(app, tree=tree, image_out=image_out, **kwargs)
app.processEvents()
    app.exec()
def prepare_app():
app = QtWidgets.QApplication(sys.argv)
#app.setAttribute(QtCore.Qt.ApplicationAttribute.AA_UseHighDpiPixmaps)
app.setApplicationName('Kataja')
app.setOrganizationName('Purma')
app.setOrganizationDomain('purma.fi')
app.setStyle('fusion')
return app
| jpurma/Kataja | kataja/launcher.py | Python | gpl-3.0 | 5,544 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
# Copyright(C) 2012 François Revol
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.video import CapVideo, BaseVideo
from weboob.tools.backend import Module
from weboob.capabilities.collection import CapCollection, CollectionNotFound
from .browser import VimeoBrowser
import re
__all__ = ['VimeoModule']
class VimeoModule(Module, CapVideo, CapCollection):
NAME = 'vimeo'
MAINTAINER = u'François Revol'
EMAIL = 'revol@free.fr'
VERSION = '1.1'
DESCRIPTION = 'Vimeo video streaming website'
LICENSE = 'AGPLv3+'
BROWSER = VimeoBrowser
SORTBY = ['relevance', 'rating', 'views', 'time']
def search_videos(self, pattern, sortby=CapVideo.SEARCH_RELEVANCE, nsfw=False):
return self.browser.search_videos(pattern, self.SORTBY[sortby])
def get_video(self, _id):
return self.browser.get_video(self.parse_id(_id))
def fill_video(self, video, fields):
if fields != ['thumbnail']:
            # if we don't want only the thumbnail, we probably want every other field too
video = self.browser.get_video(video.id, video)
if 'thumbnail' in fields and video.thumbnail:
video.thumbnail.data = self.browser.open(video.thumbnail.url).content
return video
def parse_id(self, _id):
        m = re.match(r'https?://vimeo\.com/(.*)', _id)
if m:
return m.group(1)
return _id
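    # Illustrative examples for parse_id (the IDs below are made up):
    #   parse_id('https://vimeo.com/76979871')  ->  '76979871'
    #   parse_id('76979871')                    ->  '76979871'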
def iter_resources(self, objs, split_path):
if BaseVideo in objs:
collection = self.get_collection(objs, split_path)
if collection.path_level == 0:
yield self.get_collection(objs, [u'latest'])
if collection.split_path == [u'latest']:
for video in self.browser.latest_videos():
yield video
def validate_collection(self, objs, collection):
if collection.path_level == 0:
return
if BaseVideo in objs and collection.split_path == [u'latest']:
collection.title = u'Latest Vimeo videos'
return
raise CollectionNotFound(collection.split_path)
OBJECTS = {BaseVideo: fill_video}
| Konubinix/weboob | modules/vimeo/module.py | Python | agpl-3.0 | 2,848 |
# -*- coding: utf-8 -*-
class Parser(object):
'''
Base class for parsers
'''
def noun_tokenize(self, sentence):
'''
Extract only nouns
'''
tagged_tokens = self.pos_tokenize(sentence)
nouns = self.noun_filter(tagged_tokens)
nouns = self.stopword_filter(nouns, key=lambda x: x[0])
nouns = self.normalize(nouns, key=lambda x: x[0])
return nouns
def normalize(self, tokens, key=lambda x: x):
'''
Convert tokens to lowercase
'''
return [key(token).lower() for token in tokens]
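# Hypothetical subclass sketch: Parser assumes subclasses provide
# pos_tokenize(), noun_filter() and stopword_filter(). An NLTK-backed
# implementation might look like this (nltk and STOPWORDS are illustrative,
# not part of this package):
#
# class EnglishParser(Parser):
#     def pos_tokenize(self, sentence):
#         return nltk.pos_tag(nltk.word_tokenize(sentence))
#     def noun_filter(self, tagged_tokens):
#         return [t for t in tagged_tokens if t[1].startswith('NN')]
#     def stopword_filter(self, tokens, key=lambda x: x):
#         return [t for t in tokens if key(t).lower() not in STOPWORDS]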
| mpkato/mobileclick | mobileclick/nlp/parser.py | Python | mit | 591 |
from math import *
m = 9.8e-28
q = 4.8e-10
print( "q = {:.3e} [cgs]".format( q ) )
print( "m = {:.3e} [g]".format( m ) )
ev_to_cgs = 1.60218e-12
E_along = 1000 * ev_to_cgs
v_along = sqrt( 2 * E_along / m )
E_perp = 100 * ev_to_cgs
v_perp = sqrt( 2 * E_perp/2 / m )
print( "E_along = {:.3e} [eV] = {:.3e} [erg]".format( E_along / ev_to_cgs, E_along ) )
print( "E_perp = {:.3e} [eV] = {:.3e} [erg]".format( E_perp / ev_to_cgs, E_perp ) )
print( "v_along = {:.3e} [cm/s]".format( v_along ) )
print( "p_along = {:.3e} [g * cm/s]".format( v_along * m ) )
print( "v_perp = {:.3e} [cm/s]".format( v_perp ) )
print( "p_perp = {:.3e} [g * cm/s]".format( v_perp * m ) )
print( "" )
H = 1000
speed_of_light = 3.0e10
cyclotron_fr = q * H / m / speed_of_light
cyclotron_period = 2.0 * pi / cyclotron_fr
single_period_distance_along_field = v_along * cyclotron_period
larmor_r = m * speed_of_light * sqrt(2 * E_perp / m) / q / H
print( "H = {:.3e} [Gs]".format( H ) )
print( "c = {:.3e} [cm/s]".format( speed_of_light ) )
print( "Omega = {:.3e} [1/s]".format( cyclotron_fr ) )
print( "Cyclotron period = {:.3e} [s]".format( cyclotron_period ) )
print( "Single period distance along field= {:.3e} [cm]".format(
single_period_distance_along_field ) )
print( "Larmor_r = {:.3e} [cm]".format( larmor_r ) )
print( "" )
z_distance = 5
t = z_distance / v_along
print( "z_distance = {:f} [cm]".format( z_distance ) )
print( "t = {:.3e} [s]".format( t ) )
sim_time = 3.0e-9
n_of_revolutions = sim_time / cyclotron_period
n_of_steps = 1000
dt = sim_time / n_of_steps
print( "simulation_time = {:.3e} [s]".format( sim_time ) )
print( "n_of_revolutions = {:.1f}".format( n_of_revolutions ) )
print( "number_of_time_steps = {:d}".format( n_of_steps ) )
print( "time_step_size = {:.3e} [s]".format( dt ) )
| epicf/ef | examples/single_particle_in_magnetic_field/estimates.py | Python | mit | 1,787 |
import psutil
def generate_process_graph():
"""
Returns a process graph of all the processes in the system.
"""
return ProcessGraph(psutil.process_iter())
class ProcessGraph(object):
def __init__(self, processes):
self.snapshots = {}
self.root_pids = []
self.child_map = {}
self.mem_percents_including_children = {}
self._update_process_dicts(processes)
self._update_process_children_map()
self._get_percents_including_children()
self._update_root_pids()
def _update_process_dicts(self, processes):
"""Creates a dict of the dicts of each process on the system.
Probably faster than calling p.get_whatever() many times, and, rather
importantly, gives a /snapshot/ of the system's processes at a certain
time.
"""
self.p_dicts = {}
for process in processes:
self._snapshot_process(process)
def get_percent_including_children(self, pid):
"""Gets the percent of RAM a process is using, including that used by
all of its children."""
if pid in self.mem_percents_including_children:
return self.mem_percents_including_children[pid]
try:
total_percent = self.get_memory_percent(pid) + sum(
self.get_percent_including_children(child)
for child in self.get_child_pids(pid))
self.mem_percents_including_children[pid] = total_percent
return total_percent
except psutil.NoSuchProcess:
# processes are ephemeral, this one must have disappeared
return 0
except KeyError:
# processes are ephemeral, this one must have newly appeared
# and not have been put into the maps earlier: just ignore it
return 0
def _update_root_pids(self):
"""Gets pids of all processes in p_dicts that have no parents."""
self.root_pids = []
for pid, p_dict in self.p_dicts.items():
parent_pid = p_dict['parent']
# processes without parents are root processes
# WORKAROUND FOR OSX: pid 0's parent is itself, so we need to check
# if a process's parent is itself
if parent_pid is None or parent_pid == pid:
self.root_pids.append(pid)
def _update_process_children_map(self):
"""Creates a dict of the children of each process in the system.
This is way way way faster than calling psutil.get_children()
each time we want to iterate on a process's children.
Indexed by process PID.
"""
# create a list for each process
for pid in self.p_dicts:
self.child_map[pid] = []
# add each process to its parent's child list
for pid, p_dict in self.p_dicts.items():
parent_pid = p_dict['parent']
            # On OSX, the process with PID=0 is its own parent.
# We need to check for recursive relationships like this to
# prevent infinite recursion.
if parent_pid is not None and parent_pid != pid:
self.child_map[parent_pid].append(pid)
def _snapshot_process(self, process):
try:
p = process.as_dict(
attrs=['pid', 'name', 'memory_percent', 'cpu_percent',
'username', 'memory_info'])
parent = process.parent()
p['parent'] = parent.pid if parent is not None else None
self.p_dicts[p['pid']] = p
except psutil.NoSuchProcess:
# processes are ephemeral, this one must have disappeared, so
# ignore it
pass
def _get_percents_including_children(self):
for pid in self.p_dicts:
# call it so the value gets cached
self.get_percent_including_children(pid)
def get_name(self, pid):
return self.p_dicts[pid]['name']
def get_memory_percent(self, pid):
return self.p_dicts[pid]['memory_percent']
def get_cpu_percent(self, pid):
return self.p_dicts[pid]['cpu_percent']
def get_username(self, pid):
return self.p_dicts[pid]['username']
def get_memory_info(self, pid):
return self.p_dicts[pid]['memory_info']
def get_parent_pid(self, pid):
return self.p_dicts[pid]['parent']
def get_child_pids(self, pid):
return self.child_map[pid]
def get_root_pids(self):
return self.root_pids
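# Hypothetical usage sketch: take a snapshot and print each root process's
# memory share including its children (the output format is illustrative).
if __name__ == '__main__':
    graph = generate_process_graph()
    for root_pid in graph.get_root_pids():
        print('%s: %.2f%%' % (graph.get_name(root_pid),
                              graph.get_percent_including_children(root_pid)))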
| Spferical/visram | visram/processes.py | Python | mit | 4,514 |
## This file is part of PyGaze - the open-source toolbox for eye tracking
##
## PyGaze is a Python module for easily creating gaze contingent experiments
## or other software (as well as non-gaze contingent experiments/software)
## Copyright (C) 2012-2013 Edwin S. Dalmaijer
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
#
# version: 0.4 (25-03-2013)
# MAIN
DUMMYMODE = False # False for gaze contingent display, True for dummy mode (using mouse or joystick)
LOGFILENAME = 'default' # logfilename, without path
LOGFILE = LOGFILENAME[:] # .txt; adding path before logfilename is optional; logs responses (NOT eye movements, these are stored in an EDF file!)
TRIALS = 5
# DISPLAY
# used in libscreen, for the *_display functions. The values may be adjusted,
# but not the constant's names
SCREENNR = 0 # number of the screen used for displaying experiment
DISPTYPE = 'pygame' # either 'psychopy' or 'pygame'
DISPSIZE = (1920,1080) # canvas size
SCREENSIZE = (34.5, 19.7) # physical display size in cm
MOUSEVISIBLE = False # mouse visibility
BGC = (125,125,125) # backgroundcolour
FGC = (0,0,0) # foregroundcolour
# SOUND
# defaults used in libsound. The values may be adjusted, but not the constants'
# names
SOUNDOSCILLATOR = 'sine' # 'sine', 'saw', 'square' or 'whitenoise'
SOUNDFREQUENCY = 440 # Herz
SOUNDLENGTH = 100 # milliseconds (duration)
SOUNDATTACK = 0 # milliseconds (fade-in)
SOUNDDECAY = 5 # milliseconds (fade-out)
SOUNDBUFFERSIZE = 1024 # increase if playback is choppy
SOUNDSAMPLINGFREQUENCY = 48000 # samples per second
SOUNDSAMPLESIZE = -16 # determines bit depth (negative is signed)
SOUNDCHANNELS = 2 # 1 = mono, 2 = stereo
# INPUT
# used in libinput. The values may be adjusted, but not the constant names.
MOUSEBUTTONLIST = None # None for all mouse buttons; list of numbers for buttons of choice (e.g. [1,3] for buttons 1 and 3)
MOUSETIMEOUT = None # None for no timeout, or a value in milliseconds
KEYLIST = None # None for all keys; list of keynames for keys of choice (e.g. ['space','9',':'] for space, 9 and : keys)
KEYTIMEOUT = 1 # None for no timeout, or a value in milliseconds
JOYBUTTONLIST = None # None for all joystick buttons; list of button numbers (start counting at 0) for buttons of choice (e.g. [0,3] for buttons 0 and 3 - may be referred to as 1 and 4 in other programs)
JOYTIMEOUT = None # None for no timeout, or a value in milliseconds
# EYETRACKER
# general
TRACKERTYPE = 'smi' # either 'smi', 'eyelink' or 'dummy' (NB: if DUMMYMODE is True, trackertype will be set to dummy automatically)
SACCVELTHRESH = 35 # degrees per second, saccade velocity threshold
SACCACCTHRESH = 9500 # degrees per second, saccade acceleration threshold
# EyeLink only
# SMI only
SMIIP = '127.0.0.1'
SMISENDPORT = 4444
SMIRECEIVEPORT = 5555
# FRL
# Used in libgazecon.FRL. The values may be adjusted, but not the constant names.
FRLSIZE = 200 # pixels, FRL-size
FRLDIST = 125 # distance between fixation point and FRL
FRLTYPE = 'gauss' # 'circle', 'gauss', 'ramp' or 'raisedCosine'
FRLPOS = 'center' # 'center', 'top', 'topright', 'right', 'bottomright', 'bottom', 'bottomleft', 'left', or 'topleft'
# CURSOR
# Used in libgazecon.Cursor. The values may be adjusted, but not the constants' names
CURSORTYPE = 'cross' # 'rectangle', 'ellipse', 'plus' (+), 'cross' (X), 'arrow'
CURSORSIZE = 20 # pixels, either an integer value or a tuple for width and height (w,h)
CURSORCOLOUR = 'pink' # colour name (e.g. 'red'), an RGB-triplet tuple (e.g. (255,255,255) for white or (0,0,0) for black), or an RGBA-value (e.g. (255,0,0,255) for red)
CURSORFILL = True # True for filled cursor, False for non-filled cursor
CURSORPENWIDTH = 3 # cursor edge width in pixels (only if cursor is not filled)
| esdalmaijer/EyeTribe_test | experiment/pygaze/examples/simple_tracker_experiment/constants.py | Python | gpl-3.0 | 4,442 |
"Bilateral grid."
import sys
from halide import *
int_t = Int(32)
float_t = Float(32)
def main():
def lerp(a, b, alpha):
return (1.0-alpha)*a + alpha*b
input = ImageParam(float_t, 3, 'input')
r_sigma = Param(float_t, 0.1)
s_sigma = 8
x = Var('x')
y = Var('y')
z = Var('z')
c = Var('c')
clamped = Func('clamped')
clamped[x, y] = input[clamp(x, 0, input.width()-1),
clamp(y, 0, input.height()-1),0]
r = RDom(0, s_sigma, 0, s_sigma, 'r')
val = clamped[x * s_sigma + r.x - s_sigma/2, y * s_sigma + r.y - s_sigma/2]
val = clamp(val, 0.0, 1.0)
zi = cast(int_t, val * (1.0/r_sigma) + 0.5)
grid = Func('grid')
grid[x, y, z, c] = 0.0
grid[x, y, zi, c] += select(c == 0, val, 1.0)
# Blur the grid using a five-tap filter
blurx, blury, blurz = Func('blurx'), Func('blury'), Func('blurz')
blurx[x, y, z] = grid[x-2, y, z] + grid[x-1, y, z]*4 + grid[x, y, z]*6 + grid[x+1, y, z]*4 + grid[x+2, y, z]
blury[x, y, z] = blurx[x, y-2, z] + blurx[x, y-1, z]*4 + blurx[x, y, z]*6 + blurx[x, y+1, z]*4 + blurx[x, y+2, z]
blurz[x, y, z] = blury[x, y, z-2] + blury[x, y, z-1]*4 + blury[x, y, z]*6 + blury[x, y, z+1]*4 + blury[x, y, z+2]
# Take trilinear samples to compute the output
val = clamp(clamped[x, y], 0.0, 1.0)
zv = val * (1.0/r_sigma)
zi = cast(int_t, zv)
zf = zv - zi
xf = cast(float_t, x % s_sigma) / s_sigma
yf = cast(float_t, y % s_sigma) / s_sigma
xi = x/s_sigma
yi = y/s_sigma
interpolated = Func('interpolated')
interpolated[x, y] = lerp(lerp(lerp(blurz[xi, yi, zi], blurz[xi+1, yi, zi], xf),
lerp(blurz[xi, yi+1, zi], blurz[xi+1, yi+1, zi], xf), yf),
lerp(lerp(blurz[xi, yi, zi+1], blurz[xi+1, yi, zi+1], xf),
lerp(blurz[xi, yi+1, zi+1], blurz[xi+1, yi+1, zi+1], xf), yf), zf)
# Normalize
smoothed = Func('smoothed')
smoothed[x, y, c] = interpolated[x, y, 0]/interpolated[x, y, 1]
schedule = 1
if schedule == 0:
pass
elif schedule == 1:
# Best schedule for CPU
grid.compute_root().parallel(z)
#grid.update().reorder(c, x, y).parallel(y) # This fails with SEGFAULT
blurx.compute_root().parallel(z).vectorize(x, 4)
blury.compute_root().parallel(z).vectorize(x, 4)
blurz.compute_root().parallel(z).vectorize(x, 4)
smoothed.compute_root().parallel(y).vectorize(x, 4)
elif schedule == 2:
# Best schedule for GPU
gridz = grid.arg(2)
grid.compute_root().cudaTile(x, y, 16, 16)
grid.update().root().cudaTile(x, y, 16, 16)
blurx.compute_root().cudaTile(x, y, 8, 8)
blury.compute_root().cudaTile(x, y, 8, 8)
blurz.compute_root().cudaTile(x, y, 8, 8)
smoothed.compute_root().cudaTile(x, y, s_sigma, s_sigma)
else:
raise ValueError
eval_func = filter_image(input, smoothed, builtin_image('rgb.png'), disp_time=True)
I = eval_func()
if len(sys.argv) >= 2:
I.save(sys.argv[1])
else:
I.show()
if __name__ == '__main__':
main()
| mikeseven/Halide | python_bindings/apps/bilateral_grid.py | Python | mit | 3,216 |
from kivy.graphics import Color, Line, Quad
from modeful.ui.diagram.relationship import Trigonometry
from modeful.ui.diagram.relationship.association import Association
class Aggregation(Association):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with self.canvas.before:
Color(1, 1, 1)
self._diamond_bg = Quad(points=[0]*8)
Color(0, 0, 0, .5)
self._diamond_line = Line(points=[], width=1, close=True)
def redraw(self, x1, y1, x2, y2):
super().redraw(x1, y1, x2, y2)
points = Trigonometry.get_diamond_points(x1, y1, x2, y2, size=15)
self._diamond_bg.points = points
self._diamond_line.points = points
| Modeful/poc | modeful/ui/diagram/relationship/aggregation.py | Python | gpl-3.0 | 774 |
from __future__ import absolute_import
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .utils import add_pyspark_path_if_needed, quiet_py4j
add_pyspark_path_if_needed()
from .testcase import SparkTestingBaseReuse
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
from functools import reduce
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.streaming.context import StreamingContext
class StreamingTestCase(SparkTestingBaseReuse):
"""Basic common test case for Spark Streaming tests. Provides a
Spark Streaming context as well as some helper methods for creating
streaming input and collecting streaming output.
Modeled after PySparkStreamingTestCase."""
timeout = 15 # seconds
duration = .5
@classmethod
def setUpClass(cls):
super(StreamingTestCase, cls).setUpClass()
cls.sc.setCheckpointDir("/tmp")
@classmethod
def tearDownClass(cls):
super(StreamingTestCase, cls).tearDownClass()
@classmethod
def _sort_result_based_on_key(cls, result):
        return [sorted(x) for x in result]
def setUp(self):
self.ssc = StreamingContext(self.sc, self.duration)
def tearDown(self):
self.ssc.stop(False)
def wait_for(self, result, n):
start_time = time.time()
while len(result) < n and time.time() - start_time < self.timeout:
time.sleep(0.01)
if len(result) < n:
print("timeout after", self.timeout)
def _take(self, dstream, n):
"""
Return the first `n` elements in the stream (will start and stop).
"""
results = []
def take(_, rdd):
if rdd and len(results) < n:
results.extend(rdd.take(n - len(results)))
dstream.foreachRDD(take)
self.ssc.start()
self.wait_for(results, n)
return results
def _collect(self, dstream, n, block=True):
"""
Collect each RDDs into the returned list.
:return: list, which will have the collected items.
"""
result = []
def get_output(_, rdd):
if rdd and len(result) < n:
r = rdd.collect()
if r:
result.append(r)
dstream.foreachRDD(get_output)
if not block:
return result
self.ssc.start()
self.wait_for(result, n)
return result
def run_func(self, input, func, expected, sort=False, input2=None):
"""
@param input: dataset for the test. This should be list of lists
or list of RDDs.
@param input2: Optional second dataset for the test. If provided your
func must take two PythonDStreams as input.
@param func: wrapped function. This function should return
PythonDStream.
@param expected: expected output for this testcase.
Warning: If output is longer than expected this will silently
discard the additional output. TODO: fail when this happens.
"""
if not isinstance(input[0], RDD):
input = [self.sc.parallelize(d, 1) for d in input]
input_stream = self.ssc.queueStream(input)
if input2 and not isinstance(input2[0], RDD):
input2 = [self.sc.parallelize(d, 1) for d in input2]
# Apply test function to stream.
if input2:
input_stream2 = self.ssc.queueStream(input2)
stream = func(input_stream, input_stream2)
else:
stream = func(input_stream)
result = self._collect(stream, len(expected))
if sort:
            result = self._sort_result_based_on_key(result)
            expected = self._sort_result_based_on_key(expected)
self.assertEqual(expected, result)
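# Hypothetical example of run_func (sketch only; the class name and data are
# illustrative): a word-count DStream test.
#
# class WordCountTest(StreamingTestCase):
#     def test_word_count(self):
#         input = [["a", "b", "a"]]
#         func = lambda stream: (stream.map(lambda w: (w, 1))
#                                      .reduceByKey(operator.add))
#         expected = [[("a", 2), ("b", 1)]]
#         self.run_func(input, func, expected, sort=True)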
| snithish/spark-testing-base | python/sparktestingbase/streamingtestcase.py | Python | apache-2.0 | 4,566 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, absolute_import
from .script_interface import ScriptInterfaceHelper, script_interface_register
import numpy as np
@script_interface_register
class MeanVarianceCalculator(ScriptInterfaceHelper):
"""
Accumulates results from observables.
Parameters
----------
obs : Instance of :class:`espressomd.observables.Observable`.
delta_N : :obj:`int`
Number of timesteps between subsequent samples for the auto update mechanism.
Methods
-------
update
Update the accumulator (get the current values from the observable).
get_mean
Returns the samples mean values of the respective observable with which the
accumulator was initialized.
get_variance
Returns the samples variance for the observable.
"""
_so_name = "Accumulators::MeanVarianceCalculator"
_so_bind_methods = (
"update",
"get_mean",
"get_variance"
)
_so_creation_policy = "LOCAL"
@script_interface_register
class Correlator(ScriptInterfaceHelper):
"""
Calculates correlations based on results from observables.
Parameters
----------
obs1, obs2 : Instances of :class:`espressomd.observables.Observable`.
The observables A and B that are to be correlated. If `obs2`
is omitted, autocorrelation of `obs1` is calculated by
default.
corr_operation : :obj:`str`
The operation that is performed on :math:`A(t)` and
:math:`B(t+\\tau)` to obtain :math:`C(\\tau)`. The
following operations are currently available:
* `scalar_product`: Scalar product of :math:`A` and
:math:`B`, i.e., :math:`C=\sum\limits_{i} A_i B_i`
* `componentwise_product`: Componentwise product of
:math:`A` and :math:`B`, i.e., :math:`C_i = A_i B_i`
* `square_distance_componentwise`: Each component of
the correlation vector is the square of the difference
between the corresponding components of the
                  observables, i.e., :math:`C_i = (A_i-B_i)^2`. Example:
when :math:`A` is `ParticlePositions`, it produces the
mean square displacement (for each component
separately).
* `tensor_product`: Tensor product of :math:`A` and
:math:`B`, i.e., :math:`C_{i \\cdot l_B + j} = A_i B_j`
with :math:`l_B` the length of :math:`B`.
* `complex_conjugate_product`: assuming that the observables
consist of a complex and real part
:math:`A=(A_x+iA_y)`, and :math:`B=(B_x+iB_y)`, this
operation computes the result :math:`C=(C_x+iC_y)`,
as:
.. math::
C_x = A_xB_x + A_yB_y\\\\
C_y = A_yB_x - A_xB_y
* `fcs_acf`:
Fluorescence Correlation Spectroscopy (FCS)
autocorrelation function, i.e.,
.. math::
G_i(\\tau) =
\\frac{1}{N} \\left< \\exp \\left(
- \\frac{\\Delta x_i^2(\\tau)}{w_x^2}
- \\frac{\\Delta y_i^2(\\tau)}{w_y^2}
- \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
\\right) \\right>
where
.. math::
\\Delta x_i^2(\\tau) = \\left( x_i(0) - x_i(\\tau) \\right)^2
is the square displacement of particle
:math:`i` in the :math:`x` direction, and :math:`w_x`
is the beam waist of the intensity profile of the
exciting laser beam,
.. math::
W(x,y,z) = I_0 \\exp
\\left( - \\frac{2x^2}{w_x^2} - \\frac{2y^2}{w_y^2} -
\\frac{2z^2}{w_z^2} \\right).
The values of :math:`w_x`, :math:`w_y`, and :math:`w_z`
are passed to the correlator as `args`
The above equations are a
generalization of the formula presented by Hoefling
et. al. :cite:`hofling11a`. For more information, see
references therein. Per each 3 dimensions of the
observable, one dimension of the correlation output
is produced. If `fcs_acf` is used with other
observables than `ParticlePositions`, the physical
meaning of the result is unclear.
delta_N : :obj:`int`
Number of timesteps between subsequent samples for the auto update mechanism.
tau_max : :obj:`float`
This is the maximum value of :math:`\tau` for which the
correlation should be computed. Warning: Unless you are using
the multiple tau correlator, choosing `tau_max` of more than
100`dt` will result in a huge computational overhead. In a
multiple tau correlator with reasonable parameters, `tau_max`
can span the entire simulation without too much additional cpu
time.
tau_lin : :obj:`int`
The number of data-points for which the results are linearly spaced
in `tau`. This is a parameter of the multiple tau correlator. If you
want to use it, make sure that you know how it works. By default, it
is set equal to `tau_max` which results in the trivial linear
correlator. By setting `tau_lin` < `tau_max` the multiple
tau correlator is switched on. In many cases, `tau_lin`=16 is a
good choice but this may strongly depend on the observables you are
correlating. For more information, we recommend to read
Ref. :cite:`ramirez10a` or to perform your own tests.
compress1 and compress2 : :obj:`str`
These functions are used to compress the data when
going to the next level of the multiple tau
correlator. This is done by producing one value out of two.
The following compression functions are available:
* `discard2`: (default value) discard the second value from the time series, use the first value as the result
* `discard1`: discard the first value from the time series, use the second value as the result
* `linear`: make a linear combination (average) of the two values
If only `compress1` is specified, then
the same compression function is used for both
observables. If both `compress1` and `compress2` are specified,
then `compress1` is used for `obs1` and `compress2` for `obs2`.
Both `discard1` and `discard2` are safe for all
observables but produce poor statistics in the
tail. For some observables, `linear` compression
can be used which makes an average of two
neighboring values but produces systematic
errors. Depending on the observable, the
systematic error using the `linear` compression
can be anything between harmless and disastrous.
For more information, we recommend to read Ref.
:cite:`ramirez10a` or to perform your own tests.
args: :obj:`float[3]`
Three floats which are passed as arguments to the
correlation function. Currently it is only used by
fcs_acf. Other correlation operations will ignore these
values.
"""
_so_name = "Accumulators::Correlator"
_so_bind_methods = (
"update",
"finalize")
_so_creation_policy = "LOCAL"
def result(self):
res = np.array(self.call_method("get_correlation"))
return res.reshape((self.n_result, 2 + self.dim_corr))
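# Hypothetical usage sketch (observable and parameters are illustrative; the
# keyword names follow the docstring above): a velocity autocorrelation.
#
#     from espressomd.observables import ParticleVelocities
#     vel_obs = ParticleVelocities(ids=(0,))
#     corr = Correlator(obs1=vel_obs, corr_operation="scalar_product",
#                       delta_N=1, tau_lin=16, tau_max=20.,
#                       compress1="discard1")
#     system.auto_update_accumulators.add(corr)
#     ...  # run the integrator
#     corr.finalize()
#     acf = corr.result()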
@script_interface_register
class AutoUpdateAccumulators(ScriptInterfaceHelper):
"""
Class for handling auto-update of Accumulators used by
:class:`espressomd.System`.
"""
_so_name = "Accumulators::AutoUpdateAccumulators"
_so_creation_policy = "LOCAL"
def add(self, Accumulator):
"""
Adds a Accumulator instance to the auto-update list in the system.
"""
self.call_method("add", object=Accumulator)
def remove(self, Accumulator):
"""
Removes an MeanVarianceCalculator from the auto-update list.
"""
self.call_method("remove", object=Accumulator)
| hmenke/espresso | src/python/espressomd/accumulators.py | Python | gpl-3.0 | 10,294 |
"""
Models for activity
"""
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.timesince import timesince
from django.db import models
class Activity(models.Model):
actor = models.ForeignKey(User)
verb = models.CharField(max_length=200)
action_content_type = models.ForeignKey(ContentType, related_name='action_contenttype')
action_object_id = models.PositiveIntegerField()
action_object = generic.GenericForeignKey('action_content_type', 'action_object_id')
target_content_type = models.ForeignKey(ContentType, related_name='target_contenttype')
target_object_id = models.PositiveIntegerField()
target_object = generic.GenericForeignKey('target_content_type', 'target_object_id')
timestamp = models.DateTimeField(auto_now_add=True)
@property
def timesince(self):
return timesince(
self.timestamp, None).encode('utf8').replace(b'\xc2\xa0', b' ').decode('utf8')
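# Hypothetical usage sketch (the instances below are illustrative): the
# generic foreign keys accept any model instance.
#
#     Activity.objects.create(actor=user, verb='commented on',
#                             action_object=comment, target_object=post)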
| openhealthcare/opal-activity | activity/models.py | Python | agpl-3.0 | 1,075 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.utils.generalized_advantage_estimation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.utils import value_ops
def _naive_gae_as_ground_truth(discounts, rewards, values, final_value,
td_lambda):
"""A naive GAE closely resembles equation (16) in the paper.
Slow, for testing purpose only.
For full paper see https://arxiv.org/abs/1506.02438.pdf
Args:
discounts: `np.array` with shape [T, B].
rewards: `np.array` with shape [T, B].
values: `np.array` with shape [T, B].
final_value: `np.array` with shape [B].
td_lambda: A float scalar.
Returns:
A `np.array` with shape[T, B] representing the advantages.
"""
episode_length = len(values)
  values_t_plus_1 = np.concatenate([values, final_value[None, :]], axis=0)
  delta_v = [
      (rewards[t] + discounts[t] * values_t_plus_1[t + 1] - values_t_plus_1[t])
      for t in range(episode_length)
  ]
weighted_discounts = discounts * td_lambda
advantages = []
for s in range(episode_length):
advantage = np.copy(delta_v[s])
for t in range(s + 1, episode_length):
advantage += np.prod(weighted_discounts[s:t], axis=0) * delta_v[t]
advantages.append(advantage)
return np.array(advantages)
def _numpy_discounted_return(rewards, discounts, final_value):
"""A naive reward to do implemented in python.
Slow, for testing purpose only.
Args:
rewards: `np.array` with shape [T, B].
discounts: `np.array` with shape [T, B].
final_value: `np.array` with shape [B].
Returns:
A `np.array` with shape[T, B] representing the target values.
"""
if final_value is None:
final_value = np.zeros_like(rewards[-1])
discounted_returns = np.zeros_like(rewards)
accumulated_rewards = final_value
for t in reversed(range(len(rewards))):
discounted_returns[t] = rewards[t] + discounts[t] * accumulated_rewards
accumulated_rewards = discounted_returns[t]
return discounted_returns
class DiscountedReturnTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('single_batch_single_step_without_final_value', 1, 1, False),
('single_batch_single_step_with_final_value', 1, 1, True),
('multiple_batch_multiple_step_without_final_value', 7, 9, False),
('multiple_batch_multiple_step_with_final_value', 7, 9, True),
)
def testDiscountedReturnIsCorrectlyComputed(self,
num_time_steps,
batch_size,
with_final_value):
rewards = np.random.rand(num_time_steps, batch_size).astype(np.float32)
discounts = np.random.rand(num_time_steps, batch_size).astype(np.float32)
final_value = np.random.rand(batch_size).astype(
np.float32) if with_final_value else None
discounted_return = value_ops.discounted_return(
rewards=rewards, discounts=discounts, final_value=final_value)
single_discounted_return = value_ops.discounted_return(
rewards=rewards, discounts=discounts, final_value=final_value,
provide_all_returns=False)
expected = _numpy_discounted_return(
rewards=rewards, discounts=discounts, final_value=final_value)
self.assertAllClose(discounted_return, expected)
self.assertAllClose(single_discounted_return, expected[0])
@parameterized.named_parameters(
('single_batch_single_step_without_final_value', 1, 1, False),
('single_batch_single_step_with_final_value', 1, 1, True),
('multiple_batch_multiple_step_without_final_value', 7, 9, False),
('multiple_batch_multiple_step_with_final_value', 7, 9, True),
)
def testTimeMajorBatchMajorDiscountedReturnsAreSame(self,
num_time_steps,
batch_size,
with_final_value):
rewards = np.random.rand(num_time_steps, batch_size).astype(np.float32)
discounts = np.random.rand(num_time_steps, batch_size).astype(np.float32)
final_value = np.random.rand(batch_size).astype(
np.float32) if with_final_value else None
time_major_discounted_return = value_ops.discounted_return(
rewards=rewards,
discounts=discounts,
final_value=final_value)
batch_major_discounted_return = value_ops.discounted_return(
rewards=tf.transpose(rewards),
discounts=tf.transpose(discounts),
final_value=final_value,
time_major=False)
self.assertAllClose(time_major_discounted_return,
tf.transpose(batch_major_discounted_return))
single_time_major_discounted_return = value_ops.discounted_return(
rewards=rewards,
discounts=discounts,
final_value=final_value,
provide_all_returns=False)
single_batch_major_discounted_return = value_ops.discounted_return(
rewards=tf.transpose(rewards),
discounts=tf.transpose(discounts),
final_value=final_value,
time_major=False,
provide_all_returns=False)
self.assertAllClose(single_time_major_discounted_return,
time_major_discounted_return[0])
self.assertAllClose(single_batch_major_discounted_return,
time_major_discounted_return[0])
def testDiscountedReturnWithFinalValueMatchPrecomputedResult(self):
discounted_return = value_ops.discounted_return(
rewards=tf.constant([1] * 9, dtype=tf.float32),
discounts=tf.constant(
[1, 1, 1, 1, 0, 0.9, 0.9, 0.9, 0.9], dtype=tf.float32),
final_value=tf.constant(8, dtype=tf.float32))
expected = [
5, 4, 3, 2, 1, 8 * 0.9**4 + 3.439, 8 * 0.9**3 + 2.71, 8 * 0.9**2 + 1.9,
8 * 0.9 + 1
]
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllClose(discounted_return, expected)
class GeneralizedAdvantageEstimationTest(tf.test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
('single_batch_single_step', 1, 1, 0.7),
('multiple_batch_multiple_step', 7, 9, 0.7),
('multiple_batch_multiple_step_lambda_0', 7, 9, 0.),
('multiple_batch_multiple_step_lambda_1', 7, 9, 1.),
)
def testAdvantagesAreCorrectlyComputed(self,
batch_size,
num_time_steps,
td_lambda):
rewards = np.random.rand(num_time_steps, batch_size).astype(np.float32)
discounts = np.random.rand(num_time_steps, batch_size).astype(np.float32)
values = np.random.rand(num_time_steps, batch_size).astype(np.float32)
final_value = np.random.rand(batch_size).astype(np.float32)
ground_truth = _naive_gae_as_ground_truth(
discounts=discounts,
rewards=rewards,
values=values,
final_value=final_value,
td_lambda=td_lambda)
advantages = value_ops.generalized_advantage_estimation(
discounts=discounts,
rewards=rewards,
values=values,
final_value=final_value,
td_lambda=td_lambda)
self.assertAllClose(advantages, ground_truth)
def testAdvantagesMatchPrecomputedResult(self):
advantages = value_ops.generalized_advantage_estimation(
discounts=tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0, 0.9, 0.9, 0.9, 0.0]]),
rewards=tf.fill([2, 9], 1.0),
values=tf.fill([2, 9], 3.0),
final_value=tf.fill([2], 3.0),
td_lambda=0.95,
time_major=False)
# Precomputed according to equation (16) in paper.
ground_truth = tf.constant([[
2.0808625, 1.13775, 0.145, -0.9, -2.0, 0.56016475, -0.16355, -1.01, -2.0
], [
2.0808625, 1.13775, 0.145, -0.9, -2.0, 0.56016475, -0.16355, -1.01, -2.0
]])
self.assertAllClose(advantages, ground_truth)
if __name__ == '__main__':
tf.test.main()
| tensorflow/agents | tf_agents/utils/value_ops_test.py | Python | apache-2.0 | 8,908 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-11-25 03:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('terms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='termsandconditions',
name='markdown',
field=models.TextField(help_text='Formatted in Markdown', verbose_name='Terms and conditions'),
),
]
| sebastienbarbier/723e_server | seven23/models/terms/migrations/0002_auto_20171125_0356.py | Python | mit | 509 |
import numpy as np
import csv, os, random
from collections import Counter
def dirty_pairtree(htid):
period = htid.find('.')
prefix = htid[0:period]
postfix = htid[(period+1): ]
if '=' in postfix:
postfix = postfix.replace('+',':')
postfix = postfix.replace('=','/')
dirtyname = prefix + "." + postfix
return dirtyname
def get_metadata(classpath, volumeIDs):
classdict = dict()
datedict = dict()
birthdict = dict()
with open(classpath, encoding = 'utf-8') as f:
reader = csv.DictReader(f)
for fields in reader:
volid = fields['htid']
if volid not in volumeIDs:
print('Missing ' + volid)
continue
theclass = fields['class']
pubdate = int(fields['pubdate'])
birthdate = int(fields['birthdate'])
if theclass == 'elite':
classdict[volid] = 1
elif theclass == 'vulgar':
classdict[volid] = 0
else:
classdict[volid] = 0
print('Anomalous class for ' + volid)
datedict[volid] = pubdate
birthdict[volid] = birthdate
return classdict, datedict, birthdict
## MAIN code starts here.
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fic1899/'
extension = '.fic.tsv'
VOCABSIZE = 10000
classpath = '/Users/tunder/Dropbox/GenreProject/metadata/richficmeta1899.csv'
if not sourcefolder.endswith('/'):
sourcefolder = sourcefolder + '/'
# This just makes things easier.
# Get a list of files.
allthefiles = os.listdir(sourcefolder)
random.shuffle(allthefiles)
volumeIDs = list()
volumepaths = list()
for filename in allthefiles:
if filename.endswith(extension):
volID = filename.replace(extension, "")
# The volume ID is basically the filename minus its extension.
# Extensions are likely to be long enough that there is little
# danger of accidental occurrence inside a filename. E.g.
# '.fic.tsv'
path = sourcefolder + filename
volumeIDs.append(volID)
volumepaths.append(path)
# Get the class and date vectors, indexed by volume ID
classdict, datedict, birthdict = get_metadata(classpath, volumeIDs)
# make a vocabulary list and a volsize dict
wordcounts = Counter()
volsizes = Counter()
datebins = [1880,1884,1888,1892,1896, 1900, 1910, 1920]
# datebins = [10, 1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890]
NUMBINS = len(datebins)
for volid, volpath in zip(volumeIDs, volumepaths):
with open(volpath, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
word = fields[0]
if len(word) > 1 and word[0].isalpha():
count = int(fields[1])
wordcounts[word] += 1
volsizes[volid] += count
etymological_categories = ['pre', 'post', 'stopword', 'missing']
etymo = dict()
with open('/Users/tunder/Dropbox/PythonScripts/mine/metadata/ReMergedEtymologies.txt', encoding = 'utf-8') as f:
for line in f:
fields = line.split('\t')
date = int(fields[1])
if date > 800 and date < 1150:
etymo[fields[0]] = 'pre'
elif date >= 1150 and date < 1700:
etymo[fields[0]] = 'post'
else:
etymo[fields[0]] = 'stopword'
vocablist = [x[0] for x in wordcounts.most_common(VOCABSIZE)]
VOCABSIZE = len(vocablist)
vocabset = set(vocablist)
# Here's the crucial change from make granger data. We map all
# words onto an etymological category
vocabmapper = dict()
for idx, word in enumerate(vocablist):
if word in etymo:
vocabmapper[word] = etymo[word]
else:
vocabmapper[word] = 'missing'
binsforcategory = dict()
for category in [0,1]:
datematrix = list()
for i in range(NUMBINS):
etymmatrix = dict()
for etym in etymological_categories:
etymmatrix[etym] = 0
datematrix.append(etymmatrix)
binsforcategory[category] = datematrix
datemapper = dict()
for volid in volumeIDs:
date = datedict[volid]
for idx, dateceiling in enumerate(datebins):
if date < dateceiling:
datemapper[volid] = idx
break
volpre = Counter()
volpost = Counter()
for volid, volpath in zip(volumeIDs, volumepaths):
with open(volpath, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
word = fields[0]
if word in vocabset:
count = int(fields[1])
dateidx = datemapper[volid]
category = classdict[volid]
etymcategory = vocabmapper[word]
if etymcategory == 'pre':
volpre[volid] += count
elif etymcategory == 'post':
volpost[volid] += count
with open('/Users/tunder/Dropbox/GenreProject/python/reception/fiction/birthdates.csv', mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['volid', 'pubdate', 'birthdate', 'prominence', 'post', 'pre', 'ratio'])
for volid in volumeIDs:
pubdate = datedict[volid]
birthdate = birthdict[volid]
prominence = classdict[volid]
post = volpost[volid]
pre = volpre[volid]
ratio = pre / (post + 0.0001)
writer.writerow([volid, pubdate, birthdate, prominence, post, pre, ratio])
# with open('/Users/tunder/Dropbox/GenreProject/python/granger/vulgarficratio.csv', mode = 'w', encoding = 'utf-8') as f:
# writer = csv.writer(f)
# writer.writerow(['date', 'ratio'])
# for idx, row in enumerate(binsforcategory[0]):
# writer.writerow([str(datebins[idx]), str(row['ratio'])])
| tedunderwood/GenreProject | python/reception/fiction/makeficetymratio.py | Python | mit | 5,788 |
from django.test import TestCase
from candidator.models import Topic, Position, TakenPosition, Category
from popolo.models import Person
from candidator.comparer import Comparer, InformationHolder
class ComparisonTestCase(TestCase):
def setUp(self):
self.person1 = Person.objects.create(name=u"Person1")
self.person2 = Person.objects.create(name=u"Person2")
self.person3 = Person.objects.create(name=u"Person3")
self.marihuana_topic = Topic.objects.create(label=u"Marihuana")
self.marihuana_yes = Position.objects.create(
topic=self.marihuana_topic,
label=u"MarihuanaYes"
)
self.marihuana_no = Position.objects.create(
topic=self.marihuana_topic,
label=u"MarihuanaNo"
)
self.chamomile_topic = Topic.objects.create(label="Chamomile")
self.chamomile_yes = Position.objects.create(
topic=self.chamomile_topic,
label=u"ChamomileYes"
)
self.chamomile_no = Position.objects.create(
topic=self.chamomile_topic,
label=u"ChamomileNo"
)
self.religion_topic = Topic.objects.create(label=u"Religion")
self.religion_yes = Position.objects.create(
topic=self.religion_topic,
label=u"ReligionYes"
)
self.religion_no = Position.objects.create(
topic=self.religion_topic,
label=u"ReligionNo"
)
self.gay_marriage_topic = Topic.objects.create(label=u"GayMarriage")
self.gay_marriage_yes = Position.objects.create(
topic=self.gay_marriage_topic,
label=u"GayMarriageYes"
)
self.gay_marriage_no = Position.objects.create(
topic=self.gay_marriage_topic,
label=u"GayMarriageNo"
)
#
# topic\person | person1 | person2 | person3
#===================================================
# marihuana | y | n | n
# chamomille | y | n | n
# religion | n | y | n
# gay marriage | y | y | y
#
self.person1_chamomile_yes = TakenPosition.objects.create(
topic=self.chamomile_topic,
position=self.chamomile_yes,
person=self.person1
)
self.person1_marihuana_yes = TakenPosition.objects.create(
topic=self.marihuana_topic,
position=self.marihuana_yes,
person=self.person1
)
self.person1_religion_no = TakenPosition.objects.create(
topic=self.religion_topic,
position=self.religion_no,
person=self.person1
)
self.person1_gay_marriage_yes = TakenPosition.objects.create(
topic=self.gay_marriage_topic,
position=self.gay_marriage_yes,
person=self.person1
)
self.person2_chamomile_no = TakenPosition.objects.create(
topic=self.chamomile_topic,
position=self.chamomile_no,
person=self.person2
)
self.person2_marihuana_no = TakenPosition.objects.create(
topic=self.marihuana_topic,
position=self.marihuana_no,
person=self.person2
)
self.person2_religion_yes = TakenPosition.objects.create(
topic=self.religion_topic,
position=self.religion_yes,
person=self.person2
)
self.person2_gay_marriage_yes = TakenPosition.objects.create(
topic=self.gay_marriage_topic,
position=self.gay_marriage_yes,
person=self.person2
)
self.person3_chamomile_no = TakenPosition.objects.create(
topic=self.chamomile_topic,
position=self.chamomile_no,
person=self.person3
)
self.person3_marihuana_no = TakenPosition.objects.create(
topic=self.marihuana_topic,
position=self.marihuana_no,
person=self.person3
)
self.person3_religion_no = TakenPosition.objects.create(
topic=self.religion_topic,
position=self.religion_no,
person=self.person3
)
self.person3_gay_marriage_yes = TakenPosition.objects.create(
topic=self.gay_marriage_topic,
position=self.gay_marriage_yes,
person=self.person3
)
def test_compare_one_on_one(self):
'''Compare one on one'''
self.maxDiff = None
comparer = Comparer()
marihuana_position = TakenPosition(
topic=self.marihuana_topic,
position=self.marihuana_yes,
)
religion_position = TakenPosition(
topic=self.religion_topic,
position=self.religion_yes,
)
positions = {
self.marihuana_topic.id: marihuana_position,
self.religion_topic.id: religion_position
}
topics = [
self.marihuana_topic,
self.religion_topic
]
comparer.topics = topics
result = comparer.one_on_one(self.person1, positions)
expected_result = {
self.marihuana_topic.id: {
"topic": self.marihuana_topic,
"match": True,
"my_position": self.marihuana_yes,
"their_position": self.marihuana_yes
},
self.religion_topic.id: {
"topic": self.religion_topic,
"match": False,
"my_position": self.religion_yes,
"their_position": self.religion_no
}
}
self.assertEquals(result, expected_result)
def test_information_holder(self):
'''InformationHolder'''
information_holder = InformationHolder()
marihuana_position = TakenPosition(
topic=self.marihuana_topic,
position=self.marihuana_yes,
)
religion_position = TakenPosition(
topic=self.religion_topic,
position=self.religion_yes,
)
information_holder.add_position(marihuana_position)
information_holder.add_position(religion_position)
self.assertEquals(information_holder.positions[self.marihuana_topic.id], marihuana_position)
self.assertEquals(information_holder.positions[self.religion_topic.id], religion_position)
information_holder.add_person(self.person1)
self.assertEquals(information_holder.persons, [self.person1])
information_holder.add_person(self.person2)
self.assertEquals(information_holder.persons, [self.person1, self.person2])
information_holder.add_person(self.person3)
self.assertEquals(information_holder.persons, [self.person1, self.person2, self.person3])
information_holder.add_topic(self.marihuana_topic)
self.assertEquals(information_holder.topics, [self.marihuana_topic])
def test_information_holder_with_categories(self):
information_holder = InformationHolder()
herbs_category = Category.objects.create(name="Herbs")
self.marihuana_topic.category = herbs_category
self.marihuana_topic.save()
others_category = Category.objects.create(name="Others")
self.religion_topic.category = others_category
self.religion_topic.save()
information_holder.add_category(herbs_category)
information_holder.add_category(others_category)
self.assertEquals(information_holder.categories, [herbs_category, others_category])
self.assertEquals(information_holder.topics, [self.marihuana_topic, self.religion_topic])
def test_split_positions_in_categories(self):
information_holder = InformationHolder()
herbs_category = Category.objects.create(name="Herbs")
self.marihuana_topic.category = herbs_category
self.marihuana_topic.save()
self.chamomile_topic.category = herbs_category
self.chamomile_topic.save()
others_category = Category.objects.create(name="Others")
self.religion_topic.category = others_category
self.religion_topic.save()
self.gay_marriage_topic.category = others_category
self.gay_marriage_topic.save()
marihuana_position = TakenPosition(
topic=self.marihuana_topic,
position=self.marihuana_yes,
)
religion_position = TakenPosition(
topic=self.religion_topic,
position=self.religion_yes,
)
chamomile_position = TakenPosition(
topic=self.chamomile_topic,
position=self.chamomile_no,
)
gay_marriage_position = TakenPosition(
topic=self.gay_marriage_topic,
position=self.gay_marriage_yes,
)
information_holder.add_position(marihuana_position)
information_holder.add_position(religion_position)
information_holder.add_position(chamomile_position)
information_holder.add_position(gay_marriage_position)
positions_by_herbs = information_holder.positions_by(herbs_category)
self.assertEquals(positions_by_herbs[self.marihuana_topic.id], marihuana_position)
self.assertEquals(positions_by_herbs[self.chamomile_topic.id], chamomile_position)
positions_by_others = information_holder.positions_by(others_category)
self.assertEquals(positions_by_others[self.religion_topic.id], religion_position)
self.assertEquals(positions_by_others[self.gay_marriage_topic.id], gay_marriage_position)
def test_compare_categories_with_information_holder(self):
information_holder = InformationHolder()
herbs_category = Category.objects.create(name="Herbs")
self.marihuana_topic.category = herbs_category
self.marihuana_topic.save()
self.chamomile_topic.category = herbs_category
self.chamomile_topic.save()
others_category = Category.objects.create(name="Others")
self.religion_topic.category = others_category
self.religion_topic.save()
self.gay_marriage_topic.category = others_category
self.gay_marriage_topic.save()
marihuana_position = TakenPosition(
topic=self.marihuana_topic,
position=self.marihuana_no,
)
religion_position = TakenPosition(
topic=self.religion_topic,
position=self.religion_no,
)
chamomile_position = TakenPosition(
topic=self.chamomile_topic,
position=self.chamomile_no,
)
gay_marriage_position = TakenPosition(
topic=self.gay_marriage_topic,
position=self.gay_marriage_yes,
)
information_holder.add_position(marihuana_position)
information_holder.add_position(religion_position)
information_holder.add_position(chamomile_position)
information_holder.add_position(gay_marriage_position)
information_holder.add_person(self.person1)
information_holder.add_person(self.person2)
information_holder.add_person(self.person3)
information_holder.add_category(herbs_category)
information_holder.add_category(others_category)
comparer = Comparer()
result = comparer.compare(information_holder)
#
# topic\person | person1 | person2 | person3 | my positions
#====================================================================
# marihuana | y | n | n | n
        #  chamomile    |    y    |    n    |    n     |      n
# religion | n | y | n | n
# gay marriage | y | y | y | y
#====================================================================
        #  Affinity %   |   50%   |   75%  |   100%   |     N/A
#
self.maxDiff = None
expected_result = [{"person": self.person3,
"explanation": {
herbs_category.slug: {
"category": herbs_category,
"per_topic": {
self.marihuana_topic.id: {
"topic": self.marihuana_topic,
"match": True,
'my_position': self.marihuana_no,
'their_position': self.marihuana_no
},
self.chamomile_topic.id: {
"topic": self.chamomile_topic,
"match": True,
'my_position': self.chamomile_no,
'their_position': self.chamomile_no
},
}
},
others_category.slug: {
"category": others_category,
"per_topic": {
self.religion_topic.id: {
"topic": self.religion_topic,
"match": True,
'my_position': self.religion_no,
'their_position': self.religion_no
},
self.gay_marriage_topic.id: {
"topic": self.gay_marriage_topic,
"match": True,
'my_position': self.gay_marriage_yes,
'their_position': self.gay_marriage_yes
}
}
}
},
"percentage": 1.0
},
{"person": self.person2,
"explanation": {
herbs_category.slug: {
"category": herbs_category,
"per_topic": {
self.marihuana_topic.id: {
"topic": self.marihuana_topic,
"match": True,
'my_position': self.marihuana_no,
'their_position': self.marihuana_no
},
self.chamomile_topic.id: {
"topic": self.chamomile_topic,
"match": True,
'my_position': self.chamomile_no,
'their_position': self.chamomile_no
}
}
},
others_category.slug: {
"category": others_category,
"per_topic": {
self.religion_topic.id: {
"topic": self.religion_topic,
"match": False,
'my_position': self.religion_no,
'their_position': self.religion_yes
},
self.gay_marriage_topic.id: {
"topic": self.gay_marriage_topic,
"match": True,
'my_position': self.gay_marriage_yes,
'their_position': self.gay_marriage_yes
}
}
}
},
"percentage": 0.75
},
{"person": self.person1,
"explanation": {
herbs_category.slug: {
"category": herbs_category,
"per_topic": {
self.marihuana_topic.id: {
"topic": self.marihuana_topic,
"match": False,
'my_position': self.marihuana_no,
'their_position': self.marihuana_yes
},
self.chamomile_topic.id: {
"topic": self.chamomile_topic,
"match": False,
'my_position': self.chamomile_no,
'their_position': self.chamomile_yes
}
}
},
others_category.slug: {
"category": others_category,
"per_topic": {
self.religion_topic.id: {
"topic": self.religion_topic,
"match": True,
'my_position': self.religion_no,
'their_position': self.religion_no
},
self.gay_marriage_topic.id: {
"topic": self.gay_marriage_topic,
"match": True,
'my_position': self.gay_marriage_yes,
'their_position': self.gay_marriage_yes
}
}
}
},
"percentage": 0.5
}]
self.assertEquals(result, expected_result)
#
# topic\person | person1 | person2 | person3 | my positions
#====================================================================
# marihuana | y | n | n | n
        #  chamomile    |    y    |    n    |    n     |      n
# religion | - | y | n | n
# gay marriage | y | y | y | y
#====================================================================
        #  Affinity %   |   25%   |   75%  |   100%   |     N/A
#
self.person1_religion_no.delete()
result = comparer.compare(information_holder)
self.assertEquals(result[2]['percentage'], 0.25)
def test_compare_one_on_one_not_giving_a_taken_position(self):
        '''Compare one on one when a taken position is not given'''
comparer = Comparer()
marihuana_position = TakenPosition(
topic=self.marihuana_topic,
position=self.marihuana_yes,
)
positions = {
self.marihuana_topic.id: marihuana_position
}
topics = [
self.marihuana_topic,
self.religion_topic
]
comparer.topics = topics
result = comparer.one_on_one(self.person1, positions)
expected_result = {
self.marihuana_topic.id: {
"topic": self.marihuana_topic,
"match": True,
"my_position": self.marihuana_yes,
"their_position": self.marihuana_yes
},
self.religion_topic.id: {
"topic": self.religion_topic,
"match": False,
"my_position": None,
"their_position": self.religion_no
}
}
self.assertEquals(result, expected_result)
        # If there is no taken position for a given topic, the comparer
        # should automatically determine that this is not a match.
taken_position = TakenPosition.objects.get(person=self.person1, topic=self.religion_topic)
taken_position.position = None
taken_position.save()
result2 = comparer.one_on_one(self.person1, positions)
self.assertFalse(result2[self.religion_topic.id]["match"])
TakenPosition.objects.filter(topic=self.religion_topic).delete()
result3 = comparer.one_on_one(self.person1, positions)
self.assertFalse(result3[self.religion_topic.id]["match"])
| lfalvarez/django-candidator | candidator/tests/comparison_tests.py | Python | mit | 21,582 |
### File: konane.py
### Classes defined: KonaneError, Konane, Player, SimplePlayer,
### RandomPlayer, HumanPlayer
import random
import copy
class KonaneError(AttributeError):
"""
This class is used to indicate a problem in the konane game.
"""
class Konane:
"""
This class implements Konane, the Hawaiian version of checkers.
The board is represented as a two-dimensional list. Each
location on the board contains one of the following symbols:
'B' for a black piece
'W' for a white piece
'.' for an empty location
The black player always goes first. The opening moves by both
players are special cases and involve removing one piece from
specific designated locations. Subsequently, each move is a
jump over one of the opponent's pieces into an empty location.
The jump may continue in the same direction, if appropriate.
The jumped pieces are removed, and then it is the opponent's
turn. Play continues until one player has no possible moves,
making the other player the winner.
"""
def __init__(self, n):
self.size = n
self.reset()
def reset(self):
"""
Resets the starting board state.
"""
self.board = []
value = 'B'
for i in range(self.size):
row = []
for j in range(self.size):
row.append(value)
value = self.opponent(value)
self.board.append(row)
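            # after an even-width row the color would repeat, so flip it to
            # keep the checkerboard pattern alternating between rows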
if self.size%2 == 0:
value = self.opponent(value)
def __str__(self):
result = " "
for i in range(self.size):
result += str(i) + " "
result += "\n"
for i in range(self.size):
result += str(i) + " "
for j in range(self.size):
result += str(self.board[i][j]) + " "
result += "\n"
return result
def valid(self, row, col):
"""
Returns true if the given row and col represent a valid location on
the konane board.
"""
return row >= 0 and col >= 0 and row < self.size and col < self.size
def contains(self, board, row, col, symbol):
"""
Returns true if the given row and col represent a valid location on
the konane board and that location contains the given symbol.
"""
return self.valid(row,col) and board[row][col]==symbol
def countSymbol(self, board, symbol):
"""
Returns the number of instances of the symbol on the board.
"""
count = 0
for r in range(self.size):
for c in range(self.size):
if board[r][c] == symbol:
count += 1
return count
def opponent(self, player):
"""
Given a player symbol, returns the opponent's symbol, 'B' for black,
or 'W' for white.
"""
if player == 'B':
return 'W'
else:
return 'B'
def distance(self, r1, c1, r2, c2):
"""
Returns the distance between two points in a vertical or
horizontal line on the konane board. Diagonal jumps are NOT
allowed.
"""
return abs(r1-r2 + c1-c2)
def makeMove(self, player, move):
"""
Updates the current board with the next board created by the given
move.
"""
self.board = self.nextBoard(self.board, player, move)
def nextBoard(self, board, player, move):
"""
Given a move for a particular player from (r1,c1) to (r2,c2) this
executes the move on a copy of the current konane board. It will
raise a KonaneError if the move is invalid. It returns the copy of
the board, and does not change the given board.
"""
r1 = move[0]
c1 = move[1]
r2 = move[2]
c2 = move[3]
next = copy.deepcopy(board)
if not (self.valid(r1, c1) and self.valid(r2, c2)):
raise KonaneError
if next[r1][c1] != player:
raise KonaneError
dist = self.distance(r1, c1, r2, c2)
if dist == 0:
if self.openingMove(board):
next[r1][c1] = "."
return next
raise KonaneError
if next[r2][c2] != ".":
raise KonaneError
jumps = dist/2
dr = (r2 - r1)/dist
dc = (c2 - c1)/dist
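        # each hop clears the jumping piece's square and the jumped
        # opponent piece, then advances two squares in the same direction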
for i in range(jumps):
if next[r1+dr][c1+dc] != self.opponent(player):
raise KonaneError
next[r1][c1] = "."
next[r1+dr][c1+dc] = "."
r1 += 2*dr
c1 += 2*dc
next[r1][c1] = player
return next
def openingMove(self, board):
return self.countSymbol(board, ".") <= 1
def generateFirstMoves(self, board):
"""
Returns the special cases for the first move of the game.
"""
moves = []
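        # a zero-distance move [r, c, r, c] tells nextBoard to remove the piece at (r, c)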
moves.append([0]*4)
moves.append([self.size-1]*4)
moves.append([self.size/2]*4)
moves.append([(self.size/2)-1]*4)
return moves
def generateSecondMoves(self, board):
"""
Returns the special cases for the second move of the game, based
on where the first move occurred.
"""
moves = []
if board[0][0] == ".":
moves.append([0,1]*2)
moves.append([1,0]*2)
return moves
elif board[self.size-1][self.size-1] == ".":
moves.append([self.size-1,self.size-2]*2)
moves.append([self.size-2,self.size-1]*2)
return moves
elif board[self.size/2-1][self.size/2-1] == ".":
pos = self.size/2 -1
else:
pos = self.size/2
moves.append([pos,pos-1]*2)
moves.append([pos+1,pos]*2)
moves.append([pos,pos+1]*2)
moves.append([pos-1,pos]*2)
return moves
def check(self, board, r, c, rd, cd, factor, opponent):
"""
Checks whether a jump is possible starting at (r,c) and going
in the direction determined by the row delta, rd, and the
column delta, cd. The factor is used to recursively check for
multiple jumps in the same direction. Returns all possible
jumps in the given direction.
"""
if self.contains(board,r+factor*rd,c+factor*cd,opponent) and \
self.contains(board,r+(factor+1)*rd,c+(factor+1)*cd,'.'):
return [[r,c,r+(factor+1)*rd,c+(factor+1)*cd]] + \
self.check(board,r,c,rd,cd,factor+2,opponent)
else:
return []
def generateMoves(self, board, player):
"""
Generates and returns all legal moves for the given player
using the current board configuration.
"""
if self.openingMove(board):
if player=='B':
return self.generateFirstMoves(board)
else:
return self.generateSecondMoves(board)
else:
moves = []
rd = [-1,0,1,0]
cd = [0,1,0,-1]
for r in range(self.size):
for c in range(self.size):
if board[r][c] == player:
for i in range(len(rd)):
moves += self.check(board,r,c,rd[i],cd[i],1,
self.opponent(player))
return moves
def playOneGame(self, p1, p2, show):
"""
Given two instances of players, will play out a game
between them. Returns 'B' if black wins, or 'W' if
white wins. When show is true, it will display each move
in the game.
"""
self.reset()
p1.initialize('B')
p2.initialize('W')
log = open(p1.name + "vs" + p2.name + ".log", "w")
log.write(p1.name + " vs " + p2.name +"\n")
print p1.name, "vs", p2.name
        try:
            while True:
                log.write(str(self))
                log.write("\nplayer B's turn\n")
                if show:
                    print self
                    print "player B's turn"
                move = p1.getMove(self.board)
                if move == []:
                    log.write("Game over: " + p1.name + " loses.\n")
                    print "Game over"
                    return 'W'
                try:
                    self.makeMove('B', move)
                except KonaneError:
                    log.write("Game over: Invalid move by " + p1.name + "\n")
                    log.write(str(move) + "\n")
                    log.write(str(self))
                    print "Game over: Invalid move by", p1.name
                    print move
                    print self
                    return 'W'
                log.write(str(move) + "\n")
                log.write(str(self))
                log.write("\nplayer W's turn\n")
                if show:
                    print move
                    print
                    print self
                    print "player W's turn"
                move = p2.getMove(self.board)
                if move == []:
                    log.write("Game over: " + p2.name + " loses.\n")
                    print "Game over"
                    return 'B'
                try:
                    self.makeMove('W', move)
                except KonaneError:
                    log.write("Game over: Invalid move by " + p2.name + "\n")
                    log.write(str(move) + "\n")
                    log.write(str(self))
                    print "Game over: Invalid move by", p2.name
                    print move
                    print self
                    return 'B'
                log.write(str(move) + "\n")
                if show:
                    print move
                    print
        finally:
            # the loop only exits via return, so close the log on every exit path
            log.close()
def playNGames(self, n, p1, p2, show):
"""
Will play out n games between player p1 and player p2.
The players alternate going first. Prints the total
number of games won by each player.
"""
first = p1
second = p2
for i in range(n):
print "Game", i
winner = self.playOneGame(first, second, show)
if winner == 'B':
first.won()
second.lost()
print first.name, "wins"
else:
first.lost()
second.won()
print second.name, "wins"
first, second = second, first
class Player:
"""
A base class for Konane players. All players must implement
    the initialize and getMove methods.
"""
def __init__(self):
self.name = "Player"
self.wins = 0
self.losses = 0
def results(self):
result = self.name
result += " Wins:" + str(self.wins)
result += " Losses:" + str(self.losses)
return result
def lost(self):
self.losses += 1
def won(self):
self.wins += 1
def reset(self):
self.wins = 0
self.losses = 0
def initialize(self, side):
"""
Records the player's side, either 'B' for black or
'W' for white. Should also set the name of the player.
"""
pass
def getMove(self, board):
"""
Given the current board, should return a valid move.
"""
pass
class SimplePlayer(Konane, Player):
"""
Always chooses the first move from the set of possible moves.
"""
def initialize(self, side):
self.side = side
self.name = "Simple"
def getMove(self, board):
moves = self.generateMoves(board, self.side)
n = len(moves)
if n == 0:
return []
else:
return moves[0]
class RandomPlayer(Konane, Player):
"""
Chooses a random move from the set of possible moves.
"""
def initialize(self, side):
self.side = side
self.name = "Random"
def getMove(self, board):
moves = self.generateMoves(board, self.side)
n = len(moves)
if n == 0:
return []
else:
return moves[random.randrange(0, n)]
class HumanPlayer(Konane, Player):
"""
Prompts a human player for a move.
"""
def initialize(self, side):
self.side = side
self.name = "Human"
def getMove(self, board):
moves = self.generateMoves(board, self.side)
while True:
print "Possible moves:", moves
n = len(moves)
if n == 0:
print "You must concede"
return []
index = input("Enter index of chosen move (0-"+ str(n-1) +
") or -1 to concede: ")
if index == -1:
return []
if 0 <= index <= (n-1):
print "returning", moves[index]
return moves[index]
else:
print "Invalid choice, try again."
if __name__ == '__main__':
game = Konane(8)
game.playOneGame(RandomPlayer(8), HumanPlayer(8), True)
| okiyama/EEA-Opponent-Modeling | Konane/mycode/konane.py | Python | gpl-2.0 | 12,968 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-22 16:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(blank=True, max_length=254, unique=True, verbose_name='email address'),
),
]
| auto-mat/klub | local_migrations/migrations_auth/0009_auto_20170822_1628.py | Python | gpl-3.0 | 514 |
import asyncio
from unittest import mock
import pytest
from waterbutler.core import utils
class TestAsyncRetry:
@pytest.mark.asyncio
async def test_returns_success(self):
mock_func = mock.Mock(return_value='Foo')
retryable = utils.async_retry(5, 0, raven=None)(mock_func)
x = await retryable()
assert x == 'Foo'
assert mock_func.call_count == 1
@pytest.mark.asyncio
async def test_retries_until(self):
mock_func = mock.Mock(side_effect=[Exception(), 'Foo'])
retryable = utils.async_retry(5, 0, raven=None)(mock_func)
x = await retryable()
assert x == 'Foo'
assert mock_func.call_count == 2
@pytest.mark.asyncio
async def test_retries_then_raises(self):
mock_func = mock.Mock(side_effect=Exception('Foo'))
retryable = utils.async_retry(5, 0, raven=None)(mock_func)
with pytest.raises(Exception) as e:
coro = await retryable()
assert e.type == Exception
assert e.value.args == ('Foo',)
assert mock_func.call_count == 6
@pytest.mark.asyncio
async def test_retries_by_its_self(self):
mock_func = mock.Mock(side_effect=Exception())
retryable = utils.async_retry(8, 0, raven=None)(mock_func)
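        # not awaited on purpose: async_retry schedules its own retries on the event loop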
retryable()
await asyncio.sleep(.1)
assert mock_func.call_count == 9
    @pytest.mark.asyncio
    async def test_docstring_survives(self):
async def mytest():
'''This is a docstring'''
pass
retryable = utils.async_retry(8, 0, raven=None)(mytest)
assert retryable.__doc__ == '''This is a docstring'''
@pytest.mark.asyncio
async def test_kwargs_work(self):
async def mytest(mack, *args, **kwargs):
mack()
assert args == ('test', 'Foo')
assert kwargs == {'test': 'Foo', 'baz': 'bam'}
return True
retryable = utils.async_retry(8, 0, raven=None)(mytest)
merk = mock.Mock(side_effect=[Exception(''), 5])
fut = retryable(merk, 'test', 'Foo', test='Foo', baz='bam')
assert await fut
assert merk.call_count == 2
@pytest.mark.asyncio
async def test_all_retry(self):
mock_func = mock.Mock(side_effect=Exception())
retryable = utils.async_retry(8, 0, raven=None)(mock_func)
retryable()
retryable()
await asyncio.sleep(.1)
assert mock_func.call_count == 18
| TomBaxter/waterbutler | tests/core/test_utils.py | Python | apache-2.0 | 2,451 |
# various imported libraries
from django import forms
from django.contrib.auth.models import User
from share.models import File
from datetime import date, timedelta
from captcha.fields import CaptchaField
# Form for uploading the files
class FileForm(forms.Form):
#File
file = forms.FileField(label="File to upload")
# whether the password is set or not
password_set = forms.BooleanField(label="Set password ?", required=False)
# setting the password
password = forms.CharField(label="", widget=forms.PasswordInput, required=False)
# setting the expiry date
    expiry_date = forms.DateField(label="Expiry date", initial=lambda: date.today() + timedelta(days=7))  # callable, so the default is computed per request rather than once at import time
# making public
public = forms.BooleanField(label="Make Public", required=False)
# captcha
captcha = CaptchaField(id_prefix='file')
class Meta:
model = File
fields = ['file', 'password_set', 'password', 'expiry_date', 'public', 'captcha']
# Form for user registration
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'password']
| roopansh/rshare | share/forms.py | Python | mit | 1,195 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-24 19:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Collection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
]
| MarkDavidson/taxii2_demo | taxii2/migrations/0001_initial.py | Python | mit | 836 |
import pytest
from petra.place import Place
def test_place():
place = Place("place_0")
assert place.name == "place_0"
assert repr(place) == "Place('place_0')"
| simone-campagna/petra | tests/test_place.py | Python | apache-2.0 | 173 |
# -*- coding: utf-8 -*-
import os
# Reaction-Diffusion Simulation Using Gray-Scott Model
# https://en.wikipedia.org/wiki/Reaction-diffusion_system
# http://www.labri.fr/perso/nrougier/teaching/numpy/numpy.html#
# FB - 20160130
import random
import numpy as np
from PIL import Image, ImageDraw
n = 256
imgx = n
imgy = n # image size
image = Image.new("RGB", (imgx, imgy))
draw = ImageDraw.Draw(image)
pixels = image.load()
steps = 10000
params = []
params.append((0.16, 0.08, 0.035, 0.065)) # Bacteria 1
params.append((0.14, 0.06, 0.035, 0.065)) # Bacteria 2
params.append((0.16, 0.08, 0.060, 0.062)) # Coral
params.append((0.19, 0.05, 0.060, 0.062)) # Fingerprint
params.append((0.10, 0.10, 0.018, 0.050)) # Spirals
params.append((0.12, 0.08, 0.020, 0.050)) # Spirals Dense
params.append((0.10, 0.16, 0.020, 0.050)) # Spirals Fast
params.append((0.16, 0.08, 0.020, 0.055)) # Unstable
params.append((0.16, 0.08, 0.050, 0.065)) # Worms 1
params.append((0.16, 0.08, 0.054, 0.063)) # Worms 2
params.append((0.16, 0.08, 0.035, 0.060)) # Zebrafish
(Du, Dv, F, k) = random.choice(params)
Z = np.zeros((n + 2, n + 2), [('U', np.double), ('V', np.double)])
U, V = Z['U'], Z['V']
u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]
r = 20
u[...] = 1.0
U[n / 2 - r:n / 2 + r, n / 2 - r:n / 2 + r] = 0.50
V[n / 2 - r:n / 2 + r, n / 2 - r:n / 2 + r] = 0.25
u += 0.05 * np.random.random((n, n))
v += 0.05 * np.random.random((n, n))
p = 0
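# Gray-Scott update (forward Euler, dt = 1):
#   du/dt = Du*Lap(u) - u*v^2 + F*(1 - u)
#   dv/dt = Dv*Lap(v) + u*v^2 - (F + k)*v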
for i in xrange(steps):
Lu = (U[0:-2, 1:-1] +
U[1:-1, 0:-2] - 4 * U[1:-1, 1:-1] + U[1:-1, 2:] +
U[2:, 1:-1])
Lv = (V[0:-2, 1:-1] +
V[1:-1, 0:-2] - 4 * V[1:-1, 1:-1] + V[1:-1, 2:] +
V[2:, 1:-1])
uvv = u * v * v
u += (Du * Lu - uvv + F * (1 - u))
v += (Dv * Lv + uvv - (F + k) * v)
pn = 100 * (i + 1) / steps # percent completed
if pn != p:
p = pn
print("%" + str(p).zfill(2))
# paint the final state
vMin = V.min()
vMax = V.max()
for iy in range(imgy):
for ix in range(imgx):
w = V[iy, ix]
c = int(255 * (w - vMin) / (vMax - vMin))
pixels[ix, iy] = (c, c, c)
label = "Du=" + str(Du) + " Dv=" + str(Dv) + " F=" + str(F) + " k=" + str(k)
draw.text((0, 0), label, (0, 255, 0))
image.save("ReactionDiffusionSim.png", "PNG")
os.system("pause")
| NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse à plusieurs variables/Équation aux dérivées partielles/Système à réaction-diffusion/reaction_diffusion_numpy.py | Python | gpl-3.0 | 2,280 |
# pylint: disable=missing-module-docstring, missing-class-docstring
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('substitutions', '0003_auto_20171120_0745'),
]
operations = [
migrations.AlterModelOptions(
name='apps',
options={
'permissions': (('can_view', 'Can view the substitution application'),),
'verbose_name': 'Application',
'verbose_name_plural': 'Applications',
},
),
]
| studybuffalo/studybuffalo | study_buffalo/substitutions/migrations/0004_auto_20171120_1205.py | Python | gpl-3.0 | 593 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ConfigParser import ConfigParser
from parser.logParser import Parser, LogObserverPlugin
from os import path
from importlib import import_module
import sys
def import_plugins(plug_list):
"""
    Given a list of plugins to activate, try to import the corresponding
    modules from the plugins folder.
"""
import_base = path.join(path.dirname(path.abspath(__file__)), 'plugins')
sys.path.append(import_base)
print "Cargando plugins"
for plug in plug_list:
try:
import_module(plug)
print "{0}: OK".format(plug)
except ImportError, e:
print "{0}: ERR {1}".format(plug, e)
except ValueError, e:
            # silently skip if the plugin name is empty
pass
def main():
config = ConfigParser()
    # read the configuration file
    config.read('config.ini')
    # get the list of plugins
    p_list = config.get('main', 'plugins')
    p_list = p_list.split(',')
    # load the plugins
    import_plugins(p_list)
    # initialize the parser
    par = Parser(config.get('main', 'logfile'))
    # initialize each plugin
    plugs = LogObserverPlugin.get_plugins(parser=par, config=config)
    # start processing the logs
    print "Parsing..."
    par.parse()
    # notify each plugin so it writes its output files
    print "Writing output"
    for p in plugs:
        p.writeOutput()
    print "Done"
return 0
if __name__ == '__main__':
main()
| ybenitezf/miner | miner.py | Python | gpl-2.0 | 1,564 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from collections import defaultdict
from six.moves import range
from zincutils.zinc_analysis import (APIs, Compilations, CompileSetup, Relations,
SourceInfos, Stamps, ZincAnalysis)
class ZincAnalysisParser(object):
"""Parses a zinc analysis file."""
class ParseError(Exception):
pass
def parse_from_path(self, infile_path):
"""Parse a ZincAnalysis instance from a text file."""
with open(infile_path, 'rb') as infile:
return self.parse(infile)
def parse(self, infile):
"""Parse a ZincAnalysis instance from an open text file."""
def parse_element(cls):
parsed_sections = [self._parse_section(infile, header) for header in cls.headers]
return cls(parsed_sections)
self._verify_version(infile)
compile_setup = parse_element(CompileSetup)
relations = parse_element(Relations)
stamps = parse_element(Stamps)
apis = parse_element(APIs)
source_infos = parse_element(SourceInfos)
compilations = parse_element(Compilations)
return ZincAnalysis(compile_setup, relations, stamps, apis, source_infos, compilations)
def parse_products(self, infile):
"""An efficient parser of just the products section."""
self._verify_version(infile)
return self._find_repeated_at_header(infile, b'products')
def parse_deps(self, infile, classes_dir):
self._verify_version(infile)
# Note: relies on the fact that these headers appear in this order in the file.
bin_deps = self._find_repeated_at_header(infile, b'binary dependencies')
src_deps = self._find_repeated_at_header(infile, b'direct source dependencies')
ext_deps = self._find_repeated_at_header(infile, b'direct external dependencies')
# TODO(benjy): Temporary hack until we inject a dep on the scala runtime jar.
scalalib_re = re.compile(r'scala-library-\d+\.\d+\.\d+\.jar$')
filtered_bin_deps = defaultdict(list)
for src, deps in bin_deps.iteritems():
filtered_bin_deps[src] = filter(lambda x: scalalib_re.search(x) is None, deps)
transformed_ext_deps = {}
def fqcn_to_path(fqcn):
return os.path.join(classes_dir, fqcn.replace(b'.', os.sep) + b'.class')
for src, fqcns in ext_deps.items():
transformed_ext_deps[src] = [fqcn_to_path(fqcn) for fqcn in fqcns]
ret = defaultdict(list)
for d in [filtered_bin_deps, src_deps, transformed_ext_deps]:
for src, deps in d.items():
ret[src].extend(deps)
return ret
def rebase_from_path(self, infile_path, outfile_path, pants_home_from, pants_home_to, java_home=None):
with open(infile_path, 'rb') as infile:
with open(outfile_path, 'wb') as outfile:
self.rebase(infile, outfile, pants_home_from, pants_home_to, java_home)
def rebase(self, infile, outfile, pants_home_from, pants_home_to, java_home=None):
self._verify_version(infile)
outfile.write(ZincAnalysis.FORMAT_VERSION_LINE)
def rebase_element(cls):
for header in cls.headers:
self._rebase_section(cls, header, infile, outfile, pants_home_from, pants_home_to, java_home)
rebase_element(CompileSetup)
rebase_element(Relations)
rebase_element(Stamps)
rebase_element(APIs)
rebase_element(SourceInfos)
rebase_element(Compilations)
def _rebase_section(self, cls, header, lines_iter, outfile,
pants_home_from, pants_home_to, java_home=None):
# Booleans describing the rebasing logic to apply, if any.
rebase_pants_home_anywhere = header in cls.pants_home_anywhere
rebase_pants_home_prefix = header in cls.pants_home_prefix_only
filter_java_home_anywhere = java_home and header in cls.java_home_anywhere
filter_java_home_prefix = java_home and header in cls.java_home_prefix_only
# Check the header and get the number of items.
line = next(lines_iter)
if header + b':\n' != line:
raise self.ParseError('Expected: "{}:". Found: "{}"'.format(header, line))
n = self._parse_num_items(next(lines_iter))
# Iterate over the lines, applying rebasing/dropping logic as required.
rebased_lines = []
num_rebased_items = 0
for _ in range(n):
line = next(lines_iter)
drop_line = ((filter_java_home_anywhere and java_home in line) or
(filter_java_home_prefix and line.startswith(java_home)))
if not drop_line:
if rebase_pants_home_anywhere:
rebased_line = line.replace(pants_home_from, pants_home_to)
elif rebase_pants_home_prefix and line.startswith(pants_home_from):
rebased_line = pants_home_to + line[len(pants_home_from):]
else:
rebased_line = line
rebased_lines.append(rebased_line)
num_rebased_items += 1
if not cls.inline_vals: # These values are blobs and never need to be rebased.
rebased_lines.append(next(lines_iter))
elif not cls.inline_vals:
next(lines_iter) # Also drop the non-inline value.
# Write the rebased lines back out.
outfile.write(header + b':\n')
outfile.write(b'{} items\n'.format(num_rebased_items))
chunk_size = 10000
for i in range(0, len(rebased_lines), chunk_size):
outfile.write(b''.join(rebased_lines[i:i+chunk_size]))
def _find_repeated_at_header(self, lines_iter, header):
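    """Skip forward to the given header, then parse and return that section."""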
header_line = header + b':\n'
while next(lines_iter) != header_line:
pass
return self._parse_section(lines_iter, expected_header=None)
def _verify_version(self, lines_iter):
version_line = next(lines_iter)
if version_line != ZincAnalysis.FORMAT_VERSION_LINE:
raise self.ParseError('Unrecognized version line: ' + version_line)
def _parse_section(self, lines_iter, expected_header=None):
"""Parse a single section."""
if expected_header:
line = next(lines_iter)
if expected_header + b':\n' != line:
raise self.ParseError('Expected: "{}:". Found: "{}"'.format(expected_header, line))
n = self._parse_num_items(next(lines_iter))
relation = defaultdict(list) # Values are lists, to accommodate relations.
for _ in range(n):
k, _, v = next(lines_iter).partition(b' -> ')
if len(v) == 1: # Value on its own line.
v = next(lines_iter)
relation[k].append(v[:-1])
return relation
_num_items_re = re.compile(r'(\d+) items\n')
def _parse_num_items(self, line):
"""Parse a line of the form '<num> items' and returns <num> as an int."""
matchobj = self._num_items_re.match(line)
if not matchobj:
raise self.ParseError('Expected: "<num> items". Found: "{0}"'.format(line))
return int(matchobj.group(1))
| pantsbuild/zincutils | zincutils/zinc_analysis_parser.py | Python | apache-2.0 | 6,902 |
# This script reads the carrier database
# and display it along a path in histogram form
# along with a representation of the carriers in energy space
from __future__ import print_function
from yambopy import *
import matplotlib.gridspec as gridspec
from matplotlib.colors import Normalize
from scipy.optimize import curve_fit
import os
############
# SETTINGS #
############
folder = 'rt-30x30'
calc = 'QSSIN-100.0fs-2.08eV-300K-DG' # Where RT carrier output is
path = [[0.0,0.0,0.0],[0.5,0.0,0.0],[0.33333,0.33333,0.0],[0.0,0.0,0.0]]
nbv = 2 ; nbc = 2 # nb of valence and conduction bands
occ_scaling = 1 # max occupation will be 1eV high
degen_thres = 0.1 # Energy below which two bands are considered degenerate
########
# INIT #
########
# For saving pictures
os.system('mkdir -p occupations/%s/%s'%(folder,calc))
# Instance containing bandstructure (as used in RT sim) and occupations
yrt = YamboRTDB(folder=folder,calc=calc)
yrt.get_path(path) # Generates kindex
### aliases
times = [i * 1e15 for i in yrt.times] # carriers output times, in fs
nbands = yrt.nbands # number of bands in the RT simulation
if nbv+nbc != nbands:
raise NameError('Incompatible number of bands, set nbv and nbc in script.')
## 'path-plot' variables
kindex = yrt.bands_indexes # kpoint indexes (in order) to draw path
eigenvalues = yrt.eigenvalues[kindex,:] # eigenvalues of the bands included in the RT simulation
#
max_occ = np.amax(yrt.occupations[:,kindex,:]) # used to size the distribution plots
occupations = yrt.occupations[:,kindex,:]/max_occ*occ_scaling # format time,kindex,band index (from 0 to nbands, only on path)
norm=Normalize(vmin=0, vmax=occ_scaling, clip=False) # normalization class for the color gradient on bands
#
xocc = np.arange(len(kindex)) # array of ints to plot occupation on path properly
##
## 'fit' variables and function
# FD distrib for fit
def fermi_dirac(E,a,T): # declare E first for fit
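    # 'a' acts as the chemical potential and 'T' as kB*T, both in eV (converted to Kelvin via KtoeV below)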
return 1/(1+np.exp((E-a)/T))
#
KtoeV = 8.61733e-5
#
# xeng is an array of values to plot the fit properly
xeng = np.linspace(np.amin(eigenvalues[:,list(range(nbv))]), np.amax(eigenvalues[:,list(range(nbv,nbands))]),1000)
##
##############
# EXT. FIELD #
##############
# The external field is read from the o- file
ext = np.loadtxt('%s/%s/pulse/o-pulse.external_field'%(folder,calc))
field = ext[:,2]/max(abs(ext[:,2])) # polarization : x=1,y=2,z=3
##################
# ENERGY DISTRIB #
##################
# Sort the (n,k) pairs between positive and negative energies
# (If the same energy appears twice, it must not be summed over)
list_e=[] ; list_h=[]
for k in range(yrt.nkpoints):
for n in range(yrt.nbands):
e = yrt.eigenvalues[k,n]
if e<=0.0:
list_h.append((k,n))
else:
list_e.append((k,n))
# Build the occupation tables occ_x[t,(nk)_index,(e|occ)]
occ_e = np.zeros((len(times),len(list_e),2))
for t in range(len(times)):
for i,(k,n) in enumerate(list_e):
occ_e[t,i,0]=yrt.eigenvalues[k,n]
occ_e[t,i,1]=yrt.occupations[t,k,n]
occ_h = np.zeros((len(times),len(list_h),2))
for t in range(len(times)):
for i,(k,n) in enumerate(list_h):
occ_h[t,i,0]=yrt.eigenvalues[k,n]
occ_h[t,i,1]=yrt.occupations[t,k,n]
# *(-1) on holes to fit the same way as electrons
occ_h *= -1
#################
# BAR PLOT DATA #
#################
# occupations in CBs/VBs are summed for easier reading if there are more than one
# Recall that 'occupations' was normalized then multiplied by occ_scaling (for esthetics)
if nbv > 1:
# one entry per band +1 for the total occ
occ_v = np.zeros((len(times),len(kindex),nbv+1))
occ_c = np.zeros((len(times),len(kindex),nbc+1))
for n in range(nbv):
occ_v[:,:,n] = -occupations[:,:,n] # minus sign to get positive occupations
np.add(occ_v[:,:,n],occ_v[:,:,nbv],occ_v[:,:,nbv]) # each time we add the occ of the current band to the total
for n in range(nbc):
occ_c[:,:,n] = occupations[:,:,n+nbv] # +nbv to read CBs
np.add(occ_c[:,:,n],occ_c[:,:,nbc],occ_c[:,:,nbc]) # each time we add the occ of the current band to the total
####################
# TIME LOOP & PLOT #
####################
# Gridspec allows to place subplots on a grid
# spacing for exemple can be customised
gs = gridspec.GridSpec(9, 8)
# y range for band structure & energy plots
ymin_v= np.amin(eigenvalues[:,:nbv])-0.1
ymin_c= np.amin(eigenvalues[:,nbv:])-0.1
ymax_v= max(np.amax(eigenvalues[:,:nbv])+np.amax(occ_c[:,:,nbv:])+0.1, np.amax(eigenvalues[:,:nbv])+0.1)
ymax_c= max(np.amin(eigenvalues[:,nbv:])+np.amax(occ_c[:,:,nbv:])+0.1, np.amax(eigenvalues[:,nbv:])+0.1)
###
for t in range(len(times)):
i=t
print(times[i])
name = 'occupations/'+folder+'/'+calc+'/%d.png' % (times[t])
fig = plt.figure()
fig.suptitle('Occupation of the bands and fit to the Fermi-Dirac distribution',fontsize=14,ha='center')
####### bandstructure w/ occupation plot
ax1c = plt.subplot(gs[0:4,0:-2])
ax1v = plt.subplot(gs[4:8,0:-2])
# remove x ticks
ax1c.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax1v.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
# set x range
ax1c.set_xlim((0,xocc[-1]))
ax1v.set_xlim((0,xocc[-1]))
# y range is defined with ax3 and ax4 (they share y axis with ax1)
# Plot band structure
ax1v.plot(eigenvalues[:,:nbv],'k-',lw=2,zorder=0)
ax1c.plot(eigenvalues[:,nbv:],'k-',lw=2,zorder=0)
## Colored spots when degen is beyond degen_thres
# For that, we compare eigenvalues of (k,n) with (k,n+1)
# note : if more than 2 VB/CB, this scatter scheme might not be optimal (e.g. with 1 + 2 degen bands)
# VB
for n in range(nbv-1): # we compare n and n+1 <= nbv
# bool array with condition on degeneracy
diff_eigen = abs(eigenvalues[:,n]-eigenvalues[:,n+1])
# plot for points that respect the condition
ax1v.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,n] ,s=30, c=occ_v[t,diff_eigen>degen_thres,n] ,cmap='plasma',alpha=1,edgecolors='none',norm=norm)
ax1v.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,n+1],s=30, c=occ_v[t,diff_eigen>degen_thres,n+1],cmap='plasma',alpha=1,edgecolors='none',norm=norm)
# CB
for n in range(nbc-1):
diff_eigen = abs(eigenvalues[:,nbv+n]-eigenvalues[:,nbv+n+1])
ax1c.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,nbv+n] ,s=30, c=occ_c[t,diff_eigen>degen_thres,n] ,cmap='plasma',alpha=1,edgecolors='none',norm=norm)
ax1c.scatter(xocc[diff_eigen>degen_thres],eigenvalues[diff_eigen>degen_thres,nbv+n+1],s=30, c=occ_c[t,diff_eigen>degen_thres,n+1],cmap='plasma',alpha=1,edgecolors='none',norm=norm)
## occupation in the form of histograms
# small y-shift for better reading
ax1v.bar(xocc,occ_v[t,:,nbv],width=0.4,bottom=eigenvalues[:,nbv-1]+0.1,color='blue',edgecolor='none')
ax1c.bar(xocc,occ_c[t,:,nbc],width=0.4,bottom=eigenvalues[:,nbands-1]+0.1,color='red',edgecolor='none')
# text and labels
fig.text(0.05,0.6,'Energy (eV)',size=16,rotation='vertical')
fig.text(0.50,0.91, '%d fs'%times[t],size=16)
######## field plot
ax2 = plt.subplot(gs[-1,:])
# remove ticks and labels
ax2.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax2.tick_params(axis='y',which='both',left='off',right='off',labelleft='off')
# text
ax2.set_ylabel('Field')
# frame size
ax2.set_xlim((0,times[-1]))
ax2.set_ylim((-1.3,1.3))
ax2.plot(field[:int(times[t])])
## Plot of the occupation as a function of energy (rotated to match the band structure)
ax3 = plt.subplot(gs[0:4,-2:],sharey=ax1c)
ax4 = plt.subplot(gs[4:8,-2:],sharey=ax1v)
# plot the data
try: # does not break if fit is not found
fit,cov = curve_fit(fermi_dirac,occ_e[i,:,0],occ_e[i,:,1])
except RuntimeError:
fit=np.array([0,0])
ax3.scatter(occ_e[i,:,1],occ_e[i,:,0],s=10,color='black')
ax3.plot(fermi_dirac(xeng,fit[0],fit[1]),xeng,'r-')
ax3.text(0.5,0.9,'Electrons\nT = %d K'%(fit[1]/KtoeV),transform=ax3.transAxes,ha='center',va='center')
try:
fit,cov = curve_fit(fermi_dirac,occ_h[i,:,0],occ_h[i,:,1])
except RuntimeError:
fit=np.array([0,0])
ax4.scatter(occ_h[i,:,1],-occ_h[i,:,0],color='black')
ax4.plot(fermi_dirac(xeng,fit[0],fit[1]),-xeng,'b-')
ax4.text(0.5,0.1,'Holes\nT = %d K'%(fit[1]/KtoeV),transform=ax4.transAxes,ha='center',va='center')
# set x and y range
ax4.set_xlim(-0.1*max_occ,1.1*max_occ)
ax3.set_xlim(-0.1*max_occ,1.1*max_occ)
ax3.set_ylim(( ymin_c,ymax_c ))
ax4.set_ylim(( ymin_v,ymax_v ))
# hide some ticks/labels
ax3.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax3.tick_params(axis='y',labelleft='off',labelright='off')
ax4.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
ax4.tick_params(axis='y',labelleft='off',labelright='off')
plt.savefig( name ,transparent=False,dpi=300)
print(name)
#plt.show()
plt.close(fig)
| alexandremorlet/yambopy | scripts/realtime/plot_occ.py | Python | bsd-3-clause | 9,213 |
import json
import xml.dom.minidom as minidom
class Article(object):
itemTag = {"content_type":"text/x-wiki","type":"article"}
#itemTag = {"content_type":"text/x-wiki","type":"article","wikiident":"lo","url":"http://asdlkf/","source-url":"http://sourceurl/","source":"http://source/"}
attributes = {}
include = True #""" True if this article should be included in the metabook """
def __init__(self,attributes):
self.attributes = attributes
def getInclude(self):
""" @return True if this article should be included in the metabook """
return self.include
def toDict(self):
#if not self.include: return None
article = self.itemTag.copy()
article.update(self.attributes) # merge dicts
return article
class Metabook(object):
"""
I am your metabook and wish you a pleasant evening.
Sequence of usage:
m = Metabook()
m.loadTemplate(...)
m.loadArticles(xml input)
m.createBook()
m.write(output)
If template, in- and output are files, use fromFileToFile()
"""
ArticleClass = Article # final
artTags = ["title"] # final
m = {} # Dict metabook
template = None
items = []
#source = "" # String input file, xmldump
#dest = "" # FileObject destination of json metabook
def getClone(self):
m = Metabook()
m.template = self.template # No copy() neccessary here
m.ArticleClass = self.ArticleClass
m.artTags = self.artTags
#m.m = self.m.copy()
#m.dest = self.dest
return m
def getArtTags(self,filename,tagnames):
"""
Get Article Tags
Reads all specified tags from an xml file and returns a list of all tags.
@filename XML-file
@tagnames List of String Tagnames
@return List of Dict<String Tagname, String Value>
"""
dom=minidom.parse(filename)
out = []
elements=dom.getElementsByTagName("page")
for element in elements:
tagdict = {}
for tagname in tagnames:
tags = element.getElementsByTagName(tagname)
if len(tags) > 0:
tagdict[tagname] = self.getText(tags[0])
else:
tagdict[tagname] = ""
out.append(tagdict)
return out
def getText(self,element):
"""
@element xml Node
@return String content
"""
return element.childNodes[0].data
def load_data(self,filename):
""" Unserialize data from jsonfile """
with open(filename, "r") as infile:
outdict = json.load(infile)
return outdict
def loadTemplate(self,jsonStruct):
"""
Loads an existing json file at the beginning
@jsonStruct File object
"""
self.template = json.load(jsonStruct)
#self.m = self.load_data(source)
def loadArticles(self,source):
"""
Loads the articles and saves them as objects to self.items
"""
pages = self.getArtTags(source,self.artTags)
self.items = [self.ArticleClass(page) for page in pages]
"""return
items=[]
for page in pages:
item = self.ArticleClass(page)
if item.getInclude():
items.append(item.toDict())
self.m["items"] = items
"""
def createBook(self):
"""
Convert all article objects to dicts and merge them with the template.
The result is saved to self.m
"""
if self.template is None:
self.m = []
else:
self.m = self.template.copy()
self.m["items"] = []
for item in self.items:
if item.getInclude():
self.m["items"].append(item.toDict())
def __call__(self,source):
"""
Creates a metabook for @source and writes it to self.m. To continue,
use write()
@source xml-dump
"""
self.loadArticles(source)
self.createBook()
def write(self,dest):
json.dump(self.m,dest)
    def fromFileToFile(self, jsonStructFile, xmldump, output):
"""
Creates a Metabook from a file and writes it to a file.
Short cut Function. This loads a metabook template file, creates the
metabook content from @xmldump and writes the book to @output.
@jsonStructFile String path to Metabook template
@xmldump String path
@output String path
"""
#m = MetabookTranslated()
with open(jsonStructFile,"r") as f:
self.loadTemplate(f)
self.__call__(xmldump)
with open(output,"w") as f:
self.write(f)
| Limezero/libreoffice | helpcontent2/wiki-to-help/metabook.py | Python | gpl-3.0 | 4,798 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2008, Jarek Zgoda <jarek.zgoda@gmail.com>
__revision__ = '$Id: models.py 28 2009-10-22 15:03:02Z jarek.zgoda $'
import re
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.timezone import now as timezone_now
from zerver.lib.send_email import send_email
from zerver.lib.utils import generate_random_token
from zerver.models import PreregistrationUser, EmailChangeStatus
from typing import Any, Dict, Optional, Text, Union
B16_RE = re.compile('^[a-f0-9]{40}$')
def generate_key():
# type: () -> Text
return generate_random_token(40)
class ConfirmationManager(models.Manager):
url_pattern_name = 'confirmation.views.confirm'
def confirm(self, confirmation_key):
# type: (str) -> Union[bool, PreregistrationUser, EmailChangeStatus]
if B16_RE.search(confirmation_key):
try:
confirmation = self.get(confirmation_key=confirmation_key)
except self.model.DoesNotExist:
return False
time_elapsed = timezone_now() - confirmation.date_sent
if time_elapsed.total_seconds() > settings.EMAIL_CONFIRMATION_DAYS * 24 * 3600:
return False
obj = confirmation.content_object
obj.status = getattr(settings, 'STATUS_ACTIVE', 1)
obj.save(update_fields=['status'])
return obj
return False
def get_link_for_object(self, obj, host):
# type: (Union[ContentType, int], str) -> Text
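        """Create a Confirmation for the given object and return its activation URL."""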
key = generate_key()
self.create(content_object=obj, date_sent=timezone_now(), confirmation_key=key)
return self.get_activation_url(key, host)
def get_activation_url(self, confirmation_key, host):
# type: (Text, str) -> Text
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
host,
reverse(self.url_pattern_name,
kwargs={'confirmation_key': confirmation_key}))
class EmailChangeConfirmationManager(ConfirmationManager):
url_pattern_name = 'zerver.views.user_settings.confirm_email_change'
class Confirmation(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
date_sent = models.DateTimeField('sent')
confirmation_key = models.CharField('activation key', max_length=40)
objects = ConfirmationManager()
class Meta(object):
verbose_name = 'confirmation email'
verbose_name_plural = 'confirmation emails'
def __unicode__(self):
# type: () -> Text
return 'confirmation email for %s' % (self.content_object,)
class EmailChangeConfirmation(Confirmation):
class Meta(object):
proxy = True
objects = EmailChangeConfirmationManager()
# Confirmation pathways for which there is no content_object that we need to
# keep track of.
def check_key_is_valid(creation_key):
# type: (Text) -> bool
if not RealmCreationKey.objects.filter(creation_key=creation_key).exists():
return False
days_sofar = (timezone_now() - RealmCreationKey.objects.get(creation_key=creation_key).date_created).days
# Realm creation link expires after settings.REALM_CREATION_LINK_VALIDITY_DAYS
if days_sofar <= settings.REALM_CREATION_LINK_VALIDITY_DAYS:
return True
return False
def generate_realm_creation_url():
# type: () -> Text
key = generate_key()
RealmCreationKey.objects.create(creation_key=key, date_created=timezone_now())
return u'%s%s%s' % (settings.EXTERNAL_URI_SCHEME,
settings.EXTERNAL_HOST,
reverse('zerver.views.create_realm',
kwargs={'creation_key': key}))
class RealmCreationKey(models.Model):
creation_key = models.CharField('activation key', max_length=40)
date_created = models.DateTimeField('created', default=timezone_now)
| jrowan/zulip | confirmation/models.py | Python | apache-2.0 | 4,244 |
H, W = map(int, input().split())
N = int(input())
A = [int(x) for x in input().split()]
board = [[0] * W for _ in range(H)]
colors = [i + 1 for i in range(N) for _ in range(A[i])]
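# fill the board in boustrophedon (snake) order so each color's cells stay connected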
for i in range(H):
if i % 2 == 0:
for j in range(W):
board[i][j] = colors[i * W + j]
else:
for j in range(W):
board[i][W-j-1] = colors[i * W + j]
for row in board:
print(*row)
| knuu/competitive-programming | atcoder/arc/arc080_b.py | Python | mit | 409 |
import click
from os import listdir, makedirs, environ
from os.path import isdir, join, exists, abspath
from click import ClickException
from shutil import copytree
def create_at_path(name, path, my):
if exists(path):
raise ClickException("Role %s already exists" % path)
roledir = environ.get("ROLER_ROLEDIR")
if my and roledir and exists(join(roledir, name)):
copytree(join(roledir, name), path)
print("Copied role: %s" % abspath(path))
return
# Create the directory layout
makedirs(join(path, 'defaults'))
makedirs(join(path, 'files'))
makedirs(join(path, 'meta'))
makedirs(join(path, 'tasks'))
makedirs(join(path, 'templates'))
# Create the empty main.yml files
open(join(path, 'defaults', 'main.yml'), 'a').close()
open(join(path, 'meta', 'main.yml'), 'a').close()
open(join(path, 'tasks', 'main.yml'), 'a').close()
# Output created role path
print("Created role: %s" % abspath(path))
@click.command()
@click.argument('name')
@click.option('--my/--no-my', default=False,
help='If ROLER_ROLEDIR envar is set it will copy the role from that directory')
def create_role(name, my):
""" A simple tool that creates an ansible role with the
    recommended directory layout """
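    # if a top-level 'roles' directory exists, create the role inside it;
    # otherwise create it in the current directory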
roles_dir = [member for member in listdir('.')
if isdir(member) and member == 'roles']
if roles_dir:
create_at_path(name, join(".", "roles", name), my)
else:
create_at_path(name, join(".", name), my)
if __name__ == '__main__':
create_role()
| TheDivic/roler | roler.py | Python | mit | 1,592 |
from django.core.management import call_command
from django.test import TestCase
from poradnia.judgements.factories import CourtFactory
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class RunCourtSessionParserTestCase(TestCase):
def test_run_command_basic(self):
court = CourtFactory(name="MyFooCourt")
stdout = StringIO()
call_command("run_court_session_parser", stdout=stdout)
self.assertIn("MyFooCourt", stdout.getvalue())
| watchdogpolska/poradnia | poradnia/judgements/tests/test_commands.py | Python | mit | 507 |
#!/usr/bin/env python
import bisect, os, sys, getopt, infodata, glob
import scipy, scipy.signal, ppgplot
import numpy as Num
from presto import rfft, next2_to_n
from psr_utils import coord_to_string
from optparse import OptionParser
from Pgplot import *
class candidate:
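    """A single-pulse event at a given DM: significance, time/bin, and boxcar width (downfact)."""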
def __init__(self, DM, sigma, time, bin, downfact):
self.DM = DM
self.sigma = sigma
self.time = time
self.bin = bin
self.downfact = downfact
def __str__(self):
return "%7.2f %7.2f %13.6f %10d %3d\n"%\
(self.DM, self.sigma, self.time, self.bin, self.downfact)
def __cmp__(self, other):
# Sort by time (i.e. bin) by default)
return cmp(self.bin, other.bin)
def cmp_sigma(self, other):
#Comparison function to sort candidates by significance
retval = -cmp(self.sigma, other.sigma)
return retval
def fft_convolve(fftd_data, fftd_kern, lo, hi):
"""
fft_convolve(fftd_data, fftd_kern, lo, hi):
Perform a convolution with the complex floating point vectors
'fftd_data' and 'fftd_kern'. The returned vector will start at
at bin 'lo' (must be an integer), and go up to but not
include bin 'hi' (also an integer).
"""
# Note: The initial FFTs should be done like:
# fftd_kern = rfft(kernel, -1)
# fftd_data = rfft(data, -1)
prod = Num.multiply(fftd_data, fftd_kern)
prod.real[0] = fftd_kern.real[0] * fftd_data.real[0]
prod.imag[0] = fftd_kern.imag[0] * fftd_data.imag[0]
return rfft(prod, 1)[lo:hi].astype(Num.float32)
def make_fftd_kerns(downfacts, fftlen):
fftd_kerns = []
for downfact in downfacts:
kern = Num.zeros(fftlen, dtype=Num.float32)
# These offsets produce kernels that give results
# equal to scipy.signal.convolve
if downfact % 2: # Odd number
kern[:downfact/2+1] += 1.0
kern[-(downfact/2):] += 1.0
else: # Even number
kern[:downfact/2+1] += 1.0
if (downfact > 2):
kern[-(downfact/2-1):] += 1.0
# The following normalization preserves the
# RMS=1 characteristic of the data
fftd_kerns.append(rfft(kern / Num.sqrt(downfact), -1))
return fftd_kerns
def prune_related1(hibins, hivals, downfact):
# Remove candidates that are close to other candidates
# but less significant. This one works on the raw
# candidate arrays and uses the single downfact
# that they were selected with.
toremove = set()
for ii in xrange(0, len(hibins)-1):
if ii in toremove: continue
xbin, xsigma = hibins[ii], hivals[ii]
for jj in xrange(ii+1, len(hibins)):
ybin, ysigma = hibins[jj], hivals[jj]
if (abs(ybin-xbin) > downfact/2):
break
else:
if jj in toremove:
continue
if (xsigma > ysigma):
toremove.add(jj)
else:
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for bin in toremove:
del(hibins[bin])
del(hivals[bin])
return hibins, hivals
def prune_related2(dm_candlist, downfacts):
# Remove candidates that are close to other candidates
# but less significant. This one works on the candidate
    # instances and looks at the different downfacts of
    # the different candidates.
toremove = set()
for ii in xrange(0, len(dm_candlist)-1):
if ii in toremove: continue
xx = dm_candlist[ii]
xbin, xsigma = xx.bin, xx.sigma
for jj in xrange(ii+1, len(dm_candlist)):
yy = dm_candlist[jj]
ybin, ysigma = yy.bin, yy.sigma
if (abs(ybin-xbin) > max(downfacts)/2):
break
else:
if jj in toremove:
continue
prox = max([xx.downfact/2, yy.downfact/2, 1])
if (abs(ybin-xbin) <= prox):
if (xsigma > ysigma):
toremove.add(jj)
else:
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for bin in toremove:
del(dm_candlist[bin])
return dm_candlist
def prune_border_cases(dm_candlist, offregions):
# Ignore those that are located within a half-width
# of the boundary between data and padding
#print offregions
toremove = set()
for ii in xrange(len(dm_candlist)-1, -1, -1):
cand = dm_candlist[ii]
loside = cand.bin-cand.downfact/2
hiside = cand.bin+cand.downfact/2
if hiside < offregions[0][0]: break
for off, on in offregions:
if (hiside > off and loside < on):
toremove.add(ii)
# Now zap them starting from the end
toremove = sorted(toremove, reverse=True)
for ii in toremove:
del(dm_candlist[ii])
return dm_candlist
full_usage = """
usage: single_pulse_search.py [options] .dat files _or_ .singlepulse files
[-h, --help] : Display this help
[-m, --maxwidth] : Set the max downsampling in sec (see below for default)
[-p, --noplot] : Look for pulses but do not generate a plot
[-t, --threshold] : Set a different threshold SNR (default=5.0)
[-x, --xwin] : Don't make a postscript plot, just use an X-window
  [-s, --start]      : Only plot events occurring after this time (s)
  [-e, --end]        : Only plot events occurring before this time (s)
[-g, --glob] : Use the files from these glob expressions (in quotes)
[-f, --fast] : Use a less-accurate but much faster method of detrending
[-b, --nobadblocks] : Don't check for bad-blocks (may save strong pulses)
[-d, --detrendlen] : Chunksize for detrending (pow-of-2 in 1000s, default=1)
Perform a single-pulse search (or simply re-plot the results of a
single-pulse search) on a set of de-dispersed time series (.dat
files).
The search attempts to find pulses by matched-filtering the data with
a series of different width boxcar functions. The possible boxcar
sizes are [1, 2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 220, 300]
bins. By default the boxcars <= 30 are used. You can specify
that the larger boxcars are used with the -m (or --maxwidth) option.
The matched filtering (and accounting for all the possible 'phase'
offsets of each boxcar) is accomplished by convolving the boxcars
with the full resolution data. 'Duplicate' candidates from this
process are filtered, leaving only the most significant. The time
series are initially smoothed (by default) using a piecewise linear
fit to the data where each piece is 1000 data points long.
If the input files are .singlepulse files, we won't actually perform
a search, we'll only read in the output .singlepulse files and make
a plot using the information they contain (along with the
corresponding .inf files).
Notes on usage and performance:
-- single_pulse_search.py is tuned for finding *narrow* pulses
(i.e. those of only a few bins width). Because of this, you
should always search appropriately downsampled data (as
recommended by DDplan.py, for instance) where dispersion
smearing is <~ 1 time series bin.
-- the linear-piecewise detrending is very useful in long
observations with modern instrumentation where you can see
long timescale power fluctuations. Strong pulses can skew the
       statistics of the 1000-bin chunks, though, and cause some
suppression in the detection levels of bright pulses (weak
pulses are mostly unaffected since they don't strongly change
the statistics). If your data have no long-timescale
fluctuations (for instance, if you are processing old 1-bit
analog filterbank data which is AC-coupled or if you remove
rednoise via realfft/rednoise/(inverse-)realfft), I recommend
using the -f/--fast flag. And if you want to find wide
pulses, it might be worth making the chunksize bigger (i.e.
4000 or 8000).
-- The bad-block detection and removal code can and does remove
blocks that have very strong, and particularly strong and broad,
pulses in them. It can also quite effectively remove RFI-
infused portions of the data. Whether to turn it on or off
depends on your data. Note that if there are multiple pulses,
only the brightest will usually be "bad-blocked" and removed.
    -- The Fourier-domain matched filtering used here has no phase-
       dependent effects, so a 15-bin pulse can be found with equal
       significance no matter which bin of the time series it starts in.
-- The definition of "sigma" used is possibly slightly different
from that used in other codes for S/N:
sigma = sum(signal-bkgd_level)/RMS/sqrt(boxcar_width)
where the bkgd_level is typically 0 after detrending and RMS=1
after normalization. This definition has the advantage that
you will get (basically) the same sigma for any pulse no
matter how much the input time series has been downsampled as
long as the pulse is still resolved.
Copyright Scott Ransom <sransom@nrao.edu>, 2015
"""
usage = "usage: %prog [options] .dat files _or_ .singlepulse files"
def read_singlepulse_files(infiles, threshold, T_start, T_end):
DMs = []
candlist = []
num_v_DMstr = {}
for ii, infile in enumerate(infiles):
if infile.endswith(".singlepulse"):
filenmbase = infile[:infile.rfind(".singlepulse")]
else:
filenmbase = infile
info = infodata.infodata(filenmbase+".inf")
DMstr = "%.2f"%info.DM
DMs.append(info.DM)
num_v_DMstr[DMstr] = 0
if ii==0:
info0 = info
if os.stat(infile)[6]:
try:
cands = Num.loadtxt(infile)
if len(cands.shape)==1:
cands = Num.asarray([cands])
for cand in cands:
if cand[2] < T_start: continue
if cand[2] > T_end: break
if cand[1] >= threshold:
candlist.append(candidate(*cand))
num_v_DMstr[DMstr] += 1
            except IndexError:  # No candidates in the file
                pass
DMs.sort()
return info0, DMs, candlist, num_v_DMstr
def main():
parser = OptionParser(usage)
parser.add_option("-x", "--xwin", action="store_true", dest="xwin",
default=False, help="Don't make a postscript plot, just use an X-window")
parser.add_option("-p", "--noplot", action="store_false", dest="makeplot",
default=True, help="Look for pulses but do not generate a plot")
parser.add_option("-m", "--maxwidth", type="float", dest="maxwidth", default=0.0,
help="Set the max downsampling in sec (see below for default)")
parser.add_option("-t", "--threshold", type="float", dest="threshold", default=5.0,
help="Set a different threshold SNR (default=5.0)")
parser.add_option("-s", "--start", type="float", dest="T_start", default=0.0,
help="Only plot events occuring after this time (s)")
parser.add_option("-e", "--end", type="float", dest="T_end", default=1e9,
help="Only plot events occuring before this time (s)")
parser.add_option("-g", "--glob", type="string", dest="globexp", default=None,
help="Process the files from this glob expression")
parser.add_option("-f", "--fast", action="store_true", dest="fast",
default=False, help="Use a faster method of de-trending (2x speedup)")
parser.add_option("-b", "--nobadblocks", action="store_false", dest="badblocks",
default=True, help="Don't check for bad-blocks (may save strong pulses)")
parser.add_option("-d", "--detrendlen", type="int", dest="detrendfact", default=1,
help="Chunksize for detrending (pow-of-2 in 1000s)")
(opts, args) = parser.parse_args()
if len(args)==0:
if opts.globexp==None:
print full_usage
sys.exit(0)
else:
args = []
for globexp in opts.globexp.split():
args += glob.glob(globexp)
useffts = True
dosearch = True
if opts.xwin:
pgplot_device = "/XWIN"
else:
pgplot_device = ""
fftlen = 8192 # Should be a power-of-two for best speed
chunklen = 8000 # Must be at least max_downfact less than fftlen
assert(opts.detrendfact in [1,2,4,8,16,32])
detrendlen = opts.detrendfact*1000
if (detrendlen > chunklen):
chunklen = detrendlen
fftlen = int(next2_to_n(chunklen))
blocks_per_chunk = chunklen / detrendlen
overlap = (fftlen - chunklen)/2
worklen = chunklen + 2*overlap # currently it is fftlen...
max_downfact = 30
default_downfacts = [2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150, 220, 300]
if args[0].endswith(".singlepulse"):
filenmbase = args[0][:args[0].rfind(".singlepulse")]
dosearch = False
elif args[0].endswith(".dat"):
filenmbase = args[0][:args[0].rfind(".dat")]
else:
filenmbase = args[0]
# Don't do a search, just read results and plot
if not dosearch:
info, DMs, candlist, num_v_DMstr = \
read_singlepulse_files(args, opts.threshold, opts.T_start, opts.T_end)
orig_N, orig_dt = int(info.N), info.dt
obstime = orig_N * orig_dt
else:
DMs = []
candlist = []
num_v_DMstr = {}
# Loop over the input files
for filenm in args:
if filenm.endswith(".dat"):
filenmbase = filenm[:filenm.rfind(".dat")]
else:
filenmbase = filenm
info = infodata.infodata(filenmbase+".inf")
DMstr = "%.2f"%info.DM
DMs.append(info.DM)
N, dt = int(info.N), info.dt
obstime = N * dt
# Choose the maximum width to search based on time instead
# of bins. This helps prevent increased S/N when the downsampling
# changes as the DM gets larger.
if opts.maxwidth > 0.0:
downfacts = [x for x in default_downfacts if x*dt <= opts.maxwidth]
else:
downfacts = [x for x in default_downfacts if x <= max_downfact]
if len(downfacts) == 0:
downfacts = [default_downfacts[0]]
if (filenm == args[0]):
orig_N = N
orig_dt = dt
if useffts:
fftd_kerns = make_fftd_kerns(default_downfacts, fftlen)
if info.breaks:
offregions = zip([x[1] for x in info.onoff[:-1]],
[x[0] for x in info.onoff[1:]])
                # If the last break spans to the end of the file, don't read it in (it's just padding)
if offregions[-1][1] == N - 1:
N = offregions[-1][0] + 1
outfile = open(filenmbase+'.singlepulse', mode='w')
# Compute the file length in detrendlens
roundN = N/detrendlen * detrendlen
numchunks = roundN / chunklen
# Read in the file
print 'Reading "%s"...'%filenm
timeseries = Num.fromfile(filenm, dtype=Num.float32, count=roundN)
# Split the timeseries into chunks for detrending
numblocks = roundN/detrendlen
timeseries.shape = (numblocks, detrendlen)
stds = Num.zeros(numblocks, dtype=Num.float64)
# de-trend the data one chunk at a time
print ' De-trending the data and computing statistics...'
for ii, chunk in enumerate(timeseries):
if opts.fast: # use median removal instead of detrending (2x speedup)
tmpchunk = chunk.copy()
tmpchunk.sort()
med = tmpchunk[detrendlen/2]
chunk -= med
tmpchunk -= med
else:
# The detrend calls are the most expensive in the program
timeseries[ii] = scipy.signal.detrend(chunk, type='linear')
tmpchunk = timeseries[ii].copy()
tmpchunk.sort()
# The following gets rid of (hopefully) most of the
# outlying values (i.e. power dropouts and single pulses)
# If you throw out 5% (2.5% at bottom and 2.5% at top)
# of random gaussian deviates, the measured stdev is ~0.871
# of the true stdev. Thus the 1.0/0.871=1.148 correction below.
# The following is roughly .std() since we already removed the median
stds[ii] = Num.sqrt((tmpchunk[detrendlen/40:-detrendlen/40]**2.0).sum() /
(0.95*detrendlen))
stds *= 1.148
# sort the standard deviations and separate those with
# very low or very high values
sort_stds = stds.copy()
sort_stds.sort()
            # identify the differences with the largest values (this
            # will split off the chunks with very low and very high stds)
locut = (sort_stds[1:numblocks/2+1] -
sort_stds[:numblocks/2]).argmax() + 1
hicut = (sort_stds[numblocks/2+1:] -
sort_stds[numblocks/2:-1]).argmax() + numblocks/2 - 2
std_stds = scipy.std(sort_stds[locut:hicut])
median_stds = sort_stds[(locut+hicut)/2]
print " pseudo-median block standard deviation = %.2f" % (median_stds)
if (opts.badblocks):
lo_std = median_stds - 4.0 * std_stds
hi_std = median_stds + 4.0 * std_stds
# Determine a list of "bad" chunks. We will not search these.
bad_blocks = Num.nonzero((stds < lo_std) | (stds > hi_std))[0]
print " identified %d bad blocks out of %d (i.e. %.2f%%)" % \
(len(bad_blocks), len(stds),
100.0*float(len(bad_blocks))/float(len(stds)))
stds[bad_blocks] = median_stds
else:
bad_blocks = []
print " Now searching..."
# Now normalize all of the data and reshape it to 1-D
timeseries /= stds[:,Num.newaxis]
timeseries.shape = (roundN,)
# And set the data in the bad blocks to zeros
# Even though we don't search these parts, it is important
# because of the overlaps for the convolutions
for bad_block in bad_blocks:
loind, hiind = bad_block*detrendlen, (bad_block+1)*detrendlen
timeseries[loind:hiind] = 0.0
# Convert to a set for faster lookups below
bad_blocks = set(bad_blocks)
# Step through the data
dm_candlist = []
for chunknum in xrange(numchunks):
loind = chunknum*chunklen-overlap
hiind = (chunknum+1)*chunklen+overlap
# Take care of beginning and end of file overlap issues
if (chunknum==0): # Beginning of file
chunk = Num.zeros(worklen, dtype=Num.float32)
chunk[overlap:] = timeseries[loind+overlap:hiind]
elif (chunknum==numchunks-1): # end of the timeseries
chunk = Num.zeros(worklen, dtype=Num.float32)
chunk[:-overlap] = timeseries[loind:hiind-overlap]
else:
chunk = timeseries[loind:hiind]
# Make a set with the current block numbers
lowblock = blocks_per_chunk * chunknum
currentblocks = set(Num.arange(blocks_per_chunk) + lowblock)
localgoodblocks = Num.asarray(list(currentblocks -
bad_blocks)) - lowblock
# Search this chunk if it is not all bad
if len(localgoodblocks):
# This is the good part of the data (end effects removed)
goodchunk = chunk[overlap:-overlap]
# need to pass blocks/chunklen, localgoodblocks
# dm_candlist, dt, opts.threshold to cython routine
# Search non-downsampled data first
# NOTE: these nonzero() calls are some of the most
# expensive calls in the program. Best bet would
# probably be to simply iterate over the goodchunk
# in C and append to the candlist there.
hibins = Num.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibins += chunknum * chunklen
hiblocks = hibins/detrendlen
# Add the candidates (which are sorted by bin)
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time = bin * dt
dm_candlist.append(candidate(info.DM, val, time, bin, 1))
# Prepare our data for the convolution
if useffts: fftd_chunk = rfft(chunk, -1)
# Now do the downsampling...
for ii, downfact in enumerate(downfacts):
if useffts:
# Note: FFT convolution is faster for _all_ downfacts, even 2
goodchunk = fft_convolve(fftd_chunk, fftd_kerns[ii],
overlap, -overlap)
else:
# The normalization of this kernel keeps the post-smoothing RMS = 1
kernel = Num.ones(downfact, dtype=Num.float32) / \
Num.sqrt(downfact)
smoothed_chunk = scipy.signal.convolve(chunk, kernel, 1)
goodchunk = smoothed_chunk[overlap:-overlap]
#hibins = Num.nonzero(goodchunk>opts.threshold)[0]
hibins = Num.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibins += chunknum * chunklen
hiblocks = hibins/detrendlen
hibins = hibins.tolist()
hivals = hivals.tolist()
# Now walk through the new candidates and remove those
# that are not the highest but are within downfact/2
# bins of a higher signal pulse
hibins, hivals = prune_related1(hibins, hivals, downfact)
# Insert the new candidates into the candlist, but
# keep it sorted...
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time = bin * dt
bisect.insort(dm_candlist,
candidate(info.DM, val, time, bin, downfact))
# Now walk through the dm_candlist and remove the ones that
# are within the downsample proximity of a higher
# signal-to-noise pulse
dm_candlist = prune_related2(dm_candlist, downfacts)
print " Found %d pulse candidates"%len(dm_candlist)
# Get rid of those near padding regions
if info.breaks: prune_border_cases(dm_candlist, offregions)
# Write the pulses to an ASCII output file
if len(dm_candlist):
#dm_candlist.sort(cmp_sigma)
outfile.write("# DM Sigma Time (s) Sample Downfact\n")
for cand in dm_candlist:
outfile.write(str(cand))
outfile.close()
# Add these candidates to the overall candidate list
for cand in dm_candlist:
candlist.append(cand)
num_v_DMstr[DMstr] = len(dm_candlist)
if (opts.makeplot):
# Step through the candidates to make a SNR list
DMs.sort()
snrs = []
for cand in candlist:
if not Num.isinf(cand.sigma):
snrs.append(cand.sigma)
if snrs:
maxsnr = max(int(max(snrs)), int(opts.threshold)) + 3
else:
maxsnr = int(opts.threshold) + 3
# Generate the SNR histogram
snrs = Num.asarray(snrs)
(num_v_snr, lo_snr, d_snr, num_out_of_range) = \
scipy.stats.histogram(snrs,
int(maxsnr-opts.threshold+1),
[opts.threshold, maxsnr])
snrs = Num.arange(maxsnr-opts.threshold+1, dtype=Num.float64) * d_snr \
+ lo_snr + 0.5*d_snr
num_v_snr = num_v_snr.astype(Num.float32)
num_v_snr[num_v_snr==0.0] = 0.001
# Generate the DM histogram
num_v_DM = Num.zeros(len(DMs))
for ii, DM in enumerate(DMs):
num_v_DM[ii] = num_v_DMstr["%.2f"%DM]
DMs = Num.asarray(DMs)
# open the plot device
short_filenmbase = filenmbase[:filenmbase.find("_DM")]
if opts.T_end > obstime:
opts.T_end = obstime
if pgplot_device:
ppgplot.pgopen(pgplot_device)
else:
if (opts.T_start > 0.0 or opts.T_end < obstime):
ppgplot.pgopen(short_filenmbase+'_%.0f-%.0fs_singlepulse.ps/VPS'%
(opts.T_start, opts.T_end))
else:
ppgplot.pgopen(short_filenmbase+'_singlepulse.ps/VPS')
ppgplot.pgpap(7.5, 1.0) # Width in inches, aspect
# plot the SNR histogram
ppgplot.pgsvp(0.06, 0.31, 0.6, 0.87)
ppgplot.pgswin(opts.threshold, maxsnr,
Num.log10(0.5), Num.log10(2*max(num_v_snr)))
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCLNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Signal-to-Noise")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
ppgplot.pgsch(1.0)
ppgplot.pgbin(snrs, Num.log10(num_v_snr), 1)
# plot the DM histogram
ppgplot.pgsvp(0.39, 0.64, 0.6, 0.87)
# Add [1] to num_v_DM in YMAX below so that YMIN != YMAX when max(num_v_DM)==0
ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, 0.0, 1.1*max(num_v_DM+[1]))
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Number of Pulses")
ppgplot.pgsch(1.0)
ppgplot.pgbin(DMs, num_v_DM, 1)
# plot the SNR vs DM plot
ppgplot.pgsvp(0.72, 0.97, 0.6, 0.87)
ppgplot.pgswin(min(DMs)-0.5, max(DMs)+0.5, opts.threshold, maxsnr)
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "DM (pc cm\u-3\d)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "Signal-to-Noise")
ppgplot.pgsch(1.0)
cand_ts = Num.zeros(len(candlist), dtype=Num.float32)
cand_SNRs = Num.zeros(len(candlist), dtype=Num.float32)
cand_DMs = Num.zeros(len(candlist), dtype=Num.float32)
for ii, cand in enumerate(candlist):
cand_ts[ii], cand_SNRs[ii], cand_DMs[ii] = \
cand.time, cand.sigma, cand.DM
ppgplot.pgpt(cand_DMs, cand_SNRs, 20)
# plot the DM vs Time plot
ppgplot.pgsvp(0.06, 0.97, 0.08, 0.52)
ppgplot.pgswin(opts.T_start, opts.T_end, min(DMs)-0.5, max(DMs)+0.5)
ppgplot.pgsch(0.8)
ppgplot.pgbox("BCNST", 0, 0, "BCNST", 0, 0)
ppgplot.pgmtxt('B', 2.5, 0.5, 0.5, "Time (s)")
ppgplot.pgmtxt('L', 1.8, 0.5, 0.5, "DM (pc cm\u-3\d)")
# Circles are symbols 20-26 in increasing order
snr_range = 12.0
cand_symbols = (cand_SNRs-opts.threshold)/snr_range * 6.0 + 20.5
cand_symbols = cand_symbols.astype(Num.int32)
cand_symbols[cand_symbols>26] = 26
for ii in [26, 25, 24, 23, 22, 21, 20]:
inds = Num.nonzero(cand_symbols==ii)[0]
ppgplot.pgpt(cand_ts[inds], cand_DMs[inds], ii)
            # Now fill the information area
ppgplot.pgsvp(0.05, 0.95, 0.87, 0.97)
ppgplot.pgsch(1.0)
ppgplot.pgmtxt('T', 0.5, 0.0, 0.0,
"Single pulse results for '%s'"%short_filenmbase)
ppgplot.pgsch(0.8)
# first row
ppgplot.pgmtxt('T', -1.1, 0.02, 0.0, 'Source: %s'%\
info.object)
ppgplot.pgmtxt('T', -1.1, 0.33, 0.0, 'RA (J2000):')
ppgplot.pgmtxt('T', -1.1, 0.5, 0.0, info.RA)
ppgplot.pgmtxt('T', -1.1, 0.73, 0.0, 'N samples: %.0f'%orig_N)
# second row
ppgplot.pgmtxt('T', -2.4, 0.02, 0.0, 'Telescope: %s'%\
info.telescope)
ppgplot.pgmtxt('T', -2.4, 0.33, 0.0, 'DEC (J2000):')
ppgplot.pgmtxt('T', -2.4, 0.5, 0.0, info.DEC)
ppgplot.pgmtxt('T', -2.4, 0.73, 0.0, 'Sampling time: %.2f \gms'%\
(orig_dt*1e6))
# third row
if info.instrument.find("pigot") >= 0:
instrument = "Spigot"
else:
instrument = info.instrument
ppgplot.pgmtxt('T', -3.7, 0.02, 0.0, 'Instrument: %s'%instrument)
if (info.bary):
ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dbary\u: %.12f'%info.epoch)
else:
ppgplot.pgmtxt('T', -3.7, 0.33, 0.0, 'MJD\dtopo\u: %.12f'%info.epoch)
ppgplot.pgmtxt('T', -3.7, 0.73, 0.0, 'Freq\dctr\u: %.1f MHz'%\
((info.numchan/2-0.5)*info.chan_width+info.lofreq))
ppgplot.pgiden()
ppgplot.pgend()
if __name__ == '__main__':
if (0):
# The following is for profiling
import hotshot
prof = hotshot.Profile("hotshot_edi_stats")
prof.runcall(main)
prof.close()
# To see the results:
if (0):
from hotshot import stats
s = stats.load("hotshot_edi_stats")
s.sort_stats("time").print_stats()
else:
main()
| pscholz/presto | bin/single_pulse_search.py | Python | gpl-2.0 | 30,794 |
# img
# trigger = attributes[12]
# http://ws-tcg.com/en/cardlist
# edit
import os
import requests
import sqlite3
def get_card(browser):
attributes = browser.find_elements_by_xpath('//table[@class="status"]/tbody/tr/td')
image = attributes[0].find_element_by_xpath('./img').get_attribute('src')
if attributes[1].find_element_by_xpath('./span[@class="kana"]').text:
card_name = attributes[1].find_element_by_xpath('./span[@class="kana"]').text
else:
card_name = None
card_no = attributes[2].text if attributes[2].text else None
rarity = attributes[3].text if attributes[3].text else None
expansion = attributes[4].text if attributes[4].text else None
if attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/w.gif":
side = "Weiß"
elif attributes[5].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/s.gif":
side = "Schwarz"
else:
side = None
card_type = attributes[6].text if attributes[6].text else None
if attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/yellow.gif":
color = "Yellow"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/green.gif":
color = "Green"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/red.gif":
color = "Red"
elif attributes[7].find_element_by_xpath('./img').get_attribute("src") == "http://ws-tcg.com/en/cardlist/partimages/blue.gif":
color = "Blue"
else:
color = None
level = attributes[8].text if attributes[8].text else None
cost = attributes[9].text if attributes[9].text else None
power = attributes[10].text if attributes[10].text else None
soul = len(attributes[11].find_elements_by_xpath('./img[contains(@src, "http://ws-tcg.com/en/cardlist/partimages/soul.gif")]'))
special_attribute = attributes[13].text if attributes[13].text else None
text = attributes[14].text if attributes[14].text else None
flavor_text = attributes[15].text if attributes[15].text else None
if not os.path.exists("images"):
os.makedirs("images")
if not os.path.exists("images/" + card_no.split("/")[0]):
os.makedirs("images/" + card_no.split("/")[0])
r = requests.get(image, stream=True)
if r.status_code == 200:
with open("images/" + card_no + ".jpg", 'wb') as f:
for chunk in r:
f.write(chunk)
card = (card_name, card_no, rarity, expansion, side, card_type, color, level, cost, power, soul,
special_attribute, text, flavor_text)
connection = sqlite3.connect('cards.sqlite3')
cursor = connection.cursor()
cursor.execute('INSERT INTO cards (name, no, rarity, expansion, side, type, color, level, cost, power, soul,'
'special_attribute, text, flavor_text) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?, ?)', card)
connection.commit()
connection.close()
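# The INSERT above assumes a ``cards`` table already exists in cards.sqlite3;
# a matching schema might look like this (a sketch -- the column types are
# assumptions, not taken from the original project):
#
#   CREATE TABLE IF NOT EXISTS cards (
#       name TEXT, no TEXT, rarity TEXT, expansion TEXT, side TEXT,
#       type TEXT, color TEXT, level TEXT, cost TEXT, power TEXT,
#       soul INTEGER, special_attribute TEXT, text TEXT, flavor_text TEXT
#   );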
| electronicdaisy/WeissSchwarzTCGDatabase | card.py | Python | mit | 3,176 |
#
# Copyright 2010 Markus Pielmeier
#
# This file is part of brainfs.
#
# brainfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# brainfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with brainfs. If not, see <http://www.gnu.org/licenses/>.
#
#====================================================================
# first set up exception handling and logging
import logging
import sys
def setUpLogging():
def exceptionCallback(eType, eValue, eTraceBack):
import cgitb
txt = cgitb.text((eType, eValue, eTraceBack))
logging.fatal(txt)
# sys.exit(1)
format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
# configure file logger
logging.basicConfig(level = logging.DEBUG,
format = format,
filename = '/tmp/brainfs.log',
filemode = 'a')
# configure console logger
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setLevel(logging.DEBUG)
consoleFormatter = logging.Formatter(format)
consoleHandler.setFormatter(consoleFormatter)
logging.getLogger().addHandler(consoleHandler)
# replace default exception handler
sys.excepthook = exceptionCallback
logging.debug('Logging and exception handling has been set up')
if __name__ == '__main__':
from os import environ as env
if 'DEBUG' in env:
setUpLogging()
#====================================================================
# here the application begins
import errno
import fuse
import subject_directory
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
"your fuse-py doesn't know of fuse.__version__, probably it's too old."
fuse.fuse_python_api = (0, 2)
class BrainFS(fuse.Fuse):
def __init__(self, initwd, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
# TODO load subjects from persistent store
self.subjects = []
self.view = subject_directory.SubjectDirectoryView(self.subjects)
        # TODO implement generic view delegate
def getattr(self, path):
return self.view.getattr(path)
def readdir(self, path, offset):
return self.view.readdir(path, offset)
def readlink(self, path):
return self.view.readlink(path)
def open(self, path, flags):
return self.view.open(path, flags)
def read(self, path, size, offset):
return self.view.read(path, size, offset)
def write(self, path, data, pos):
return self.view.write(path, data, pos)
def symlink(self, path, linkPath):
return self.view.symlink(path, linkPath)
def main():
import os
fs = BrainFS(os.getcwd(),
version = "%prog " + fuse.__version__,
dash_s_do = 'setsingle')
fs.parse(errex = 1)
opts, args = fs.cmdline
return fs.main()
if __name__ == '__main__':
import sys
sys.exit(main())
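# Typical invocation (a sketch; requires the fuse-python bindings and an
# existing mountpoint -- the path below is hypothetical). Setting DEBUG
# enables the logging configured above:
#
#   DEBUG=1 python brainfs.py /mnt/brainfs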
| marook/brainfs | src/modules/brainfs/brainfs.py | Python | gpl-3.0 | 3,382 |
import json
import websocket
import logging
import random
try:
import http.client as HttpClient
except:
import httplib as HttpClient
from .protocol import ReallyClientProtocol
import threading
import collections
import socket
import exceptions
from .tracker import ReallyTracker
from .responses import GetResponse, ReadResponse, CreateResponse, Response, SubscribeResponse
from .r import R
from .reallyobject import Subscribe
from concurrent.futures import Future
REALLY_STATE_DISCONNECTED = "disconnected"
REALLY_STATE_ONLINE = "online"
REALLY_PROTOCOL_VERSION = "0.1"
class Really(object):
def __init__(self, server_host="localhost", server_port=9000, ssl=False):
self._is_ssl = ssl
self._state = REALLY_STATE_DISCONNECTED
self._server_host = server_host
self._server_port = server_port
if self._is_ssl:
base_ws = "wss://"
else:
base_ws = "ws://"
self._socket_url = "%s%s:%s/v%s/socket" % (base_ws, server_host, server_port, REALLY_PROTOCOL_VERSION)
self._who = None
        self._access_token = None
self._protocol = ReallyClientProtocol(self)
self._tag_lock = threading.Lock()
self._tracker = None
self._last_tag = 0
self._callbacks = collections.defaultdict(list)
def _gen_tag(self):
with self._tag_lock:
if self._last_tag > 50000:
self._last_tag = 0
self._last_tag += 1
return self._last_tag
def _raw_send(self, data):
self._websocket.send(json.dumps(data))
def login_anonymous(self):
if self._is_ssl:
connection = HttpClient.HTTPSConnection(self._server_host, self._server_port)
else:
connection = HttpClient.HTTPConnection(self._server_host, self._server_port)
connection.request('POST', '/auth/anonymous/')
response = connection.getresponse()
if response.status == 200:
self._access_token = json.loads(response.read().decode())['accessToken']
self._who = "anonymous"
self._connect()
else:
raise Exception("Cannot authenticate (HTTP %s), reason: %s" % (response.status, response.reason))
def on(self, event, callback):
self._callbacks[event].append(callback)
def is_online(self):
return self._state == REALLY_STATE_ONLINE
def is_logged_in(self):
if self._access_token:
return True
else:
return False
def who_am_i(self):
# if self._state != REALLY_STATE_ONLINE:
return self._who
def _start_tracker(self):
self._tracker = ReallyTracker(self, self._protocol)
self._tracker_thread = threading.Thread(target=self._tracker.run_till_terminated)
self._tracker_thread.daemon = True
self._tracker_thread.start()
def _fire(self, evt, **kwargs):
for callback in self._callbacks[evt]:
callback(**kwargs)
def _connect(self):
if self.is_online():
logging.info("already connected")
return
self._websocket = websocket.create_connection(self._socket_url)
self._fire('connect')
self._raw_send(self._protocol.init_message(self._gen_tag(), self._access_token))
raw_response = self._websocket.recv()
response = json.loads(raw_response)
logging.debug("INITIALIZE RESPONSE: %s", response)
if response['evt'] != 'initialized':
self._websocket.close()
logging.warning("Initialization failure, response %s", response)
raise exceptions.InitializationException("Server didn't like our initialize message")
logging.info("Connection to Really Server [%s] is now initialized.", self._socket_url)
self._who = response['body']
self._fire('initialize')
self._start_tracker()
self._state = REALLY_STATE_ONLINE
def close(self):
if self._websocket:
self._websocket.close()
self._state = REALLY_STATE_DISCONNECTED
self._tracker.request_termination()
self._tracker_thread.join()
# CRUD API
def get(self, r, fields=None):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
if not isinstance(r, (str, R)):
raise TypeError("r must be a string or an instance of class pyreally.R")
tag = self._gen_tag()
req = self._protocol.get_message(tag, r, fields)
future = Future()
self._tracker.register_future(tag, GetResponse, future)
self._raw_send(req)
logging.debug("GET request sent: %s", req)
return future
def query(self, r, query=None, query_args=None, fields=None, ascending=None, limit=None, pagination_token=None, skip=None, include_total=None):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
if not isinstance(r, (str, R)):
raise TypeError("r must be a string or an instance of class pyreally.R")
tag = self._gen_tag()
req = self._protocol.query_message(tag, r, query, query_args, fields, ascending, limit, pagination_token, skip, include_total)
future = Future()
self._tracker.register_future(tag, ReadResponse, future)
self._raw_send(req)
logging.debug("READ request sent: %s", req)
return future
def create(self, r, body):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
if not isinstance(r, (str, R)):
raise TypeError("r must be a string or an instance of class pyreally.R")
tag = self._gen_tag()
req = self._protocol.create_message(tag, r, body)
future = Future()
self._tracker.register_future(tag, CreateResponse, future)
self._raw_send(req)
logging.debug("CREATE request sent: %s", req)
return future
def delete(self, r):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
if not isinstance(r, (str, R)):
raise TypeError("r must be a string or an instance of class pyreally.R")
tag = self._gen_tag()
req = self._protocol.delete_message(tag, r)
future = Future()
self._tracker.register_future(tag, Response, future)
self._raw_send(req)
logging.debug("DELETE request sent: %s", req)
return future
def update(self, r, ops, rev):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
if not isinstance(r, (str, R)):
raise TypeError("r must be a string or an instance of class pyreally.R")
tag = self._gen_tag()
req = self._protocol.update_message(tag, r, ops, rev)
future = Future()
self._tracker.register_future(tag, Response, future)
self._raw_send(req)
logging.debug("UPDATE request sent: %s", req)
return future
    def subscribe(self, r, callback, rev=None, fields=None):
        return self.multi_subscribe([Subscribe(r, rev, callback, fields)])
def multi_subscribe(self, subs):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
tag = self._gen_tag()
req = self._protocol.subscribe_message(tag, subs)
future = Future()
self._tracker.register_future(tag, SubscribeResponse, future)
self._raw_send(req)
logging.debug("SUBSCRIBE request sent %s", req)
return future
# def unsubscribe(self, sub_id):
def multi_unsubscribe(self, sub_ids):
if not self.is_online():
raise exceptions.DisconnectedException("Really is currently offline")
tag = self._gen_tag()
req = self._protocol.unsubscribe_message(tag, sub_ids)
future = Future()
self._tracker.register_future(tag, SubscribeResponse, future)
self._raw_send(req)
logging.debug("SUBSCRIBE request sent %s", req)
return future
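# Example client flow (a minimal sketch; assumes a Really server on
# localhost:9000 with anonymous authentication enabled -- "/users/123" is a
# hypothetical resource path):
#
#   really = Really()
#   really.login_anonymous()
#   future = really.get("/users/123", fields=["name"])
#   print(future.result())
#   really.close()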
| reallylabs/pyreally | pyreally/really.py | Python | apache-2.0 | 8,232 |
import csv
import datetime
import os
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.utils.text import slugify
from django_date_extensions.fields import ApproximateDate
from pombola.core.models import (
Person, Place, Position, PositionTitle, Organisation)
def parse_approximate_date(s):
"""Take a partial ISO 8601 date, and return an ApproximateDate for it
>>> ad = parse_approximate_date('2014-02-17')
>>> type(ad)
<class 'django_date_extensions.fields.ApproximateDate'>
>>> ad
2014-02-17
>>> parse_approximate_date('2014-02')
2014-02-00
>>> parse_approximate_date('2014')
2014-00-00
"""
for regexp in [
r'^(\d{4})-(\d{2})-(\d{2})$',
r'^(\d{4})-(\d{2})$',
r'^(\d{4})$'
]:
m = re.search(regexp, s)
if m:
return ApproximateDate(*(int(g, 10) for g in m.groups()))
if s == 'future':
return ApproximateDate(future=True)
raise Exception, "Couldn't parse '{0}' as an ApproximateDate".format(s)
def adjust_approximate_date(ad, by_days):
"""Return an ApproximateDate offset from another by some days
This refuses to adjust a 'future' ApproximateDate, and treats
those without month or day specified as being on the first day of
the month, or the first day of the year respectively.
>>> ad = ApproximateDate(2014, 2, 17)
>>> adjust_approximate_date(ad, -1)
2014-02-16
>>> ad = ApproximateDate(2014, 1, 1)
>>> adjust_approximate_date(ad, -1)
2013-12-31
>>> ad = ApproximateDate(2014, 2)
>>> adjust_approximate_date(ad, 50)
2014-03-23
>>> ad = ApproximateDate(2014)
>>> adjust_approximate_date(ad, 40)
2014-02-10
"""
if ad.future:
raise Exception, "You can't adjust a future date"
day = ad.day or 1
month = ad.month or 1
d = datetime.date(ad.year, month, day)
d = d + datetime.timedelta(days=by_days)
return ApproximateDate(d.year, d.month, d.day)
# FIXME: @mhl suggests this could be done in a simpler way using transactions:
def get_or_create(model, **kwargs):
"""An alternative to Django's get_or_create where save() is optional
This is based on Django's get_or_create from
django/db/models/query.py, but in this version the special keyword
argument 'commit' (which defaults to True) can be set to False to
specify that the model shouldn't be saved."""
commit = kwargs.pop('commit', False)
defaults = kwargs.pop('defaults', {})
lookup = kwargs.copy()
try:
result = model.objects.get(**lookup)
print " Found {0} with params {1}".format(model.__name__, lookup)
return result
except model.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
o = model(**params)
if commit:
print " Saving {0} with params {1}".format(model.__name__, kwargs)
o.save()
else:
print " Not saving {0} (no --commit) with params {1}".format(model.__name__, kwargs)
return o
raise Exception("Failed get_or_create")
class Command(NoArgsCommand):
help = "Import the state governors of Nigeria, as of February 2014"
option_list = NoArgsCommand.option_list + (
make_option('--commit',
action='store_true',
default=False,
dest='commit',
help='Actually update the database'),
)
def handle_noargs(self, **options):
command_directory = os.path.dirname(__file__)
data_directory = os.path.realpath(
os.path.join(command_directory,
'..',
'..',
'data'))
governor_pt = get_or_create(
PositionTitle,
commit=options['commit'],
name='Governor',
slug='governor')
party_member_pt = get_or_create(
PositionTitle,
commit=options['commit'],
name='Member',
slug='member')
with open(os.path.join(data_directory,
'governors-wikipedia-2012-02-14.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
place_name = row['Place Name']
place_name = re.sub('(?i)\s+State\s*$', '', place_name)
place = Place.objects.get(kind__slug='state',
name=place_name)
person_name = row['Current Governor']
person_slug = slugify(person_name)
person = get_or_create(
Person,
commit=options['commit'],
slug=person_slug,
legal_name=person_name)
office_start_date = parse_approximate_date(row['Elected/Took office'])
governor_position = get_or_create(
Position,
commit=options['commit'],
person=person,
place=place,
title=governor_pt,
category='political',
defaults={
'organisation': None,
'start_date': office_start_date,
'end_date': ApproximateDate(future=True)
})
# Now create party memberships:
party_details = row['Party'].strip()
party_position_a = None
party_position_b = None
if party_details:
m = re.search(r'^([^;]+)(?:; (.+) from ((\d{4})(?:-(\d{2})(?:-(\d{2}))?)?))?$',
party_details)
if not m:
raise Exception, "Unknown format of party '{0}'".format(party_details)
party_a, party_b, b_onward_date, b_onward_year, b_onward_month, b_onward_day = m.groups()
# Create a position in that party:
party = Organisation.objects.get(kind__slug='party',
name=party_a)
end_date = ApproximateDate(future=True)
party_position_a = get_or_create(
Position,
commit=options['commit'],
person=person,
place=None,
title=party_member_pt,
category='political',
organisation=party,
defaults={
'start_date': office_start_date,
'end_date': ApproximateDate(future=True)
})
if party_b:
new_party_from = parse_approximate_date(b_onward_date)
old_part_to = adjust_approximate_date(
new_party_from, -1)
party_position_a.end_date = old_part_to
if options['commit']:
party_position_a.save()
party = Organisation.objects.get(kind__slug='party',
name=party_b)
party_position_b = get_or_create(
Position,
commit=options['commit'],
person=person,
place=None,
title=party_member_pt,
category='political',
organisation=party,
defaults={
'start_date': new_party_from,
'end_date': ApproximateDate(future=True)
})
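# Typical invocation (a sketch; the command name follows from this file's
# location under management/commands/). Run once without --commit to preview
# what would be created, then with --commit to save:
#
#   ./manage.py nigeria_add_governors
#   ./manage.py nigeria_add_governors --commit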
| mysociety/pombola | pombola/nigeria/management/commands/nigeria_add_governors.py | Python | agpl-3.0 | 8,002 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Tue Mar 11 13:01:43 2014
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x05\x3e\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x69\x6e\x6b\
\x73\x63\x61\x70\x65\x2e\x6f\x72\x67\x2f\x29\x20\x2d\x2d\x3e\x0a\
\x3c\x73\x76\x67\x0a\x20\x20\x20\x78\x6d\x6c\x6e\x73\x3a\x73\x76\
\x67\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\
\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x0a\x20\
\x20\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\x2f\x2f\
\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\x30\x2f\
\x73\x76\x67\x22\x0a\x20\x20\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\
\x22\x31\x2e\x30\x22\x0a\x20\x20\x20\x77\x69\x64\x74\x68\x3d\x22\
\x36\x34\x35\x22\x0a\x20\x20\x20\x68\x65\x69\x67\x68\x74\x3d\x22\
\x35\x38\x35\x22\x0a\x20\x20\x20\x69\x64\x3d\x22\x73\x76\x67\x32\
\x22\x3e\x0a\x20\x20\x3c\x64\x65\x66\x73\x0a\x20\x20\x20\x20\x20\
\x69\x64\x3d\x22\x64\x65\x66\x73\x34\x22\x20\x2f\x3e\x0a\x20\x20\
\x3c\x67\x0a\x20\x20\x20\x20\x20\x69\x64\x3d\x22\x6c\x61\x79\x65\
\x72\x31\x22\x3e\x0a\x20\x20\x20\x20\x3c\x70\x61\x74\x68\x0a\x20\
\x20\x20\x20\x20\x20\x20\x64\x3d\x22\x4d\x20\x32\x39\x37\x2e\x32\
\x39\x37\x34\x37\x2c\x35\x35\x30\x2e\x38\x36\x38\x32\x33\x20\x43\
\x20\x32\x38\x33\x2e\x35\x32\x32\x34\x33\x2c\x35\x33\x35\x2e\x34\
\x33\x31\x39\x31\x20\x32\x34\x39\x2e\x31\x32\x36\x38\x2c\x35\x30\
\x35\x2e\x33\x33\x38\x35\x35\x20\x32\x32\x30\x2e\x38\x36\x32\x37\
\x37\x2c\x34\x38\x33\x2e\x39\x39\x34\x31\x32\x20\x43\x20\x31\x33\
\x37\x2e\x31\x31\x38\x36\x37\x2c\x34\x32\x30\x2e\x37\x35\x32\x32\
\x38\x20\x31\x32\x35\x2e\x37\x32\x31\x30\x38\x2c\x34\x31\x31\x2e\
\x35\x39\x39\x39\x20\x39\x31\x2e\x37\x31\x39\x32\x33\x38\x2c\x33\
\x38\x30\x2e\x32\x39\x30\x38\x38\x20\x43\x20\x32\x39\x2e\x30\x33\
\x34\x37\x31\x2c\x33\x32\x32\x2e\x35\x37\x30\x37\x31\x20\x32\x2e\
\x34\x31\x33\x36\x32\x32\x2c\x32\x36\x34\x2e\x35\x38\x30\x38\x36\
\x20\x32\x2e\x35\x30\x34\x38\x34\x37\x38\x2c\x31\x38\x35\x2e\x39\
\x35\x31\x32\x34\x20\x43\x20\x32\x2e\x35\x34\x39\x33\x35\x39\x34\
\x2c\x31\x34\x37\x2e\x35\x36\x37\x33\x39\x20\x35\x2e\x31\x36\x35\
\x36\x31\x35\x32\x2c\x31\x33\x32\x2e\x37\x37\x39\x32\x39\x20\x31\
\x35\x2e\x39\x31\x34\x37\x33\x34\x2c\x31\x31\x30\x2e\x31\x35\x33\
\x39\x38\x20\x43\x20\x33\x34\x2e\x31\x35\x31\x34\x33\x33\x2c\x37\
\x31\x2e\x37\x36\x38\x32\x36\x37\x20\x36\x31\x2e\x30\x31\x34\x39\
\x39\x36\x2c\x34\x33\x2e\x32\x34\x34\x36\x36\x37\x20\x39\x35\x2e\
\x33\x36\x30\x30\x35\x32\x2c\x32\x35\x2e\x37\x39\x39\x34\x35\x37\
\x20\x43\x20\x31\x31\x39\x2e\x36\x38\x35\x34\x35\x2c\x31\x33\x2e\
\x34\x34\x33\x36\x37\x35\x20\x31\x33\x31\x2e\x36\x38\x32\x37\x2c\
\x37\x2e\x39\x35\x34\x32\x30\x34\x36\x20\x31\x37\x32\x2e\x33\x30\
\x34\x34\x38\x2c\x37\x2e\x37\x32\x39\x36\x32\x33\x36\x20\x43\x20\
\x32\x31\x34\x2e\x37\x39\x37\x37\x37\x2c\x37\x2e\x34\x39\x34\x37\
\x38\x39\x36\x20\x32\x32\x33\x2e\x37\x34\x33\x31\x31\x2c\x31\x32\
\x2e\x34\x34\x39\x33\x34\x37\x20\x32\x34\x38\x2e\x37\x33\x39\x31\
\x39\x2c\x32\x36\x2e\x31\x38\x31\x34\x35\x39\x20\x43\x20\x32\x37\
\x39\x2e\x31\x36\x33\x37\x2c\x34\x32\x2e\x38\x39\x35\x37\x37\x37\
\x20\x33\x31\x30\x2e\x34\x37\x39\x30\x39\x2c\x37\x38\x2e\x36\x31\
\x37\x31\x36\x37\x20\x33\x31\x36\x2e\x39\x35\x32\x34\x32\x2c\x31\
\x30\x33\x2e\x39\x39\x32\x30\x35\x20\x4c\x20\x33\x32\x30\x2e\x39\
\x35\x30\x35\x32\x2c\x31\x31\x39\x2e\x36\x36\x34\x34\x35\x20\x4c\
\x20\x33\x33\x30\x2e\x38\x31\x30\x31\x35\x2c\x39\x38\x2e\x30\x37\
\x39\x39\x34\x32\x20\x43\x20\x33\x38\x36\x2e\x35\x32\x36\x33\x32\
\x2c\x2d\x32\x33\x2e\x38\x39\x32\x39\x38\x36\x20\x35\x36\x34\x2e\
\x34\x30\x38\x35\x31\x2c\x2d\x32\x32\x2e\x30\x36\x38\x31\x31\x20\
\x36\x32\x36\x2e\x33\x31\x32\x34\x34\x2c\x31\x30\x31\x2e\x31\x31\
\x31\x35\x33\x20\x43\x20\x36\x34\x35\x2e\x39\x35\x30\x31\x31\x2c\
\x31\x34\x30\x2e\x31\x38\x37\x35\x38\x20\x36\x34\x38\x2e\x31\x30\
\x36\x30\x38\x2c\x32\x32\x33\x2e\x36\x32\x34\x37\x20\x36\x33\x30\
\x2e\x36\x39\x32\x35\x36\x2c\x32\x37\x30\x2e\x36\x32\x34\x34\x20\
\x43\x20\x36\x30\x37\x2e\x39\x37\x37\x32\x39\x2c\x33\x33\x31\x2e\
\x39\x33\x33\x37\x37\x20\x35\x36\x35\x2e\x33\x31\x32\x35\x35\x2c\
\x33\x37\x38\x2e\x36\x37\x34\x39\x33\x20\x34\x36\x36\x2e\x36\x38\
\x36\x32\x32\x2c\x34\x35\x30\x2e\x33\x30\x30\x39\x38\x20\x43\x20\
\x34\x30\x32\x2e\x30\x30\x35\x34\x2c\x34\x39\x37\x2e\x32\x37\x34\
\x36\x32\x20\x33\x32\x38\x2e\x38\x30\x31\x34\x38\x2c\x35\x36\x38\
\x2e\x33\x34\x36\x38\x34\x20\x33\x32\x33\x2e\x37\x30\x35\x35\x35\
\x2c\x35\x37\x38\x2e\x33\x32\x39\x30\x31\x20\x43\x20\x33\x31\x37\
\x2e\x37\x39\x30\x30\x37\x2c\x35\x38\x39\x2e\x39\x31\x36\x35\x34\
\x20\x33\x32\x33\x2e\x34\x32\x33\x33\x39\x2c\x35\x38\x30\x2e\x31\
\x34\x34\x39\x31\x20\x32\x39\x37\x2e\x32\x39\x37\x34\x37\x2c\x35\
\x35\x30\x2e\x38\x36\x38\x32\x33\x20\x7a\x22\x0a\x20\x20\x20\x20\
\x20\x20\x20\x69\x64\x3d\x22\x70\x61\x74\x68\x32\x34\x31\x37\x22\
\x0a\x20\x20\x20\x20\x20\x20\x20\x73\x74\x79\x6c\x65\x3d\x22\x66\
\x69\x6c\x6c\x3a\x23\x66\x66\x30\x30\x30\x30\x22\x20\x2f\x3e\x0a\
\x20\x20\x20\x20\x3c\x67\x0a\x20\x20\x20\x20\x20\x20\x20\x74\x72\
\x61\x6e\x73\x66\x6f\x72\x6d\x3d\x22\x74\x72\x61\x6e\x73\x6c\x61\
\x74\x65\x28\x31\x32\x39\x2e\x32\x38\x35\x37\x31\x2c\x2d\x36\x34\
\x2e\x32\x38\x35\x37\x31\x34\x29\x22\x0a\x20\x20\x20\x20\x20\x20\
\x20\x69\x64\x3d\x22\x67\x32\x32\x32\x31\x22\x20\x2f\x3e\x0a\x20\
\x20\x3c\x2f\x67\x3e\x0a\x3c\x2f\x73\x76\x67\x3e\x0a\
\x00\x00\x06\x06\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x75\x74\x66\
\x2d\x38\x22\x3f\x3e\x0d\x0a\x3c\x21\x44\x4f\x43\x54\x59\x50\x45\
\x20\x73\x76\x67\x20\x50\x55\x42\x4c\x49\x43\x20\x22\x2d\x2f\x2f\
\x57\x33\x43\x2f\x2f\x44\x54\x44\x20\x53\x56\x47\x20\x31\x2e\x31\
\x20\x54\x69\x6e\x79\x2f\x2f\x45\x4e\x22\x20\x22\x68\x74\x74\x70\
\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x47\x72\
\x61\x70\x68\x69\x63\x73\x2f\x53\x56\x47\x2f\x31\x2e\x31\x2f\x44\
\x54\x44\x2f\x73\x76\x67\x31\x31\x2d\x74\x69\x6e\x79\x2e\x64\x74\
\x64\x22\x3e\x0d\x0a\x3c\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\
\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\
\x72\x67\x2f\x32\x30\x30\x30\x2f\x73\x76\x67\x22\x20\x78\x6d\x6c\
\x6e\x73\x3a\x78\x6c\x69\x6e\x6b\x3d\x22\x68\x74\x74\x70\x3a\x2f\
\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x31\x39\x39\x39\
\x2f\x78\x6c\x69\x6e\x6b\x22\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\
\x22\x31\x2e\x31\x22\x20\x62\x61\x73\x65\x50\x72\x6f\x66\x69\x6c\
\x65\x3d\x22\x74\x69\x6e\x79\x22\x20\x78\x3d\x22\x30\x70\x78\x22\
\x20\x79\x3d\x22\x30\x70\x78\x22\x20\x77\x69\x64\x74\x68\x3d\x22\
\x34\x38\x30\x70\x78\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\
\x38\x30\x70\x78\x22\x20\x76\x69\x65\x77\x42\x6f\x78\x3d\x22\x30\
\x20\x30\x20\x34\x38\x30\x20\x34\x38\x30\x22\x20\x78\x6d\x6c\x3a\
\x73\x70\x61\x63\x65\x3d\x22\x70\x72\x65\x73\x65\x72\x76\x65\x22\
\x3e\x0d\x0a\x20\x20\x3c\x67\x20\x69\x64\x3d\x22\x74\x72\x61\x73\
\x68\x63\x61\x6e\x22\x3e\x0d\x0a\x20\x20\x09\x3c\x70\x61\x74\x68\
\x20\x64\x3d\x22\x4d\x33\x37\x38\x2e\x34\x30\x35\x2c\x31\x31\x32\
\x2e\x34\x36\x38\x68\x2d\x36\x33\x2e\x36\x30\x32\x63\x30\x2e\x30\
\x31\x34\x2d\x30\x2e\x34\x33\x36\x2c\x30\x2e\x30\x37\x2d\x30\x2e\
\x38\x37\x32\x2c\x30\x2e\x30\x37\x2d\x31\x2e\x33\x32\x34\x56\x39\
\x38\x2e\x33\x34\x37\x63\x30\x2d\x32\x33\x2e\x32\x38\x31\x2d\x31\
\x39\x2e\x30\x37\x2d\x34\x32\x2e\x33\x34\x38\x2d\x34\x32\x2e\x33\
\x35\x32\x2d\x34\x32\x2e\x33\x34\x38\x68\x2d\x37\x30\x2e\x35\x39\
\x34\x20\x20\x20\x63\x2d\x32\x33\x2e\x32\x39\x39\x2c\x30\x2d\x34\
\x32\x2e\x33\x35\x32\x2c\x31\x39\x2e\x30\x36\x36\x2d\x34\x32\x2e\
\x33\x35\x32\x2c\x34\x32\x2e\x33\x34\x38\x76\x31\x32\x2e\x37\x39\
\x37\x63\x30\x2c\x30\x2e\x34\x35\x32\x2c\x30\x2e\x30\x35\x33\x2c\
\x30\x2e\x38\x38\x39\x2c\x30\x2e\x30\x36\x38\x2c\x31\x2e\x33\x32\
\x34\x68\x2d\x36\x33\x2e\x36\x63\x2d\x31\x31\x2e\x36\x34\x38\x2c\
\x30\x2d\x32\x31\x2e\x31\x37\x36\x2c\x39\x2e\x35\x33\x35\x2d\x32\
\x31\x2e\x31\x37\x36\x2c\x32\x31\x2e\x31\x37\x36\x20\x20\x20\x76\
\x32\x31\x2e\x31\x37\x31\x63\x30\x2c\x31\x31\x2e\x36\x35\x33\x2c\
\x39\x2e\x35\x32\x37\x2c\x32\x31\x2e\x31\x38\x38\x2c\x32\x31\x2e\
\x31\x37\x36\x2c\x32\x31\x2e\x31\x38\x38\x68\x37\x2e\x30\x36\x32\
\x76\x2d\x31\x34\x2e\x31\x32\x36\x48\x33\x37\x31\x2e\x33\x34\x76\
\x31\x34\x2e\x31\x32\x36\x68\x37\x2e\x30\x36\x34\x63\x31\x31\x2e\
\x36\x34\x31\x2c\x30\x2c\x32\x31\x2e\x31\x37\x2d\x39\x2e\x35\x33\
\x35\x2c\x32\x31\x2e\x31\x37\x2d\x32\x31\x2e\x31\x38\x38\x76\x2d\
\x32\x31\x2e\x31\x37\x31\x20\x20\x20\x43\x33\x39\x39\x2e\x35\x37\
\x35\x2c\x31\x32\x32\x2e\x30\x30\x33\x2c\x33\x39\x30\x2e\x30\x34\
\x35\x2c\x31\x31\x32\x2e\x34\x36\x38\x2c\x33\x37\x38\x2e\x34\x30\
\x35\x2c\x31\x31\x32\x2e\x34\x36\x38\x7a\x20\x4d\x31\x39\x34\x2e\
\x36\x35\x33\x2c\x39\x31\x2e\x32\x39\x37\x68\x38\x34\x2e\x37\x30\
\x33\x76\x32\x31\x2e\x31\x37\x31\x68\x2d\x38\x34\x2e\x37\x30\x33\
\x56\x39\x31\x2e\x32\x39\x37\x7a\x22\x2f\x3e\x0d\x0a\x20\x20\x09\
\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x31\x31\x37\x2e\x32\x32\
\x35\x2c\x31\x38\x33\x2e\x30\x36\x32\x76\x32\x31\x31\x2e\x37\x35\
\x31\x63\x30\x2c\x31\x31\x2e\x36\x35\x38\x2c\x39\x2e\x35\x32\x33\
\x2c\x32\x31\x2e\x31\x38\x38\x2c\x32\x31\x2e\x31\x38\x2c\x32\x31\
\x2e\x31\x38\x38\x68\x31\x39\x37\x2e\x36\x33\x39\x63\x31\x31\x2e\
\x36\x35\x2c\x30\x2c\x32\x31\x2e\x31\x38\x2d\x39\x2e\x35\x33\x2c\
\x32\x31\x2e\x31\x38\x2d\x32\x31\x2e\x31\x38\x38\x56\x31\x38\x33\
\x2e\x30\x36\x32\x48\x31\x31\x37\x2e\x32\x32\x35\x7a\x20\x20\x20\
\x20\x4d\x31\x38\x37\x2e\x38\x31\x31\x2c\x33\x37\x30\x2e\x31\x31\
\x32\x63\x30\x2c\x33\x2e\x38\x38\x38\x2d\x33\x2e\x31\x38\x2c\x37\
\x2e\x30\x35\x39\x2d\x37\x2e\x30\x35\x35\x2c\x37\x2e\x30\x35\x39\
\x68\x2d\x31\x34\x2e\x31\x31\x37\x63\x2d\x33\x2e\x38\x39\x31\x2c\
\x30\x2d\x37\x2e\x30\x36\x32\x2d\x33\x2e\x31\x37\x31\x2d\x37\x2e\
\x30\x36\x32\x2d\x37\x2e\x30\x35\x39\x56\x32\x32\x38\x2e\x39\x33\
\x38\x20\x20\x20\x63\x30\x2d\x33\x2e\x38\x37\x34\x2c\x33\x2e\x31\
\x37\x32\x2d\x37\x2e\x30\x36\x32\x2c\x37\x2e\x30\x36\x32\x2d\x37\
\x2e\x30\x36\x32\x68\x31\x34\x2e\x31\x31\x37\x63\x33\x2e\x38\x37\
\x35\x2c\x30\x2c\x37\x2e\x30\x35\x35\x2c\x33\x2e\x31\x38\x38\x2c\
\x37\x2e\x30\x35\x35\x2c\x37\x2e\x30\x36\x32\x56\x33\x37\x30\x2e\
\x31\x31\x32\x7a\x20\x4d\x32\x35\x31\x2e\x33\x34\x32\x2c\x33\x37\
\x30\x2e\x31\x31\x32\x20\x20\x20\x63\x30\x2c\x33\x2e\x38\x38\x38\
\x2d\x33\x2e\x31\x38\x39\x2c\x37\x2e\x30\x35\x39\x2d\x37\x2e\x30\
\x35\x35\x2c\x37\x2e\x30\x35\x39\x68\x2d\x31\x34\x2e\x31\x32\x35\
\x63\x2d\x33\x2e\x38\x38\x33\x2c\x30\x2d\x37\x2e\x30\x35\x35\x2d\
\x33\x2e\x31\x37\x31\x2d\x37\x2e\x30\x35\x35\x2d\x37\x2e\x30\x35\
\x39\x56\x32\x32\x38\x2e\x39\x33\x38\x63\x30\x2d\x33\x2e\x38\x37\
\x34\x2c\x33\x2e\x31\x37\x32\x2d\x37\x2e\x30\x36\x32\x2c\x37\x2e\
\x30\x35\x35\x2d\x37\x2e\x30\x36\x32\x68\x31\x34\x2e\x31\x32\x35\
\x20\x20\x20\x63\x33\x2e\x38\x36\x35\x2c\x30\x2c\x37\x2e\x30\x35\
\x35\x2c\x33\x2e\x31\x38\x38\x2c\x37\x2e\x30\x35\x35\x2c\x37\x2e\
\x30\x36\x32\x56\x33\x37\x30\x2e\x31\x31\x32\x7a\x20\x4d\x33\x31\
\x34\x2e\x38\x37\x34\x2c\x33\x37\x30\x2e\x31\x31\x32\x63\x30\x2c\
\x33\x2e\x38\x38\x38\x2d\x33\x2e\x31\x38\x39\x2c\x37\x2e\x30\x35\
\x39\x2d\x37\x2e\x30\x36\x32\x2c\x37\x2e\x30\x35\x39\x68\x2d\x31\
\x34\x2e\x31\x31\x37\x20\x20\x20\x63\x2d\x33\x2e\x38\x38\x35\x2c\
\x30\x2d\x37\x2e\x30\x35\x35\x2d\x33\x2e\x31\x37\x31\x2d\x37\x2e\
\x30\x35\x35\x2d\x37\x2e\x30\x35\x39\x56\x32\x32\x38\x2e\x39\x33\
\x38\x63\x30\x2d\x33\x2e\x38\x37\x34\x2c\x33\x2e\x31\x37\x2d\x37\
\x2e\x30\x36\x32\x2c\x37\x2e\x30\x35\x35\x2d\x37\x2e\x30\x36\x32\
\x68\x31\x34\x2e\x31\x31\x37\x63\x33\x2e\x38\x37\x33\x2c\x30\x2c\
\x37\x2e\x30\x36\x32\x2c\x33\x2e\x31\x38\x38\x2c\x37\x2e\x30\x36\
\x32\x2c\x37\x2e\x30\x36\x32\x56\x33\x37\x30\x2e\x31\x31\x32\x7a\
\x22\x2f\x3e\x0d\x0a\x20\x20\x3c\x2f\x67\x3e\x0d\x0a\x3c\x2f\x73\
\x76\x67\x3e\x0d\x0a\
"
qt_resource_name = "\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x09\
\x08\x97\x87\xa7\
\x00\x68\
\x00\x65\x00\x61\x00\x72\x00\x74\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x09\
\x08\x9b\xad\xc7\
\x00\x74\
\x00\x72\x00\x61\x00\x73\x00\x68\x00\x2e\x00\x73\x00\x76\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x01\x00\x00\x05\x42\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| xran-deex/PyChat | sys_rc.py | Python | mit | 13,017 |
from Components.PerServiceDisplay import PerServiceBase
from enigma import iPlayableService
from Source import Source
from Components.Element import cached
import NavigationInstance
class CurrentService(PerServiceBase, Source):
def __init__(self, navcore):
Source.__init__(self)
PerServiceBase.__init__(self, navcore,
{
iPlayableService.evStart: self.serviceEvent,
iPlayableService.evEnd: self.serviceEvent,
# FIXME: we should check 'interesting_events'
# which is not always provided.
iPlayableService.evUpdatedInfo: self.serviceEvent,
iPlayableService.evUpdatedEventInfo: self.serviceEvent,
iPlayableService.evNewProgramInfo: self.serviceEvent,
iPlayableService.evCuesheetChanged: self.serviceEvent,
iPlayableService.evVideoSizeChanged: self.serviceEvent,
iPlayableService.evHBBTVInfo: self.serviceEvent
}, with_event=True)
self.navcore = navcore
def serviceEvent(self, event):
self.changed((self.CHANGED_SPECIFIC, event))
@cached
def getCurrentService(self):
return self.navcore.getCurrentService()
def getCurrentServiceReference(self):
return self.navcore.getCurrentlyPlayingServiceReference()
service = property(getCurrentService)
@cached
def getCurrentServiceRef(self):
if NavigationInstance.instance is not None:
return NavigationInstance.instance.getCurrentlyPlayingServiceOrGroup()
return None
serviceref = property(getCurrentServiceRef)
def destroy(self):
PerServiceBase.destroy(self)
Source.destroy(self)
| BlackHole/enigma2-obh10 | lib/python/Components/Sources/CurrentService.py | Python | gpl-2.0 | 1,504 |
class X(object):
def testChain(self):
return X()
def g():
return X()
g().testChain().testChain()
| akosyakov/intellij-community | python/testData/completion/chainedCall.after.py | Python | apache-2.0 | 116 |
from django import http
from django.conf.urls import patterns
from django.contrib import admin
from django.db import models
from django.forms.models import modelform_factory
from django.shortcuts import get_object_or_404
from django.template import loader, Context
from django.views.generic import View
def get_printable_field_value(instance, fieldname):
""" Get the display value of a model field, showing a comma-delimited
list for M2M fields.
"""
field = instance._meta.get_field(fieldname)
field_value = getattr(instance, fieldname)
if isinstance(field, models.ManyToManyField):
field_value = ', '.join([unicode(f) for f in
field_value.all()])
return field_value
class AjaxModelFormView(View):
""" Handles AJAX updates of a single field on an object
(You likely don't need to use this directly as the admin
registers a URL for it itself.)
"""
model = None
valid_fields = None
def __init__(self, model, valid_fields, **kwargs):
self.model = model
self.valid_fields = valid_fields
def post(self, request, object_id, *args, **kwargs):
if not request.user or not request.user.is_staff:
return http.HttpResponseForbidden()
request = request.POST.copy()
        fieldname = request.pop('field', [None])[0]
        form_prefix = request.pop('prefix', [None])[0]
# prevent setting fields that weren't made AJAX-editable
if fieldname not in self.valid_fields:
return http.HttpResponseBadRequest()
ItemForm = modelform_factory(self.model, fields=(fieldname,))
instance = get_object_or_404(self.model, pk=object_id)
form = ItemForm(request, instance=instance, prefix=form_prefix)
if not form or not form.is_valid():
return http.HttpResponseBadRequest()
form.save()
new_value = get_printable_field_value(instance, fieldname)
return http.HttpResponse(new_value)
class AjaxModelAdmin(admin.ModelAdmin):
""" Admin class providing support for inline forms in
listview that are submitted through AJAX.
"""
def __init__(self, *args, **kwargs):
HANDLER_NAME_TPL = "_%s_ajax_handler"
if not hasattr(self, 'ajax_list_display'):
self.ajax_list_display = []
self.list_display = list(self.list_display)
self.list_display = self.list_display + map(lambda name: HANDLER_NAME_TPL % name,
self.ajax_list_display)
super(AjaxModelAdmin, self).__init__(*args, **kwargs)
for name in self.ajax_list_display:
setattr(self, HANDLER_NAME_TPL % name,
self._get_field_handler(name))
self.ajax_item_template = loader.get_template('ajax_changelist/'
'field_form.html')
def get_urls(self):
""" Add endpoint for saving a new field value. """
urls = super(AjaxModelAdmin, self).get_urls()
list_urls = patterns('',
(r'^(?P<object_id>\d+)$',
AjaxModelFormView.as_view(model=self.model,
valid_fields=self.ajax_list_display)))
return list_urls + urls
def _get_field_handler(self, fieldname):
""" Handle rendering of AJAX-editable fields for the changelist, by
dynamically building a callable for each field.
"""
def handler_function(obj, *args, **kwargs):
ItemForm = modelform_factory(self.model, fields=(fieldname,))
form = ItemForm(instance=obj, prefix="c" + unicode(obj.id))
field_value = get_printable_field_value(obj, fieldname)
# Render the field value and edit form
return self.ajax_item_template.render(Context({
'object_id': obj.id,
'field_name': fieldname,
'form': form.as_p(),
'field_value': field_value
}))
handler_function.allow_tags = True
handler_function.short_description = fieldname
return handler_function
class Media:
#FIXME: dripping jQueries is straight-up wack.
js = ('//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
'ajax_changelist/js/lib/jquery.django_csrf.js',
'ajax_changelist/js/admin.js',)
css = {
'all': ('ajax_changelist/css/admin.css',)
}
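# Hypothetical usage sketch (the model and field names below are illustrative,
# not part of this module): list AJAX-editable fields in ``ajax_list_display``
# and each one is appended to ``list_display`` as an inline form column
# rendered by ``_get_field_handler``.
#
#   class ArticleAdmin(AjaxModelAdmin):
#       list_display = ('title',)
#       ajax_list_display = ('status', 'category')
#
#   admin.site.register(Article, ArticleAdmin)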
| SohoTechLabs/django-ajax-changelist | ajax_changelist/admin.py | Python | mit | 4,495 |
"""Provides the constants needed for component."""
ATTR_AUX_HEAT = 'aux_heat'
ATTR_AWAY_MODE = 'away_mode'
ATTR_CURRENT_HUMIDITY = 'current_humidity'
ATTR_CURRENT_TEMPERATURE = 'current_temperature'
ATTR_FAN_LIST = 'fan_list'
ATTR_FAN_MODE = 'fan_mode'
ATTR_HOLD_MODE = 'hold_mode'
ATTR_HUMIDITY = 'humidity'
ATTR_MAX_HUMIDITY = 'max_humidity'
ATTR_MAX_TEMP = 'max_temp'
ATTR_MIN_HUMIDITY = 'min_humidity'
ATTR_MIN_TEMP = 'min_temp'
ATTR_OPERATION_LIST = 'operation_list'
ATTR_OPERATION_MODE = 'operation_mode'
ATTR_SWING_LIST = 'swing_list'
ATTR_SWING_MODE = 'swing_mode'
ATTR_TARGET_TEMP_HIGH = 'target_temp_high'
ATTR_TARGET_TEMP_LOW = 'target_temp_low'
ATTR_TARGET_TEMP_STEP = 'target_temp_step'
DEFAULT_MIN_TEMP = 7
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_HUMIDITY = 30
DEFAULT_MAX_HUMIDITY = 99
DOMAIN = 'climate'
SERVICE_SET_AUX_HEAT = 'set_aux_heat'
SERVICE_SET_AWAY_MODE = 'set_away_mode'
SERVICE_SET_FAN_MODE = 'set_fan_mode'
SERVICE_SET_HOLD_MODE = 'set_hold_mode'
SERVICE_SET_HUMIDITY = 'set_humidity'
SERVICE_SET_OPERATION_MODE = 'set_operation_mode'
SERVICE_SET_SWING_MODE = 'set_swing_mode'
SERVICE_SET_TEMPERATURE = 'set_temperature'
STATE_HEAT = 'heat'
STATE_COOL = 'cool'
STATE_IDLE = 'idle'
STATE_AUTO = 'auto'
STATE_MANUAL = 'manual'
STATE_DRY = 'dry'
STATE_FAN_ONLY = 'fan_only'
STATE_ECO = 'eco'
SUPPORT_TARGET_TEMPERATURE = 1
SUPPORT_TARGET_TEMPERATURE_HIGH = 2
SUPPORT_TARGET_TEMPERATURE_LOW = 4
SUPPORT_TARGET_HUMIDITY = 8
SUPPORT_TARGET_HUMIDITY_HIGH = 16
SUPPORT_TARGET_HUMIDITY_LOW = 32
SUPPORT_FAN_MODE = 64
SUPPORT_OPERATION_MODE = 128
SUPPORT_HOLD_MODE = 256
SUPPORT_SWING_MODE = 512
SUPPORT_AWAY_MODE = 1024
SUPPORT_AUX_HEAT = 2048
SUPPORT_ON_OFF = 4096
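# Worked illustration: the SUPPORT_* values above are powers of two, so a
# device's feature set is a bitmask built with | and tested with & (the
# variable name below is hypothetical).
#
#   supported_features = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE  # 1 | 64 == 65
#   supported_features & SUPPORT_FAN_MODE    # non-zero: fan mode supported
#   supported_features & SUPPORT_SWING_MODE  # 0: swing mode not supported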
| MartinHjelmare/home-assistant | homeassistant/components/climate/const.py | Python | apache-2.0 | 1,686 |
from django.contrib import admin
from musicvoting.models import Artist, Album, User, Track
# Register your models here.
admin.site.register(Artist)
admin.site.register(Album)
admin.site.register(User)
admin.site.register(Track)
| WIStudent/musicvotingsite | musicvoting/admin.py | Python | gpl-3.0 | 228 |
from __future__ import absolute_import
import time
from mock import patch, create_autospec
from tornado.ioloop import IOLoop
from tornado.concurrent import Future
import nsq
from nsq import event
_conn_port = 4150
def get_reader(max_in_flight=5):
return nsq.Reader("test", "test",
message_handler=_message_handler,
lookupd_http_addresses=["http://localhost:4161"],
max_in_flight=max_in_flight,
max_backoff_duration=2.0,
)
def get_ioloop():
ioloop = create_autospec(IOLoop, instance=True)
ioloop.time.side_effect = time.time
ioloop.call_later.side_effect = lambda dt, cb: ioloop.add_timeout(time.time() + dt, cb)
return ioloop
def get_conn(reader):
global _conn_port
with patch('nsq.conn.IOStream', autospec=True) as iostream:
instance = iostream.return_value
instance.connect.return_value = Future()
instance.read_bytes.return_value = Future()
conn = reader.connect_to_nsqd('localhost', _conn_port)
_conn_port += 1
conn.trigger(event.READY, conn=conn)
return conn
def send_message(conn):
msg = _get_message(conn)
conn.in_flight += 1
conn.trigger(event.MESSAGE, conn=conn, message=msg)
return msg
def _get_message(conn):
msg = nsq.Message("1234", "{}", 1234, 0)
msg.on('finish', conn._on_message_finish)
msg.on('requeue', conn._on_message_requeue)
return msg
def _message_handler(msg):
msg.enable_async()
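# Hypothetical sketch (the test name and call pattern are illustrative) of how
# these helpers combine in a unit test:
#
#   def test_reader_receives_message():
#       reader = get_reader(max_in_flight=5)
#       conn = get_conn(reader)   # patched IOStream, no real socket needed
#       msg = send_message(conn)  # fires the reader's MESSAGE event
#       msg.finish()              # resolves via conn._on_message_finish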
| bitly/pynsq | tests/reader_unit_test_helpers.py | Python | mit | 1,538 |
# -*- coding: utf-8 -*-
import h5py
import os
from collections import namedtuple
PathValue = namedtuple('PathValue', ['path', 'value'])
class HiisiHDF(h5py.File):
"""hdf5 file handle written on top of h5py.File.
    Module offers easy-to-use search and write methods for handling
    HDF5 files.
"""
CACHE = {'search_attribute':None,
'dataset_paths':[],
'group_paths':[],
'attribute_paths':[]}
def __init__(self, *args, **kwargs):
super(HiisiHDF, self).__init__(*args, **kwargs)
@staticmethod
def _clear_cache():
HiisiHDF.CACHE = {'search_attribute':None,
'dataset_paths':[],
'group_paths':[],
'attribute_paths':[]}
@staticmethod
def _is_dataset(name, obj):
if isinstance(obj, h5py.Dataset):
HiisiHDF.CACHE['dataset_paths'].append(obj.name)
@staticmethod
def _is_group(name, obj):
if isinstance(obj, h5py.Group):
HiisiHDF.CACHE['group_paths'].append(obj.name)
@staticmethod
def _find_attr_paths(name, obj):
if HiisiHDF.CACHE['search_attribute'] in obj.attrs:
HiisiHDF.CACHE['attribute_paths'].append(obj.name)
@staticmethod
def _is_attr_path(name, obj):
if HiisiHDF.CACHE['search_attribute'] in obj.attrs:
return obj.name
def attr_exists(self, attr):
"""Returns True if at least on instance of the attribute is found
"""
gen = self.attr_gen(attr)
n_instances = len(list(gen))
if n_instances > 0:
return True
else:
return False
def is_unique_attr(self, attr):
"""Returns true if only single instance of the attribute is found
"""
gen = self.attr_gen(attr)
n_instances = len(list(gen))
if n_instances == 1:
return True
else:
return False
def datasets(self):
"""Method returns a list of dataset paths.
Examples
--------
>>> for dataset in h5f.datasets():
print(dataset)
'/dataset1/data1/data'
'/dataset1/data2/data'
'/dataset2/data1/data'
'/dataset2/data2/data'
"""
HiisiHDF._clear_cache()
self.visititems(HiisiHDF._is_dataset)
return HiisiHDF.CACHE['dataset_paths']
def groups(self):
"""Method returns a list of all goup paths
Examples
--------
>>> for group in h5f.groups():
print(group)
'/'
'/dataset1'
'/dataset1/data1'
'/dataset1/data2'
"""
HiisiHDF._clear_cache()
self.CACHE['group_paths'].append('/')
self.visititems(HiisiHDF._is_group)
return HiisiHDF.CACHE['group_paths']
def attr_gen(self, attr):
"""Returns attribute generator that yields namedtuples containing
path value pairs
Parameters
----------
attr : str
Name of the search attribute
Returns
-------
attr_generator : generator
Returns a generator that yields named tuples with field names
path and value.
Examples
--------
>>> gen = h5f.attr_gen('elangle')
>>> pair = next(gen)
>>> print(pair.path)
'/dataset1/where'
>>> print(pair.value)
0.5
"""
HiisiHDF._clear_cache()
HiisiHDF.CACHE['search_attribute'] = attr
HiisiHDF._find_attr_paths('/', self['/']) # Check root attributes
self.visititems(HiisiHDF._find_attr_paths)
path_attr_gen = (PathValue(attr_path, self[attr_path].attrs.get(attr)) for attr_path in HiisiHDF.CACHE['attribute_paths'])
return path_attr_gen
def create_from_filedict(self, filedict):
"""
Creates h5 file from dictionary containing the file structure.
        Filedict is a regular dictionary whose keys are hdf5 paths and whose
        values are dictionaries containing the metadata and datasets. Metadata
        is given as normal key-value pairs and dataset arrays are given using
        the 'DATASET' key. Datasets must be numpy arrays.
        Method can also be used to append an existing hdf5 file. If the file
        is opened in read-only mode, the method does nothing.
Examples
--------
Create newfile.h5 and fill it with data and metadata
>>> h5f = HiisiHDF('newfile.h5', 'w')
>>> filedict = {'/':{'attr1':'A'},
'/dataset1/data1/data':{'DATASET':np.zeros(100), 'quantity':'emptyarray'}, 'B':'b'}
>>> h5f.create_from_filedict(filedict)
"""
if self.mode in ['r+','w', 'w-', 'x', 'a']:
for h5path, path_content in filedict.items():
if 'DATASET' in path_content.keys():
# If path exist, write only metadata
if h5path in self:
for key, value in path_content.items():
if key != 'DATASET':
self[h5path].attrs[key] = value
else:
try:
group = self.create_group(os.path.dirname(h5path))
                    except ValueError:
                        # The group already exists; reuse it
                        group = self[os.path.dirname(h5path)]
new_dataset = group.create_dataset(os.path.basename(h5path), data=path_content['DATASET'])
for key, value in path_content.items():
if key != 'DATASET':
new_dataset.attrs[key] = value
else:
try:
group = self.create_group(h5path)
except ValueError:
group = self[h5path]
for key, value in path_content.items():
group.attrs[key] = value
def search(self, attr, value, tolerance=0):
"""Find paths with a key value match
Parameters
----------
attr : str
name of the attribute
value : str or numerical value
value of the searched attribute
Keywords
--------
tolerance : float
tolerance used when searching for matching numerical
attributes. If the value of the attribute found from the file
differs from the searched value less than the tolerance, attributes
are considered to be the same.
Returns
-------
results : list
a list of all matching paths
Examples
--------
>>> for result in h5f.search('elangle', 0.5, 0.1):
print(result)
'/dataset1/where'
>>> for result in h5f.search('quantity', 'DBZH'):
print(result)
'/dataset1/data2/what'
'/dataset2/data2/what'
'/dataset3/data2/what'
'/dataset4/data2/what'
'/dataset5/data2/what'
"""
found_paths = []
gen = self.attr_gen(attr)
for path_attr_pair in gen:
            # If the attribute is numerical, use the tolerance keyword in the
            # value comparison. If the attribute is a string, require an exact match.
if isinstance(path_attr_pair.value, str):
type_name = 'str'
else:
type_name = path_attr_pair.value.dtype.name
if 'int' in type_name or 'float' in type_name:
if abs(path_attr_pair.value - value) <= tolerance:
found_paths.append(path_attr_pair.path)
else:
if path_attr_pair.value == value:
found_paths.append(path_attr_pair.path)
return found_paths
| karjaljo/hiisi | hiisi/hiisihdf.py | Python | mit | 8,001 |
from pyGBot.Plugins.games.fluxxfiles.fluxx import FluxxCard
from pyGBot.Plugins.games.fluxxfiles.goals import GoalCard
from pyGBot.Plugins.games.fluxxfiles.game import pretty_print_list
class RuleCard(FluxxCard):
information = """
This rule goes into effect as soon as you play it.
Place this card face up in the center of the table.
Discard it when a newer rule contradicts it.
""".strip()
type = "Rule"
def __init__(self, title, short_title, description):
FluxxCard.__init__(self, title, short_title, description)
self.draw_amount = -1
self.play_amount = -1
self.hand_limit = -1
self.keeper_limit = -1
def apply_rules(self, rules):
pass
def play(self, player):
self.owner.game.rule_pile.receive(self)
class BasicRules(RuleCard):
def __init__(self):
RuleCard.__init__(self, "Draw 1, Play 1", "R_BASIC", """
Draw 1 card per turn.
Play 1 card per turn.
No Hand Limit.
No Keeper Limit.
Leave this card on the table even when these rules
are replaced by New Rule cards.
""")
self.information = """
Start by shuffling the deck and deal 3 cards to
each player. Place this card in the center of the
table.
"""
self.draw_amount = 1
self.play_amount = 1
self.hand_limit = -1
self.keeper_limit = -1
class DrawRuleCard(RuleCard):
def __init__(self, amount):
RuleCard.__init__(self, "Draw %d" % amount, "R_D%d" % amount, """
Draw %d cards per turn.
The person who played this card should immediately
draw extra cards (if needed) so that they have
drawn %d cards this turn.
""" % (amount, amount))
self.draw_amount = amount
def apply_rules(self, rules):
# Discard the current draw rule card.
cards = [c for c in rules.cards if c.draw_amount > 1]
rules.discard(cards)
class PlayRuleCard(RuleCard):
def __init__(self, amount):
RuleCard.__init__(self, "Play %s" % amount, "R_P%d" % amount,
"""
Play %s cards per turn.
If you have fewer than %d cards in your hand,
play all your cards.
""" % (amount, amount))
self.play_amount = amount
def apply_rules(self, rules):
# Discard the current play rule card.
cards = [c for c in rules.cards if c.play_amount > 1 or hasattr(c, "magic")]
rules.discard(cards)
class PlayAllMagic(object):
def __add__(self, *a, **b):
return self
__radd__ = __add__
magic = True
def __str__(self):
return "All"
class PlayAllRuleCard(PlayRuleCard):
def __init__(self):
PlayRuleCard.__init__(self, 0)
RuleCard.__init__(self, "Play All", "R_P0", """
Play all of the cards in your hand each turn.
""")
self.play_amount = PlayAllMagic()
class HandLimitRuleCard(RuleCard):
def __init__(self, amount):
RuleCard.__init__(self, "Hand Limit %d" % amount,
"R_LH%d" % amount, """
If it isn't your turn, you should only have %d cards
in your hand. If you have extra cards, discard them
immediately.
During your turn, you may ignore the Hand Limit, as
long as you discard all but %d of your cards when
you end your turn.
""" % (amount, amount))
self.hand_limit = amount
def apply_rules(self, rules):
# Discard the current hand limit rule card.
cards = [c for c in rules.cards if c.hand_limit > -1]
rules.discard(cards)
class KeeperLimitRuleCard(RuleCard):
def __init__(self, amount):
RuleCard.__init__(self, "Keeper Limit %d" % amount,
"R_LK%d" % amount, """
If it isn't your turn, you should only have %d Keepers
in play. Discard any extras immediately.
You may play new Keepers during your turn as long as
you discard all but %d at the end of your turn.
""" % (amount, amount))
self.keeper_limit = amount
def apply_rules(self, rules):
# Discard the current keeper limit rule card.
cards = [c for c in rules.cards if c.keeper_limit > -1]
rules.discard(cards)
class NoHandBonusRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "No-Hand Bonus", "R_BH", """
If you have no cards in your hand at the start of your
turn, draw a new hand of 3 cards before observing
the current Draw rule.
""")
class PoorBonusRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "Poor Bonus", "R_BP", """
        If you have fewer Keepers in play than anyone else,
draw 1 extra card.
""")
class RichBonusRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "Rich Bonus", "R_BR", """
        If you have more Keepers in play than anyone else,
you may choose to play 1 extra card.
""")
class InflationRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "Inflation (X=X+1)", "R_I", """
Any time a numeral is seen on another card, add one
to that numeral. For example, 1 becomes 2, while one
remains one.""")
class DoubleAgendaRuleCard(RuleCard, GoalCard):
goal1, goal2 = None, None
def __init__(self):
RuleCard.__init__(self, "Double Agenda", "R_DA", """
A second Goal can now be played. After this, whoever plays
a new Goal (or causes this card to be discarded) must
choose which of the current Goals to discard.
You win if you satisfy either Goal. (In the case of a tie,
continue playing until a clear winner emerges.)
""")
self.type = "Rule"
def check_for_win(self, game):
if self.goal1 is not None:
self.goal1.check_for_win(game)
if self.goal2 is not None:
self.goal2.check_for_win(game)
def replace_goal(self, new_goal, player):
regex = "1|2|[gG]_[a-zA-Z]+"
def callback(message):
player = self.player
choice = message.strip()
if choice == "1" or self.goal1.short_title == choice.upper():
player.plugin.pubout("%s replaced %s with %s." % \
(player.name, self.goal1, self.new_goal))
self.goal1 = self.new_goal
player.halt_game = None
return True
elif choice == "2" or self.goal2.short_title == choice.upper():
player.plugin.pubout("%s replaced %s with %s." % \
(player.name, self.goal2, self.new_goal))
self.goal2 = self.new_goal
player.halt_game = None
return True
else:
player.request_input("Which goal do you want to remove? (1 or 2)",
(callback, regex))
if self.goal1 is None:
self.goal1 = new_goal
elif self.goal2 is None:
self.goal2 = new_goal
else:
self.new_goal = new_goal
player.halt_game = self.title
self.player = player
return player.request_input(("The goals are 1: '%s' and 2: '%s'. " +
"Which goal do you want to remove?") %
(self.goal1, self.goal2), (callback, regex))
def play(self, player):
self.owner.cards.remove(self)
self.goal1 = self.owner.game.current_goal
if self.goal1 is None:
self.owner.game.current_goal = self
else:
self.goal1.replace_goal(self, player)
def __str__(self):
s = pretty_print_list((self.goal1, self.goal2))
if s != "":
s = ": " + s
return "Double Agenda (R_DA)%s" % s
class ReverseOrderRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "Reverse Order", "R_RO", """
At the end of the turn in which this card is played,
        turn order will proceed in the opposite direction.
When this rule is discarded, the original turn order
is restored.
""")
def apply_rules(self, rules):
rules.game.reverse_played = True
class FirstPlayRandomRuleCard(RuleCard):
def __init__(self):
RuleCard.__init__(self, "First Play Random", "R_FP", """
        The first card you play must be chosen at random
from your hand by the player on your left.
Ignore this rule if, at the start of your turn, the
current Rule cards allow you to play only one card.
""")
| pyGBot/pyGBot | pyGBot/Plugins/games/fluxxfiles/rules.py | Python | gpl-3.0 | 8,471 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
from erpnext.stock.doctype.item.test_item import create_item
from erpnext.stock.doctype.delivery_note.test_delivery_note import create_delivery_note
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
from erpnext.controllers.stock_controller import QualityInspectionRejectedError, QualityInspectionRequiredError, QualityInspectionNotSubmittedError
# test_records = frappe.get_test_records('Quality Inspection')
class TestQualityInspection(unittest.TestCase):
def setUp(self):
create_item("_Test Item with QA")
frappe.db.set_value("Item", "_Test Item with QA", "inspection_required_before_delivery", 1)
def test_qa_for_delivery(self):
make_stock_entry(item_code="_Test Item with QA", target="_Test Warehouse - _TC", qty=1, basic_rate=100)
dn = create_delivery_note(item_code="_Test Item with QA", do_not_submit=True)
self.assertRaises(QualityInspectionRequiredError, dn.submit)
qa = create_quality_inspection(reference_type="Delivery Note", reference_name=dn.name, status="Rejected")
dn.reload()
self.assertRaises(QualityInspectionRejectedError, dn.submit)
frappe.db.set_value("Quality Inspection Reading", {"parent": qa.name}, "status", "Accepted")
dn.reload()
dn.submit()
qa.cancel()
dn.reload()
dn.cancel()
def test_qa_not_submit(self):
dn = create_delivery_note(item_code="_Test Item with QA", do_not_submit=True)
qa = create_quality_inspection(reference_type="Delivery Note", reference_name=dn.name, do_not_submit=True)
dn.items[0].quality_inspection = qa.name
self.assertRaises(QualityInspectionNotSubmittedError, dn.submit)
qa.delete()
dn.delete()
def test_value_based_qi_readings(self):
# Test QI based on acceptance values (Non formula)
dn = create_delivery_note(item_code="_Test Item with QA", do_not_submit=True)
readings = [{
"specification": "Iron Content", # numeric reading
"min_value": 0.1,
"max_value": 0.9,
"reading_1": "0.4"
},
{
"specification": "Particle Inspection Needed", # non-numeric reading
"numeric": 0,
"value": "Yes",
"reading_value": "Yes"
}]
qa = create_quality_inspection(reference_type="Delivery Note", reference_name=dn.name,
readings=readings, do_not_save=True)
qa.save()
# status must be auto set as per formula
self.assertEqual(qa.readings[0].status, "Accepted")
self.assertEqual(qa.readings[1].status, "Accepted")
qa.delete()
dn.delete()
def test_formula_based_qi_readings(self):
dn = create_delivery_note(item_code="_Test Item with QA", do_not_submit=True)
readings = [{
"specification": "Iron Content", # numeric reading
"formula_based_criteria": 1,
"acceptance_formula": "reading_1 > 0.35 and reading_1 < 0.50",
"reading_1": "0.4"
},
{
"specification": "Calcium Content", # numeric reading
"formula_based_criteria": 1,
"acceptance_formula": "reading_1 > 0.20 and reading_1 < 0.50",
"reading_1": "0.7"
},
{
"specification": "Mg Content", # numeric reading
"formula_based_criteria": 1,
"acceptance_formula": "mean < 0.9",
"reading_1": "0.5",
"reading_2": "0.7",
"reading_3": "random text" # check if random string input causes issues
},
{
"specification": "Calcium Content", # non-numeric reading
"formula_based_criteria": 1,
"numeric": 0,
"acceptance_formula": "reading_value in ('Grade A', 'Grade B', 'Grade C')",
"reading_value": "Grade B"
}]
qa = create_quality_inspection(reference_type="Delivery Note", reference_name=dn.name,
readings=readings, do_not_save=True)
qa.save()
# status must be auto set as per formula
self.assertEqual(qa.readings[0].status, "Accepted")
self.assertEqual(qa.readings[1].status, "Rejected")
self.assertEqual(qa.readings[2].status, "Accepted")
self.assertEqual(qa.readings[3].status, "Accepted")
qa.delete()
dn.delete()
def create_quality_inspection(**args):
args = frappe._dict(args)
qa = frappe.new_doc("Quality Inspection")
qa.report_date = nowdate()
qa.inspection_type = args.inspection_type or "Outgoing"
qa.reference_type = args.reference_type
qa.reference_name = args.reference_name
qa.item_code = args.item_code or "_Test Item with QA"
qa.sample_size = 1
qa.inspected_by = frappe.session.user
qa.status = args.status or "Accepted"
if not args.readings:
create_quality_inspection_parameter("Size")
readings = {"specification": "Size", "min_value": 0, "max_value": 10}
else:
readings = args.readings
if args.status == "Rejected":
readings["reading_1"] = "12" # status is auto set in child on save
if isinstance(readings, list):
for entry in readings:
create_quality_inspection_parameter(entry["specification"])
qa.append("readings", entry)
else:
qa.append("readings", readings)
if not args.do_not_save:
qa.save()
if not args.do_not_submit:
qa.submit()
return qa
def create_quality_inspection_parameter(parameter):
if not frappe.db.exists("Quality Inspection Parameter", parameter):
frappe.get_doc({
"doctype": "Quality Inspection Parameter",
"parameter": parameter,
"description": parameter
}).insert() | ESS-LLP/erpnext | erpnext/stock/doctype/quality_inspection/test_quality_inspection.py | Python | gpl-3.0 | 5,291 |
import pytest
import importlib
from spectralDNS import config, get_solver, solve
from TG2D import initialize, regression_test
@pytest.fixture(params=('1', '2'))
def args(request):
"""Check for uniform and non-uniform cube"""
if request.param[-1] == '1':
_args = ['--M', '5', '5', '--L', '2*pi', '2*pi']
else:
_args = ['--M', '6', '4', '--L', '6*pi', '4*pi']
return _args + ['NS2D']
def test_NS2D(args):
config.update(
{
'nu': 0.01,
'dt': 0.05,
'T': 10
}, 'doublyperiodic')
solver = get_solver(regression_test=regression_test,
mesh='doublyperiodic',
parse_args=args)
context = solver.get_context()
initialize(solver, **context)
solve(solver, context)
config.params.dealias = '3/2-rule'
initialize(solver, **context)
solve(solver, context)
config.params.dealias = '2/3-rule'
config.params.optimization = 'cython'
importlib.reload(solver)
initialize(solver, **context)
solve(solver, context)
config.params.write_result = 1
config.params.checkpoint = 1
config.params.dt = 0.01
config.params.t = 0.0
config.params.tstep = 0
config.params.T = 0.04
solver.regression_test = lambda c: None
initialize(solver, **context)
solve(solver, context)
| spectralDNS/spectralDNS | tests/test_NS2D.py | Python | lgpl-3.0 | 1,361 |
# Copyright 2016 Milos Svana
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .warcreader import WarcFile
from .warcreader import Webpage
| msvana/warcreader | warcreader/__init__.py | Python | apache-2.0 | 657 |
from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property
from ext import db
from config import avatar_tmpl
from .mixin import BaseMixin
friendship = db.Table(
'friends',
db.Column('user_id', db.String(20), db.ForeignKey('users.id')),
db.Column('friend_id', db.String(20), db.ForeignKey('users.id')),
mysql_charset='utf8mb4'
)
group_relationship = db.Table(
'group_relationship',
db.Column('group_id', db.String(20), db.ForeignKey('groups.id'),
nullable=False),
db.Column('user_id', db.String(20), db.ForeignKey('users.id'),
nullable=False),
mysql_charset='utf8mb4'
)
mp_relationship = db.Table(
'mp_relationship',
db.Column('mp_id', db.String(20), db.ForeignKey('mps.id'),
nullable=False),
db.Column('user_id', db.String(20), db.ForeignKey('users.id'),
nullable=False),
mysql_charset='utf8mb4'
)
class CoreMixin(BaseMixin):
@property
def avatar(self):
return avatar_tmpl.format(self.id)
def to_dict(self):
rs = super().to_dict()
rs['avatar'] = self.avatar
return rs
class User(CoreMixin, db.Model):
__tablename__ = 'users'
__table_args__ = {'mysql_charset': 'utf8mb4'}
id = db.Column(db.String(20), primary_key=True) # puid
sex = db.Column(db.SmallInteger, default=2)
nick_name = db.Column(db.String(60), index=True)
signature = db.Column(db.String(512), default='')
province = db.Column(db.String(20), default='')
city = db.Column(db.String(20), default='')
groups = db.relationship('Group', secondary=group_relationship,
backref='members')
mps = db.relationship('MP', secondary=mp_relationship,
backref='users')
friends = db.relationship('User',
secondary=friendship,
primaryjoin=(friendship.c.user_id == id),
                              secondaryjoin=(friendship.c.friend_id == id),
                              lazy='dynamic'
)
def __repr__(self):
return '<User %r>' % self.nick_name
@hybrid_method
def add_friend(self, user):
if not self.is_friend(user):
self.friends.append(user)
user.friends.append(self)
return self
@hybrid_method
def del_friend(self, user):
if self.is_friend(user):
self.friends.remove(user)
user.friends.remove(self)
return self
@hybrid_method
def is_friend(self, user):
return self.friends.filter(
friendship.c.friend_id == user.id).count() > 0
@hybrid_method
def add_group(self, group):
if not self.is_in_group(group):
self.groups.append(group)
@hybrid_method
def del_group(self, group):
if self.is_in_group(group):
self.groups.remove(group)
@hybrid_method
def is_in_group(self, group):
return group in self.groups
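    # Hypothetical usage sketch: add_friend/del_friend keep the self-referential
    # friendship table symmetric, e.g.
    #   alice.add_friend(bob)
    #   db.session.commit()
    #   assert alice.is_friend(bob) and bob.is_friend(alice)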
class Group(CoreMixin, db.Model):
__tablename__ = 'groups'
__table_args__ = {'mysql_charset': 'utf8mb4'}
id = db.Column(db.String(20), primary_key=True) # puid
owner_id = db.Column(db.String(20), index=True)
nick_name = db.Column(db.String(60), index=True)
def __repr__(self):
return '<Group %r>' % self.nick_name
@hybrid_method
def is_member(self, user):
return user in self.members
@hybrid_method
def add_member(self, user):
if not self.is_member(user):
self.members.append(user)
@hybrid_method
def del_member(self, user):
if self.is_member(user):
self.members.remove(user)
@hybrid_property
def count(self):
return len(self.members)
def to_dict(self):
rs = super().to_dict()
rs['count'] = self.count
return rs
class MP(CoreMixin, db.Model):
__tablename__ = 'mps'
__table_args__ = {'mysql_charset': 'utf8mb4'}
id = db.Column(db.String(20), primary_key=True) # puid
city = db.Column(db.String(20), default='')
province = db.Column(db.String(20), default='')
nick_name = db.Column(db.String(60), index=True)
signature = db.Column(db.String(255), default='')
def __repr__(self):
return '<MP %r>' % self.nick_name
| dongweiming/wechat-admin | models/core.py | Python | gpl-3.0 | 4,344 |
import json
from django import forms
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class JSONEditorWidget(forms.Widget):
template_name = 'django_admin_json_editor/editor.html'
def __init__(self, schema, collapsed=True, sceditor=False, editor_options=None):
super(JSONEditorWidget, self).__init__()
self._schema = schema
self._collapsed = collapsed
self._sceditor = sceditor
self._editor_options = {
'theme': 'bootstrap4',
'iconlib': 'fontawesome4',
}
self._editor_options.update(editor_options or {})
def render(self, name, value, attrs=None, renderer=None):
if callable(self._schema):
schema = self._schema(self)
else:
schema = self._schema
schema['title'] = ' '
schema['options'] = {'collapsed': int(self._collapsed)}
editor_options = self._editor_options.copy()
editor_options['schema'] = schema
context = {
'name': name,
'data': value,
'editor_options': json.dumps(editor_options),
}
return mark_safe(render_to_string(self.template_name, context))
@property
def media(self):
css = {
'all': [
'django_admin_json_editor/fontawesome/css/font-awesome.min.css',
'django_admin_json_editor/style.css',
]
}
js = [
'django_admin_json_editor/jsoneditor/jsoneditor.min.js',
]
if self._editor_options['theme'] == 'bootstrap4':
css['all'].append('django_admin_json_editor/bootstrap/css/bootstrap.min.css')
js.append('django_admin_json_editor/jquery/jquery-3.5.1.slim.min.js')
js.append('django_admin_json_editor/bootstrap/js/bootstrap.bundle.min.js')
if self._sceditor:
css['all'].append('django_admin_json_editor/sceditor/themes/default.min.css')
js.append('django_admin_json_editor/sceditor/jquery.sceditor.bbcode.min.js')
return forms.Media(css=css, js=js)
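# Hypothetical wiring sketch (JSONModel and the 'data' field are illustrative,
# not part of this module): attach the widget to a model form field, passing a
# schema dict -- or a callable returning one -- as the first argument.
#
#   DATA_SCHEMA = {'type': 'object', 'properties': {'text': {'type': 'string'}}}
#
#   class JSONModelAdminForm(forms.ModelForm):
#       class Meta:
#           model = JSONModel
#           fields = '__all__'
#           widgets = {'data': JSONEditorWidget(DATA_SCHEMA, collapsed=False)}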
| abogushov/django-admin-json-editor | django_admin_json_editor/admin.py | Python | mit | 2,131 |
# -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.forms import ClearableFileInput
from django.utils.safestring import mark_safe
from django.conf import settings
from hawaii.apps.weixin.models import Photo
class UpyunImageWidget(ClearableFileInput):
def render(self, name, value, attrs=None):
template = super(UpyunImageWidget, self).render(name, value, attrs)
url = "%s%s" % (settings.IMG_HOST, value)
append = '<p>%s</p><a href="%s" target="_blank"><img src="%s" width="100" height="100"></a>' % (value, url, url)
return mark_safe(template + append)
class ForeighUpyunImageWidget(ForeignKeyRawIdWidget):
def render(self, name, value, *args, **kwargs):
template = super(ForeighUpyunImageWidget, self).render(name, value, *args, **kwargs)
photo = Photo.get_by_unique(id=value)
if not photo:
return template
url = photo.url
append = '<p>%s</p><a href="%s" target="_blank"><img src="%s" width="100" height="100"></a>' % (photo.name, url, url)
return mark_safe(template + append) | chenchiyuan/hawaii | hawaii/apps/weixin/widgets.py | Python | bsd-3-clause | 1,224 |
# -*- coding: utf-8 -*-
"""pybooru.pybooru
This module contains the pybooru main class used to access the API,
handle authentication and return JSON responses.
Classes:
_Pybooru -- Main pybooru classs, define Pybooru object and do requests.
"""
# __furute__ imports
from __future__ import absolute_import
# External imports
import re
import requests
# pybooru imports
from . import __version__
from .exceptions import (PybooruError, PybooruHTTPError)
from .resources import (SITE_LIST, HTTP_STATUS_CODE)
class _Pybooru(object):
"""Pybooru main class.
Attributes:
        site_name (str): Get or set the site name.
site_url (str): Get or set the URL of Moebooru/Danbooru based site.
username (str): Return user name.
last_call (dict): Return last call.
"""
def __init__(self, site_name='', site_url='', username=''):
"""Initialize Pybooru.
Keyword arguments:
site_name (str): The site name in 'SITE_LIST', default sites.
site_url (str): URL of on Moebooru/Danbooru based sites.
username (str): Your username of the site (Required only for
functions that modify the content).
Raises:
PybooruError: When 'site_name' and 'site_url' are empty.
"""
# Attributes
self.__site_name = '' # for site_name property
self.__site_url = '' # for site_url property
self.username = username
self.last_call = {}
# Set HTTP Client
self.client = requests.Session()
headers = {'user-agent': 'Pybooru/{0}'.format(__version__),
'content-type': 'application/json; charset=utf-8'}
self.client.headers = headers
# Validate site_name or site_url
if site_name:
self.site_name = site_name
elif site_url:
self.site_url = site_url
else:
raise PybooruError("Unexpected empty arguments, specify parameter "
"'site_name' or 'site_url'.")
@property
def site_name(self):
"""Get or set site name.
:getter: Return site name.
:setter: Validate and set site name.
:type: string
"""
return self.__site_name
@site_name.setter
def site_name(self, site_name):
"""Function that sets and checks the site name and set url.
Parameters:
site_name (str): The site name in 'SITE_LIST', default sites.
Raises:
PybooruError: When 'site_name' isn't valid.
"""
if site_name in SITE_LIST:
self.__site_name = site_name
self.__site_url = SITE_LIST[site_name]['url']
else:
raise PybooruError(
"The 'site_name' is not valid, specify a valid 'site_name'.")
@property
def site_url(self):
"""Get or set site url.
:getter: Return site url.
:setter: Validate and set site url.
:type: string
"""
return self.__site_url
@site_url.setter
def site_url(self, url):
"""URL setter and validator for site_url property.
Parameters:
url (str): URL of on Moebooru/Danbooru based sites.
Raises:
PybooruError: When URL scheme or URL are invalid.
"""
        # Regular expression to validate URLs
        regex = re.compile(
            r'^(?:http|https)://'  # Scheme only HTTP/HTTPS
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
            r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'  # Domain
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # or ipv6
r'(?::\d+)?' # Port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
# Validate URL
if re.match('^(?:http|https)://', url):
if re.search(regex, url):
self.__site_url = url
else:
raise PybooruError("Invalid URL: {0}".format(url))
else:
raise PybooruError(
"Invalid URL scheme, use HTTP or HTTPS: {0}".format(url))
@staticmethod
def _get_status(status_code):
"""Get status message for status code.
Parameters:
status_code (int): HTTP status code.
Returns:
status message (str).
"""
return "{0}, {1}".format(*HTTP_STATUS_CODE.get(
status_code, ('Undefined', 'undefined')))
def _request(self, url, api_call, request_args, method='GET'):
"""Function to request and returning JSON data.
Parameters:
url (str): Base url call.
api_call (str): API function to be called.
request_args (dict): All requests parameters.
            method (str): (Default: GET) HTTP method 'GET' or 'POST'
Raises:
PybooruHTTPError: HTTP Error.
requests.exceptions.Timeout: When HTTP Timeout.
            ValueError: When the JSON response cannot be decoded.
"""
try:
if method != 'GET':
# Reset content-type for data encoded as a multipart form
self.client.headers.update({'content-type': None})
response = self.client.request(method, url, **request_args)
self.last_call.update({
'API': api_call,
'url': response.url,
'status_code': response.status_code,
'status': self._get_status(response.status_code),
'headers': response.headers
})
if response.status_code in (200, 201, 202):
return response.json()
elif response.status_code == 204:
return True
raise PybooruHTTPError("In _request", response.status_code,
response.url)
except requests.exceptions.Timeout:
raise PybooruError("Timeout! url: {0}".format(response.url))
except ValueError as e:
raise PybooruError("JSON Error: {0} in line {1} column {2}".format(
e.msg, e.lineno, e.colno))
| LuqueDaniel/pybooru | pybooru/pybooru.py | Python | mit | 6,195 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import Qt
from calibre.gui2.convert.look_and_feel_ui import Ui_Form
from calibre.gui2.convert import Widget
class LookAndFeelWidget(Widget, Ui_Form):
TITLE = _('Look & Feel')
ICON = I('lookfeel.png')
HELP = _('Control the look and feel of the output')
COMMIT_NAME = 'look_and_feel'
FILTER_CSS = {
'fonts': {'font-family'},
'margins': {'margin', 'margin-left', 'margin-right', 'margin-top',
'margin-bottom'},
'padding': {'padding', 'padding-left', 'padding-right', 'padding-top',
'padding-bottom'},
'floats': {'float'},
'colors': {'color', 'background', 'background-color'},
}
def __init__(self, parent, get_option, get_help, db=None, book_id=None):
Widget.__init__(self, parent,
['change_justification', 'extra_css', 'base_font_size',
'font_size_mapping', 'line_height', 'minimum_line_height',
'embed_font_family', 'embed_all_fonts', 'subset_embedded_fonts',
'smarten_punctuation', 'unsmarten_punctuation',
'disable_font_rescaling', 'insert_blank_line',
'remove_paragraph_spacing',
'remove_paragraph_spacing_indent_size',
'insert_blank_line_size',
'input_encoding', 'filter_css', 'expand_css',
'asciiize', 'keep_ligatures',
'linearize_tables']
)
for val, text in [
('original', _('Original')),
('left', _('Left align')),
('justify', _('Justify text'))
]:
self.opt_change_justification.addItem(text, (val))
self.db, self.book_id = db, book_id
self.initialize_options(get_option, get_help, db, book_id)
self.opt_disable_font_rescaling.toggle()
self.opt_disable_font_rescaling.toggle()
self.button_font_key.clicked.connect(self.font_key_wizard)
self.opt_remove_paragraph_spacing.toggle()
self.opt_remove_paragraph_spacing.toggle()
self.opt_smarten_punctuation.stateChanged.connect(
lambda state: state != Qt.Unchecked and
self.opt_unsmarten_punctuation.setCheckState(Qt.Unchecked))
self.opt_unsmarten_punctuation.stateChanged.connect(
lambda state: state != Qt.Unchecked and
self.opt_smarten_punctuation.setCheckState(Qt.Unchecked))
def get_value_handler(self, g):
if g is self.opt_change_justification:
ans = unicode(g.itemData(g.currentIndex()) or '')
return ans
if g is self.opt_filter_css:
ans = set()
for key, item in self.FILTER_CSS.iteritems():
w = getattr(self, 'filter_css_%s'%key)
if w.isChecked():
ans = ans.union(item)
ans = ans.union(set([x.strip().lower() for x in
unicode(self.filter_css_others.text()).split(',')]))
return ','.join(ans) if ans else None
if g is self.opt_font_size_mapping:
val = unicode(g.text()).strip()
val = [x.strip() for x in val.split(',' if ',' in val else ' ') if x.strip()]
return ', '.join(val) or None
return Widget.get_value_handler(self, g)
def set_value_handler(self, g, val):
if g is self.opt_change_justification:
for i in range(g.count()):
c = unicode(g.itemData(i) or '')
if val == c:
g.setCurrentIndex(i)
break
return True
if g is self.opt_filter_css:
if not val:
val = ''
items = frozenset([x.strip().lower() for x in val.split(',')])
for key, vals in self.FILTER_CSS.iteritems():
w = getattr(self, 'filter_css_%s'%key)
if not vals - items:
items = items - vals
w.setChecked(True)
else:
w.setChecked(False)
self.filter_css_others.setText(', '.join(items))
return True
def connect_gui_obj_handler(self, gui_obj, slot):
if gui_obj is self.opt_filter_css:
for key in self.FILTER_CSS:
w = getattr(self, 'filter_css_%s'%key)
w.stateChanged.connect(slot)
self.filter_css_others.textChanged.connect(slot)
return
raise NotImplementedError()
def font_key_wizard(self):
from calibre.gui2.convert.font_key import FontKeyChooser
d = FontKeyChooser(self, self.opt_base_font_size.value(),
unicode(self.opt_font_size_mapping.text()).strip())
if d.exec_() == d.Accepted:
self.opt_font_size_mapping.setText(', '.join(['%.1f'%x for x in
d.fsizes]))
self.opt_base_font_size.setValue(d.dbase)
| elssar/calibre | src/calibre/gui2/convert/look_and_feel.py | Python | gpl-3.0 | 5,251 |
import certifi
import hashlib
import json
import time
import os.path
import queue
import ssl
import threading
import urllib.error
import urllib.parse
import urllib.request
import zlib
class HashHandler:
hash_dict = None
hash_addresses = None
downloads_failed_list = []
downloads_total_counter = 0
downloads_completed_counter = 0
def __init__(self, *, addresses, max_attempts, logging):
self.log = logging
self.hash_addresses = addresses
self.download_queue = queue.Queue()
self.validate_queue = queue.Queue()
self.max_attempts = max_attempts
self.download_callback_lock = threading.Lock()
self.validate_callback_lock = threading.Lock()
self.ssl_context = ssl.create_default_context()
self.ssl_context.load_verify_locations(certifi.where())
def build_validate_queue(self, path=None, hash_dict=None):
""" Build the validate queue from a hash dictionary.
Loop through a hash dictionary to create one entry for each file that needs to be validated.
If the dictionary entry is a file, generate extra information and store it in the queue entry.
If the dictionary entry is a directory, pass it back into this function to process its entries.
        :param list path: The path of the files in this dictionary
        :param dict hash_dict: The dictionary to parse
"""
if not path:
path = []
if not hash_dict:
hash_dict = self.hash_dict["files"]
for key in hash_dict:
if "hash" in hash_dict[key]:
# If a hash is present, fill out the rest of the file info
hash_dict[key]["attempted"] = 0
hash_dict[key]["name"] = key
hash_dict[key]["path"] = str.join("/", path)
hash_dict[key]["url"] = self.__make_url(self.hash_dict["host"], self.hash_dict["version"],
hash_dict[key]["path"], hash_dict[key]["name"] + ".gz")
self.validate_queue.put(hash_dict[key])
else:
# If there is no hash, this is a directory which needs to be processed
dir_path = path.copy()
dir_path.append(key)
self.build_validate_queue(dir_path, hash_dict[key])
return
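    # Illustrative manifest shape assumed by the recursion above (all field
    # values are hypothetical): nested dicts are directories, and any entry
    # carrying a "hash" key is a file that gets queued.
    #
    #   {"host": "https://example.com/files", "version": "1.0.2",
    #    "files": {"readme.txt": {"hash": "..."},
    #              "bin": {"app.dat": {"hash": "..."}}}}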
def download_hash(self):
""" Download and store the fastest most recent copy of the hash file.
Download each hash file in the address list. Time each download to determine the best option. Pick the fastest
download unless it is an older version of the hash file.
"""
context = ssl.create_default_context()
context.load_verify_locations(certifi.where())
# Check each address for hash files
for address in self.hash_addresses:
try:
# Download the hash file and record the download time
start_time = time.perf_counter()
                request = urllib.request.urlopen(address, context=self.ssl_context)
download_time = time.perf_counter() - start_time
# Parse the response into a dictionary
text = request.read().decode("utf-8")
hash_dict = json.loads(text)
                # Record how long this candidate took so that later, equally
                # up-to-date candidates can be compared on speed
                hash_dict["download_time"] = download_time
                # Check if this is the best hash file so far
                if self.hash_dict is None:
                    self.hash_dict = hash_dict
                    continue
                version_diff = self.__compare_versions(self.hash_dict["version"], hash_dict["version"])
                if version_diff > 0:
                    self.log.info("Newer: " + address)
                    self.hash_dict = hash_dict
                    continue
                elif version_diff < 0:
                    self.log.info("Older: " + address)
                    continue
                elif download_time < self.hash_dict["download_time"]:
                    self.log.info("Faster: " + address)
                    self.hash_dict = hash_dict
                    continue
except urllib.error.HTTPError:
self.log.error("Hash Download Failed", tb=True)
except ValueError:
self.log.error("Invalid Hash", tb=True)
except:
self.log.error("Unknown Error", tb=True)
return
def get_downloads_failed_list(self):
""" Getter for the list of failed downloads.
:return list:
"""
return self.downloads_failed_list
def get_downloads_failed(self):
return len(self.downloads_failed_list)
def get_downloads_total(self):
return self.downloads_total_counter
def get_downloads_completed(self):
return self.downloads_completed_counter
def get_downloads_remaining(self):
return self.downloads_total_counter - self.downloads_completed_counter
def get_version(self):
""" Getter for the version of the hash dictionary.
:return str: version if a hash dictionary is present, otherwise None
"""
if self.hash_dict:
return str(self.hash_dict["version"])
else:
return None
def start_downloading(self, *, callback=None, destination=None, threads=1, wait=False):
""" Spawn file download processing threads.
:param int threads: The number of concurrent threads to spawn
:param callable callback: A callback function to run after each download
:param bool wait: Determines whether to block this function until the download queue is empty
:return:
"""
self.downloads_failed_list.clear()
for i in range(threads):
t = threading.Thread(target=self.__download_processor, daemon=True,
kwargs={"callback": callback, "destination": destination})
t.start()
if wait:
self.download_queue.join()
return
def start_validating(self, *, callback=None, threads=1, wait=False):
""" Spawn file validation processing threads.
:param int threads: The number of concurrent threads to spawn
:param callable callback: A callback function to run after each validation
:param bool wait: Determines whether to block this function until the validate queue is empty
:return:
"""
for i in range(threads):
t = threading.Thread(target=self.__validate_processor, daemon=True, kwargs={"callback": callback})
t.start()
if wait:
self.validate_queue.join()
return
@staticmethod
def __compare_versions(version1, version2):
""" Compares two version strings to find which is newer.
Compares each segment of the version strings individually to see which version is the newest.
:param str version1: the first version
:param str version2: the second version (the one to test)
:return int: 1 for newer, -1 for older, 0 for equal
"""
        # Convert the version strings to lists of integers so that segments
        # compare numerically (e.g. 10 > 9), not lexicographically
        version1_list = [int(x) for x in version1.split(".")]
        version2_list = [int(x) for x in version2.split(".")]
# Pad the version lists if needed
max_length = max([len(version1_list), len(version2_list)])
if len(version1_list) < max_length:
version1_list += [0] * (max_length - len(version1_list))
if len(version2_list) < max_length:
version2_list += [0] * (max_length - len(version2_list))
# Compare the lists
for i in range(0, max_length):
if version2_list[i] > version1_list[i]:
return 1
elif version2_list[i] < version1_list[i]:
return -1
return 0
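    # Worked example (using the integer conversion above):
    #   __compare_versions("1.2", "1.10") compares 1 == 1, then 10 > 2 and
    #   returns 1 -- i.e. "1.10" is newer, which a lexicographic string
    #   comparison would have gotten wrong.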
@staticmethod
def __get_hash(data):
""" Return the sha1 hash of some data.
:param data: The data to hash
:return: The hash
"""
return hashlib.sha1(data).hexdigest()
@staticmethod
def __read_file(file_path):
""" Open a file and return its contents.
:param str file_path: The file to read
        :return bytes: The file contents
        """
        with open(file_path, 'rb') as file:
            return file.read()
def __create_path(self, path):
base = os.path.split(path)[0]
if not os.path.exists(base):
self.__create_path(base)
os.mkdir(path)
def __save_file(self, data, file_path):
""" Save data to a file.
:param data: The data to save
:param str file_path: The file to save to
:return:
"""
folder = os.path.split(file_path)[0]
if not os.path.exists(folder):
self.__create_path(folder)
file = open(file_path, 'wb')
file.write(data)
file.close()
return
def __validate_file(self, file_path, file_name, file_hash):
""" Validate that the file matches its hash.
:param str file_path: The path of the file
:param str file_name: The name of the file
:param str file_hash: The hash of the file
:return bool: True if the file validates, False if it does not
"""
full_path = os.path.join(file_path, file_name)
if os.path.exists(full_path) and self.__get_hash(self.__read_file(full_path)) == file_hash:
return True
else:
return False
def __validate_processor(self, callback=None):
""" A processor task which keeps checking a queue for more files to validate.
:param callable callback: A callback function to run after each validation
"""
# Keep this function running on a timer
while True:
# If the queue is empty, wait a while before checking again
if self.validate_queue.empty():
time.sleep(1)
# Get the validation entry
entry = self.validate_queue.get()
# If the file does not validate, add it to the download queue
if not self.__validate_file(entry["path"], entry["name"], entry["hash"]):
self.download_queue.put(entry)
self.downloads_total_counter += 1
# Either way, mark the task as done
self.validate_queue.task_done()
if callback and callable(callback):
with self.validate_callback_lock:
callback()
def __download_file(self, source, destination, validation_hash=None):
""" Download a file.
:param str source: The address to download from
:param str destination: The location to save the file
:param str validation_hash: The hash to validate against
:return:
:raise ValueError: The download integrity could not be validated
"""
data = urllib.request.urlopen(source, context=self.ssl_context).read()
if source.endswith(".gz"):
data = zlib.decompress(data, 15+32)
        if validation_hash and self.__get_hash(data) != validation_hash:
raise ValueError
self.__save_file(data, destination)
return
def __download_processor(self, *, callback=None, destination):
""" A processor task which keeps checking a queue for more files to download.
:param callable callback: A callback function to run after each download
"""
# Keep this function running on a timer
while True:
# If the queue is empty, wait a while before checking again
if self.download_queue.empty():
time.sleep(1)
# Get the download entry
entry = self.download_queue.get()
# Retry the download until a success or an exception handler breaks out
while True:
try:
# Increment the attempt counter and try to download
entry["attempted"] += 1
path = os.path.join(destination, entry["path"], entry["name"])
self.__download_file(entry["url"], path, entry["hash"])
# If you get this far, the download succeeded - break from the retry loop
self.downloads_completed_counter += 1
break
except:
if entry["attempted"] < self.max_attempts:
# Log the exception
self.log.warning("Download failed: " + entry["name"], tb=True)
else:
# If the max number of attempts has been reached, log it and break from the retry loop
self.log.error("Download failed: " + entry["name"], tb=True)
self.downloads_failed_list.append(entry)
break
                # Whether successful or not, this task is now done
self.download_queue.task_done()
if callback and callable(callback):
with self.download_callback_lock:
callback()
@staticmethod
def __make_url(*args):
return os.path.join(*args).replace("\\", "/").replace(" ", "%20")
class DownloadException(Exception):
pass
class DownloadValidateException(DownloadException):
pass
class DownloadMaxAttemptException(DownloadException):
pass
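# Hypothetical driver sketch showing the intended call order (the address,
# destination and logging object are illustrative):
#
#   handler = HashHandler(addresses=['https://example.com/hash.json'],
#                         max_attempts=3, logging=log)
#   handler.download_hash()         # keep the fastest, newest manifest
#   handler.build_validate_queue()  # one queue entry per manifest file
#   handler.start_validating(threads=4, wait=True)   # failures feed downloads
#   handler.start_downloading(destination='client', threads=4, wait=True)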
| Naozumi/updater | updater/hash.py | Python | mit | 13,294 |
# -*- coding: utf-8 -*-
import os
import pytest
from scripts import compile_data
TESTS_DIR = os.path.abspath(os.path.join(__file__, '..'))
MOCK_DATA_DIR = os.path.join(TESTS_DIR, 'mock_data')
PID_FAIL = '401'
PID_SUCCESS = '1'
PID_SUCCESS_2 = '2'
def _csv_path(stage, pid):
return os.path.join(MOCK_DATA_DIR, stage, '{}.csv'.format(pid))
def test_get_data_file_paths_returns_list_of_paths():
mock_practice_csvs = compile_data.get_csv_paths(MOCK_DATA_DIR, 'practice')
assert len(mock_practice_csvs) == 3
assert _csv_path('practice', PID_SUCCESS) in mock_practice_csvs
def test_extract_sart_blocks_with_2_practice():
# NOTE: tests out get_csv_as_dataframe() from compile_data
csv_path = _csv_path('practice', PID_SUCCESS)
df = compile_data.get_csv_as_dataframe(csv_path)
blocks = compile_data.extract_sart_blocks(df)
assert len(blocks) == 2
for b in blocks:
assert isinstance(b, compile_data.pd.DataFrame)
# number of trials
assert len(blocks[0].index.values) == 18
assert len(blocks[1].index.values) == 27
def get_csv_as_df(stage, pid):
"""Take an experiment stage and participant ID and return a pandas
data frame.
"""
csv_path = os.path.join(MOCK_DATA_DIR, stage, '{}.csv'.format(pid))
df = compile_data.get_csv_as_dataframe(csv_path)
return df
def test_extract_sart_blocks_with_4_practices():
"""Examine blocks performed by participant who failed practice
"""
df = get_csv_as_df('practice', PID_FAIL)
blocks = compile_data.extract_sart_blocks(df)
assert len(blocks) == 4
# number of trials
assert len(blocks[0].index.values) == 18
assert len(blocks[1].index.values) == 27
assert len(blocks[2].index.values) == 27
assert len(blocks[3].index.values) == 27
for b in blocks:
# class
assert isinstance(b, compile_data.pd.DataFrame)
# trials
for idx, series in b.iterrows():
            assert series['trial_type'] == 'multi-stim-multi-response'
def test_extract_sart_blocks_with_experiment_trials_plus_survey():
df = get_csv_as_df('experiment', PID_SUCCESS)
blocks = compile_data.extract_sart_blocks(df, with_survey=True)
# basic structure
assert len(blocks) == 5
for b in blocks:
assert isinstance(b, compile_data.pd.DataFrame)
# number of trials + multi-choice survey responses
EXPECTED_BLOCK_LENGTH = 228
assert len(blocks[0].index.values) == EXPECTED_BLOCK_LENGTH
assert len(blocks[1].index.values) == EXPECTED_BLOCK_LENGTH
assert len(blocks[2].index.values) == EXPECTED_BLOCK_LENGTH
assert len(blocks[3].index.values) == EXPECTED_BLOCK_LENGTH
assert len(blocks[4].index.values) == EXPECTED_BLOCK_LENGTH
# trial structure
trial_type_mc = 'survey-multi-choice'
trial_type_msmr = 'multi-stim-multi-response'
b4 = blocks[4]
b4_last_idx = b4.last_valid_index()
b4_first_idx = b4.first_valid_index()
assert b4.ix[b4_first_idx]['trial_type'] == trial_type_msmr
assert b4.ix[b4_last_idx-3]['trial_type'] == trial_type_msmr
# last three trials should be multiple choice
assert b4.ix[b4_last_idx-2]['trial_type'] == trial_type_mc
assert b4.ix[b4_last_idx-1]['trial_type'] == trial_type_mc
assert b4.ix[b4_last_idx]['trial_type'] == trial_type_mc
def test_compile_practice_with_passing_data():
df = get_csv_as_df('practice', PID_SUCCESS)
data = compile_data.compile_practice_data(df)
assert data['id'] == PID_SUCCESS
assert data['num_practice_blk2s'] == 1
assert data['passed_practice'] is True
assert data['time_practice_blk1_ms'] == 25851
assert data['time_practice_blk2_1_ms'] == 29900
assert data['time_practice_ms'] == 134626
def test_compile_practice_with_failing_data():
df = get_csv_as_df('practice', PID_FAIL)
data = compile_data.compile_practice_data(df)
assert data['id'] == PID_FAIL
assert data['num_practice_blk2s'] == 3
assert data['passed_practice'] is False
assert data['time_practice_blk1_ms'] == 25572
assert data['time_practice_blk2_1_ms'] == 29968
assert data['time_practice_blk2_2_ms'] == 29962
assert data['time_practice_blk2_3_ms'] == 29964
# baseline evaluation of valence and arousal
assert data['arousal_baseline_feeling'] == '1'
assert data['arousal_baseline_mind_body'] == '2'
assert data['time_practice_ms'] == 152517
def test_get_response_from_json():
json = '{"Q0":"3"}'
resp1 = compile_data.get_response_from_json(json)
assert resp1 == "3"
json = '{"Q0":"2<br>Often or<br>very much"}'
resp1 = compile_data.get_response_from_json(json)
assert resp1 == "2<br>Often or<br>very much"
def test_get_response_via_node_id():
df = get_csv_as_df('follow_up', PID_SUCCESS)
resp1 = compile_data.get_response_via_node_id(df, '0.0-1.0-0.0')
assert resp1 == '28'
resp2 = compile_data.get_response_via_node_id(df, '0.0-2.0-0.0')
assert resp2 == 'Female'
def test__format_rts():
rts = ['[667]']
rts_strf = compile_data._format_rts(rts)
assert isinstance(rts_strf, list)
assert len(rts_strf) == 1
for rt in rts_strf:
assert isinstance(rt, int)
def _get_sart_experiment_block(pid, block_index=0):
df = get_csv_as_df('experiment', pid)
blocks = compile_data.extract_sart_blocks(df, with_survey=True)
b = blocks[block_index]
trial_block = b.loc[b['trial_type'] == 'multi-stim-multi-response']
return trial_block
def test__format_rts_with_data():
pid = PID_SUCCESS
sart_block = _get_sart_experiment_block(pid)
rt_strf = compile_data._format_rts(sart_block['rt'])
assert isinstance(rt_strf, list)
assert len(rt_strf) == 222
for rt in rt_strf:
assert isinstance(rt, int)
def test__is_anticipation_error():
assert not compile_data._is_anticipation_error('[667]')
assert not compile_data._is_anticipation_error('[100]')
assert compile_data._is_anticipation_error('[99]')
assert compile_data._is_anticipation_error('[15]')
assert not compile_data._is_anticipation_error('[-1]')
def test__add_anticipation_errors_to_df():
pid = PID_SUCCESS
df = _get_sart_experiment_block(pid, 2)
df_anticip = compile_data._add_anticipation_errors(df)
assert 'anticipate_error' in df_anticip
anticipated = list(df_anticip.anticipate_error)
assert anticipated.count(True) == 2
def test___calculate_go_errors():
pid = PID_SUCCESS
df = _get_sart_experiment_block(pid, 2)
# check known values
assert list(df.correct.values).count(False) == 4
# add anticipation errors
dfa = compile_data._add_anticipation_errors(df)
# check known values
assert list(dfa.anticipate_error.values).count(True) == 2
# anticipation errors are added to error count
assert list(dfa.correct.values).count(False) == 6
go_errors = compile_data._calculate_go_errors(dfa, 'go')
assert isinstance(go_errors, compile_data.pd.Series)
assert list(go_errors).count(True) == 1
nogo_errors = compile_data._calculate_go_errors(dfa, 'no_go')
assert isinstance(nogo_errors, compile_data.pd.Series)
assert list(nogo_errors).count(True) == 3
def test__calculate_nogo_error_rt_avgs_blk3():
pid = PID_SUCCESS
df = _get_sart_experiment_block(pid, 2)
df = compile_data._add_anticipation_errors(df)
df['nogo_error'] = compile_data._calculate_go_errors(df, 'no_go')
assert list(df['nogo_error']).count(True) == 3
adjacent_rts = compile_data._calculate_nogo_error_rt_avgs(df)
assert adjacent_rts['prev4_avg'] == 371.0
assert adjacent_rts['num_prev4_rts'] == 12
assert adjacent_rts['next4_avg'] == 435.75
assert adjacent_rts['num_next4_rts'] == 12
def test__calculate_nogo_error_rt_avgs_blk4():
pid = PID_SUCCESS
df = _get_sart_experiment_block(pid, 3)
df = compile_data._add_anticipation_errors(df)
df['nogo_error'] = compile_data._calculate_go_errors(df, 'no_go')
assert list(df['nogo_error']).count(True) == 5
adjacent_rts = compile_data._calculate_nogo_error_rt_avgs(df)
assert adjacent_rts['prev4_avg'] == 318.833333333
assert adjacent_rts['num_prev4_rts'] == 18
assert adjacent_rts['next4_avg'] == 407.105263158
assert adjacent_rts['num_next4_rts'] == 19
def test__get_correct_rts_blk1():
pid = PID_SUCCESS
sart_block = _get_sart_experiment_block(pid)
df = compile_data._add_anticipation_errors(sart_block)
rts = compile_data._get_correct_rts(df)
assert len(rts) == 218
assert round(compile_data.np.mean(rts), 2) == 364.58
def test__get_correct_rts_blk4():
pid = PID_SUCCESS
sart_block = _get_sart_experiment_block(pid, 3)
df = compile_data._add_anticipation_errors(sart_block)
rts = compile_data._get_correct_rts(df)
assert len(rts) == 198
assert round(compile_data.np.mean(rts), 2) == 351.56
def test_summarize_block_performance_blk4():
pid = PID_SUCCESS
sart_block = _get_sart_experiment_block(pid, 4)
p = compile_data.summarize_block_performance(sart_block)
assert p['num_trials'] == 225
assert p['rt_avg'] == 404.205263158
assert p['anticipated_num_errors'] == 25
assert p['anticipated'] == 0.111111111
assert p['go_num_errors'] == 6
assert p['go_errors'] == 0.026666667
assert p['nogo_num_errors'] == 0
assert p['nogo_errors'] == 0.0
assert p['accuracy'] == 0.862222222 # 194/225
total_error_prop = (p['anticipated'] + p['go_errors'] + p['nogo_errors'])
# average RTs before and after no-go errors
assert p['nogo_prev4_avg'] == None # no no-go errors
assert p['nogo_next4_avg'] == None # no no-go errors
# ensure that calculations match up
rnd = compile_data.ROUND_NDIGITS
assert round(total_error_prop, rnd-1) == round(1 - p['accuracy'], rnd-1)
def test_summarize_sart_chunk():
pid = PID_SUCCESS
df = get_csv_as_df('experiment', pid)
blocks = compile_data.extract_sart_blocks(df, with_survey=True)
# fourth block
b4 = blocks[3]
b4s = compile_data.summarize_sart_chunk(b4)
assert b4s['anticipated'] == 0.062222222
assert b4s['accuracy'] == 0.88
assert b4s['effort'] == 7
assert b4s['discomfort'] == 7
assert b4s['boredom'] == 6
assert b4s['ratings_time_min'] == 19.616666667
assert b4s['num_trials'] == 225
assert b4s['nogo_prev4_avg'] == 318.833333333
assert b4s['nogo_next4_avg'] == 407.105263158
# last (fifth) block
b5 = blocks[-1]
b5s = compile_data.summarize_sart_chunk(b5)
assert b5s['anticipated'] == 0.111111111
assert b5s['accuracy'] == 0.862222222
assert b5s['effort'] == 7
assert b5s['discomfort'] == 7
assert b5s['boredom'] == 6
assert b5s['ratings_time_min'] == 24.183333333
assert b5s['num_trials'] == 225
assert b4s['nogo_prev4_avg'] == 318.833333333
assert b4s['nogo_next4_avg'] == 407.105263158
def test__calculate_ratings_proportions():
ratings = [5, 2, 3, 7, 6, 4, 3, 3] # 8 ratings, 7 possible changes
# ratings proportions
rp = compile_data._calculate_ratings_proportions(ratings)
assert rp['ups'] == 0.285714286 # 2 of 7
assert rp['downs'] == 0.571428571 # 4 of 7
assert rp['sames'] == 0.142857143 # 1 of 7
def test_complete_compile_experiment_data():
pid = PID_SUCCESS
df = get_csv_as_df('experiment', pid)
ed = compile_data.compile_experiment_data(df)
assert ed['num_trials'] == 1125
assert ed['trials_per_block'] == 225
assert ed['num_blocks'] == 5
assert ed['forecasted_enjoyment'] == 4
assert ed['forecasted_performance'] == 5
assert ed['forecasted_effort'] == 4
assert ed['forecasted_discomfort'] == 3
assert ed['forecasted_fatigue'] == 5
assert ed['forecasted_motivation'] == 6
assert ed['antecedent_boredom'] == 3
# check keys for each block's real-time data
blk_summary_keys = [
'anticipated', 'nogo_next4_avg', 'nogo_prev4_avg', 'go_errors',
'effort', 'num_trials', 'discomfort', 'rt_avg', 'nogo_errors',
'accuracy'
]
for i in range(1, (ed['num_blocks'] + 1)):
blk_key_prefix = "blk{}".format(i)
blk_keys = [k for k in ed.keys() if k.startswith(blk_key_prefix)]
assert len(blk_keys) == 16
for k in blk_summary_keys:
expected_blk_key = "{}_{}".format(blk_key_prefix, k)
assert expected_blk_key in blk_keys
# affective ratings
assert ed['prop_effort_ups'] == 0.25 # 1/4
assert ed['prop_effort_downs'] == 0.0 # 0/4
assert ed['prop_effort_sames'] == 0.75 # 3/4
assert ed['prop_discomfort_ups'] == 0.5 # 2/4
assert ed['prop_discomfort_downs'] == 0.0 # 0/4
assert ed['prop_discomfort_sames'] == 0.5 # 2/4
assert ed['prop_boredom_ups'] == 0.5 # 2/4
assert ed['prop_boredom_downs'] == 0.0 # 0/4
assert ed['prop_boredom_sames'] == 0.5 # 2/4
# go, no-go, and anticipated error variable weighted averages
assert ed['nogo_num_errors'] == 18
assert ed['nogo_error_prev_rt_avg'] == 352.12857142837146
assert ed['nogo_error_next_rt_avg'] == 395.67605633805636
# proportion of go, no-go, and anticipated errors across all trials
# also proportion of trials that were completed accurately
assert ed['avg_go_errors'] == 0.013333333
assert ed['avg_nogo_errors'] == 0.016
assert ed['avg_anticipation_errors'] == 0.036444444
assert ed['avg_accuracy'] == 0.934222222
# regression variables for blocks
assert ed['accuracy_slope'] == -0.007162023
assert ed['accuracy_intercept'] == 1.040912496
assert ed['effort_slope'] == 0.04296334
assert ed['effort_intercept'] == 6.15998945
assert ed['discomfort_slope'] == 0.107323927
assert ed['discomfort_intercept'] == 4.801231237
assert ed['boredom_slope'] == 0.107323927
assert ed['boredom_intercept'] == 3.801231237
# peak-end calculations
assert ed['start_effort'] == 6
assert ed['peak_effort'] == 7
assert ed['min_effort'] == 6
assert ed['end_effort'] == 7
assert ed['avg_effort'] == 6.8
assert ed['start_discomfort'] == 5
assert ed['peak_discomfort'] == 7
assert ed['min_discomfort'] == 5
assert ed['end_discomfort'] == 7
assert ed['avg_discomfort'] == 6.4
assert ed['start_boredom'] == 4
assert ed['peak_boredom'] == 6
assert ed['min_boredom'] == 4
assert ed['end_boredom'] == 6
assert ed['avg_boredom'] == 5.4
assert ed['avg_blk_accuracy'] == 0.934222222
assert ed['max_blk_accuracy'] == 0.982222222
assert ed['min_blk_accuracy'] == 0.862222222
assert ed['start_blk_accuracy'] == 0.982222222
assert ed['end_blk_accuracy'] == 0.862222222
assert ed['auc_accuracy'] == 3.748888888
assert ed['auc_effort'] == 27.5
assert ed['auc_discomfort'] == 26.0
# post-experiment evaluation of valence and arousal
assert ed['arousal_post_mind_body'] == '3'
assert ed['arousal_post_feeling'] == '1'
assert ed['time_experiment_ms'] == 1475020
def test_compile_demographics_data_after_practice_failure():
pid = PID_FAIL
df = get_csv_as_df('follow_up', pid)
data = compile_data.compile_demographic_data(df)
expected_answers = [
('age', '23'),
('dob', '05/1994'),
('sex', 'Female'),
('sms_1', '1'),
('sms_2', '4'),
('sms_3', '3'),
('sms_4', '4'),
('sms_5', '2'),
('sms_6', '2'),
('sms_7', '4'),
('sms_8', '4'),
('sms_9', '1'),
('sms_10', '4'),
('sms_11', '2'),
('sms_12', '2'),
('sms_13', '5'),
('sms_14', '2'),
('sms_15', '2'),
('sms_16', '4'),
('sms_17', '3'),
('sms_18', '3'),
('sms_19', '3'),
('sms_20', '4'),
('sms_21', '3'),
('state_boredom_1', '3'),
('state_boredom_2', '4'),
('state_boredom_3', '3'),
('state_boredom_4', '2'),
('state_boredom_5', '4'),
('state_boredom_6', '3'),
('state_boredom_7', '6'),
('state_boredom_8', '5'),
('time_delay_b4_retrospect_ms', None),
('time_follow_up_ms', 122441),
]
for label, answer in expected_answers:
        print(label)
assert data[label] == answer
def test_compile_demographics_data_after_practice_success():
pid = PID_SUCCESS
df = get_csv_as_df('follow_up', pid)
data = compile_data.compile_demographic_data(df)
    print(data)
expected_answers = [
('age', '28'),
('dob', '03/1989'),
('sex', 'Female'),
('sms_1', '3'),
('sms_2', '3'),
('sms_3', '1'),
('sms_4', '3'),
('sms_5', '3'),
('sms_6', '3'),
('sms_7', '1'),
('sms_8', '2'),
('sms_9', '5'),
('sms_10', '2'),
('sms_11', '3'),
('sms_12', '3'),
('sms_13', '3'),
('sms_14', '4'),
('sms_15', '3'),
('sms_16', '1'),
('sms_17', '3'),
('sms_18', '1'),
('sms_19', '3'),
('sms_20', '3'),
('sms_21', '4'),
('state_boredom_1', '2'),
('state_boredom_2', '5'),
('state_boredom_3', '3'),
('state_boredom_4', '2'),
('state_boredom_5', '2'),
('state_boredom_6', '5'),
('state_boredom_7', '6'),
('state_boredom_8', '6'),
('time_delay_b4_retrospect_ms', 191192),
('time_follow_up_ms', 252719),
]
for label, answer in expected_answers:
assert data[label] == answer
def test_compile_retrospective_data_after_practice_success():
pid = PID_SUCCESS
df = get_csv_as_df('follow_up', pid)
data = compile_data.compile_retrospective_data(df)
    print(data)
expected_answers = [
('tlx_scale_1', '7'),
('tlx_scale_2', '2'),
('tlx_scale_3', '4'),
('tlx_scale_4', '6'),
('tlx_scale_5', '7'),
('tlx_scale_6', '6'),
('tlx_scale_7', '6'),
('tlx_scale_8', '5'),
('tlx_scale_9', '7'),
('tlx_scale_10', '2'),
('tlx_scale_11', '2'),
('tlx_scale_12', '6'),
('tlx_scale_13', '2'),
]
for label, answer in expected_answers:
assert data[label] == answer
| shamrt/jsSART | scripts/tests/test_compile_data.py | Python | mit | 18,242 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to identify optimal substrates for film growth
"""
from dataclasses import dataclass
from typing import Tuple
from pymatgen.analysis.elasticity.strain import Deformation, Strain
from pymatgen.analysis.interfaces.zsl import ZSLGenerator, ZSLMatch, reduce_vectors
from pymatgen.core import Structure
from pymatgen.core.surface import (
SlabGenerator,
get_symmetrically_distinct_miller_indices,
)
Miller3D = Tuple[int, int, int]
Vector3D = Tuple[float, float, float]
Matrix3D = Tuple[Vector3D, Vector3D, Vector3D]
@dataclass
class SubstrateMatch(ZSLMatch):
"""
    A substrate match building on the Zur and McGill algorithm. This match class includes the miller
    planes of the film and substrate, the full strain tensor, the von Mises strain, the ground state
    energy if provided, and the elastic energy
"""
film_miller: Miller3D
substrate_miller: Miller3D
strain: Strain
von_mises_strain: float
ground_state_energy: float
elastic_energy: float
@classmethod
def from_zsl(
cls,
match: ZSLMatch,
film: Structure,
film_miller,
substrate_miller,
elasticity_tensor=None,
ground_state_energy=0,
):
"""Generate a substrate match from a ZSL match plus metadata"""
# Get the appropriate surface structure
struc = SlabGenerator(film, film_miller, 20, 15, primitive=False).get_slab().oriented_unit_cell
dfm = Deformation(match.match_transformation)
strain = dfm.green_lagrange_strain.convert_to_ieee(struc, initial_fit=False)
von_mises_strain = strain.von_mises_strain
if elasticity_tensor is not None:
energy_density = elasticity_tensor.energy_density(strain)
elastic_energy = film.volume * energy_density / len(film.sites)
else:
elastic_energy = 0
return cls(
film_miller=film_miller,
substrate_miller=substrate_miller,
strain=strain,
von_mises_strain=von_mises_strain,
elastic_energy=elastic_energy,
ground_state_energy=ground_state_energy,
**{
k: getattr(match, k)
for k in [
"film_sl_vectors",
"substrate_sl_vectors",
"film_vectors",
"substrate_vectors",
"film_transformation",
"substrate_transformation",
]
},
)
@property
def total_energy(self):
"""Total energy of this match"""
return self.ground_state_energy + self.elastic_energy
class SubstrateAnalyzer(ZSLGenerator):
"""
This class applies a set of search criteria to identify suitable
    substrates for film growth. It first uses a topological search by Zur
and McGill to identify matching super-lattices on various faces of the
two materials. Additional criteria can then be used to identify the most
    suitable substrate. Currently, the only additional criterion is the
    elastic strain energy of the super-lattices.
"""
def __init__(self, film_max_miller=1, substrate_max_miller=1, **kwargs):
"""
Initializes the substrate analyzer
Args:
zslgen(ZSLGenerator): Defaults to a ZSLGenerator with standard
tolerances, but can be fed one with custom tolerances
film_max_miller(int): maximum miller index to generate for film
surfaces
substrate_max_miller(int): maximum miller index to generate for
substrate surfaces
"""
self.film_max_miller = film_max_miller
self.substrate_max_miller = substrate_max_miller
self.kwargs = kwargs
super().__init__(**kwargs)
def generate_surface_vectors(self, film_millers, substrate_millers):
"""
Generates the film/substrate slab combinations for a set of given
        miller indices
Args:
film_millers(array): all miller indices to generate slabs for
film
            substrate_millers(array): all miller indices to generate slabs
for substrate
"""
vector_sets = []
for f in film_millers:
film_slab = SlabGenerator(self.film, f, 20, 15, primitive=False).get_slab()
film_vectors = reduce_vectors(film_slab.lattice.matrix[0], film_slab.lattice.matrix[1])
for s in substrate_millers:
substrate_slab = SlabGenerator(self.substrate, s, 20, 15, primitive=False).get_slab()
substrate_vectors = reduce_vectors(substrate_slab.lattice.matrix[0], substrate_slab.lattice.matrix[1])
vector_sets.append((film_vectors, substrate_vectors, f, s))
return vector_sets
def calculate(
self,
film,
substrate,
elasticity_tensor=None,
film_millers=None,
substrate_millers=None,
ground_state_energy=0,
lowest=False,
):
"""
Finds all topological matches for the substrate and calculates elastic
strain energy and total energy for the film if elasticity tensor and
ground state energy are provided:
Args:
film(Structure): conventional standard structure for the film
substrate(Structure): conventional standard structure for the
substrate
elasticity_tensor(ElasticTensor): elasticity tensor for the film
in the IEEE orientation
film_millers(array): film facets to consider in search as defined by
                miller indices
substrate_millers(array): substrate facets to consider in search as
                defined by miller indices
ground_state_energy(float): ground state energy for the film
lowest(bool): only consider lowest matching area for each surface
"""
self.film = film
self.substrate = substrate
        # Generate miller indices if none specified for film
if film_millers is None:
film_millers = sorted(get_symmetrically_distinct_miller_indices(self.film, self.film_max_miller))
        # Generate miller indices if none specified for substrate
if substrate_millers is None:
substrate_millers = sorted(
get_symmetrically_distinct_miller_indices(self.substrate, self.substrate_max_miller)
)
# Check each miller index combination
surface_vector_sets = self.generate_surface_vectors(film_millers, substrate_millers)
for [
film_vectors,
substrate_vectors,
film_miller,
substrate_miller,
] in surface_vector_sets:
for match in self(film_vectors, substrate_vectors, lowest):
sub_match = SubstrateMatch.from_zsl(
match=match,
film=film,
film_miller=film_miller,
substrate_miller=substrate_miller,
elasticity_tensor=elasticity_tensor,
ground_state_energy=ground_state_energy,
)
yield sub_match
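# A minimal usage sketch (the structures and CIF paths below are hypothetical;
# `calculate` is a generator, and `von_mises_strain` is the SubstrateMatch
# field defined above):
#
#   from pymatgen.core import Structure
#   film = Structure.from_file("film.cif")
#   substrate = Structure.from_file("substrate.cif")
#   analyzer = SubstrateAnalyzer(film_max_miller=1, substrate_max_miller=1)
#   matches = list(analyzer.calculate(film, substrate))
#   least_strained = min(matches, key=lambda m: m.von_mises_strain)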
| gmatteo/pymatgen | pymatgen/analysis/interfaces/substrate_analyzer.py | Python | mit | 7,398 |
"""Tests for the CVEcheckerTask worker task."""
from datadiff.tools import assert_equal
from flexmock import flexmock
from pathlib import Path
import pytest
from f8a_worker.object_cache import EPVCache
from f8a_worker.workers import CVEcheckerTask
@pytest.mark.usefixtures("dispatcher_setup")
class TestCVEchecker(object):
"""Tests for the CVEcheckerTask worker task."""
@pytest.mark.parametrize(('cve_id', 'score', 'vector', 'severity'), [
('CVE-2017-0249', 7.3, 'CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:L/A:L', 'high'),
('cve-2015-1164', 4.3, 'AV:N/AC:M/Au:N/C:N/I:P/A:N', 'medium')
])
def test_get_cve_impact(self, cve_id, score, vector, severity):
"""Test the method CVEcheckerTask.get_cve_impact."""
score_, vector_, severity_ = CVEcheckerTask.get_cve_impact(cve_id)
assert score_ == score
assert vector_ == vector
assert severity_ == severity
@pytest.mark.parametrize(('version',), [
('1.6.4',),
('1.7.1',)
])
@pytest.mark.usefixtures('victims_zip_s3', 'npm')
def test_npm_servestatic(self, version):
"""Tests CVE reports for selected package from NPM ecosystem."""
args = {'ecosystem': 'npm', 'name': 'serve-static', 'version': version}
task = CVEcheckerTask.create_test_instance(task_name='security_issues')
results = task.execute(args)
assert isinstance(results, dict)
assert set(results.keys()) == {'details', 'status', 'summary'}
assert results['status'] == 'success'
assert results['summary'] == ['CVE-2015-1164']
expected_details = [{
"attribution": "https://github.com/victims/victims-cve-db, CC BY-SA 4.0, modified",
"cvss": {
"score": 4.3,
"vector": "AV:N/AC:M/Au:N/C:N/I:P/A:N"
},
"description": "Open redirect vulnerability in the serve-static plugin "
"before 1.7.2 for Node.js, when mounted at the root, allows "
"remote attackers to redirect users to arbitrary web sites "
"and conduct phishing attacks via a // (slash slash) followed "
"by a domain in the PATH_INFO to the default URI.\n",
"id": "CVE-2015-1164",
"references": [
"http://nodesecurity.io/advisories/serve-static-open-redirect",
"https://bugzilla.redhat.com/show_bug.cgi?id=1181917",
"https://github.com/expressjs/serve-static/issues/26",
"https://nvd.nist.gov/vuln/detail/CVE-2015-1164",
"https://github.com/expressjs/serve-static/blob/master/HISTORY.md#165--2015-02-04"
],
"severity": "medium"
}]
assert_equal(results.get('details'), expected_details)
@pytest.mark.usefixtures('victims_zip_s3', 'npm')
def test_npm_servestatic_not_affected(self):
"""Tests CVE reports for selected package from NPM ecosystem."""
args = {'ecosystem': 'npm', 'name': 'serve-static', 'version': '1.7.5'}
task = CVEcheckerTask.create_test_instance(task_name='security_issues')
results = task.execute(args)
assert isinstance(results, dict)
assert set(results.keys()) == {'details', 'status', 'summary'}
assert results['status'] == 'success'
assert results['summary'] == []
assert_equal(results.get('details'), [])
@pytest.mark.usefixtures('victims_zip_s3', 'maven')
def test_maven_commons_compress(self):
"""Tests CVE reports for selected packages from Maven ecosystem."""
args = {'ecosystem': 'maven', 'name': 'org.apache.commons:commons-compress',
'version': '1.4'}
task = CVEcheckerTask.create_test_instance(task_name='security_issues')
results = task.execute(arguments=args)
assert isinstance(results, dict)
assert set(results.keys()) == {'details', 'status', 'summary'}
expected_details = [
{
"attribution": "https://github.com/victims/victims-cve-db, CC BY-SA 4.0, modified",
"cvss": {
"score": 5.0,
"vector": "AV:N/AC:L/Au:N/C:N/I:N/A:P"
},
"description": "Algorithmic complexity vulnerability in the sorting algorithms "
"in bzip2 compressing stream (BZip2CompressorOutputStream) "
"in Apache Commons Compress before 1.4.1 allows remote attackers "
"to cause a denial of service (CPU consumption) via a file "
"with many repeating inputs.\n",
"id": "CVE-2012-2098",
"references": [
"https://nvd.nist.gov/vuln/detail/CVE-2012-2098"
],
"severity": "medium"
}
]
assert_equal(results.get('details'), expected_details, results.get('details'))
@pytest.mark.usefixtures('victims_zip_s3', 'pypi')
def test_python_pyjwt(self):
"""Tests CVE reports for selected package from PyPi ecosystem."""
args = {'ecosystem': 'pypi', 'name': 'pyjwt', 'version': '1.5.0'}
task = CVEcheckerTask.create_test_instance(task_name='security_issues')
results = task.execute(arguments=args)
assert isinstance(results, dict)
assert set(results.keys()) == {'details', 'status', 'summary'}
assert results['status'] == 'success'
assert results['summary'] == ['CVE-2017-11424']
expected_details = [{
"attribution": "https://github.com/victims/victims-cve-db, CC BY-SA 4.0, modified",
"cvss": {
"score": 5.0,
"vector": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:H/A:N"
},
"description": "In PyJWT 1.5.0 and below the `invalid_strings` check in "
"`HMACAlgorithm.prepare_key` does not account for all PEM "
"encoded public keys. Specifically, the PKCS1 PEM encoded "
"format would be allowed because it is prefaced with the string "
"`-----BEGIN RSA PUBLIC KEY-----` which is not accounted for. "
"This enables symmetric/asymmetric key confusion attacks against "
"users using the PKCS1 PEM encoded public keys, which would allow "
"an attacker to craft JWTs from scratch.\n",
"id": "CVE-2017-11424",
"references": [
"https://github.com/jpadilla/pyjwt/pull/277",
"https://nvd.nist.gov/vuln/detail/CVE-2017-11424"
],
"severity": "high"
}]
assert_equal(results.get('details'), expected_details)
@pytest.mark.usefixtures('nuget')
def test_nuget_system_net_http(self):
"""Tests CVE reports for selected package from Nuget ecosystem."""
args = {'ecosystem': 'nuget', 'name': 'System.Net.Http', 'version': '4.1.1'}
task = CVEcheckerTask.create_test_instance(task_name='security_issues')
results = task.execute(arguments=args)
assert isinstance(results, dict)
assert set(results.keys()) == {'details', 'status', 'summary'}
# https://github.com/dotnet/announcements/issues/12
# http://www.cvedetails.com/version/220163/Microsoft-System.net.http-4.1.1.html
assert set(results.get('summary')) >= {'CVE-2017-0247', 'CVE-2017-0248',
'CVE-2017-0249', 'CVE-2017-0256'}
details = results.get('details')
assert isinstance(details, list) and len(details) >= 4
for detail in details:
assert set(detail.keys()) == {'cvss', 'description', 'id', 'references', 'severity'}
assert detail['description']
assert detail['references']
assert set(detail['cvss'].keys()) == {'score', 'vector'}
| miteshvp/fabric8-analytics-worker | tests/workers/test_cvecheck.py | Python | gpl-3.0 | 8,157 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'secret-key'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
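# Typical app-factory usage sketch (assumes this module is importable as
# `config`; `init_app` is the hook defined on Config above):
#
#   from flask import Flask
#   from config import config
#
#   def create_app(config_name='default'):
#       app = Flask(__name__)
#       app.config.from_object(config[config_name])
#       config[config_name].init_app(app)
#       return app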
| hszjj221/myflasky | config.py | Python | gpl-3.0 | 1,307 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-20 16:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scoping', '0120_auto_20170720_1653'),
]
operations = [
migrations.RemoveField(
model_name='networkproperties',
name='doc',
),
migrations.DeleteModel(
name='NetworkProperties',
),
]
| mcallaghan/tmv | BasicBrowser/scoping/migrations/0121_auto_20170720_1656.py | Python | gpl-3.0 | 482 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
| EMCain/ovc-polls | mysite/polls/urls.py | Python | mit | 404 |
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# scapy.contrib.description = MPLS
# scapy.contrib.status = loads
from scapy.packet import Packet, bind_layers, Padding
from scapy.fields import BitField, ByteField, ShortField
from scapy.layers.inet import IP, UDP
from scapy.contrib.bier import BIER
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether, GRE
from scapy.compat import orb
class EoMCW(Packet):
name = "EoMCW"
fields_desc = [BitField("zero", 0, 4),
BitField("reserved", 0, 12),
ShortField("seq", 0)]
def guess_payload_class(self, payload):
if len(payload) >= 1:
return Ether
return Padding
class MPLS(Packet):
name = "MPLS"
fields_desc = [BitField("label", 3, 20),
BitField("cos", 0, 3),
BitField("s", 1, 1),
ByteField("ttl", 0)]
def guess_payload_class(self, payload):
if len(payload) >= 1:
if not self.s:
return MPLS
ip_version = (orb(payload[0]) >> 4) & 0xF
if ip_version == 4:
return IP
elif ip_version == 5:
return BIER
elif ip_version == 6:
return IPv6
else:
if orb(payload[0]) == 0 and orb(payload[1]) == 0:
return EoMCW
else:
return Ether
return Padding
bind_layers(Ether, MPLS, type=0x8847)
bind_layers(UDP, MPLS, dport=6635)
bind_layers(GRE, MPLS, proto=0x8847)
bind_layers(MPLS, MPLS, s=0)
bind_layers(MPLS, EoMCW)
bind_layers(EoMCW, Ether, zero=0, reserved=0)
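# Quick usage sketch: a two-label stack carrying IPv4. The MPLS-in-MPLS binding
# above sets s=0 on the outer label automatically, so only the inner (last)
# label keeps the default bottom-of-stack bit s=1:
#
#   pkt = Ether() / MPLS(label=16, ttl=64) / MPLS(label=17, ttl=63) / IP(dst="192.0.2.1")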
| smainand/scapy | scapy/contrib/mpls.py | Python | gpl-2.0 | 2,257 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .base import TrackStatsHookBase
class ModelHookManager:
"""
    This class registers and manages a set of hooks subclassed from
`TrackStatsHookBase`. The given hook is registered on all modules within
'named_modules'.
Tracking is started and stopped for all hooks via `self.start_tracking()` and
    `self.stop_tracking()`. Alternatively, this class can be used as a context manager to
automate these calls. For example,
```
with hook_manager as hooks:
... # Train here
        stats = hooks.get_statistics()
```
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function that
takes a name and module as inputs and then outputs a dictionary of
arguments to pass to the hook
"""
def __init__(
self,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
assert hook_type in ["forward", "backward", "pre_forward"]
assert issubclass(hook_class, TrackStatsHookBase)
# Register the hooks via class method.
tracked_vals = self.register_storage_hooks(named_modules,
hook_class=hook_class,
hook_type=hook_type,
hook_args=hook_args)
        # These are the functions that are called on every forward or backward pass.
self.hooks = tracked_vals[0]
# These are handles to the hooks; PyTorch lets the user unregister
# hooks through these handles.
self._hook_handles = tracked_vals[1]
# These are the filtered modules that will be tracked.
self.tracked_modules = tracked_vals[2]
# Keep track of whether tracking is on.
self._tracking = False
@property
def tracking(self):
return self._tracking
def __enter__(self):
"""Start tracking when `with` is called."""
self.start_tracking()
return self
def __exit__(self, *args):
"""Stop tracking when `with` block is left."""
self.stop_tracking()
@classmethod
def register_storage_hooks(
cls,
named_modules,
hook_class,
hook_type="forward",
hook_args=None,
):
"""
Register hook on each module in 'named_modules'.
:param named_modules: dict mapping names to modules
:param hook_class: class subclassed from `TrackStatsHookBase`
:param hook_type: whether to register the hook as "forward" or "backward"
or "pre_forward"
:param hook_args: either a dictionary of args to pass to hook, or a function
that takes a name and module as inputs and then outputs a
dictionary of arguments to pass to the hook
"""
assert hook_type in ["forward", "backward", "pre_forward"]
hooks = []
handles = []
tracked_modules = dict()
# Register hooks on the modules.
for n, m in named_modules.items():
if callable(hook_args):
args = hook_args(n, m)
else:
args = hook_args or {}
hook = hook_class(name=n, **args)
if hook_type == "forward":
handle = m.register_forward_hook(hook)
elif hook_type == "pre_forward":
handle = m.register_forward_pre_hook(hook)
else:
handle = m.register_backward_hook(hook)
hooks.append(hook)
handles.append(handle)
tracked_modules[n] = m
return hooks, handles, tracked_modules
def start_tracking(self):
self._tracking = True
for hook in self.hooks:
hook.start_tracking()
def stop_tracking(self):
self._tracking = False
for hook in self.hooks:
hook.stop_tracking()
def get_statistics(self):
"""
This returns a generator with elements
`(name, module, statistic_0, ..., statistic_n)`.
"""
return (
(name, module, *hook.get_statistics())
for (name, module), hook in zip(self.tracked_modules.items(), self.hooks)
)
def remove_hooks(self):
"""
Remove all hooks from the model and stop tracking statistics.
"""
for handle in self._hook_handles:
handle.remove()
self.hooks = []
self._hook_handles = []
self.tracked_modules = dict()
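# A minimal sketch of a concrete hook this manager could drive. The class below
# is hypothetical; it assumes the `TrackStatsHookBase` interface used in this
# file (start_tracking/stop_tracking toggling a `_tracking` flag, plus
# get_statistics):
#
#   class CountForwardsHook(TrackStatsHookBase):
#       def __init__(self, name=None):
#           super().__init__(name=name)
#           self.num_calls = 0
#
#       def __call__(self, module, inputs, output):  # forward-hook signature
#           if self._tracking:
#               self.num_calls += 1
#
#       def get_statistics(self):
#           return (self.num_calls,)
#
#   manager = ModelHookManager(dict(model.named_modules()), CountForwardsHook)
#   with manager as hooks:
#       model(batch)
#   stats = list(hooks.get_statistics())  # [(name, module, num_calls), ...]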
| mrcslws/nupic.research | src/nupic/research/frameworks/pytorch/hooks/hook_manager.py | Python | agpl-3.0 | 5,818 |
import typedef_class
a = typedef_class.RealA()
a.a = 3
b = typedef_class.B()
b.testA(a)
| DGA-MI-SSI/YaCo | deps/swig-3.0.7/Examples/test-suite/python/typedef_class_runme.py | Python | gpl-3.0 | 90 |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy.stats import pearsonr
from load_mnist import load_mnist
import influence.experiments as experiments
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
from tensorflow.contrib.learn.python.learn.datasets import base
data_sets = load_mnist('data')
pos_class = 1
neg_class = 7
X_train = data_sets.train.x
Y_train = data_sets.train.labels
X_test = data_sets.test.x
Y_test = data_sets.test.labels
X_train, Y_train = dataset.filter_dataset(X_train, Y_train, pos_class, neg_class)
X_test, Y_test = dataset.filter_dataset(X_test, Y_test, pos_class, neg_class)
# Round dataset size off to the nearest 100, just for batching convenience
num_train = int(np.floor(len(Y_train) / 100) * 100)
num_test = int(np.floor(len(Y_test) / 100) * 100)
X_train = X_train[:num_train, :]
Y_train = Y_train[:num_train]
X_test = X_test[:num_test, :]
Y_test = Y_test[:num_test]
train = DataSet(X_train, Y_train)
validation = None
test = DataSet(X_test, Y_test)
data_sets = base.Datasets(train=train, validation=validation, test=test)
num_classes = 2
input_side = 28
input_channels = 1
input_dim = input_side * input_side * input_channels
weight_decay = 0.01
use_bias = False
batch_size = 100
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
temps = [0, 0.001, 0.1]
num_temps = len(temps)
num_params = 784
# Get weights from hinge
tf.reset_default_graph()
temp = 0
model = SmoothHinge(
use_bias=use_bias,
temp=temp,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output',
log_dir='log',
model_name='smooth_hinge_17_t-%s' % temp)
model.train()
model.load_checkpoint(iter_to_load=0)
hinge_W = model.sess.run(model.params)[0]
model_margins = model.sess.run(model.margin, feed_dict=model.all_test_feed_dict)
# Look at np.argsort(model_margins)[:10] to pick a test example
np.random.seed(92)
num_to_remove = 100
params = np.zeros([num_temps, num_params])
margins = np.zeros([num_temps, num_train])
influences = np.zeros([num_temps, num_train])
actual_loss_diffs = np.zeros([num_temps, num_to_remove])
predicted_loss_diffs = np.zeros([num_temps, num_to_remove])
indices_to_remove = np.zeros([num_temps, num_to_remove], dtype=np.int32)
test_idx = 1597
for counter, temp in enumerate(temps):
tf.reset_default_graph()
model = SmoothHinge(
use_bias=use_bias,
temp=temp,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output',
log_dir='log',
model_name='smooth_hinge_17_t-%s' % temp)
if temp == 0:
model.load_checkpoint(iter_to_load=0)
else:
params_feed_dict = {}
params_feed_dict[model.W_placeholder] = hinge_W
model.sess.run(model.set_params_op, feed_dict=params_feed_dict)
model.print_model_eval()
cur_params, cur_margins = model.sess.run([model.params, model.margin], feed_dict=model.all_train_feed_dict)
cur_influences = model.get_influence_on_test_loss(
test_indices=[test_idx],
train_idx=np.arange(num_train),
force_refresh=False)
params[counter, :] = np.concatenate(cur_params)
margins[counter, :] = cur_margins
influences[counter, :] = cur_influences
if temp == 0:
actual_loss_diffs[counter, :], predicted_loss_diffs[counter, :], indices_to_remove[counter, :] = experiments.test_retraining(
model,
test_idx,
iter_to_load=0,
force_refresh=False,
num_steps=2000,
remove_type='maxinf',
num_to_remove=num_to_remove)
np.savez(
'output/hinge_results',
temps=temps,
indices_to_remove=indices_to_remove,
actual_loss_diffs=actual_loss_diffs,
predicted_loss_diffs=predicted_loss_diffs,
influences=influences
) | kohpangwei/influence-release | scripts/run_hinge_experiment.py | Python | mit | 4,640 |
assists = crosses = 3
chancesCreated = goals = 4
shotsOnTarget = 5
successfulDribbles = 10
points = 89
# points = goals * 9 + assists * 6 + chancesCreated * 3 + \
# shotsOnTarget * 2 + crosses + successfulDribbles
| the-zebulan/CodeWars | katas/beta/grasshopper_fantasy_points.py | Python | mit | 223 |
import pingo
rpi = pingo.rpi.RaspberryPi()
fmt = '{:>22s} {:2d} {:<2d} {}'
for loc1, pin1 in sorted(rpi.pins.items())[::2]:
loc2 = loc1 + 1
pin2 = rpi.pins[loc2]
    print(fmt.format(pin1, loc1, loc2, pin2))
| garoa/pingo | pingo/examples/rpi_examples/pin_map.py | Python | mit | 217 |
import os
import csv
import simplejson as json
from tempfile import mkstemp
from zipfile import ZipFile
import datetime
import hashlib
DLI_MANIFEST_FILENAME = 'upload_info.json'
CSV_DATA_FILENAME = 'data.csv'
DEFAULT_ARCHIVE_NAME = 'upload.zip'
def write_tmp_file(content):
'''Write any data to a temporary file.
Remember to os.remove(filename) after use.
@param content: data to be written to a file
return filename of the created temporary file
'''
fp, filename = mkstemp()
file = open(filename, 'w+b')
file.write(content)
os.close(fp)
return filename
def write_tmp_csv_file(csv_data, sli_manifest):
'''Write a CSV temporary file with values in csv_data - list of dicts.
@param csv_data: list of dicts
@param sli_manifest: json sli_manifest
'''
fieldnames = [part['columnName'] for part in sli_manifest['dataSetSLIManifest']['parts']]
fp, filename = mkstemp()
file = open(filename, 'w+b')
writer = csv.DictWriter(file, fieldnames=fieldnames,
delimiter=sli_manifest['dataSetSLIManifest']['csvParams']['separatorChar'],
quotechar=sli_manifest['dataSetSLIManifest']['csvParams']['quoteChar'],
quoting=csv.QUOTE_ALL)
headers = dict((n, n) for n in fieldnames)
writer.writerow(headers)
for line in csv_data:
for key in fieldnames:
            # fill a missing companion *_dt field with a short hash placeholder
if not key in line and key.endswith('_dt'):
h = hashlib.md5()
#h.update(line[key[:-3]])
h.update('123456')
line[key] = h.hexdigest()[:6]
            # format datetime values as YYYY-MM-DD
if isinstance(line[key], datetime.datetime):
line[key] = line[key].strftime("%Y-%m-%d")
#make 0/1 from bool
if isinstance(line[key], bool):
line[key] = int(line[key])
writer.writerow(line)
os.close(fp)
return filename
def write_tmp_zipfile(files):
'''Zip files into a single file.
Remember to os.remove(filename) after use.
@param files: list of tuples (path_to_the_file, name_of_the_file)
return filename of the created temporary zip file
'''
fp, filename = mkstemp()
zip_file = ZipFile(filename, "w")
for path, name in files:
zip_file.write(path, name)
zip_file.close()
os.close(fp)
return filename
def create_archive(data, sli_manifest):
'''Zip the data and sli_manifest files to an archive.
Remember to os.remove(filename) after use.
@param data: csv data
@param sli_manifest: json sli_manifest
return the filename to the temporary zip file
'''
if isinstance(data, list):
data_path = write_tmp_csv_file(data, sli_manifest)
else:
data_path = write_tmp_file(data)
if isinstance(sli_manifest, dict):
sli_manifest = json.dumps(sli_manifest)
sli_manifest_path = write_tmp_file(sli_manifest)
filename = write_tmp_zipfile((
(data_path, CSV_DATA_FILENAME),
(sli_manifest_path, DLI_MANIFEST_FILENAME),
))
os.remove(data_path)
os.remove(sli_manifest_path)
return filename
def csv_to_list(data_csv):
'''Create list of dicts from CSV string.
@param data_csv: CSV in a string
'''
reader = csv.reader(data_csv.strip().split('\n'))
    header = next(reader)
data_list = []
for line in reader:
l = {}
for i, value in enumerate(header):
l[value] = line[i]
data_list.append(l)
return data_list
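# End-to-end sketch (the manifest shape mirrors what write_tmp_csv_file reads;
# the values are illustrative):
#
#   sli_manifest = {'dataSetSLIManifest': {
#       'parts': [{'columnName': 'id'}, {'columnName': 'name'}],
#       'csvParams': {'separatorChar': ',', 'quoteChar': '"'},
#   }}
#   archive = create_archive([{'id': 1, 'name': 'foo'}], sli_manifest)
#   ...  # upload the archive, then clean up
#   os.remove(archive)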
| comoga/gooddata-python | gooddataclient/archiver.py | Python | bsd-3-clause | 3,802 |
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.widget import Text, TextInput, Widget
from widgetastic_patternfly import Button, Input
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils import deferred_verpick
from cfme.utils import ParamClassName
from cfme.utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from cfme.utils.version import LOWEST
from widgetastic_manageiq.expression_editor import ExpressionEditor
from . import ControlExplorerView
class Expression(Widget):
ROOT = "div#condition_info_div"
def __init__(self, parent, type_, logger=None):
Widget.__init__(self, parent, logger=logger)
if type_ not in ["Scope", "Expression"]:
raise ValueError("Type should be Scope or Expression only")
else:
self.type = type_
def __locator__(self):
return self.ROOT
@property
def text_list(self):
return self.browser.element(self).text.split("\n")
@property
def text(self):
"""
        In the Condition details view, Scope and Expression don't have any locator, so we
        have to scrape the whole text in the parent div and split it by "\\n". After that,
        text_list contains something like this:
.. code-block:: python
[u'Scope',
u'COUNT OF VM and Instance.Files > 150',
u'Expression',
u'VM and Instance : Boot Time BEFORE "03/04/2014 00:00"',
u'Notes',
u'No notes have been entered.',
u'Assigned to Policies',
u'This Condition is not assigned to any Policies.']
        To get the value of Scope or Expression, we first find its index in the list and then
        take the next member.
"""
index = self.text_list.index(self.type)
return self.text_list[index + 1]
def read(self):
return self.text
class ConditionsAllView(ControlExplorerView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == "All {} Conditions".format(self.context["object"].FIELD_VALUE) and
self.conditions.is_opened and
self.conditions.tree.currently_selected == ["All Conditions",
"{} Conditions".format(self.context["object"].TREE_NODE)]
)
class ConditionFormCommon(ControlExplorerView):
title = Text("#explorer_title_text")
description = Input(name="description")
scope = ExpressionEditor("//img[@alt='Edit this Scope']")
expression = ExpressionEditor("//img[@alt='Edit this Expression']")
notes = TextInput(name="notes")
cancel_button = Button("Cancel")
class NewConditionView(ConditionFormCommon):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == "Adding a new Condition" and
self.conditions.is_opened and
self.conditions.tree.currently_selected == ["All Conditions",
"{} Condition".format(self.context["object"].TREE_NODE)]
)
class EditConditionView(ConditionFormCommon):
title = Text("#explorer_title_text")
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == '{} "{}"'.format(self.context["object"].FIELD_VALUE,
self.context["object"].description) and
self.conditions.is_opened and
self.conditions.tree.currently_selected == [
"All Conditions",
"{} Conditions".format(self.context["object"].TREE_NODE),
self.context["object"].description
]
)
class ConditionDetailsView(ControlExplorerView):
title = Text("#explorer_title_text")
scope = Expression("Scope")
expression = Expression("Expression")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == '{} Condition "{}"'.format(self.context["object"].FIELD_VALUE,
self.context["object"].description) and
self.conditions.is_opened
)
class ConditionPolicyDetailsView(ControlExplorerView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_control_explorer and
self.title.text == '{} Condition "{}"'.format(
self.context["object"].context_policy.PRETTY,
self.context["object"].description) and
self.policies.is_opened and
self.policies.tree.currently_selected == [
"All Policies",
"{} Policies".format(self.context["object"].context_policy.TYPE),
"{} {} Policies".format(self.context["object"].context_policy.TREE_NODE,
self.context["object"].context_policy.TYPE),
self.context["object"].context_policy.description,
self.context["object"].description
]
)
@attr.s
class BaseCondition(BaseEntity, Updateable, Pretty):
TREE_NODE = None
PRETTY = None
FIELD_VALUE = None
_param_name = ParamClassName('description')
description = attr.ib()
expression = attr.ib(default=None)
scope = attr.ib(default=None)
notes = attr.ib(default=None)
def update(self, updates):
"""Update this Condition in UI.
Args:
updates: Provided by update() context manager.
"""
view = navigate_to(self, "Edit")
view.fill(updates)
view.save_button.click()
view = self.create_view(ConditionDetailsView, override=updates)
assert view.is_displayed
view.flash.assert_success_message(
'Condition "{}" was saved'.format(updates.get("description", self.description)))
def delete(self, cancel=False):
"""Delete this Condition in UI.
Args:
cancel: Whether to cancel the deletion (default False).
"""
view = navigate_to(self, "Details")
view.configuration.item_select("Delete this {} Condition".format(self.FIELD_VALUE),
handle_alert=not cancel)
if cancel:
assert view.is_displayed
view.flash.assert_no_error()
else:
view = self.create_view(ConditionsAllView)
assert view.is_displayed
view.flash.assert_success_message('Condition "{}": Delete successful'.format(
self.description))
def read_expression(self):
view = navigate_to(self, "Details")
assert view.is_displayed
return view.expression.text
def read_scope(self):
view = navigate_to(self, "Details")
assert view.is_displayed
return view.scope.text
@property
def exists(self):
"""Check existence of this Condition.
        Returns: :py:class:`bool` signaling the presence of the Condition in the database.
"""
conditions = self.appliance.db.client["conditions"]
return self.appliance.db.client.session\
.query(conditions.description)\
.filter(conditions.description == self.description)\
.count() > 0
@attr.s
class ConditionCollection(BaseCollection):
ENTITY = BaseCondition
def create(self, condition_class, description, expression=None, scope=None, notes=None):
condition = condition_class(self, description, expression=expression, scope=scope,
notes=notes)
view = navigate_to(condition, "Add")
view.fill({
"description": condition.description,
"expression": condition.expression,
"scope": condition.scope,
"notes": condition.notes
})
view.add_button.click()
view = condition.create_view(ConditionDetailsView)
assert view.is_displayed
view.flash.assert_success_message('Condition "{}" was added'.format(condition.description))
return condition
def all(self):
raise NotImplementedError
@navigator.register(ConditionCollection, "All")
class AllConditions(CFMENavigateStep):
VIEW = ConditionsAllView
prerequisite = NavigateToAttribute("appliance.server", "ControlExplorer")
def step(self):
self.prerequisite_view.conditions.tree.click_path("All Conditions")
@navigator.register(BaseCondition, "Add")
class ConditionNew(CFMENavigateStep):
VIEW = NewConditionView
prerequisite = NavigateToAttribute("parent", "All")
def step(self):
self.prerequisite_view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE)
)
self.prerequisite_view.configuration.item_select(
"Add a New {} Condition".format(self.obj.PRETTY))
@navigator.register(BaseCondition, "Edit")
class ConditionEdit(CFMENavigateStep):
VIEW = EditConditionView
prerequisite = NavigateToSibling("Details")
def step(self):
self.view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE),
self.obj.description
)
self.prerequisite_view.configuration.item_select("Edit this Condition")
@navigator.register(BaseCondition, "Details")
class ConditionDetails(CFMENavigateStep):
VIEW = ConditionDetailsView
prerequisite = NavigateToAttribute("parent", "All")
def step(self):
self.prerequisite_view.conditions.tree.click_path(
"All Conditions",
"{} Conditions".format(self.obj.TREE_NODE),
self.obj.description
)
@navigator.register(BaseCondition, "Details in policy")
class PolicyConditionDetails(CFMENavigateStep):
VIEW = ConditionPolicyDetailsView
prerequisite = NavigateToAttribute("appliance.server", "ControlExplorer")
def step(self):
self.prerequisite_view.policies.tree.click_path(
"All Policies",
"{} Policies".format(self.obj.context_policy.TYPE),
"{} {} Policies".format(
self.obj.context_policy.TREE_NODE,
self.obj.context_policy.TYPE
),
self.obj.context_policy.description,
self.obj.description
)
class HostCondition(BaseCondition):
TREE_NODE = "Host"
FIELD_VALUE = deferred_verpick({LOWEST: "Host / Node"})
PRETTY = "Host / Node"
class VMCondition(BaseCondition):
TREE_NODE = "VM and Instance"
FIELD_VALUE = deferred_verpick({LOWEST: "VM and Instance"})
PRETTY = "VM"
class ReplicatorCondition(BaseCondition):
TREE_NODE = "Replicator"
FIELD_VALUE = deferred_verpick({
LOWEST: "Replicator",
"5.9.2": "Container Replicator"
})
PRETTY = FIELD_VALUE
class PodCondition(BaseCondition):
TREE_NODE = "Pod"
FIELD_VALUE = deferred_verpick({
LOWEST: "Pod",
"5.9.2": "Container Pod"
})
PRETTY = FIELD_VALUE
class ContainerNodeCondition(BaseCondition):
TREE_NODE = "Container Node"
FIELD_VALUE = deferred_verpick({
LOWEST: "Node",
"5.9.2": "Container Node"
})
PRETTY = FIELD_VALUE
class ContainerImageCondition(BaseCondition):
TREE_NODE = "Container Image"
FIELD_VALUE = deferred_verpick({LOWEST: "Container Image"})
PRETTY = "Container Image"
class ProviderCondition(BaseCondition):
TREE_NODE = "Provider"
FIELD_VALUE = deferred_verpick({LOWEST: "Provider"})
PRETTY = "Provider"
| lkhomenk/integration_tests | cfme/control/explorer/conditions.py | Python | gpl-2.0 | 11,889 |
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field(field_name)
if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
return True
return False
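# For example, given a Book model with a ManyToManyField named 'authors',
# lookup_needs_distinct(Book._meta, 'authors__name') returns True, because
# traversing the m2m relation can yield duplicate rows.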
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
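# For example:
#   prepare_lookup_value('pk__in', '1,2,3')          -> ['1', '2', '3']
#   prepare_lookup_value('parent__isnull', 'false')  -> False
#   prepare_lookup_value('parent__isnull', 'true')   -> True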
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"[]<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
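# Round-trip example (values follow from the escape logic above):
#   quote('who/what')     -> 'who_2Fwhat'
#   unquote('who_2Fwhat') -> 'who/what'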
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
        them to the user in the confirmation page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
"""
For historical reasons, the admin app relies on GenericForeignKeys as being
"not found" by get_field(). This could likely be cleaned up.
"""
field = opts.get_field(name)
if field.is_relation and field.many_to_one and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.related_model._meta.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
    elif isinstance(field, (models.BooleanField, models.NullBooleanField)):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
elif isinstance(field, models.FileField) and value:
return format_html('<a href="{}">{}</a>', value.url, value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a ``limit_choices_to`` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/admin/utils.py | Python | mit | 17,422 |
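The display_for_value helper above is a plain type dispatch: the boolean flag wins, then the null check, then datetime before date/time, then numeric types, with text as the fallback. A minimal standalone sketch of the same ordering, with plain string formatting standing in for Django's _boolean_icon and localization helpers (names here are illustrative, not Django API):

import datetime
import decimal

EMPTY_VALUE = '-'  # stand-in for EMPTY_CHANGELIST_VALUE

def display_for_value_sketch(value, boolean=False):
    # Mirror the dispatch order used above.
    if boolean:
        return 'yes' if value else 'no'           # stand-in for _boolean_icon
    elif value is None:
        return EMPTY_VALUE
    elif isinstance(value, datetime.datetime):
        return value.isoformat(' ')
    elif isinstance(value, (datetime.date, datetime.time)):
        return value.isoformat()
    elif isinstance(value, (int, decimal.Decimal, float)):
        return '{:n}'.format(value)
    return str(value)

if __name__ == '__main__':
    print(display_for_value_sketch(None))                       # -
    print(display_for_value_sketch(decimal.Decimal('3.14')))    # 3.14
    print(display_for_value_sketch(datetime.date(2015, 1, 2)))  # 2015-01-02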
import toolbox
import numpy as np
import pylab
#extract shot record
data, params = toolbox.initialise("prepro.su")
mask = data['fldr'] == 221
shot = data[mask].copy()
#agc
toolbox.agc(shot, None, **params)
params['primary'] = 'fldr'
params['secondary'] = 'tracf'
params['wiggle'] = True
toolbox.display(shot, **params)
#fk plot
params['dx'] = 33.5 #m
#~ toolbox.fk_view(shot, **params)
#~ #fk filter design
params['fkVelocity'] = 2000
params['fkSmooth'] = 20
params['fkFilter'] = toolbox.fk_design(shot, **params)
shot = toolbox.fk_filter(shot, None, **params)
toolbox.display(shot, **params)
##############end of testing
#~ data, nparams = toolbox.initialise("prepro.su")
#~ toolbox.agc(data, None, **params)
#~ data = toolbox.fk_filter(data, None, **params)
#~ #nmo
#~ params['vels'] = np.fromfile('vels_full.bin').reshape(-1, params['ns'])
#~ params['smute'] = 150
#~ toolbox.nmo(data, None, **params)
#~ data.tofile("fk_nmo_gathers.su")
#~ toolbox.agc(data, None, **params)
#~ #stack
#~ stack = toolbox.stack(data, None, **params)
#~ params['gamma'] = -1
#~ toolbox.tar(stack, None, **params)
#~ stack.tofile("fk_stack.su")
#~ #display
#~ params['primary'] = None
#~ params['secondary'] = 'cdp'
#~ toolbox.display(stack, **params)
pylab.show()
| stuliveshere/SeismicProcessing2015 | prac2_staff/05.0_fk.py | Python | mit | 1,260 |
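The toolbox module driving the script above is not shown here, but the underlying f-k idea is a 2-D Fourier transform of the gather over time and offset. A rough standalone numpy sketch (the function name and the random toy gather are illustrative; the assumed array layout is (ntraces, ns)):

import numpy as np

def fk_spectrum(gather, dt, dx):
    # 2-D amplitude spectrum: axis 0 maps to wavenumber k (from the trace
    # spacing dx), axis 1 to temporal frequency f (from the sample rate dt).
    spec = np.fft.fftshift(np.fft.fft2(gather))
    freqs = np.fft.fftshift(np.fft.fftfreq(gather.shape[1], d=dt))     # Hz
    wavenums = np.fft.fftshift(np.fft.fftfreq(gather.shape[0], d=dx))  # 1/m
    return np.abs(spec), freqs, wavenums

# toy gather: 48 traces, 512 samples, 2 ms sampling, 33.5 m trace spacing
gather = np.random.randn(48, 512)
amp, f, k = fk_spectrum(gather, dt=0.002, dx=33.5)
print(amp.shape, f.max(), k.max())

A dip (velocity) filter like the one fk_design builds would then zero the part of this f-k plane whose apparent velocity |f/k| falls below the chosen fkVelocity before transforming back.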
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import optparse
import mechanize
import urllib
import re
import urlparse
from _winreg import *
def val2addr(val):
addr = ''
for ch in val:
addr += '%02x ' % ord(ch)
addr = addr.strip(' ').replace(' ', ':')[0:17]
return addr
def wiglePrint(username, password, netid):
browser = mechanize.Browser()
browser.open('http://wigle.net')
reqData = urllib.urlencode({'credential_0': username,
'credential_1': password})
browser.open('https://wigle.net/gps/gps/main/login', reqData)
params = {}
params['netid'] = netid
reqParams = urllib.urlencode(params)
respURL = 'http://wigle.net/gps/gps/main/confirmquery/'
resp = browser.open(respURL, reqParams).read()
mapLat = 'N/A'
mapLon = 'N/A'
rLat = re.findall(r'maplat=.*\&', resp)
if rLat:
mapLat = rLat[0].split('&')[0].split('=')[1]
rLon = re.findall(r'maplon=.*\&', resp)
if rLon:
        mapLon = rLon[0].split('&')[0].split('=')[1]
print '[-] Lat: ' + mapLat + ', Lon: ' + mapLon
def printNets(username, password):
net = "SOFTWARE\Microsoft\Windows NT\CurrentVersion"+\
"\NetworkList\Signatures\Unmanaged"
key = OpenKey(HKEY_LOCAL_MACHINE, net)
print '\n[*] Networks You have Joined.'
for i in range(100):
try:
guid = EnumKey(key, i)
netKey = OpenKey(key, str(guid))
(n, addr, t) = EnumValue(netKey, 5)
(n, name, t) = EnumValue(netKey, 4)
macAddr = val2addr(addr)
netName = str(name)
print '[+] ' + netName + ' ' + macAddr
wiglePrint(username, password, macAddr)
CloseKey(netKey)
except:
break
def main():
parser = optparse.OptionParser('usage %prog '+\
'-u <wigle username> -p <wigle password>')
    parser.add_option('-u', dest='username', type='string',
                      help='specify wigle username')
    parser.add_option('-p', dest='password', type='string',
                      help='specify wigle password')
(options, args) = parser.parse_args()
username = options.username
password = options.password
    if username is None or password is None:
print parser.usage
exit(0)
else:
printNets(username, password)
if __name__ == '__main__':
main()
| psb-seclab/CTFStuff | utils/violent_python_code/CH3/1-discoverNetworks.py | Python | mit | 2,387 |
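The registry-to-MAC conversion in val2addr can be checked in isolation; a small standalone sketch with a hypothetical raw value (no registry or Wigle access needed):

def val2addr(val):
    # Same conversion as above: one byte -> two hex digits, colon-separated,
    # truncated to the six octets of a MAC address by the [0:17] slice.
    addr = ''
    for ch in val:
        addr += '%02x ' % ord(ch)
    return addr.strip(' ').replace(' ', ':')[0:17]

raw = '\x00\x1a\x2b\x3c\x4d\x5e\xff'  # hypothetical registry bytes
print(val2addr(raw))                  # 00:1a:2b:3c:4d:5e (7th byte truncated)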
from jmclient import old_mnemonic
import pytest
@pytest.mark.parametrize(
"seedphrase, key, valid",
[
(["spiral", "squeeze", "strain", "sunset", "suspend", "sympathy",
"thigh", "throne", "total", "unseen", "weapon", "weary"],
'0028644c0028644f0028645200286455',
True),
(["pair", "bury", "lung", "swim", "orange", "doctor", "numb", "interest",
"shock", "bloom", "fragile", "screen"],
'fa92999d01431f961a26c876f55d3f6c',
True),
(["check", "squeeze", "strain", "sunset", "suspend", "sympathy",
"thigh", "throne", "total", "unseen", "weapon", "weary"],
'0028644c0028644f0028645200286455',
False),
(["qwerty", "check", "strain", "sunset", "suspend", "sympathy",
"thigh", "throne", "total", "unseen", "weapon", "weary"],
'',
False),
(["", "check", "strain", "sunset", "suspend", "sympathy",
"thigh", "throne", "total", "unseen", "weapon", "weary"],
'',
False),
(["strain", "sunset"],
'',
False),
])
def test_old_mnemonic(seedphrase, key, valid):
if valid:
assert old_mnemonic.mn_decode(seedphrase) == key
assert old_mnemonic.mn_encode(key) == seedphrase
else:
if len(key) > 0:
#test cases where the seedphrase is valid
#but must not match the provided master private key
assert old_mnemonic.mn_decode(seedphrase) != key
else:
#test cases where the seedphrase is intrinsically invalid
#Already known error condition: an incorrectly short
#word list will NOT throw an error; this is handled by calling code
if len(seedphrase) < 12:
print("For known failure case of seedphrase less than 12: ")
print(old_mnemonic.mn_decode(seedphrase))
else:
with pytest.raises(Exception) as e_info:
dummy = old_mnemonic.mn_decode(seedphrase)
print("Got this return value from mn_decode: " + str(dummy))
| AdamISZ/joinmarket-clientserver | jmclient/test/test_mnemonic.py | Python | gpl-3.0 | 2,169 |
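The old Electrum-style mnemonic these tests exercise maps each 32-bit chunk of the key to three words by modular arithmetic over the wordlist length. A toy sketch of that round trip with a tiny hypothetical wordlist (the real list has 1626 words, which is what lets it cover the full 32-bit range; this one only round-trips values below len(WORDS)**3):

WORDS = ['alpha', 'bravo', 'charlie', 'delta', 'echo',
         'foxtrot', 'golf', 'hotel', 'india', 'juliet']
N = len(WORDS)

def mn_encode(hexkey):
    out = []
    for i in range(len(hexkey) // 8):
        x = int(hexkey[8 * i:8 * i + 8], 16)
        w1 = x % N
        w2 = (x // N + w1) % N
        w3 = (x // N // N + w2) % N
        out += [WORDS[w1], WORDS[w2], WORDS[w3]]
    return out

def mn_decode(wlist):
    out = ''
    for i in range(len(wlist) // 3):
        w1, w2, w3 = (WORDS.index(w) for w in wlist[3 * i:3 * i + 3])
        x = w1 + N * ((w2 - w1) % N) + N * N * ((w3 - w2) % N)
        out += '%08x' % x
    return out

phrase = mn_encode('000001c8')
assert mn_decode(phrase) == '000001c8'
print(phrase)  # ['golf', 'bravo', 'foxtrot']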
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from .permissions import PERMISSION_VIEW_JOB_LIST
job_list = {'text': _(u'interval job list'), 'view': 'job_list', 'icon': 'time.png', 'permissions': [PERMISSION_VIEW_JOB_LIST]}
| appsembler/mayan_appsembler | apps/scheduler/links.py | Python | gpl-3.0 | 276 |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 12 11:19:05 2016
@author: birksworks
"""
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
class MLR(BaseEstimator, ClassifierMixin):
best_settings = {
'ag':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 5.3182073751190799,
'test-accuracy':0.9239
},
'amazon-f':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 5.3182073751190799,
'test-accuracy':0.5802
},
'amazon-p':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 5.3182073751190799,
'test-accuracy':0.9239
},
'dbp':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 1.0,
'test-accuracy':0.9821
},
'imdb-50K':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 2.3751035233654019,
'test-accuracy':0.8899
},
'rt-s':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 9.9380510738102377,
'test-accuracy':0.7790
},
'stsa-f':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 1.0857064404836059,
'test-accuracy':0.3946
},
'stsa-p':{
'max_iter':100,
'fit_intercept': True,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 9.9445041732210306,
'test-accuracy':0.8116
},
        # NOTE: 'yelp-p' appears twice in best_settings; this earlier entry
        # is silently overridden by the duplicate 'yelp-p' entry below.
        'yelp-p':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 1.0,
'test-accuracy':0.9821
},
'yelp-f':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 1.0,
'test-accuracy':0.6257
},
'yelp-p':{
'max_iter':100,
'fit_intercept': False,
'use_idf': True,
'ngram_range': (1, 2),
'penalty': 'l2',
'C': 1.0,
'test-accuracy':0.9530
},
}
def __init__(
self,
C=1.0,
penalty=u'l2',
max_iter=100,
fit_intercept=False,
ngram_range=(1,1),
use_idf=True,
tokenizer=None,
stop_words=None
):
self.C = C
self.penalty = penalty
self.max_iter = max_iter
self.fit_intercept = fit_intercept
self.ngram_range = ngram_range
self.use_idf = use_idf
self.tokenizer = tokenizer
self.stop_words = stop_words
self.model = None
def fit(self, X, y):
self.model = Pipeline([
('vectorizer', CountVectorizer(tokenizer=self.tokenizer, stop_words=self.stop_words, ngram_range=self.ngram_range)),
('transformer', TfidfTransformer(norm=self.penalty, use_idf=self.use_idf)),
('classifier', LogisticRegression(C=self.C, penalty=self.penalty, fit_intercept=self.fit_intercept, max_iter=self.max_iter)),
])
self.model.fit(X, y)
return self
def predict(self, X):
return self.model.predict(X)
def predict_proba(self, X):
return self.model.predict_proba(X)
def predict_log_proba(self, X):
return self.model.predict_log_proba(X)
| bobflagg/sentiment-analysis | fsa/model/mlr.py | Python | gpl-3.0 | 4,477 |
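Since MLR is a plain scikit-learn estimator wrapper, it can be smoke-tested on toy data. A hypothetical example (it assumes this file is importable as mlr and that scikit-learn is installed; the texts and labels are made up):

from mlr import MLR

texts = ['great movie', 'terrible film', 'loved it', 'hated it'] * 10
labels = ['pos', 'neg', 'pos', 'neg'] * 10

# Reuse the recorded best settings for the rt-s corpus; 'test-accuracy'
# is a bookkeeping key, so pick the constructor fields explicitly.
s = MLR.best_settings['rt-s']
clf = MLR(C=s['C'], penalty=s['penalty'], max_iter=s['max_iter'],
          fit_intercept=s['fit_intercept'], ngram_range=s['ngram_range'],
          use_idf=s['use_idf'])
clf.fit(texts, labels)
print(clf.predict(['great film', 'terrible movie']))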
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import sys, os, re
from functools import partial
from PyQt5.Qt import (
QGridLayout, QToolButton, QIcon, QRadioButton, QMenu, QApplication, Qt,
QSize, QWidget, QLabel, QStackedLayout, QPainter, QRect, QVBoxLayout,
QCursor, QEventLoop, QKeySequence, pyqtSignal, QTimer, QHBoxLayout)
from calibre.ebooks.oeb.polish.container import Container
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.gui2 import info_dialog
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.tweak_book.editor import syntax_from_mime
from calibre.gui2.tweak_book.diff.view import DiffView
from calibre.gui2.tweak_book.widgets import Dialog
from calibre.gui2.widgets2 import HistoryLineEdit2
from calibre.utils.filenames import samefile
from calibre.utils.icu import numeric_sort_key
class BusyWidget(QWidget): # {{{
def __init__(self, parent):
QWidget.__init__(self, parent)
l = QVBoxLayout()
self.setLayout(l)
l.addStretch(10)
self.pi = ProgressIndicator(self, 128)
l.addWidget(self.pi, alignment=Qt.AlignHCenter)
self.dummy = QLabel('<h2>\xa0')
l.addSpacing(10)
l.addWidget(self.dummy, alignment=Qt.AlignHCenter)
l.addStretch(10)
self.text = _('Calculating differences, please wait...')
def paintEvent(self, ev):
br = ev.region().boundingRect()
QWidget.paintEvent(self, ev)
p = QPainter(self)
p.setClipRect(br)
f = p.font()
f.setBold(True)
f.setPointSize(20)
p.setFont(f)
p.setPen(Qt.SolidLine)
r = QRect(0, self.dummy.geometry().top() + 10, self.geometry().width(), 150)
p.drawText(r, Qt.AlignHCenter | Qt.AlignTop | Qt.TextSingleLine, self.text)
p.end()
# }}}
class Cache(object):
def __init__(self):
self._left, self._right = {}, {}
self.left, self.right = self._left.get, self._right.get
self.set_left, self.set_right = self._left.__setitem__, self._right.__setitem__
def changed_files(list_of_names1, list_of_names2, get_data1, get_data2):
list_of_names1, list_of_names2 = frozenset(list_of_names1), frozenset(list_of_names2)
changed_names = set()
cache = Cache()
common_names = list_of_names1.intersection(list_of_names2)
for name in common_names:
left, right = get_data1(name), get_data2(name)
if len(left) == len(right) and left == right:
continue
cache.set_left(name, left), cache.set_right(name, right)
changed_names.add(name)
removals = list_of_names1 - common_names
adds = set(list_of_names2 - common_names)
adata, rdata = {a:get_data2(a) for a in adds}, {r:get_data1(r) for r in removals}
ahash = {a:hash(d) for a, d in adata.iteritems()}
rhash = {r:hash(d) for r, d in rdata.iteritems()}
renamed_names, removed_names, added_names = {}, set(), set()
for name, rh in rhash.iteritems():
for n, ah in ahash.iteritems():
if ah == rh:
renamed_names[name] = n
adds.discard(n)
break
else:
cache.set_left(name, rdata[name])
removed_names.add(name)
for name in adds:
cache.set_right(name, adata[name])
added_names.add(name)
return cache, changed_names, renamed_names, removed_names, added_names
def get_decoded_raw(name):
from calibre.ebooks.chardet import xml_to_unicode, force_encoding
with open(name, 'rb') as f:
raw = f.read()
syntax = syntax_from_mime(name, guess_type(name))
if syntax is None:
try:
raw = raw.decode('utf-8')
except ValueError:
pass
elif syntax != 'raster_image':
if syntax in {'html', 'xml'}:
raw = xml_to_unicode(raw, verbose=True)[0]
else:
m = re.search(br"coding[:=]\s*([-\w.]+)", raw[:1024], flags=re.I)
if m is not None and m.group(1) != '8bit':
enc = m.group(1)
if enc == b'unicode':
enc = 'utf-8'
else:
enc = force_encoding(raw, verbose=True)
try:
raw = raw.decode(enc)
except (LookupError, ValueError):
pass
return raw, syntax
def file_diff(left, right):
(raw1, syntax1), (raw2, syntax2) = map(get_decoded_raw, (left, right))
if type(raw1) is not type(raw2):
raw1, raw2 = open(left, 'rb').read(), open(right, 'rb').read()
cache = Cache()
cache.set_left(left, raw1), cache.set_right(right, raw2)
changed_names = {} if raw1 == raw2 else {left:right}
return cache, {left:syntax1, right:syntax2}, changed_names, {}, set(), set()
def dir_diff(left, right):
ldata, rdata, lsmap, rsmap = {}, {}, {}, {}
for base, data, smap in ((left, ldata, lsmap), (right, rdata, rsmap)):
for dirpath, dirnames, filenames in os.walk(base):
for filename in filenames:
path = os.path.join(dirpath, filename)
name = os.path.relpath(path, base)
data[name], smap[name] = get_decoded_raw(path)
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
ldata, rdata, ldata.get, rdata.get)
syntax_map = {name:lsmap[name] for name in changed_names}
syntax_map.update({name:lsmap[name] for name in renamed_names})
syntax_map.update({name:rsmap[name] for name in added_names})
syntax_map.update({name:lsmap[name] for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def container_diff(left, right):
left_names, right_names = set(left.name_path_map), set(right.name_path_map)
if left.cloned or right.cloned:
# Since containers are often clones of each other, as a performance
# optimization, discard identical names that point to the same physical
# file, without needing to read the file's contents.
# First commit dirtied names
for c in (left, right):
Container.commit(c, keep_parsed=True)
samefile_names = {name for name in left_names & right_names if samefile(
left.name_path_map[name], right.name_path_map[name])}
left_names -= samefile_names
right_names -= samefile_names
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
left_names, right_names, left.raw_data, right.raw_data)
def syntax(container, name):
mt = container.mime_map[name]
return syntax_from_mime(name, mt)
syntax_map = {name:syntax(left, name) for name in changed_names}
syntax_map.update({name:syntax(left, name) for name in renamed_names})
syntax_map.update({name:syntax(right, name) for name in added_names})
syntax_map.update({name:syntax(left, name) for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def ebook_diff(path1, path2):
from calibre.ebooks.oeb.polish.container import get_container
left = get_container(path1, tweak_mode=True)
right = get_container(path2, tweak_mode=True)
return container_diff(left, right)
class Diff(Dialog):
revert_requested = pyqtSignal()
line_activated = pyqtSignal(object, object, object)
def __init__(self, revert_button_msg=None, parent=None, show_open_in_editor=False, show_as_window=False):
self.context = 3
self.beautify = False
self.apply_diff_calls = []
self.show_open_in_editor = show_open_in_editor
self.revert_button_msg = revert_button_msg
Dialog.__init__(self, _('Differences between books'), 'diff-dialog', parent=parent)
self.setWindowFlags(self.windowFlags() | Qt.WindowMinMaxButtonsHint)
if show_as_window:
self.setWindowFlags(Qt.Window)
self.view.line_activated.connect(self.line_activated)
def sizeHint(self):
geom = QApplication.instance().desktop().availableGeometry(self)
return QSize(int(0.9 * geom.width()), int(0.8 * geom.height()))
def setup_ui(self):
self.setWindowIcon(QIcon(I('diff.png')))
self.stacks = st = QStackedLayout(self)
self.busy = BusyWidget(self)
self.w = QWidget(self)
st.addWidget(self.busy), st.addWidget(self.w)
self.setLayout(st)
self.l = l = QGridLayout()
self.w.setLayout(l)
self.view = v = DiffView(self, show_open_in_editor=self.show_open_in_editor)
l.addWidget(v, l.rowCount(), 0, 1, -1)
r = l.rowCount()
self.bp = b = QToolButton(self)
b.setIcon(QIcon(I('back.png')))
b.clicked.connect(partial(self.view.next_change, -1))
b.setToolTip(_('Go to previous change') + ' [p]')
b.setText(_('&Previous change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 0)
self.bn = b = QToolButton(self)
b.setIcon(QIcon(I('forward.png')))
b.clicked.connect(partial(self.view.next_change, 1))
b.setToolTip(_('Go to next change') + ' [n]')
b.setText(_('&Next change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 1)
self.search = s = HistoryLineEdit2(self)
s.initialize('diff_search_history')
l.addWidget(s, r, 2)
s.setPlaceholderText(_('Search for text'))
s.returnPressed.connect(partial(self.do_search, False))
self.sbn = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-down.png')))
b.clicked.connect(partial(self.do_search, False))
b.setToolTip(_('Find next match'))
b.setText(_('Next &match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 3)
self.sbp = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-up.png')))
b.clicked.connect(partial(self.do_search, True))
b.setToolTip(_('Find previous match'))
b.setText(_('P&revious match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 4)
self.lb = b = QRadioButton(_('Left panel'), self)
b.setToolTip(_('Perform search in the left panel'))
l.addWidget(b, r, 5)
self.rb = b = QRadioButton(_('Right panel'), self)
b.setToolTip(_('Perform search in the right panel'))
l.addWidget(b, r, 6)
b.setChecked(True)
self.pb = b = QToolButton(self)
b.setIcon(QIcon(I('config.png')))
b.setText(_('&Options')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
b.setToolTip(_('Change how the differences are displayed'))
b.setPopupMode(b.InstantPopup)
m = QMenu(b)
b.setMenu(m)
cm = self.cm = QMenu(_('Lines of context around each change'))
for i in (3, 5, 10, 50):
cm.addAction(_('Show %d lines of context') % i, partial(self.change_context, i))
cm.addAction(_('Show all text'), partial(self.change_context, None))
self.beautify_action = m.addAction('', self.toggle_beautify)
self.set_beautify_action_text()
m.addMenu(cm)
l.addWidget(b, r, 7)
self.hl = QHBoxLayout()
l.addLayout(self.hl, l.rowCount(), 0, 1, -1)
self.names = QLabel('')
self.hl.addWidget(self.names, r)
self.bb.setStandardButtons(self.bb.Close)
if self.revert_button_msg is not None:
self.rvb = b = self.bb.addButton(self.revert_button_msg, self.bb.ActionRole)
b.setIcon(QIcon(I('edit-undo.png'))), b.setAutoDefault(False)
b.clicked.connect(self.revert_requested)
b.clicked.connect(self.reject)
self.bb.button(self.bb.Close).setDefault(True)
self.hl.addWidget(self.bb, r)
self.view.setFocus(Qt.OtherFocusReason)
def break_cycles(self):
self.view = None
for x in ('revert_requested', 'line_activated'):
try:
getattr(self, x).disconnect()
except:
pass
def do_search(self, reverse):
text = unicode(self.search.text())
if not text.strip():
return
v = self.view.view.left if self.lb.isChecked() else self.view.view.right
v.search(text, reverse=reverse)
def change_context(self, context):
if context == self.context:
return
self.context = context
self.refresh()
def refresh(self):
with self:
self.view.clear()
for args, kwargs in self.apply_diff_calls:
kwargs['context'] = self.context
kwargs['beautify'] = self.beautify
self.view.add_diff(*args, **kwargs)
self.view.finalize()
def toggle_beautify(self):
self.beautify = not self.beautify
self.set_beautify_action_text()
self.refresh()
def set_beautify_action_text(self):
self.beautify_action.setText(
_('Beautify files before comparing them') if not self.beautify else
_('Do not beautify files before comparing'))
def __enter__(self):
self.stacks.setCurrentIndex(0)
self.busy.pi.startAnimation()
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
def __exit__(self, *args):
self.busy.pi.stopAnimation()
self.stacks.setCurrentIndex(1)
QApplication.restoreOverrideCursor()
def set_names(self, names):
if isinstance(names, tuple):
self.names.setText('%s <--> %s' % names)
else:
self.names.setText('')
def ebook_diff(self, path1, path2, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(_('The books are identical'), *ebook_diff(path1, path2))
self.view.finalize()
if identical:
self.reject()
def container_diff(self, left, right, identical_msg=None, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(identical_msg or _('No changes found'), *container_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def file_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The files are identical'), *file_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def dir_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The directories are identical'), *dir_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def apply_diff(self, identical_msg, cache, syntax_map, changed_names, renamed_names, removed_names, added_names):
self.view.clear()
self.apply_diff_calls = calls = []
def add(args, kwargs):
self.view.add_diff(*args, **kwargs)
calls.append((args, kwargs))
if len(changed_names) + len(renamed_names) + len(removed_names) + len(added_names) < 1:
info_dialog(self, _('No changes found'), identical_msg, show=True)
return True
kwargs = lambda name: {'context':self.context, 'beautify':self.beautify, 'syntax':syntax_map.get(name, None)}
if isinstance(changed_names, dict):
for name, other_name in sorted(changed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, other_name, cache.left(name), cache.right(other_name))
add(args, kwargs(name))
else:
for name in sorted(changed_names, key=numeric_sort_key):
args = (name, name, cache.left(name), cache.right(name))
add(args, kwargs(name))
for name in sorted(added_names, key=numeric_sort_key):
args = (_('[%s was added]') % name, name, None, cache.right(name))
add(args, kwargs(name))
for name in sorted(removed_names, key=numeric_sort_key):
args = (name, _('[%s was removed]') % name, cache.left(name), None)
add(args, kwargs(name))
for name, new_name in sorted(renamed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, new_name, None, None)
add(args, kwargs(name))
def keyPressEvent(self, ev):
if not self.view.handle_key(ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
return # The enter key is used by the search box, so prevent it closing the dialog
if ev.key() == Qt.Key_Slash:
return self.search.setFocus(Qt.OtherFocusReason)
if ev.matches(QKeySequence.Copy):
text = self.view.view.left.selected_text + self.view.view.right.selected_text
if text:
QApplication.clipboard().setText(text)
return
if ev.matches(QKeySequence.FindNext):
self.sbn.click()
return
if ev.matches(QKeySequence.FindPrevious):
self.sbp.click()
return
return Dialog.keyPressEvent(self, ev)
def compare_books(path1, path2, revert_msg=None, revert_callback=None, parent=None, names=None):
d = Diff(parent=parent, revert_button_msg=revert_msg)
if revert_msg is not None:
d.revert_requested.connect(revert_callback)
QTimer.singleShot(0, partial(d.ebook_diff, path1, path2, names=names))
d.exec_()
try:
d.revert_requested.disconnect()
except:
pass
d.break_cycles()
def main(args=sys.argv):
from calibre.gui2 import Application
left, right = args[-2:]
ext1, ext2 = left.rpartition('.')[-1].lower(), right.rpartition('.')[-1].lower()
if ext1.startswith('original_'):
ext1 = ext1.partition('_')[-1]
if ext2.startswith('original_'):
        ext2 = ext2.partition('_')[-1]
if os.path.isdir(left):
attr = 'dir_diff'
elif (ext1, ext2) in {('epub', 'epub'), ('azw3', 'azw3')}:
attr = 'ebook_diff'
else:
attr = 'file_diff'
app = Application([]) # noqa
d = Diff(show_as_window=True)
func = getattr(d, attr)
QTimer.singleShot(0, lambda : func(left, right))
d.exec_()
return 0
if __name__ == '__main__':
main()
| drxaero/calibre | src/calibre/gui2/tweak_book/diff/main.py | Python | gpl-3.0 | 18,741 |
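The heart of the module above is changed_files, which buckets names into changed/renamed/removed/added and detects renames by comparing content hashes. A standalone Python 3 sketch of that classification over two plain {name: data} dicts (no calibre imports; like the original, rename detection trusts the hash comparison alone):

def classify(old, new):
    common = old.keys() & new.keys()
    changed = {n for n in common if old[n] != new[n]}
    removed, added = old.keys() - common, set(new.keys() - common)
    renamed = {}
    ahash = {a: hash(new[a]) for a in added}
    for r in sorted(removed):
        rh = hash(old[r])
        for a, ah in ahash.items():
            if ah == rh and a in added:   # each added name is claimed once
                renamed[r] = a
                added.discard(a)
                break
    removed -= renamed.keys()
    return changed, renamed, removed, added

old = {'a.txt': 'one', 'b.txt': 'two', 'c.txt': 'three'}
new = {'a.txt': 'ONE', 'd.txt': 'two'}
print(classify(old, new))
# ({'a.txt'}, {'b.txt': 'd.txt'}, {'c.txt'}, set())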
# vim: expandtab ts=4 sw=4 sts=4 fileencoding=utf-8:
#
# Copyright (C) 2007-2010 GNS3 Development Team (http://www.gns3.net/team).
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# http://www.gns3.net/contact
#
from GNS3.Node.AbstractNode import AbstractNode
from PyQt4 import QtCore, QtGui
from GNS3.Utils import translate, debug
import GNS3.Dynagen.dynamips_lib as lib
import GNS3.Globals as globals
hub_id = 1
def init_hub_id(id = 1):
global hub_id
hub_id = id
class Hub(AbstractNode):
""" Hub class implementing the Ethernet switch
"""
def __init__(self, renderer_normal, renderer_select):
AbstractNode.__init__(self, renderer_normal, renderer_select)
# assign a new hostname
global hub_id
# check if hostname has already been assigned
for node in globals.GApp.topology.nodes.itervalues():
if 'HUB' + str(hub_id) == node.hostname:
hub_id = hub_id + 1
break
self.hostname = 'HUB' + str(hub_id)
hub_id = hub_id + 1
AbstractNode.setCustomToolTip(self)
self.config = None
self.dynagen = globals.GApp.dynagen
self.e = 'Hub ' + self.hostname
self.d = None
self.hypervisor = None
self.running_config = None
self.hub = None
self.dynagen.update_running_config()
def __del__(self):
self.delete_hub()
def delete_hub(self):
""" Delete this hub
"""
if self.hub:
try:
self.hub.delete()
if self.dynagen.devices.has_key(self.hostname):
del self.dynagen.devices[self.hostname]
if self.hub in self.hypervisor.devices:
self.hypervisor.devices.remove(self.hub)
self.dynagen.update_running_config()
except lib.DynamipsErrorHandled:
pass
self.hub = None
def set_hostname(self, hostname):
""" Set a hostname
"""
self.hostname = hostname
self.e = 'Hub ' + self.hostname
self.updateToolTips()
def setCustomToolTip(self):
""" Set a custom tool tip
"""
if self.hub:
try:
self.setToolTip(self.hub.info())
except:
AbstractNode.setCustomToolTip(self)
else:
AbstractNode.setCustomToolTip(self)
def get_running_config_name(self):
""" Return node name as stored in the running config
"""
return (self.e)
def create_config(self):
""" Creates the configuration of this hub
"""
self.config = {}
# by default create 8 ports
self.config['ports'] = [1,2,3,4,5,6,7,8]
def duplicate_config(self):
""" Returns a copy of the configuration
"""
config = self.config.copy()
config['ports'] = list(self.config['ports'])
return (config)
def get_config(self):
""" Returns the local configuration copy
"""
return self.config
def set_config(self, config):
""" Set a configuration in Dynamips
config: dict
"""
self.config = config.copy()
self.config['ports'] = list(config['ports'])
globals.GApp.topology.changed = True
self.mapping()
def set_hypervisor(self, hypervisor):
""" Records a hypervisor
hypervisor: object
"""
self.hypervisor = hypervisor
self.d = self.hypervisor.host + ':' + str(self.hypervisor.port)
def autoAllocateFreePort(self):
""" Auto allocate one additional free port when all ports are occupied
"""
if len(self.config['ports']) == len(self.getConnectedInterfaceList()):
self.config['ports'].append(len(self.config['ports']) + 1)
def getInterfaces(self):
""" Returns all interfaces
"""
self.autoAllocateFreePort()
ports = map(int, self.config['ports'])
ports.sort()
return (map(str, ports))
def get_dynagen_device(self):
""" Returns the dynagen device corresponding to this hub
"""
if not self.hub:
self.hub = lib.Hub(self.hypervisor, name = self.hostname)
self.dynagen.devices[self.hostname] = self.hub
if not self.dynagen.running_config[self.d].has_key(self.e):
self.dynagen.update_running_config()
self.running_config = self.dynagen.running_config[self.d][self.e]
return (self.hub)
def set_dynagen_device(self, hub):
""" Set a dynagen device in this node, used for .net import
"""
self.hub = hub
def reconfigNode(self, new_hostname):
""" Used when changing the hostname
"""
links = self.getEdgeList().copy()
for link in links:
globals.GApp.topology.deleteLink(link)
self.delete_hub()
self.set_hostname(new_hostname)
if len(links):
self.get_dynagen_device()
for link in links:
globals.GApp.topology.addLink(link.source.id, link.srcIf, link.dest.id, link.destIf)
def configNode(self):
""" Node configuration
"""
self.create_config()
return True
def mapping(self):
""" Configure Ethernet port mapping
"""
connected_interfaces = map(int, self.getConnectedInterfaceList())
for port in self.config['ports']:
if port in connected_interfaces:
if not self.hub.nios.has_key(port):
(destnode, destinterface)= self.getConnectedNeighbor(str(port))
if destinterface.lower()[:3] == 'nio':
debug("hub_map: " + str(port) + ' to ' + destinterface)
self.dynagen.hub_to_nio(self.hub, port, destinterface)
def startNode(self):
""" Start the node
"""
self.mapping()
self.startupInterfaces()
self.state = 'running'
globals.GApp.mainWindow.treeWidget_TopologySummary.changeNodeStatus(self.hostname, 'running')
self.setCustomToolTip()
def mousePressEvent(self, event):
""" Call when the node is clicked
event: QtGui.QGraphicsSceneMouseEvent instance
"""
if globals.addingLinkFlag and globals.currentLinkType != globals.Enum.LinkType.Manual and event.button() == QtCore.Qt.LeftButton:
self.autoAllocateFreePort()
connected_ports = self.getConnectedInterfaceList()
for port in self.config['ports']:
if not str(port) in connected_ports:
self.emit(QtCore.SIGNAL("Add link"), self.id, str(port))
return
QtGui.QMessageBox.critical(globals.GApp.mainWindow, translate("Hub", "Connection"), translate("Hub", "No port available"))
# tell the scene to cancel the link addition by sending a None id and None interface
self.emit(QtCore.SIGNAL("Add link"), None, None)
else:
AbstractNode.mousePressEvent(self, event)
| dlintott/gns3 | src/GNS3/Node/Hub.py | Python | gpl-2.0 | 7,744 |
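The port bookkeeping in autoAllocateFreePort is the only sizing logic the hub has: the port list grows by exactly one entry, and only when every existing port is wired. A standalone sketch of that policy:

def auto_allocate(ports, connected):
    # Mirror Hub.autoAllocateFreePort: append one new port number only
    # when the number of connections has caught up with the port count.
    if len(ports) == len(connected):
        ports.append(len(ports) + 1)
    return ports

ports = [1, 2, 3]
print(auto_allocate(ports, [1, 2]))     # [1, 2, 3]    - a port is still free
print(auto_allocate(ports, [1, 2, 3]))  # [1, 2, 3, 4] - all busy, grow by one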
# -*- coding:utf-8 -*-
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
vm = test_lib.lib_get_specific_stub('e2e_mini/vm', 'vm')
vm_ops = None
vm_name = 'vm-' + vm.get_time_postfix()
vm_image_name = 'vm_image-' + vm.get_time_postfix()
def test():
global vm_ops
vm_ops = vm.VM()
vm_ops.create_vm(name=vm_name)
vm_ops.create_vm_image(vm_name, vm_image_name)
vm_ops.check_browser_console_log()
test_util.test_pass('Test Create VM Image Successful')
def env_recover():
global vm_ops
global vm_name
global vm_image_name
vm_ops.delete_vm(vm_name)
vm_ops.delete_image(vm_image_name)
vm_ops.close()
#Will be called only if exception happens in test().
def error_cleanup():
global vm_ops
try:
vm_ops.delete_vm()
vm_ops.close()
except:
pass
| zstackio/zstack-woodpecker | integrationtest/vm/e2e_mini/vm/test_create_vm_image.py | Python | apache-2.0 | 865 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the time synchronization state checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import checks_test_lib
from grr.parsers import config_file
class TimeSyncTests(checks_test_lib.HostCheckTest):
checks_loaded = False
def setUp(self, *args, **kwargs):
super(TimeSyncTests, self).setUp(*args, **kwargs)
if not self.checks_loaded:
self.LoadCheck("time.yaml")
self.checks_loaded = True
def testTimeSyncBoot(self):
"""Test we handle the cases for when a time service is started at boot."""
sym = ("Missing attribute: No time synchronization service is started "
"at boot time.")
found = ["Expected state was not found"]
bad = []
good = ["/etc/rc2.d/S07ntp"]
# The failure cases. I.e. No startup file for a time service.
results = self.RunChecks(self.GenSysVInitData(bad))
self.assertCheckDetectedAnom("TIME-SYNC-BOOT", results, sym, found)
# Now the successful cases.
results = self.RunChecks(self.GenSysVInitData(good))
self.assertCheckUndetected("TIME-SYNC-BOOT", results)
def testTimeSyncRunning(self):
"""Test we handle the cases for when a time service is running or not."""
found = ["Expected state was not found"]
bad = [("foo", 233, ["/usr/local/foo", "-flags"])]
good = [(
"ntpd", 42,
["/usr/sbin/ntpd", "-p", "/var/run/ntpd.pid", "-g", "-u", "117:125"])]
# Check for when it is not running.
self.assertCheckDetectedAnom(
"TIME-SYNC-RUNNING",
self.RunChecks(self.GenProcessData(bad)),
"Missing attribute: A time synchronization service is not running.",
found)
# Now check for when it is.
self.assertCheckUndetected("TIME-SYNC-RUNNING",
self.RunChecks(self.GenProcessData(good)))
def testNtpDoesntAllowOpenQueries(self):
"""Test for checking we don't allow queries by default."""
parser = config_file.NtpdParser()
check_id = "TIME-NTP-NO-OPEN-QUERIES"
artifact_id = "NtpConfFile"
good_config = {
"/etc/ntp.conf":
"""
restrict default nomodify noquery nopeer
"""
}
bad_config = {
"/etc/ntp.conf":
"""
restrict default nomodify nopeer
"""
}
bad_default_config = {"/etc/ntp.conf": """
"""}
# A good config should pass.
results = self.RunChecks(
self.GenFileData("NtpConfFile", good_config, parser))
self.assertCheckUndetected(check_id, results)
found = ["Expected state was not found"]
sym = ("Missing attribute: ntpd.conf is configured or defaults to open "
"queries. Can allow DDoS. This configuration is an on-going "
"recommendation following the Ntp December 2014 Vulnerability "
"notice. (http://support.ntp.org/bin/view/Main/SecurityNotice)")
# A bad one should detect a problem.
results = self.RunChecks(self.GenFileData(artifact_id, bad_config, parser))
self.assertCheckDetectedAnom(check_id, results, sym, found)
# And as the default is to be queryable, check we detect an empty config.
results = self.RunChecks(
self.GenFileData(artifact_id, bad_default_config, parser))
self.assertCheckDetectedAnom(check_id, results, sym, found)
def testNtpHasMonitorDisabled(self):
"""Test for checking that monitor is disabled."""
parser = config_file.NtpdParser()
check_id = "TIME-NTP-REFLECTION"
artifact_id = "NtpConfFile"
good_config = {"/etc/ntp.conf": """
disable monitor
"""}
good_tricky_config = {
"/etc/ntp.conf":
"""
disable monitor auth
enable kernel monitor auth
disable kernel monitor
"""
}
bad_config = {"/etc/ntp.conf": """
enable monitor
"""}
bad_default_config = {"/etc/ntp.conf": """
"""}
bad_tricky_config = {
"/etc/ntp.conf":
"""
enable kernel monitor auth
disable monitor auth
enable kernel monitor
"""
}
found = ["ntpd.conf has monitor flag set to True."]
sym = ("Found: ntpd.conf is configured to allow monlist NTP reflection "
"attacks.")
results = self.RunChecks(self.GenFileData(artifact_id, good_config, parser))
self.assertCheckUndetected(check_id, results)
results = self.RunChecks(
self.GenFileData(artifact_id, good_tricky_config, parser))
self.assertCheckUndetected(check_id, results)
results = self.RunChecks(self.GenFileData(artifact_id, bad_config, parser))
self.assertCheckDetectedAnom(check_id, results, sym, found)
results = self.RunChecks(
self.GenFileData(artifact_id, bad_default_config, parser))
self.assertCheckDetectedAnom(check_id, results, sym, found)
results = self.RunChecks(
self.GenFileData(artifact_id, bad_tricky_config, parser))
self.assertCheckDetectedAnom(check_id, results, sym, found)
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| pidydx/grr | grr/checks/time_test.py | Python | apache-2.0 | 5,144 |
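The TIME-NTP-NO-OPEN-QUERIES cases above all turn on one predicate: does the restrict default line carry noquery? A standalone sketch of just that predicate over raw ntp.conf text (the real check goes through config_file.NtpdParser, so this is illustrative only):

import re

def default_restrict_has_noquery(conf_text):
    # An absent 'restrict default' line fails too, since ntpd defaults to
    # answering queries - exactly why the empty config is flagged above.
    for line in conf_text.splitlines():
        m = re.match(r'\s*restrict\s+default\s+(.*)', line)
        if m:
            return 'noquery' in m.group(1).split()
    return False

print(default_restrict_has_noquery('restrict default nomodify noquery nopeer'))  # True
print(default_restrict_has_noquery('restrict default nomodify nopeer'))          # False
print(default_restrict_has_noquery(''))                                          # False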
# coding=utf-8
from __future__ import unicode_literals
import hashlib
import random
import string
import uuid
from faker.providers.date_time import Provider as DatetimeProvider
from .. import BaseProvider
class Provider(BaseProvider):
language_codes = ('cn', 'de', 'el', 'en', 'es', 'fr', 'it', 'pt', 'ru')
@classmethod
def boolean(cls, chance_of_getting_true=50):
return random.randint(1, 100) <= chance_of_getting_true
@classmethod
def null_boolean(cls):
return {
0: None,
1: True,
-1: False
}[random.randint(-1, 1)]
@classmethod
def md5(cls, raw_output=False):
"""
        Calculates the md5 hash of a random string
:example 'cfcd208495d565ef66e7dff9f98764da'
"""
res = hashlib.md5(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha1(cls, raw_output=False):
"""
        Calculates the sha1 hash of a random string
:example 'b5d86317c2a144cd04d0d7c03b2b02666fafadf2'
"""
res = hashlib.sha1(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
@classmethod
def sha256(cls, raw_output=False):
"""
        Calculates the sha256 hash of a random string
:example '85086017559ccc40638fcde2fecaf295e0de7ca51b7517b6aebeaaf75b4d4654'
"""
res = hashlib.sha256(str(random.random()).encode('utf-8'))
if raw_output:
return res.digest()
return res.hexdigest()
def locale(self):
return self.language_code() + '_' + self.country_code()
@classmethod
def country_code(cls):
return cls.random_element(DatetimeProvider.countries)['code']
@classmethod
def language_code(cls):
return cls.random_element(cls.language_codes)
@classmethod
def uuid4(cls):
"""
Generates a random UUID4 string.
"""
return str(uuid.uuid4())
@classmethod
def password(cls, length=10, special_chars=True, digits=True, upper_case=True, lower_case=True):
"""
Generates a random password.
@param length: Integer. Length of a password
@param special_chars: Boolean. Whether to use special characters !@#$%^&*()_+
@param digits: Boolean. Whether to use digits
@param upper_case: Boolean. Whether to use upper letters
@param lower_case: Boolean. Whether to use lower letters
@return: String. Random password
"""
chars = ""
if special_chars:
chars += "!@#$%^&*()_+"
if digits:
chars += string.digits
if upper_case:
chars += string.ascii_uppercase
if lower_case:
chars += string.ascii_lowercase
return ''.join(random.choice(chars) for x in range(length))
| venmo/faker | faker/providers/misc/__init__.py | Python | mit | 2,967 |
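Note that password() above draws every character independently from the combined pool (using the non-cryptographic random module), so a requested class can be absent from the output. A hypothetical variant, not part of the Faker API, that guarantees at least one character per enabled class:

import random
import string

def password_with_guarantees(length=10, special_chars=True, digits=True,
                             upper_case=True, lower_case=True):
    # One pool per enabled class; take one char from each, fill the rest
    # from the union, then shuffle so the guaranteed chars aren't first.
    pools = []
    if special_chars:
        pools.append("!@#$%^&*()_+")
    if digits:
        pools.append(string.digits)
    if upper_case:
        pools.append(string.ascii_uppercase)
    if lower_case:
        pools.append(string.ascii_lowercase)
    union = ''.join(pools)
    chars = [random.choice(p) for p in pools]
    chars += [random.choice(union) for _ in range(max(0, length - len(chars)))]
    random.shuffle(chars)
    return ''.join(chars)

print(password_with_guarantees(12))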
# -*- coding: utf-8 -*-
# Copyright (C) 2011, 2012 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyudev.device._device
=====================
Device class implementation of :mod:`pyudev`.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
# isort: FUTURE
from __future__ import absolute_import, division, print_function, unicode_literals
# isort: STDLIB
import collections
import os
import re
import sys
from datetime import timedelta
# isort: LOCAL
from pyudev._errors import (
DeviceNotFoundAtPathError,
DeviceNotFoundByFileError,
DeviceNotFoundByInterfaceIndexError,
DeviceNotFoundByKernelDeviceError,
DeviceNotFoundByNameError,
DeviceNotFoundByNumberError,
DeviceNotFoundInEnvironmentError,
)
from pyudev._util import (
ensure_byte_string,
ensure_unicode_string,
get_device_type,
string_to_bool,
udev_list_iterate,
)
# pylint: disable=too-many-lines
class Devices(object):
"""
Class for constructing :class:`Device` objects from various kinds of data.
"""
@classmethod
def from_path(cls, context, path):
"""
Create a device from a device ``path``. The ``path`` may or may not
start with the ``sysfs`` mount point:
>>> from pyudev import Context, Device
>>> context = Context()
>>> Devices.from_path(context, '/devices/platform')
Device(u'/sys/devices/platform')
>>> Devices.from_path(context, '/sys/devices/platform')
Device(u'/sys/devices/platform')
``context`` is the :class:`Context` in which to search the device.
``path`` is a device path as unicode or byte string.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundAtPathError`, if no device was found for ``path``.
.. versionadded:: 0.18
"""
if not path.startswith(context.sys_path):
path = os.path.join(context.sys_path, path.lstrip(os.sep))
return cls.from_sys_path(context, path)
@classmethod
def from_sys_path(cls, context, sys_path):
"""
Create a new device from a given ``sys_path``:
>>> from pyudev import Context, Device
>>> context = Context()
>>> Devices.from_sys_path(context, '/sys/devices/platform')
Device(u'/sys/devices/platform')
``context`` is the :class:`Context` in which to search the device.
``sys_path`` is a unicode or byte string containing the path of the
device inside ``sysfs`` with the mount point included.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundAtPathError`, if no device was found for
``sys_path``.
.. versionadded:: 0.18
"""
device = context._libudev.udev_device_new_from_syspath(
context, ensure_byte_string(sys_path)
)
if not device:
raise DeviceNotFoundAtPathError(sys_path)
return Device(context, device)
@classmethod
def from_name(cls, context, subsystem, sys_name):
"""
Create a new device from a given ``subsystem`` and a given
``sys_name``:
>>> from pyudev import Context, Device
>>> context = Context()
>>> sda = Devices.from_name(context, 'block', 'sda')
>>> sda
Device(u'/sys/devices/pci0000:00/0000:00:1f.2/host0/target0:0:0/0:0:0:0/block/sda')
>>> sda == Devices.from_path(context, '/block/sda')
``context`` is the :class:`Context` in which to search the device.
``subsystem`` and ``sys_name`` are byte or unicode strings, which
denote the subsystem and the name of the device to create.
Return a :class:`Device` object for the device. Raise
:exc:`DeviceNotFoundByNameError`, if no device was found with the given
name.
.. versionadded:: 0.18
"""
sys_name = sys_name.replace("/", "!")
device = context._libudev.udev_device_new_from_subsystem_sysname(
context, ensure_byte_string(subsystem), ensure_byte_string(sys_name)
)
if not device:
raise DeviceNotFoundByNameError(subsystem, sys_name)
return Device(context, device)
@classmethod
def from_device_number(cls, context, typ, number):
"""
Create a new device from a device ``number`` with the given device
``type``:
>>> import os
>>> from pyudev import Context, Device
>>> ctx = Context()
>>> major, minor = 8, 0
>>> device = Devices.from_device_number(context, 'block',
... os.makedev(major, minor))
>>> device
Device(u'/sys/devices/pci0000:00/0000:00:11.0/host0/target0:0:0/0:0:0:0/block/sda')
>>> os.major(device.device_number), os.minor(device.device_number)
(8, 0)
Use :func:`os.makedev` to construct a device number from a major and a
minor device number, as shown in the example above.
.. warning::
Device numbers are not unique across different device types.
Passing a correct number with a wrong type may silently yield a
wrong device object, so make sure to pass the correct device type.
``context`` is the :class:`Context`, in which to search the device.
``type`` is either ``'char'`` or ``'block'``, according to whether the
device is a character or block device. ``number`` is the device number
as integer.
Return a :class:`Device` object for the device with the given device
``number``. Raise :exc:`DeviceNotFoundByNumberError`, if no device was
found with the given device type and number.
.. versionadded:: 0.18
"""
device = context._libudev.udev_device_new_from_devnum(
context, ensure_byte_string(typ[0]), number
)
if not device:
raise DeviceNotFoundByNumberError(typ, number)
return Device(context, device)
@classmethod
def from_device_file(cls, context, filename):
"""
Create a new device from the given device file:
>>> from pyudev import Context, Device
>>> context = Context()
>>> device = Devices.from_device_file(context, '/dev/sda')
>>> device
Device(u'/sys/devices/pci0000:00/0000:00:0d.0/host2/target2:0:0/2:0:0:0/block/sda')
>>> device.device_node
u'/dev/sda'
.. warning::
Though the example seems to suggest that ``device.device_node ==
filename`` holds with ``device = Devices.from_device_file(context,
filename)``, this is only true in a majority of cases. There *can*
be devices, for which this relation is actually false! Thus, do
*not* expect :attr:`~Device.device_node` to be equal to the given
``filename`` for the returned :class:`Device`. Especially, use
:attr:`~Device.device_node` if you need the device file of a
:class:`Device` created with this method afterwards.
``context`` is the :class:`Context` in which to search the device.
``filename`` is a string containing the path of a device file.
Return a :class:`Device` representing the given device file. Raise
:exc:`DeviceNotFoundByFileError` if ``filename`` is no device file
at all or if ``filename`` does not exist or if its metadata was
inaccessible.
.. versionadded:: 0.18
"""
try:
device_type = get_device_type(filename)
device_number = os.stat(filename).st_rdev
except (EnvironmentError, ValueError) as err:
raise DeviceNotFoundByFileError(err)
return cls.from_device_number(context, device_type, device_number)
@classmethod
def from_interface_index(cls, context, ifindex):
"""
Locate a device based on the interface index.
:param `Context` context: the libudev context
:param int ifindex: the interface index
:returns: the device corresponding to the interface index
:rtype: `Device`
This method is only appropriate for network devices.
"""
network_devices = context.list_devices(subsystem="net")
dev = next(
(d for d in network_devices if d.attributes.get("ifindex") == ifindex), None
)
if dev is not None:
return dev
else:
raise DeviceNotFoundByInterfaceIndexError(ifindex)
@classmethod
def from_kernel_device(cls, context, kernel_device):
"""
Locate a device based on the kernel device.
:param `Context` context: the libudev context
:param str kernel_device: the kernel device
:returns: the device corresponding to ``kernel_device``
:rtype: `Device`
"""
switch_char = kernel_device[0]
rest = kernel_device[1:]
if switch_char in ("b", "c"):
number_re = re.compile(r"^(?P<major>\d+):(?P<minor>\d+)$")
match = number_re.match(rest)
if match:
number = os.makedev(
int(match.group("major")), int(match.group("minor"))
)
return cls.from_device_number(context, switch_char, number)
else:
raise DeviceNotFoundByKernelDeviceError(kernel_device)
elif switch_char == "n":
return cls.from_interface_index(context, rest)
elif switch_char == "+":
(subsystem, _, kernel_device_name) = rest.partition(":")
if kernel_device_name and subsystem:
return cls.from_name(context, subsystem, kernel_device_name)
else:
raise DeviceNotFoundByKernelDeviceError(kernel_device)
else:
raise DeviceNotFoundByKernelDeviceError(kernel_device)
@classmethod
def from_environment(cls, context):
"""
Create a new device from the process environment (as in
:data:`os.environ`).
This only works reliable, if the current process is called from an
udev rule, and is usually used for tools executed from ``IMPORT=``
rules. Use this method to create device objects in Python scripts
called from udev rules.
``context`` is the library :class:`Context`.
Return a :class:`Device` object constructed from the environment.
Raise :exc:`DeviceNotFoundInEnvironmentError`, if no device could be
created from the environment.
.. udevversion:: 152
.. versionadded:: 0.18
"""
device = context._libudev.udev_device_new_from_environment(context)
if not device:
raise DeviceNotFoundInEnvironmentError()
return Device(context, device)
@classmethod
def METHODS(cls): # pylint: disable=invalid-name
"""
Return methods that obtain a :class:`Device` from a variety of
different data.
:return: a list of from_* methods.
:rtype: list of class methods
.. versionadded:: 0.18
"""
return [ # pragma: no cover
cls.from_device_file,
cls.from_device_number,
cls.from_name,
cls.from_path,
cls.from_sys_path,
]
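# Editor's note: the factory methods above are typically driven as below.
# A minimal sketch only - it assumes a Linux host with udev and a 'sda'
# block device, and is not part of the pyudev API surface.
def _example_devices_usage():
    from pyudev import Context  # safe at call time; pyudev is already loaded
    context = Context()
    by_name = Devices.from_name(context, 'block', 'sda')
    by_file = Devices.from_device_file(context, '/dev/sda')
    # Device equality is based on the device path, so both lookups
    # resolve to the same device and compare equal.
    return by_name == by_file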
class Device(collections.abc.Mapping):
# pylint: disable=too-many-public-methods
"""
A single device with attached attributes and properties.
A device also has a set of udev-specific attributes like the path
inside ``sysfs``.
:class:`Device` objects compare equal and unequal to other devices and
to strings (based on :attr:`device_path`). However, there is no
ordering on :class:`Device` objects, and the corresponding operators
``>``, ``<``, ``<=`` and ``>=`` raise :exc:`~exceptions.TypeError`.
.. warning::
Currently, Device extends Mapping. The mapping that it stores is that
of udev property names to udev property values. This use is deprecated
and Device will no longer extend Mapping in 1.0. To look up udev
properties, use the Device.properties property.
.. warning::
**Never** use object identity (``is`` operator) to compare
:class:`Device` objects. :mod:`pyudev` may create multiple
:class:`Device` objects for the same device. Instead compare
devices by value using ``==`` or ``!=``.
:class:`Device` objects are hashable and can therefore be used as keys
in dictionaries and sets.
They can also be given directly as ``udev_device *`` to functions wrapped
through :mod:`ctypes`.
"""
@classmethod
def from_path(cls, context, path): # pragma: no cover
"""
.. versionadded:: 0.4
.. deprecated:: 0.18
Use :class:`Devices.from_path` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_path(context, path)
@classmethod
def from_sys_path(cls, context, sys_path): # pragma: no cover
"""
.. versionchanged:: 0.4
Raise :exc:`NoSuchDeviceError` instead of returning ``None``, if
no device was found for ``sys_path``.
.. versionchanged:: 0.5
Raise :exc:`DeviceNotFoundAtPathError` instead of
:exc:`NoSuchDeviceError`.
.. deprecated:: 0.18
Use :class:`Devices.from_sys_path` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_sys_path(context, sys_path)
@classmethod
def from_name(cls, context, subsystem, sys_name): # pragma: no cover
"""
.. versionadded:: 0.5
.. deprecated:: 0.18
Use :class:`Devices.from_name` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_name(context, subsystem, sys_name)
@classmethod
def from_device_number(cls, context, typ, number): # pragma: no cover
"""
.. versionadded:: 0.11
.. deprecated:: 0.18
Use :class:`Devices.from_device_number` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_device_number(context, typ, number)
@classmethod
def from_device_file(cls, context, filename): # pragma: no cover
"""
.. versionadded:: 0.15
.. deprecated:: 0.18
Use :class:`Devices.from_device_file` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_device_file(context, filename)
@classmethod
def from_environment(cls, context): # pragma: no cover
"""
.. versionadded:: 0.6
.. deprecated:: 0.18
Use :class:`Devices.from_environment` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use equivalent Devices method instead.",
DeprecationWarning,
stacklevel=2,
)
return Devices.from_environment(context)
def __init__(self, context, _device):
collections.abc.Mapping.__init__(self)
self.context = context
self._as_parameter_ = _device
self._libudev = context._libudev
def __del__(self):
self._libudev.udev_device_unref(self)
def __repr__(self):
return "Device({0.sys_path!r})".format(self)
@property
def parent(self):
"""
The parent :class:`Device` or ``None``, if there is no parent
device.
"""
parent = self._libudev.udev_device_get_parent(self)
if not parent:
return None
# the parent device is not referenced, thus forcibly acquire a
# reference
return Device(self.context, self._libudev.udev_device_ref(parent))
@property
def children(self):
"""
Yield all direct children of this device.
.. note::
In udev, parent-child relationships are generally ambiguous, i.e.
a parent can have multiple children, *and* a child can have multiple
parents. Hence, `child.parent == parent` does generally *not* hold
for all `child` objects in `parent.children`. In other words,
the :attr:`parent` of a device in this property can be different
from this device!
.. note::
As the underlying library does not provide any means to directly
query the children of a device, this property performs a linear
search through all devices.
Return an iterable yielding a :class:`Device` object for each direct
child of this device.
.. udevversion:: 172
.. versionchanged:: 0.13
Requires udev version 172 now.
"""
for device in self.context.list_devices().match_parent(self):
if device != self:
yield device
@property
def ancestors(self):
"""
Yield all ancestors of this device from bottom to top.
Return an iterator yielding a :class:`Device` object for each
ancestor of this device from bottom to top.
.. versionadded:: 0.16
"""
parent = self.parent
while parent is not None:
yield parent
parent = parent.parent
def find_parent(self, subsystem, device_type=None):
"""
Find the parent device with the given ``subsystem`` and
``device_type``.
``subsystem`` is a byte or unicode string containing the name of the
subsystem, in which to search for the parent. ``device_type`` is a
byte or unicode string holding the expected device type of the parent.
        It can be ``None`` (the default), which means that no specific
        device type is expected.
Return a parent :class:`Device` within the given ``subsystem`` and, if
``device_type`` is not ``None``, with the given ``device_type``, or
``None``, if this device has no parent device matching these
constraints.
.. versionadded:: 0.9
"""
subsystem = ensure_byte_string(subsystem)
if device_type is not None:
device_type = ensure_byte_string(device_type)
parent = self._libudev.udev_device_get_parent_with_subsystem_devtype(
self, subsystem, device_type
)
if not parent:
return None
# parent device is not referenced, thus forcibly acquire a reference
return Device(self.context, self._libudev.udev_device_ref(parent))
def traverse(self):
"""
Traverse all parent devices of this device from bottom to top.
Return an iterable yielding all parent devices as :class:`Device`
objects, *not* including the current device. The last yielded
:class:`Device` is the top of the device hierarchy.
.. deprecated:: 0.16
Will be removed in 1.0. Use :attr:`ancestors` instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use Device.ancestors instead.",
DeprecationWarning,
stacklevel=2,
)
return self.ancestors
@property
def sys_path(self):
"""
Absolute path of this device in ``sysfs`` including the ``sysfs``
mount point as unicode string.
"""
return ensure_unicode_string(self._libudev.udev_device_get_syspath(self))
@property
def device_path(self):
"""
Kernel device path as unicode string. This path uniquely identifies
a single device.
Unlike :attr:`sys_path`, this path does not contain the ``sysfs``
mount point. However, the path is absolute and starts with a slash
``'/'``.
"""
return ensure_unicode_string(self._libudev.udev_device_get_devpath(self))
@property
def subsystem(self):
"""
Name of the subsystem this device is part of as unicode string.
:returns: name of subsystem if found, else None
:rtype: unicode string or NoneType
"""
subsys = self._libudev.udev_device_get_subsystem(self)
return None if subsys is None else ensure_unicode_string(subsys)
@property
def sys_name(self):
"""
Device file name inside ``sysfs`` as unicode string.
"""
return ensure_unicode_string(self._libudev.udev_device_get_sysname(self))
@property
def sys_number(self):
"""
The trailing number of the :attr:`sys_name` as unicode string, or
``None``, if the device has no trailing number in its name.
.. note::
The number is returned as unicode string to preserve the exact
format of the number, especially any leading zeros:
            >>> from pyudev import Context, Devices
>>> context = Context()
>>> device = Devices.from_path(context, '/sys/devices/LNXSYSTM:00')
>>> device.sys_number
u'00'
To work with numbers, explicitly convert them to ints:
>>> int(device.sys_number)
0
.. versionadded:: 0.11
"""
number = self._libudev.udev_device_get_sysnum(self)
return ensure_unicode_string(number) if number is not None else None
@property
def device_type(self):
"""
Device type as unicode string, or ``None``, if the device type is
unknown.
>>> from pyudev import Context
>>> context = Context()
>>> for device in context.list_devices(subsystem='net'):
... '{0} - {1}'.format(device.sys_name, device.device_type or 'ethernet')
...
u'eth0 - ethernet'
u'wlan0 - wlan'
u'lo - ethernet'
u'vboxnet0 - ethernet'
.. versionadded:: 0.10
"""
device_type = self._libudev.udev_device_get_devtype(self)
if device_type is None:
return None
return ensure_unicode_string(device_type)
@property
def driver(self):
"""
The driver name as unicode string, or ``None``, if there is no
driver for this device.
.. versionadded:: 0.5
"""
driver = self._libudev.udev_device_get_driver(self)
return ensure_unicode_string(driver) if driver is not None else None
@property
def device_node(self):
"""
Absolute path to the device node of this device as unicode string or
``None``, if this device doesn't have a device node. The path
includes the device directory (see :attr:`Context.device_path`).
This path always points to the actual device node associated with
this device, and never to any symbolic links to this device node.
See :attr:`device_links` to get a list of symbolic links to this
device node.
.. warning::
For devices created with :meth:`from_device_file()`, the value of
            this property is not necessarily equal to the ``filename`` given to
:meth:`from_device_file()`.
"""
node = self._libudev.udev_device_get_devnode(self)
return ensure_unicode_string(node) if node is not None else None
@property
def device_number(self):
"""
The device number of the associated device as integer, or ``0``, if no
device number is associated.
Use :func:`os.major` and :func:`os.minor` to decompose the device
number into its major and minor number:
>>> import os
        >>> from pyudev import Context, Devices
>>> context = Context()
>>> sda = Devices.from_name(context, 'block', 'sda')
>>> sda.device_number
2048L
>>> (os.major(sda.device_number), os.minor(sda.device_number))
(8, 0)
For devices with an associated :attr:`device_node`, this is the same as
the ``st_rdev`` field of the stat result of the :attr:`device_node`:
>>> os.stat(sda.device_node).st_rdev
2048
.. versionadded:: 0.11
"""
return self._libudev.udev_device_get_devnum(self)
@property
def is_initialized(self):
"""
``True``, if the device is initialized, ``False`` otherwise.
A device is initialized, if udev has already handled this device and
has set up device node permissions and context, or renamed a network
device.
Consequently, this property is only implemented for devices with a
device node or for network devices. On all other devices this property
is always ``True``.
        It is *not* recommended that you use uninitialized devices.
.. seealso:: :attr:`time_since_initialized`
.. udevversion:: 165
.. versionadded:: 0.8
"""
return bool(self._libudev.udev_device_get_is_initialized(self))
@property
def time_since_initialized(self):
"""
The time elapsed since initialization as :class:`~datetime.timedelta`.
        This property is only implemented on devices which need to store
        properties in the udev database. On all other devices this property
        is simply a zero :class:`~datetime.timedelta`.
.. seealso:: :attr:`is_initialized`
.. udevversion:: 165
.. versionadded:: 0.8
"""
microseconds = self._libudev.udev_device_get_usec_since_initialized(self)
return timedelta(microseconds=microseconds)
@property
def device_links(self):
"""
An iterator, which yields the absolute paths (including the device
directory, see :attr:`Context.device_path`) of all symbolic links
pointing to the :attr:`device_node` of this device. The paths are
unicode strings.
UDev can create symlinks to the original device node (see
:attr:`device_node`) inside the device directory. This is often
used to assign a constant, fixed device node to devices like
        removable media, which technically do not have a constant device
node, or to map a single device into multiple device hierarchies.
The property provides access to all such symbolic links, which were
created by UDev for this device.
.. warning::
Links are not necessarily resolved by
:meth:`Devices.from_device_file()`. Hence do *not* rely on
``Devices.from_device_file(context, link).device_path ==
device.device_path`` from any ``link`` in ``device.device_links``.
"""
devlinks = self._libudev.udev_device_get_devlinks_list_entry(self)
for name, _ in udev_list_iterate(self._libudev, devlinks):
yield ensure_unicode_string(name)
@property
def action(self):
"""
The device event action as string, or ``None``, if this device was not
received from a :class:`Monitor`.
Usual actions are:
``'add'``
A device has been added (e.g. a USB device was plugged in)
``'remove'``
A device has been removed (e.g. a USB device was unplugged)
``'change'``
Something about the device changed (e.g. a device property)
``'online'``
The device is online now
``'offline'``
The device is offline now
.. warning::
Though the actions listed above are the most common, this property
*may* return other values, too, so be prepared to handle unknown
actions!
.. versionadded:: 0.16
"""
action = self._libudev.udev_device_get_action(self)
return ensure_unicode_string(action) if action is not None else None
@property
def sequence_number(self):
"""
The device event sequence number as integer, or ``0`` if this device
has no sequence number, i.e. was not received from a :class:`Monitor`.
.. versionadded:: 0.16
"""
return self._libudev.udev_device_get_seqnum(self)
@property
def attributes(self):
"""
The system attributes of this device as read-only
:class:`Attributes` mapping.
System attributes are basically normal files inside the device
directory. These files contain all sorts of information about the
device, which may not be reflected by properties. These attributes
are commonly used for matching in udev rules, and can be printed
using ``udevadm info --attribute-walk``.
The values of these attributes are not always proper strings, and
can contain arbitrary bytes.
:returns: an Attributes object, useful for reading attributes
:rtype: Attributes
.. versionadded:: 0.5
"""
# do *not* cache the created object in an attribute of this class.
# Doing so creates an uncollectable reference cycle between Device and
# Attributes, because Attributes refers to this object through
# Attributes.device.
return Attributes(self)
@property
def properties(self):
"""
The udev properties of this object as read-only Properties mapping.
.. versionadded:: 0.21
"""
return Properties(self)
@property
def tags(self):
"""
A :class:`Tags` object representing the tags attached to this device.
The :class:`Tags` object supports a test for a single tag as well as
iteration over all tags:
>>> from pyudev import Context
>>> context = Context()
>>> device = next(iter(context.list_devices(tag='systemd')))
>>> 'systemd' in device.tags
True
>>> list(device.tags)
[u'seat', u'systemd', u'uaccess']
Tags are arbitrary classifiers that can be attached to devices by udev
scripts and daemons. For instance, systemd_ uses tags for multi-seat_
support.
.. _systemd: http://freedesktop.org/wiki/Software/systemd
.. _multi-seat: http://www.freedesktop.org/wiki/Software/systemd/multiseat
.. udevversion:: 154
.. versionadded:: 0.6
.. versionchanged:: 0.13
Return a :class:`Tags` object now.
"""
return Tags(self)
def __iter__(self):
"""
Iterate over the names of all properties defined for this device.
Return a generator yielding the names of all properties of this
device as unicode strings.
.. deprecated:: 0.21
Will be removed in 1.0. Access properties with Device.properties.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Access properties with Device.properties.",
DeprecationWarning,
stacklevel=2,
)
return self.properties.__iter__()
def __len__(self):
"""
        Return the number of properties defined for this device as an integer.
.. deprecated:: 0.21
Will be removed in 1.0. Access properties with Device.properties.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Access properties with Device.properties.",
DeprecationWarning,
stacklevel=2,
)
return self.properties.__len__()
def __getitem__(self, prop):
"""
Get the given property from this device.
``prop`` is a unicode or byte string containing the name of the
property.
Return the property value as unicode string, or raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device.
.. deprecated:: 0.21
Will be removed in 1.0. Access properties with Device.properties.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Access properties with Device.properties.",
DeprecationWarning,
stacklevel=2,
)
return self.properties.__getitem__(prop)
def asint(self, prop):
"""
Get the given property from this device as integer.
``prop`` is a unicode or byte string containing the name of the
property.
Return the property value as integer. Raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device, or a :exc:`~exceptions.ValueError`, if the property
value cannot be converted to an integer.
.. deprecated:: 0.21
Will be removed in 1.0. Use Device.properties.asint() instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use Device.properties.asint instead.",
DeprecationWarning,
stacklevel=2,
)
return self.properties.asint(prop)
def asbool(self, prop):
"""
Get the given property from this device as boolean.
A boolean property has either a value of ``'1'`` or of ``'0'``,
where ``'1'`` stands for ``True``, and ``'0'`` for ``False``. Any
other value causes a :exc:`~exceptions.ValueError` to be raised.
``prop`` is a unicode or byte string containing the name of the
property.
Return ``True``, if the property value is ``'1'`` and ``False``, if
the property value is ``'0'``. Any other value raises a
:exc:`~exceptions.ValueError`. Raise a :exc:`~exceptions.KeyError`,
if the given property is not defined for this device.
.. deprecated:: 0.21
Will be removed in 1.0. Use Device.properties.asbool() instead.
"""
# isort: STDLIB
import warnings
warnings.warn(
"Will be removed in 1.0. Use Device.properties.asbool instead.",
DeprecationWarning,
stacklevel=2,
)
return self.properties.asbool(prop)
def __hash__(self):
return hash(self.device_path)
def __eq__(self, other):
if isinstance(other, Device):
return self.device_path == other.device_path
return self.device_path == other
def __ne__(self, other):
if isinstance(other, Device):
return self.device_path != other.device_path
return self.device_path != other
def __gt__(self, other):
raise TypeError("Device not orderable")
def __lt__(self, other):
raise TypeError("Device not orderable")
def __le__(self, other):
raise TypeError("Device not orderable")
def __ge__(self, other):
raise TypeError("Device not orderable")
class Properties(collections.abc.Mapping):
"""
    udev properties of :class:`Device` objects.
.. versionadded:: 0.21
"""
def __init__(self, device):
collections.abc.Mapping.__init__(self)
self.device = device
self._libudev = device._libudev
def __iter__(self):
"""
Iterate over the names of all properties defined for the device.
Return a generator yielding the names of all properties of this
device as unicode strings.
"""
properties = self._libudev.udev_device_get_properties_list_entry(self.device)
for name, _ in udev_list_iterate(self._libudev, properties):
yield ensure_unicode_string(name)
def __len__(self):
"""
        Return the number of properties defined for this device as an integer.
"""
properties = self._libudev.udev_device_get_properties_list_entry(self.device)
return sum(1 for _ in udev_list_iterate(self._libudev, properties))
def __getitem__(self, prop):
"""
Get the given property from this device.
``prop`` is a unicode or byte string containing the name of the
property.
Return the property value as unicode string, or raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device.
"""
value = self._libudev.udev_device_get_property_value(
self.device, ensure_byte_string(prop)
)
if value is None:
raise KeyError(prop)
return ensure_unicode_string(value)
def asint(self, prop):
"""
Get the given property from this device as integer.
``prop`` is a unicode or byte string containing the name of the
property.
Return the property value as integer. Raise a
:exc:`~exceptions.KeyError`, if the given property is not defined
for this device, or a :exc:`~exceptions.ValueError`, if the property
value cannot be converted to an integer.
"""
return int(self[prop])
def asbool(self, prop):
"""
Get the given property from this device as boolean.
A boolean property has either a value of ``'1'`` or of ``'0'``,
where ``'1'`` stands for ``True``, and ``'0'`` for ``False``. Any
other value causes a :exc:`~exceptions.ValueError` to be raised.
``prop`` is a unicode or byte string containing the name of the
property.
Return ``True``, if the property value is ``'1'`` and ``False``, if
the property value is ``'0'``. Any other value raises a
:exc:`~exceptions.ValueError`. Raise a :exc:`~exceptions.KeyError`,
if the given property is not defined for this device.
"""
return string_to_bool(self[prop])
class Attributes(object):
"""
udev attributes for :class:`Device` objects.
.. versionadded:: 0.5
"""
def __init__(self, device):
self.device = device
self._libudev = device._libudev
@property
def available_attributes(self):
"""
Yield the ``available`` attributes for the device.
It is not guaranteed that a key in this list will have a value.
It is not guaranteed that a key not in this list will not have a value.
It is guaranteed that the keys in this list are the keys that libudev
considers to be "available" attributes.
        If the libudev version does not define
        ``udev_device_get_sysattr_list_entry()``, this yields nothing.
See rhbz#1267584.
"""
if not hasattr(self._libudev, "udev_device_get_sysattr_list_entry"):
return # pragma: no cover
attrs = self._libudev.udev_device_get_sysattr_list_entry(self.device)
for attribute, _ in udev_list_iterate(self._libudev, attrs):
yield ensure_unicode_string(attribute)
def _get(self, attribute):
"""
Get the given system ``attribute`` for the device.
:param attribute: the key for an attribute value
:type attribute: unicode or byte string
:returns: the value corresponding to ``attribute``
:rtype: an arbitrary sequence of bytes
:raises KeyError: if no value found
"""
value = self._libudev.udev_device_get_sysattr_value(
self.device, ensure_byte_string(attribute)
)
if value is None:
raise KeyError(attribute)
return value
def get(self, attribute, default=None):
"""
Get the given system ``attribute`` for the device.
:param attribute: the key for an attribute value
:type attribute: unicode or byte string
:param default: a default if no corresponding value found
:type default: a sequence of bytes
:returns: the value corresponding to ``attribute`` or ``default``
:rtype: object
"""
try:
return self._get(attribute)
except KeyError:
return default
def asstring(self, attribute):
"""
Get the given ``attribute`` for the device as unicode string.
:param attribute: the key for an attribute value
:type attribute: unicode or byte string
:returns: the value corresponding to ``attribute``, as unicode
:rtype: unicode
:raises KeyError: if no value found for ``attribute``
:raises UnicodeDecodeError: if value is not convertible
"""
return ensure_unicode_string(self._get(attribute))
def asint(self, attribute):
"""
Get the given ``attribute`` as an int.
:param attribute: the key for an attribute value
:type attribute: unicode or byte string
:returns: the value corresponding to ``attribute``, as an int
:rtype: int
:raises KeyError: if no value found for ``attribute``
:raises UnicodeDecodeError: if value is not convertible to unicode
:raises ValueError: if unicode value can not be converted to an int
"""
return int(self.asstring(attribute))
def asbool(self, attribute):
"""
Get the given ``attribute`` from this device as a bool.
:param attribute: the key for an attribute value
:type attribute: unicode or byte string
:returns: the value corresponding to ``attribute``, as bool
:rtype: bool
:raises KeyError: if no value found for ``attribute``
:raises UnicodeDecodeError: if value is not convertible to unicode
:raises ValueError: if unicode value can not be converted to a bool
A boolean attribute has either a value of ``'1'`` or of ``'0'``,
where ``'1'`` stands for ``True``, and ``'0'`` for ``False``. Any
other value causes a :exc:`~exceptions.ValueError` to be raised.
"""
return string_to_bool(self.asstring(attribute))
class Tags(collections.abc.Iterable, collections.abc.Container):
"""
    An iterable over :class:`Device` tags.
    Subclasses the ``Container`` and ``Iterable`` ABCs.
"""
# pylint: disable=too-few-public-methods
def __init__(self, device):
# pylint: disable=super-init-not-called
collections.abc.Iterable.__init__(self)
self.device = device
self._libudev = device._libudev
def _has_tag(self, tag):
"""
Whether ``tag`` exists.
:param tag: unicode string with name of tag
:rtype: bool
"""
if hasattr(self._libudev, "udev_device_has_tag"):
return bool(
self._libudev.udev_device_has_tag(self.device, ensure_byte_string(tag))
)
return any(t == tag for t in self) # pragma: no cover
def __contains__(self, tag):
"""
Check for existence of ``tag``.
``tag`` is a tag as unicode string.
Return ``True``, if ``tag`` is attached to the device, ``False``
otherwise.
"""
return self._has_tag(tag)
def __iter__(self):
"""
Iterate over all tags.
Yield each tag as unicode string.
"""
tags = self._libudev.udev_device_get_tags_list_entry(self.device)
for tag, _ in udev_list_iterate(self._libudev, tags):
yield ensure_unicode_string(tag)
| pyudev/pyudev | src/pyudev/device/_device.py | Python | lgpl-2.1 | 44,657 |
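A minimal usage sketch of the Device API above. Assumptions: a Linux host with pyudev installed and a 'block' device named 'sda' present — adjust the names to your system.

from pyudev import Context, Devices

context = Context()
sda = Devices.from_name(context, 'block', 'sda')

# Compare devices by value (==), never by identity: pyudev may create
# several Device objects for the same underlying device.
assert sda == Devices.from_path(context, sda.sys_path)

# Devices hash on device_path, so they work as dict keys and set members.
seen = {sda}

# Read udev properties via Device.properties; the Mapping interface on
# Device itself is deprecated and goes away in 1.0.
device_type = sda.properties.get('DEVTYPE')

# sysfs attributes are raw bytes; the typed accessors convert for you.
size_sectors = sda.attributes.asint('size')

# Walk the device hierarchy bottom to top.
for ancestor in sda.ancestors:
    print(ancestor.sys_path)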
import openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
| mpkato/openliveq | tests/test_clickthrough.py | Python | mit | 1,259 |
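For readers without the openliveq package: a hypothetical sketch of the TSV record parser the test above exercises. The column order and types are inferred from the assertions; this is NOT the actual olq.Clickthrough implementation.

class Clickthrough(object):
    # Column order inferred from the assertions above (an assumption).
    FIELDS = ('query_id', 'question_id', 'rank', 'ctr', 'male', 'female',
              'a00', 'a10', 'a20', 'a30', 'a40', 'a50', 'a60')
    TYPES = (str, str, int, float, float, float,
             float, float, float, float, float, float, float)

    @classmethod
    def readline(cls, line):
        # Parse one tab-separated record into a typed instance.
        obj = cls()
        for name, typ, raw in zip(cls.FIELDS, cls.TYPES,
                                  line.rstrip('\n').split('\t')):
            setattr(obj, name, typ(raw))
        return obj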
"""
This test file is meant for development purposes, providing an easy way to
test the functioning of the Pastas recharge module.
Author: R.A. Collenteur, University of Graz.
"""
import pandas as pd
import pastas as ps
# read observations
head = pd.read_csv("notebooks/data_notebook_5/head_wellex.csv",
index_col="Date", parse_dates=True)
# Create the time series model
ml = ps.Model(head, name="head")
# read weather data
rain = pd.read_csv("notebooks/data_notebook_5/prec_wellex.csv",
index_col="Date", parse_dates=True)
evap = pd.read_csv("notebooks/data_notebook_5/evap_wellex.csv",
index_col="Date", parse_dates=True)
# Create the recharge stress model
rm = ps.RechargeModel(prec=rain, evap=evap, rfunc=ps.Exponential,
name='recharge')
ml.add_stressmodel(rm)
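# Create the well stress model (groundwater extraction lowers the head, hence up=False)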
well = pd.read_csv("notebooks/data_notebook_5/well_wellex.csv",
index_col="Date", parse_dates=True) / 1e6
sm = ps.StressModel(well, rfunc=ps.Exponential, name="well", up=False)
ml.add_stressmodel(sm)
# Solve
ml.solve(noise=True)
ml.plots.results()
| pastas/pasta | examples/example_well.py | Python | mit | 1,118 |
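Beyond the results plot, the calibrated model can be inspected programmatically. A short follow-up sketch — verify these pastas method names against your installed version:

# Follow-up sketch (verify method names against your pastas version).
sim = ml.simulate()            # simulated head as a pandas Series
res = ml.residuals()           # observed minus simulated
print(ml.parameters)           # optimized values, initial values and bounds
print("RMSE: %.3f" % float((res ** 2).mean() ** 0.5))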
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for python.py."""
import os
import unittest2 as unittest
from python import PythonChecker
class PythonCheckerTest(unittest.TestCase):
"""Tests the PythonChecker class."""
def test_init(self):
"""Test __init__() method."""
def _mock_handle_style_error(self):
pass
checker = PythonChecker("foo.txt", _mock_handle_style_error)
self.assertEqual(checker._file_path, "foo.txt")
self.assertEqual(checker._handle_style_error,
_mock_handle_style_error)
def test_check(self):
"""Test check() method."""
errors = []
def _mock_handle_style_error(line_number, category, confidence,
message):
error = (line_number, category, confidence, message)
errors.append(error)
current_dir = os.path.dirname(__file__)
file_path = os.path.join(current_dir, "python_unittest_input.py")
checker = PythonChecker(file_path, _mock_handle_style_error)
checker.check(lines=[])
self.assertEqual(errors, [
(4, "pep8/W291", 5, "trailing whitespace"),
(4, "pylint/E0602", 5, "Undefined variable 'error'"),
])
def test_pylint_false_positives(self):
"""Test that pylint false positives are suppressed."""
errors = []
def _mock_handle_style_error(line_number, category, confidence,
message):
error = (line_number, category, confidence, message)
errors.append(error)
current_dir = os.path.dirname(__file__)
file_path = os.path.join(current_dir, "python_unittest_falsepositives.py")
checker = PythonChecker(file_path, _mock_handle_style_error)
checker.check(lines=[])
self.assertEqual(errors, [])
| klim-iv/phantomjs-qt5 | src/webkit/Tools/Scripts/webkitpy/style/checkers/python_unittest.py | Python | bsd-3-clause | 3,202 |
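Outside the test harness, the checker can be driven by any callable with the same four-argument shape; a sketch using only the API exercised above (the target path is hypothetical):

file_path = "some_module.py"  # hypothetical target file

def report(line_number, category, confidence, message):
    # Same signature as the mock handlers in the tests above.
    print("%s:%d: [%s] (%d) %s"
          % (file_path, line_number, category, confidence, message))

checker = PythonChecker(file_path, report)
checker.check(lines=[])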
import os
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import dj_database_url
from werkzeug.contrib.cache import RedisCache, NullCache
here = os.path.abspath(os.path.dirname(__file__))
urlparse.uses_netloc.append('redis')
redis_conf = dj_database_url.config('REDIS_URL')
class BaseConfig:
"""Base class all configuration inherits from."""
DEBUG = True
CACHE = NullCache()
CACHE_TIMEOUT = 0
SECRET_KEY = None
class DevelopmentConfig(BaseConfig):
"""Development specific configuration."""
SECRET_KEY = 'my_secret_key'
class ProductionConfig(BaseConfig):
"""Production specific configuration."""
DEBUG = False
CACHE_TIMEOUT = 60 * 24 * 7
CACHE = RedisCache(host=redis_conf.get('HOST'),
port=redis_conf.get('PORT'),
password=redis_conf.get('PASSWORD'))
class TestingConfig(BaseConfig):
"""Settings related to testing."""
TESTING = True
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'testing': TestingConfig,
'default': DevelopmentConfig
}
| renstrom/imdb-api | imdb/config.py | Python | mit | 1,139 |
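A typical way to consume this config map from a Flask application factory; the FLASK_CONFIG variable name is an assumption, not necessarily what this repository uses:

import os
from flask import Flask

def create_app():
    # Pick a configuration class by name, falling back to 'default'.
    name = os.environ.get('FLASK_CONFIG', 'default')
    app = Flask(__name__)
    app.config.from_object(config[name])
    return app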
#!/usr/bin/env python
import sys, os, math
import matplotlib.pyplot as plt
#-Configuration ----------------------------------------------------------------
sizes = [ 2**n for n in range(6, 13) ]
modes = ['Naive', 'SMB', 'CRB', 'CRB-T', 'CRB-TR']
#-Script Body ------------------------------------------------------------------
if __name__ == "__main__":
#-Build the OpenCL project--------------------------------------------------
if not 'build' in os.listdir('.'):
os.system('mkdir build')
os.chdir('build')
print '# Building the OpenCL project ...'
if (os.system('cmake ..') != 0):
sys.exit('# Please check your OpenCL installation.')
os.system('make')
#-Run the Benchmark---------------------------------------------------------
print '# Benchmark starts here ...'
results = [[] for i in range(len(modes))]
for i, size in enumerate(sizes):
path = 'bench.log'
with open(path, 'w+') as fo:
# run the 'cpp' executable ...
n_iteration = max(1, int((2 ** 14 / size)))
print ('[step %s/%s] size : %4s | iter : %3s'
% (i+1, len(sizes), size, n_iteration))
cmd_line = ('./GEMM -s %s -i %s -r > %s'
% (size, n_iteration, path))
status = os.system(cmd_line)
scores = fo.read()[:-1]
# process the output ...
if (status != 0):
print '# Iteration failed :\n', scores
sys.exit()
else:
for i, time in enumerate(scores.split('\n')):
tflops = (size ** 3 / float(time)) * 2 * 1e-9
results[i].append(tflops)
print '# Benchmark completed !'
#-Display the Results-------------------------------------------------------
fig,axes = plt.subplots()
# size and name
fig.set_size_inches(8, 4, forward=True)
fig.subplots_adjust(bottom=0.15)
fig.canvas.set_window_title('GEMM - benchmark')
# axes
axes.set_xlim([0, len(sizes)-1])
axes.set_ylim([0, 1.1 * max([max(l) for l in results])])
axes.xaxis.set_ticks(range(0, len(sizes)))
axes.xaxis.set_ticklabels(sizes)
# plotting
def plot_entry(n):
markers = ['o', '^', 's', 'D', 'v']
colors = ['purple', 'royalblue', 'r', 'orange', 'k']
return plt.plot(results[n], color=colors[n],
linestyle='-', marker=markers[n],
markeredgewidth=1, markeredgecolor=colors[n],
markerfacecolor=colors[n], markersize=6)[0]
print '# Plotting the results ...'
plots = [ plot_entry(n) for n in range(0, len(modes)) ]
# legend
plt.legend(plots, modes, loc='upper left', fontsize = '14')
# background grid
plt.grid(True, which="major", linestyle=':')
plt.grid(True, which="minor", linestyle=':', alpha=0.25)
plt.minorticks_on()
# labels
plt.xlabel('matrix size (M=N=K)', fontsize=14)
plt.ylabel('effective performance (TFLOPS)', fontsize=14)
plt.title('Performance Comparison of the Proposed Kernels', size=16)
# and that's it!
plt.show()
print '# Exiting ...'
| Cryst4L/Blaze | run.py | Python | mit | 3,195 |
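A note on the conversion inside the benchmark loop: a size-N GEMM performs 2*N**3 floating-point operations (one multiply and one add per inner-product term), so the 1e-9 factor in the script only yields TFLOPS if the executable reports times in milliseconds — an inference from the formula, not something documented here. Equivalent accounting:

def gemm_tflops(n, time_ms):
    # 2*n**3 FLOPs for an n x n x n GEMM; time assumed in milliseconds.
    flops = 2.0 * n ** 3
    return flops / (time_ms * 1e-3) / 1e12

# Matches the in-script expression (size ** 3 / time) * 2 * 1e-9:
assert abs(gemm_tflops(4096, 100.0) - (4096 ** 3 / 100.0) * 2 * 1e-9) < 1e-6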
# coding: utf-8
'''
MAGE Query Language
'''
from pyparsing import Forward, Group, Suppress, Optional, ZeroOrMore, Combine, \
Word, alphas, nums, QuotedString, CaselessLiteral, FollowedBy
from ref.models.instances import ComponentInstance, ComponentInstanceField, ComponentInstanceRelation
from django.db.models.query import Prefetch
def __build_grammar():
expr = Forward()
k_select = CaselessLiteral("SELECT")
k_from = CaselessLiteral("FROM")
k_where = CaselessLiteral("WHERE")
k_and = CaselessLiteral("AND")
k_instances = CaselessLiteral("INSTANCES")
qs = QuotedString("'", escQuote="''")
identifier = Combine(Word(alphas + "_", exact=1) + Optional(Word(nums + alphas + "_")))("identifier")
navigation = Group(identifier + ZeroOrMore(Suppress(".") + identifier))("navigation")
filter_predicate = Group(
navigation + Suppress("=") + (qs('value') | (Suppress('(') + expr('subquery') + Suppress(')'))))('predicate')
where_clause = Group(Suppress(k_where) + filter_predicate + ZeroOrMore(Suppress(k_and) + filter_predicate))('where')
# Pre filters
impl = Optional(Suppress(CaselessLiteral("implementation"))) + qs('impl')
cic = Suppress(CaselessLiteral("offer")) + qs('cic')
lc = Suppress(CaselessLiteral("lc")) + qs('lc')
envt = Suppress(CaselessLiteral("environment")) + qs('envt')
pre_filter = Optional(envt) + Optional(lc) + Optional(cic) + Optional(impl) + FollowedBy(k_instances)
# Dict query (only select some elements and navigate)
nl_expr = Group(navigation + ZeroOrMore(Suppress(',') + navigation) + FollowedBy(k_from))('selector')
# The sum of all fears
select = Group(
Suppress(k_select) + Optional(nl_expr + Suppress(k_from)) + pre_filter + Suppress(k_instances) + Optional(
where_clause) + Optional(CaselessLiteral('WITH COMPUTATIONS')('compute')))('select')
expr << select
return expr
__grammar = __build_grammar()
def run(query, return_sensitive_data=False):
expr = __grammar.parseString(query)
return __run(expr, return_sensitive_data)
# return __grammar.parseString(query)
def __run(q, return_sensitive_data):
if q.select != None:
return __select_compo(q.select, return_sensitive_data)
def __select_compo(q, return_sensitive_data):
rs = ComponentInstance.objects.filter(deleted=False)
if q.lc:
rs = rs.filter(instanciates__implements__name=q.lc)
if q.cic:
rs = rs.filter(instanciates__name=q.cic)
if q.impl:
rs = rs.filter(description__name=q.impl)
if q.envt:
rs = rs.filter(environments__name=q.envt)
if q.where:
for predicate in q.where:
## Special keys begin with 'mage_', normal keys without 'mage_' are CI attributes
if len(predicate.navigation) == 1 and predicate.navigation[0].lower().startswith('mage_'):
key = predicate.navigation[0]
if key.lower() == "mage_type":
rs = rs.filter(description__name=predicate.value)
if key.lower() == "mage_id":
rs = rs.filter(id=predicate.value)
if key.lower() == "mage_envt":
rs = rs.filter(environments__name=predicate.value)
if key.lower() == "mage_backup":
rs = rs.filter(include_in_envt_backup=(predicate.value.upper() == 'TRUE'))
continue
## Key analysis: last part is always a simple field, others are relationship fields
r = {}
prefix = ""
for part in predicate.navigation[0:-1]:
r[prefix + 'rel_target_set__field__name'] = part
prefix = prefix + 'relationships__'
## Add last item - the attribute name itself, constraint by the value
val = None
if predicate.value:
val = predicate.value
elif predicate.subquery:
tmp = __select_compo(predicate.subquery, return_sensitive_data)
if not type(tmp) == list:
raise Exception('subqueries must always return a single field')
if len(tmp) != 1:
raise Exception('subqueries must return a single value')
val = list(tmp[0].values())[0]
if predicate.navigation[-1] == '_id':
r[prefix + 'id'] = val
else:
r[prefix + 'field_set__field__name'] = predicate.navigation[-1]
## MQL supports % as a wildcard in first and last position only.
## Because we don't want dependency on an external Django LIKE module.
escaped_val = val.replace("\%", "")
if escaped_val.endswith("%") and escaped_val.startswith("%"):
r[prefix + 'field_set__value__contains'] = val[1:-1]
elif escaped_val.endswith("%"):
r[prefix + 'field_set__value__startswith'] = val[:-1]
elif escaped_val.startswith("%"):
r[prefix + 'field_set__value__endswith'] = val[1:]
else:
r[prefix + 'field_set__value'] = val
rs = rs.filter(**r)
if not q.selector:
return __to_dict(rs, use_computed_fields=q.compute, return_sensitive_data=return_sensitive_data)
else:
return __to_dict(rs, q.selector, return_sensitive_data=return_sensitive_data)
def __to_dict(rs, selector=None, optim=True, use_computed_fields=False, return_sensitive_data=False):
'''Navigations are done entirely in memory to avoid hitting too much the database'''
res = []
## All data
if not selector:
rs = rs.prefetch_related(Prefetch('field_set', queryset=ComponentInstanceField.objects.select_related('field')))
rs = rs.prefetch_related(
Prefetch('rel_target_set', queryset=ComponentInstanceRelation.objects.select_related('field')))
rs = rs.select_related('description')
rs = rs.prefetch_related('environments')
for ci in rs.all():
compo = {}
res.append(compo)
compo['mage_id'] = ci.id
compo['mage_cic_id'] = ci.instanciates_id
compo['mage_deleted'] = ci.deleted
compo['mage_description_id'] = ci.description_id
compo['mage_description_name'] = ci.description.name
compo['mage_environments'] = ','.join([e.name for e in ci.environments.all()])
for fi in ci.field_set.all():
if not return_sensitive_data and fi.field.sensitive:
continue
compo[fi.field.name] = fi.value
for fi in ci.rel_target_set.all():
if not return_sensitive_data and fi.field.sensitive:
continue
key = fi.field.name + '_id'
if key in compo:
compo[key] = '%s,%s' % (compo[key], fi.target_id)
else:
compo[key] = fi.target_id
if use_computed_fields:
for cf in ci.description.computed_field_set.all():
compo[cf.name] = cf.resolve(ci)
return res
else:
if optim:
## Preload two levels of data
rs = rs.prefetch_related(
Prefetch('field_set', queryset=ComponentInstanceField.objects.select_related('field')))
rs = rs.prefetch_related(Prefetch('rel_target_set',
queryset=ComponentInstanceRelation.objects.select_related('field',
'target')))
rs = rs.prefetch_related(Prefetch('rel_target_set__target__field_set',
queryset=ComponentInstanceField.objects.select_related('field')))
## Fetch!
for ci in rs.all():
compo = {}
res.append(compo)
for navigation in selector:
tmp = ci
for idn in navigation:
if navigation.asList().index(idn) == len(navigation) - 1:
# the end of the line is always a value field
key = '_'.join(navigation.asList())
found = False
for fi in tmp.field_set.all():
if fi.field.name == idn:
found = True
compo[key] = fi.value
if not return_sensitive_data and fi.field.sensitive:
raise Exception('logged-in user has no access to field %s' % idn)
if not found:
## Special field?
if idn == 'mage_id':
compo[key] = ci.pk
elif idn == 'mage_cic_id':
compo[key] = ci.instanciates_id
elif idn == 'mage_deleted':
compo[key] = ci.deleted
elif idn == 'mage_description_id':
compo[key] = ci.description_id
elif idn == 'mage_description_name':
compo[key] = ci.description.name
elif idn == 'mage_environments':
compo[key] = ','.join([e.name for e in ci.environments.all()])
else:
compo[key] = None
else:
# navigation
found = False
for rel in tmp.rel_target_set.all():
if rel.field.name == idn:
tmp = rel.target
found = True
if not return_sensitive_data and rel.field.sensitive:
raise Exception('logged-in user has no access to field %s' % idn)
break
if not found:
raise Exception("'%s' is not a valid relationship attribute" % idn)
return res
| marcanpilami/MAGE | ref/mql.py | Python | apache-2.0 | 10,438 |
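Putting the grammar together, a query exercising the selector, a pre-filter and a WHERE predicate could look like this — running it needs a configured MAGE Django environment, and 'PRD'/'server' are placeholder values:

# Placeholder environment and type names; requires a configured MAGE
# Django project for run() to hit the database.
query = "SELECT name, mage_id FROM environment 'PRD' INSTANCES WHERE mage_type = 'server'"
for row in run(query):
    print(row)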
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
BPMN diagram for FOMDES process 2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from bpmn_pgv import *
import pygraphviz as pgv
__author__ = 'mapologo'
PROCESS_LABEL = "Recepción y Evaluación de Solicitudes 2"
# A graph for FOMDES processes
F = pgv.AGraph(strict=False, directed=True)
F.graph_attr.update(label="", rankdir="TB", splines="ortho", labelloc="b",
size="8, 7.5", forcelabels="true", ranksep="0.25", fontname="Liberation Sans Narrow Condensed")
F.node_attr.update(fontname="Liberation Sans Narrow Condensed")
F.edge_attr.update(fontname="Liberation Sans Narrow Condensed", fontsize="10")
ae_cluster = {"ae0": ("Recibir Expediente", "start"),
"ae1": ("Planificar las rutas", "human"),
"ae2": ("Realizar la Inspección", "human"),
"ae3": ("¿Requiere Garantía?", "exclusive"),
"ae4": ("Realizar avalúo de la garantía", "human"),
"ae5": ("", "exclusive"),
"ae6": ("Realizar Informe Técnico", "human"),
"ae7": ("Enviar Expediente al Gerente de Crédito", "message"),
"ae8": ("", "end")}
ae_edges = {"ae0": {"ae1": {}},
"ae1": {"ae2": {}},
"ae2": {"ae3": {}},
"ae3": {"ae4": {"xlabel": "Si"}, "ae5": {"xlabel": "No"}},
"ae4": {"ae5": {}},
"ae5": {"ae6": {}},
"ae6": {"ae7": {}},
"ae7": {"ae8": {}}}
AE = add_cluster(F, "ae", "Análisis Económico", ae_cluster, ae_edges)
gc_cluster = {"gc0": ("Recibir Expediente", "start"),
"gc1": ("Revisión del Expediente", "human"),
"gc2": ("Dictamen del Informe Técnico", "complex"),
"gc3": ("Enviar lista de expedientes a Secretaría Ejecutiva", "message"),
"gc4": ("", "end")}
gc_edges = {"gc0": {"gc1": {}},
"gc1": {"gc2": {}},
"gc2": {"gc3": {"xlabel": "para consideración"}},
"gc3": {"gc4": {}}}
GC = add_cluster(F, "gc", "Gerencia de Crédito", gc_cluster, gc_edges)
F.add_node("SIGEFOMDES Crédito", image=IMAGE_PATH + "database.png", shape="plaintext", label="", xlabel="SIGEFOMDES Crédito")
global_edges = {"Ánálisis Jurídico": {"ae0": {"style": "dashed"}},
"ae7": {"gc0": {"style": "dashed"}},
"ae0": {"gc0": {"style": "invis"}},
"ae2": {"SIGEFOMDES Crédito": {"style": "dashed"}},
"ae4": {"SIGEFOMDES Crédito": {"style": "dashed"}},
"ae6": {"SIGEFOMDES Crédito": {"style": "dashed"}},
"SIGEFOMDES Crédito": {"gc1": {"style": "dashed"}},
"gc3": {"Secretaría Ejecutiva": {"style": "dashed"}}}
add_edges(F, global_edges)
F.draw("proc22.png", prog='dot')
F.write("proc22.dot")
| sani-coop/tinjaca | doc/informe1/_graphviz/fomdes_proc22.py | Python | gpl-2.0 | 2,940 |
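The add_cluster/add_edges helpers come from the local bpmn_pgv module (pulled in via the star import). For orientation, a plausible minimal implementation consistent with the call sites above — an assumption, not the module's actual code:

import pygraphviz as pgv

IMAGE_PATH = "images/"  # assumed; referenced by the database node above

def add_edges(graph, edges):
    # edges maps source id -> {target id: edge attribute dict}.
    for source, targets in edges.items():
        for target, attrs in targets.items():
            graph.add_edge(source, target, **attrs)

def add_cluster(graph, prefix, label, cluster, edges):
    # cluster maps node id -> (label, shape keyword); the mapping of shape
    # keywords (start/human/exclusive/...) to node styles is omitted here.
    sub = graph.add_subgraph(name="cluster_" + prefix, label=label)
    for node_id, (node_label, kind) in cluster.items():
        sub.add_node(node_id, label=node_label)
    add_edges(sub, edges)
    return sub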